Compare commits: fork...fix-compre
1 commit: c000e67769
507 changed files with 34902 additions and 23190 deletions

@@ -1,5 +0,0 @@
-[gerrit]
-host=cl.tvl.fyi
-port=29418
-project=depot
-defaultbranch=canon

.mailmap (1 changed line)
@@ -1,2 +1 @@
 Alyssa Ross <hi@alyssa.is>
-Aspen Smith <root@gws.fyi> <aspen@gws.fyi> <grfn@gws.fyi>
@@ -15,9 +15,10 @@ partially see this as [an experiment][] in tooling for monorepos.
 
 ## Services
 
-* Source code can be viewed primarily via `cgit-pink` on
-  [code.tvl.fyi](https://code.tvl.fyi), with code search being available through
-  Livegrep on [grep.tvl.fyi](https://grep.tvl.fyi).
+* Source code is available primarily through Sourcegraph on
+  [cs.tvl.fyi](https://cs.tvl.fyi), where it is searchable and even semantically
+  indexed. A lower-tech view of the repository is also available via cgit-pink
+  on [code.tvl.fyi](https://code.tvl.fyi).
 
 The repository can be cloned using `git` from `https://cl.tvl.fyi/depot`.

corp/rih/backend/Cargo.lock (generated; 633 changed lines)
File diff suppressed because it is too large

corp/rih/frontend/Cargo.lock (generated; 21 changed lines)
@@ -1500,20 +1500,19 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
 dependencies = [
  "cfg-if",
- "once_cell",
  "wasm-bindgen-macro",
 ]
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
 dependencies = [
  "bumpalo",
  "log",
@@ -1538,9 +1537,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -1548,9 +1547,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -1561,9 +1560,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
 
 [[package]]
 name = "web-sys"
@@ -19,7 +19,7 @@ yew-router = "0.17"
 wasm-bindgen-futures = "0.4"
 
 # needs to be in sync with nixpkgs
-wasm-bindgen = "= 0.2.93"
+wasm-bindgen = "= 0.2.92"
 uuid = { version = "1.3.3", features = ["v4", "serde"] }
 
 [dependencies.serde]

corp/russian/predlozhnik/Cargo.lock (generated; 21 changed lines)
@@ -363,20 +363,19 @@ checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
 
 [[package]]
 name = "wasm-bindgen"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5"
+checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8"
 dependencies = [
  "cfg-if",
- "once_cell",
  "wasm-bindgen-macro",
 ]
 
 [[package]]
 name = "wasm-bindgen-backend"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b"
+checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da"
 dependencies = [
  "bumpalo",
  "log",
@@ -401,9 +400,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf"
+checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726"
 dependencies = [
  "quote",
  "wasm-bindgen-macro-support",
@@ -411,9 +410,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-macro-support"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836"
+checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -424,9 +423,9 @@ dependencies = [
 
 [[package]]
 name = "wasm-bindgen-shared"
-version = "0.2.93"
+version = "0.2.92"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484"
+checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96"
 
 [[package]]
 name = "web-sys"
@@ -9,4 +9,4 @@ lazy_static = "1.4"
 yew = "0.19"
 
 # needs to be in sync with nixpkgs
-wasm-bindgen = "= 0.2.93"
+wasm-bindgen = "= 0.2.92"

@@ -1,91 +0,0 @@
-Importing projects into depot
-=============================
-
-Before importing an existing `git`-based project into depot, a few questions
-need to be answered:
-
-* Is the project licensed under a free software license, or public domain?
-* Do you need to import existing history?
-* Do you need to export new history with hashes that continue on from the old
-  history? (e.g. importing an existing repository, and exporting from depot to
-  the old upstream)
-
-Think about this and then pick an approach below:
-
-## Import with no history (just commit)
-
-Simply copy the files to where you want them to be in depot, and commit. Nothing
-else to do!
-
-## Import without continuous history (subtree merge)
-
-This import approach lets you drop an existing project into depot, keep its
-existing history, but not retain the ability to continue external history.
-
-This means that if you, for example, import a project from a different git host
-using this method, and then continue to commit to it inside of depot, you will
-not be able to export a history consistent with your previous hashes using
-`josh`.
-
-Commit hashes before the import will exist in depot and be valid.
-
-Still, this approach might be viable if a project "moves into" depot, or has
-nothing depending on it externally.
-
-1. Pick a location in depot where you want your project to be (`$loc` from now on).
-2. Fetch your project into the same git store as your depot clone (e.g. by
-   adding it as an upstream and fetching it).
-3. Pick the commit you want to merge (`$commit` from now on).
-4. Run `git subtree add --prefix=$loc $commit`, which will create the correct
-   merge commit.
-5. Ensure Gerrit [knows about your commit](#preparing-merges-in-gerrit) for the
-   parent that is being merged.
-6. Modify the merge commit's message to start with `subtree($project_name):`.
-   Gerrit **will not** allow merge commits that do not follow this format.
-7. Push your subtree commit for review as normal.
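
Taken together, the subtree steps above compress into a short command sequence. The following is an illustrative sketch only: `$loc`, `$commit` and `$project_name` are the placeholders defined in the steps, the upstream URL and the `upstream` remote name are assumptions, and the final command uses the usual depot Gerrit review push:

    git remote add upstream https://example.com/project.git   # assumed upstream
    git fetch upstream
    git subtree add --prefix=$loc $commit
    # reword the merge commit so Gerrit accepts it
    git commit --amend -m "subtree($project_name): import from upstream"
    git push origin HEAD:refs/for/canon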

-## Import with continuous history
-
-This approach imports the history using `josh`, which means that external
-history before/after the import is consistent (you can continue committing in
-`depot`, export the history back out, and from an external perspective nothing
-changes).
-
-This is what we did with repositories like `nix-1p` and `nixery`.
-
-Note: Inside of depot, the pre-import commit hashes will **not make sense**.
-`josh` will rewrite them in such a way that exporting the project will yield the
-same hashes, but this rewriting changes the hashes of your commits inside of
-depot.
-
-1. Pick a location in depot where you want your project to be (`$loc`).
-2. Fetch your project into the same git store as your depot clone (e.g. by
-   adding it as an upstream and fetching it).
-3. Check out the commit you want to merge into depot.
-4. Run `josh-filter ":prefix=$loc"`, and take note of the `FILTERED_HEAD` ref
-   that it produces (`$filtered` from now on).
-5. Ensure Gerrit [knows about the filtered commit](#preparing-merges-in-gerrit).
-6. Merge the filtered commit into depot using a standard merge, but make sure to
-   add the `--allow-unrelated-histories` flag. Your commit message **must**
-   start with `subtree($project_name):`, otherwise Gerrit will not let you push
-   a merge.
-7. Push the merge commit for review as usual.
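
As with the subtree variant, the josh-based steps sketch out roughly as follows, under the same assumptions (`$loc`, `$commit`, `$project_name` and `$filtered` are the placeholders from the steps above):

    git fetch upstream
    git checkout $commit
    josh-filter ":prefix=$loc"    # produces the FILTERED_HEAD ref, i.e. $filtered
    git checkout canon
    git merge --allow-unrelated-histories \
        -m "subtree($project_name): import with continuous history" FILTERED_HEAD
    git push origin HEAD:refs/for/canon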

-------------------------------------------------------
-
-## Preparing merges in Gerrit
-
-When pushing a merge to Gerrit, it needs to know about all ancestors of the
-merge, otherwise it will try to interpret commits as new CLs and reject them for
-not having a change ID (or create a huge number of CLs, if they do have one).
-
-To prevent this, we have a special git ref called `subtree-staging` which you
-can push external trees to.
-
-Access to `subtree-staging` has to be granted by a TVL admin, so ping tazjin,
-lukegb, flokli, sterni and so on before proceeding.
-
-1. Determine the commit you want to merge (`$commit`).
-2. Run `git push -f $commit origin/subtree-staging` (or replace `origin` with
-   whatever the TVL Gerrit remote is called in your clone).

@@ -1,7 +1,6 @@
 { makeSetupHook }:
 
-makeSetupHook
-{
+makeSetupHook {
   name = "rules_java_bazel_hook";
   substitutions = {
     local_java = ./local_java;

@@ -37,9 +37,7 @@ let
     cp -R . $out
   '';
 };
-in
-makeSetupHook
-{
+in makeSetupHook {
   name = "bazelbuild-rules_nodejs-5-hook";
   propagatedBuildInputs = [
     nodejs

@@ -16,16 +16,12 @@
 
 let
   cleanAttrs = lib.flip removeAttrs [
-    "bazelTargets"
-    "depsHash"
-    "extraCacheInstall"
-    "extraBuildSetup"
-    "extraBuildInstall"
+    "bazelTargets" "depsHash" "extraCacheInstall" "extraBuildSetup" "extraBuildInstall"
   ];
   attrs = cleanAttrs baseAttrs;
 
   base = stdenv.mkDerivation (attrs // {
-    nativeBuildInputs = (attrs.nativeBuildInputs or [ ]) ++ [
+    nativeBuildInputs = (attrs.nativeBuildInputs or []) ++ [
      bazel
    ];
 

@@ -73,7 +69,7 @@ let
 
   inherit cache;
 
-  nativeBuildInputs = (base.nativeBuildInputs or [ ]) ++ [
+  nativeBuildInputs = (base.nativeBuildInputs or []) ++ [
     coreutils
   ];
 

@@ -106,5 +102,4 @@ let
     runHook postInstall
   '';
 });
-in
-build
+in build

@@ -23,6 +23,7 @@ let
   { name
   , dependencies ? [ ]
   , doCheck ? true
+  ,
   }: src:
   (if doCheck then testRustSimple else pkgs.lib.id)
   (pkgs.buildRustCrate ({

@@ -1,5 +1,5 @@
 # This program is used as a Gerrit hook to trigger builds on
-# Buildkite and perform other maintenance tasks.
+# Buildkite, Sourcegraph reindexing and other maintenance tasks.
 { depot, ... }:
 
 depot.nix.buildGo.program {

@@ -8,6 +8,7 @@
 //
 // Gerrit (ref-updated) hook:
 // - Trigger Buildkite CI builds
+// - Trigger SourceGraph repository index updates
 //
 // Buildkite (post-command) hook:
 // - Submit CL verification status back to Gerrit

@@ -54,6 +55,10 @@ type config struct {
 	BuildkiteProject string `json:"buildkiteProject"`
 	BuildkiteToken   string `json:"buildkiteToken"`
 	GerritChangeName string `json:"gerritChangeName"`
+
+	// Optional configuration for Sourcegraph trigger updates.
+	SourcegraphUrl   string `json:"sourcegraphUrl"`
+	SourcegraphToken string `json:"sourcegraphToken"`
 }
 
 // buildTrigger represents the information passed to besadii when it

@@ -149,6 +154,11 @@ func loadConfig() (*config, error) {
 		return nil, fmt.Errorf("invalid 'gerritChangeName': %s", cfg.GerritChangeName)
 	}
+
+	// Rudimentary config validation logic
+	if cfg.SourcegraphUrl != "" && cfg.SourcegraphToken == "" {
+		return nil, fmt.Errorf("'SourcegraphToken' must be set if 'SourcegraphUrl' is set")
+	}
 
 	if cfg.Repository == "" || cfg.Branch == "" {
 		return nil, fmt.Errorf("missing repository configuration (required: repository, branch)")
 	}

@@ -289,6 +299,26 @@ func triggerBuild(cfg *config, log *syslog.Writer, trigger *buildTrigger) error
 	return nil
 }
 
+// Trigger a Sourcegraph repository index update.
+//
+// https://docs.sourcegraph.com/admin/repo/webhooks
+func triggerIndexUpdate(cfg *config, log *syslog.Writer) error {
+	req, err := http.NewRequest("POST", cfg.SourcegraphUrl, nil)
+	if err != nil {
+		return err
+	}
+
+	req.Header.Add("Authorization", "token "+cfg.SourcegraphToken)
+
+	_, err = http.DefaultClient.Do(req)
+	if err != nil {
+		return fmt.Errorf("failed to trigger Sourcegraph index update: %w", err)
+	}
+
+	log.Info("triggered sourcegraph index update")
+	return nil
+}
+
 // Gerrit passes more flags than we want, but Rob Pike decided[0] in
 // 2013 that the Go art project will not allow users to ignore flags
 // because he "doesn't like it". This function allows users to ignore
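
The triggerIndexUpdate function added above amounts to a single authenticated POST against the configured webhook URL. Assuming sourcegraphUrl and sourcegraphToken are set in the besadii configuration, the same update could be triggered by hand along these lines (a sketch; the environment variable names are placeholders):

    curl -X POST -H "Authorization: token $SOURCEGRAPH_TOKEN" "$SOURCEGRAPH_URL"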

@@ -428,6 +458,13 @@ func gerritHookMain(cfg *config, log *syslog.Writer, trigger *buildTrigger) {
 	if err != nil {
 		log.Err(fmt.Sprintf("failed to trigger Buildkite build: %s", err))
 	}
+
+	if cfg.SourcegraphUrl != "" && trigger.ref == cfg.Branch {
+		err = triggerIndexUpdate(cfg, log)
+		if err != nil {
+			log.Err(fmt.Sprintf("failed to trigger sourcegraph index update: %s", err))
+		}
+	}
 }
 
 func postCommandMain(cfg *config) {

@@ -8,9 +8,7 @@ terraform {
   }
 
   backend "s3" {
-    endpoints = {
-      s3 = "https://objects.dc-sto1.glesys.net"
-    }
+    endpoint = "https://objects.dc-sto1.glesys.net"
     bucket = "tvl-state"
     key    = "terraform/tvl-buildkite"
     region = "glesys"

@@ -18,8 +16,6 @@ terraform {
     skip_credentials_validation = true
     skip_region_validation      = true
     skip_metadata_api_check     = true
-    skip_requesting_account_id  = true
-    skip_s3_checksum            = true
   }
 }
 

ops/journaldriver/Cargo.lock (generated; 274 changed lines)
@@ -4,18 +4,18 @@ version = 3
 
 [[package]]
 name = "aho-corasick"
-version = "1.1.3"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0"
 dependencies = [
  "memchr",
 ]
 
 [[package]]
 name = "anyhow"
-version = "1.0.86"
+version = "1.0.75"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
+checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6"
 
 [[package]]
 name = "base64"
@@ -25,9 +25,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
 
 [[package]]
 name = "bitflags"
-version = "2.6.0"
+version = "2.4.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
+checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
 
 [[package]]
 name = "build-env"
@@ -37,11 +37,11 @@ checksum = "e068f31938f954b695423ecaf756179597627d0828c0d3e48c0a722a8b23cf9e"
 
 [[package]]
 name = "cc"
-version = "1.1.15"
+version = "1.0.84"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6"
+checksum = "0f8e7c90afad890484a21653d08b6e209ae34770fb5ee298f9c699fcc1e5c856"
 dependencies = [
- "shlex",
+ "libc",
 ]
 
 [[package]]
@@ -73,9 +73,9 @@ dependencies = [
 
 [[package]]
 name = "curl"
-version = "0.4.46"
+version = "0.4.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6"
+checksum = "509bd11746c7ac09ebd19f0b17782eae80aadee26237658a6b4808afb5c11a22"
 dependencies = [
  "curl-sys",
  "libc",
@@ -83,14 +83,14 @@ dependencies = [
  "openssl-sys",
  "schannel",
  "socket2",
- "windows-sys 0.52.0",
+ "winapi",
 ]
 
 [[package]]
 name = "curl-sys"
-version = "0.4.74+curl-8.9.0"
+version = "0.4.68+curl-8.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8af10b986114528fcdc4b63b6f5f021b7057618411046a4de2ba0f0149a097bf"
+checksum = "b4a0d18d88360e374b16b2273c832b5e57258ffc1d4aa4f96b108e0738d5752f"
 dependencies = [
  "cc",
  "libc",
@@ -98,14 +98,14 @@ dependencies = [
  "openssl-sys",
  "pkg-config",
  "vcpkg",
- "windows-sys 0.52.0",
+ "windows-sys",
 ]
 
 [[package]]
 name = "deranged"
-version = "0.3.11"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
+checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
 dependencies = [
  "powerfmt",
  "serde",
@@ -113,9 +113,9 @@ dependencies = [
 
 [[package]]
 name = "env_logger"
-version = "0.10.2"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
+checksum = "95b3f3e67048839cb0d0781f445682a35113da7121f7c949db0e2be96a4fbece"
 dependencies = [
  "humantime",
  "is-terminal",
@@ -124,6 +124,16 @@ dependencies = [
  "termcolor",
 ]
 
+[[package]]
+name = "errno"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e"
+dependencies = [
+ "libc",
+ "windows-sys",
+]
+
 [[package]]
 name = "foreign-types"
 version = "0.3.2"
@@ -168,9 +178,9 @@ checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"
 
 [[package]]
 name = "hermit-abi"
-version = "0.4.0"
+version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
+checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7"
 
 [[package]]
 name = "humantime"
@@ -180,20 +190,20 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
 
 [[package]]
 name = "is-terminal"
-version = "0.4.13"
+version = "0.4.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
+checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
 dependencies = [
  "hermit-abi",
- "libc",
- "windows-sys 0.52.0",
+ "rustix",
+ "windows-sys",
 ]
 
 [[package]]
 name = "itoa"
-version = "1.0.11"
+version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
+checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
 
 [[package]]
 name = "journaldriver"
@@ -214,15 +224,15 @@ dependencies = [
 
 [[package]]
 name = "lazy_static"
-version = "1.5.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
+checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
 
 [[package]]
 name = "libc"
-version = "0.2.158"
+version = "0.2.150"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
+checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c"
 
 [[package]]
 name = "libsystemd-sys"
@@ -237,9 +247,9 @@ dependencies = [
 
 [[package]]
 name = "libz-sys"
-version = "1.1.20"
+version = "1.1.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472"
+checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b"
 dependencies = [
  "cc",
  "libc",
@@ -248,10 +258,16 @@ dependencies = [
 ]
 
 [[package]]
-name = "log"
-version = "0.4.22"
+name = "linux-raw-sys"
+version = "0.4.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
+checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829"
+
+[[package]]
+name = "log"
+version = "0.4.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f"
 
 [[package]]
 name = "medallion"
@ -269,27 +285,21 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "memchr"
|
||||
version = "2.7.4"
|
||||
version = "2.6.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
|
||||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
|
||||
checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
|
||||
|
||||
[[package]]
|
||||
name = "once_cell"
|
||||
version = "1.19.0"
|
||||
version = "1.18.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
|
||||
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
|
||||
|
||||
[[package]]
|
||||
name = "openssl"
|
||||
version = "0.10.66"
|
||||
version = "0.10.59"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1"
|
||||
checksum = "7a257ad03cd8fb16ad4172fedf8094451e1af1c4b70097636ef2eac9a5f0cc33"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"cfg-if",
|
||||
@@ -319,9 +329,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
 
 [[package]]
 name = "openssl-sys"
-version = "0.9.103"
+version = "0.9.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6"
+checksum = "40a4130519a360279579c2053038317e40eff64d13fd3f004f9e1b72b8a6aaf9"
 dependencies = [
 "cc",
 "libc",
@@ -331,9 +341,9 @@ dependencies = [
 
 [[package]]
 name = "pkg-config"
-version = "0.3.30"
+version = "0.3.27"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
+checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964"
 
 [[package]]
 name = "powerfmt"
@@ -343,27 +353,27 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
 
 [[package]]
 name = "proc-macro2"
-version = "1.0.86"
+version = "1.0.69"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
+checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
 dependencies = [
  "unicode-ident",
 ]
 
 [[package]]
 name = "quote"
-version = "1.0.37"
+version = "1.0.33"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
+checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
 dependencies = [
  "proc-macro2",
 ]
 
 [[package]]
 name = "regex"
-version = "1.10.6"
+version = "1.10.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
+checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -373,9 +383,9 @@ dependencies = [
 
 [[package]]
 name = "regex-automata"
-version = "0.4.7"
+version = "0.4.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
+checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f"
 dependencies = [
 "aho-corasick",
 "memchr",
@@ -384,39 +394,52 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.8.4"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
+checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f"
+
+[[package]]
+name = "rustix"
+version = "0.38.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b426b0506e5d50a7d8dafcf2e81471400deb602392c7dd110815afb4eaf02a3"
+dependencies = [
+ "bitflags",
+ "errno",
+ "libc",
+ "linux-raw-sys",
+ "windows-sys",
+]
 
 [[package]]
 name = "ryu"
-version = "1.0.18"
+version = "1.0.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
+checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
 
 [[package]]
 name = "schannel"
-version = "0.1.23"
+version = "0.1.22"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534"
+checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
 dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys",
 ]
 
 [[package]]
 name = "serde"
-version = "1.0.209"
+version = "1.0.192"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
+checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.209"
+version = "1.0.192"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
+checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -425,37 +448,30 @@ dependencies = [
 
 [[package]]
 name = "serde_json"
-version = "1.0.127"
+version = "1.0.108"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
+checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b"
 dependencies = [
  "itoa",
- "memchr",
  "ryu",
  "serde",
 ]
 
-[[package]]
-name = "shlex"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
-
 [[package]]
 name = "socket2"
-version = "0.5.7"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
+checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d"
 dependencies = [
  "libc",
- "windows-sys 0.52.0",
+ "winapi",
 ]
 
 [[package]]
 name = "syn"
-version = "2.0.77"
+version = "2.0.39"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
+checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
 dependencies = [
 "proc-macro2",
 "quote",
@@ -478,22 +494,21 @@ dependencies = [
 
 [[package]]
 name = "termcolor"
-version = "1.4.1"
+version = "1.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
+checksum = "6093bad37da69aab9d123a8091e4be0aa4a03e4d601ec641c327398315f62b64"
 dependencies = [
  "winapi-util",
 ]
 
 [[package]]
 name = "time"
-version = "0.3.36"
+version = "0.3.30"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
+checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
 dependencies = [
  "deranged",
  "itoa",
- "num-conv",
  "powerfmt",
  "serde",
  "time-core",
@@ -508,11 +523,10 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
 
 [[package]]
 name = "time-macros"
-version = "0.2.18"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
+checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20"
 dependencies = [
- "num-conv",
  "time-core",
 ]
 
@@ -534,43 +548,55 @@ version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
 
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
 [[package]]
 name = "winapi-util"
-version = "0.1.9"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
+checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596"
 dependencies = [
- "windows-sys 0.59.0",
+ "winapi",
 ]
 
 [[package]]
-name = "windows-sys"
-version = "0.52.0"
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
-dependencies = [
- "windows-targets",
-]
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
 
 [[package]]
 name = "windows-sys"
-version = "0.59.0"
+version = "0.48.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
+checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
 dependencies = [
  "windows-targets",
 ]
 
 [[package]]
 name = "windows-targets"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
+checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
 dependencies = [
  "windows_aarch64_gnullvm",
  "windows_aarch64_msvc",
  "windows_i686_gnu",
- "windows_i686_gnullvm",
  "windows_i686_msvc",
  "windows_x86_64_gnu",
  "windows_x86_64_gnullvm",
@@ -579,48 +605,42 @@ dependencies = [
 
 [[package]]
 name = "windows_aarch64_gnullvm"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
+checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
 
 [[package]]
 name = "windows_aarch64_msvc"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
+checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
 
 [[package]]
 name = "windows_i686_gnu"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
-
-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.52.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
+checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
 
 [[package]]
 name = "windows_i686_msvc"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
+checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
 
 [[package]]
 name = "windows_x86_64_gnu"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
+checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
 
 [[package]]
 name = "windows_x86_64_gnullvm"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
+checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
 
 [[package]]
 name = "windows_x86_64_msvc"
-version = "0.52.6"
+version = "0.48.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"
+checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"

@@ -10,29 +10,20 @@ terraform {
   }
 
   backend "s3" {
-    endpoints = {
-      s3 = "https://objects.dc-sto1.glesys.net"
-    }
-    bucket = "tvl-state"
-    key    = "terraform/tvl-keycloak"
-    region = "glesys"
+    endpoint = "https://objects.dc-sto1.glesys.net"
+    bucket   = "tvl-state"
+    key      = "terraform/tvl-keycloak"
+    region   = "glesys"
 
     skip_credentials_validation = true
     skip_region_validation      = true
     skip_metadata_api_check     = true
-    skip_requesting_account_id  = true
-    skip_s3_checksum            = true
   }
 }
 
 provider "keycloak" {
   client_id = "terraform"
   url       = "https://auth.tvl.fyi"
-  # NOTE: Docs mention this applies to "users of the legacy distribution of keycloak".
-  # However, we get a "failed to perform initial login to Keycloak: error
-  # sending POST request to https://auth.tvl.fyi/realms/master/protocol/openid-connect/token: 404 Not Found"
-  # if we don't set this.
-  base_path = "/auth"
 }
 
 resource "keycloak_realm" "tvl" {

@@ -22,13 +22,6 @@ resource "keycloak_ldap_user_federation" "tvl_ldap" {
     "inetOrgPerson",
     "organizationalPerson",
   ]
-
-  lifecycle {
-    # Without this, terraform wants to recreate the resource.
-    ignore_changes = [
-      delete_default_mappers
-    ]
-  }
 }
 
 # keycloak_oidc_identity_provider.github will be destroyed

@@ -36,7 +29,7 @@ resource "keycloak_ldap_user_federation" "tvl_ldap" {
 resource "keycloak_oidc_identity_provider" "github" {
   alias         = "github"
   provider_id   = "github"
-  client_id     = "Iv23liXfGNIr7InMg5Uo"
+  client_id     = "6d7f8bb2e82bb6739556"
   client_secret = var.github_client_secret
   realm         = keycloak_realm.tvl.id
   backchannel_supported = false

@@ -29,29 +29,13 @@ func Merge(in1 *map[string]interface{}, in2 *map[string]interface{}) *map[string
 		return in1
 	}
 
-	// The maps are map[string]interface{} with unknown depth.
-	// Loop over both maps into every level and merge them.
 	new := make(map[string]interface{})
 
 	for k, v := range *in1 {
 		new[k] = v
 	}
 
 	for k, v := range *in2 {
-		if existing, ok := new[k]; ok {
-			// If both values are maps, merge them recursively
-			if existingMap, ok := existing.(map[string]interface{}); ok {
-				if newMap, ok := v.(map[string]interface{}); ok {
-					new[k] = *Merge(&existingMap, &newMap)
-				} else {
-					new[k] = v
-				}
-			} else {
-				new[k] = v
-			}
-		} else {
-			new[k] = v
-		}
+		new[k] = v
 	}
 
 	return &new

@@ -47,9 +47,6 @@ func TestMergeWithNilMap(t *testing.T) {
 func TestMergeMaps(t *testing.T) {
 	map1 := map[string]interface{}{
 		"foo": "bar",
-		"baz": map[string]interface{}{
-			"qux": "quux",
-		},
 	}
 
 	map2 := map[string]interface{}{

@@ -59,9 +56,6 @@ func TestMergeMaps(t *testing.T) {
 	result := Merge(&map1, &map2)
 	expected := map[string]interface{}{
 		"foo": "bar",
-		"baz": map[string]interface{}{
-			"qux": "quux",
-		},
 		"bar": "baz",
 	}
 

@@ -3,8 +3,6 @@
 (with depot.ops.machines; [
   sanduny
   whitby
-  nixery-01
-  volgasprint-cache
 ]) ++
 
 (with depot.users.tazjin.nixos; [

@@ -1,153 +0,0 @@
-# temporary machine for local binary cache proxy during VolgaSprint
-
-{ depot, lib, pkgs, ... }: # readTree options
-{ config, ... }: # passed by module system
-
-let
-  mod = name: depot.path.origSrc + ("/ops/modules/" + name);
-in
-{
-  imports = [
-    (mod "tvl-users.nix")
-  ];
-
-  boot = {
-    kernelPackages = pkgs.linuxKernel.packages.linux_rpi4;
-    initrd.availableKernelModules = [ "xhci_pci" "usbhid" "usb_storage" ];
-    loader = {
-      grub.enable = false;
-      generic-extlinux-compatible.enable = true;
-    };
-  };
-
-  depot.auto-deploy = {
-    enable = true;
-    interval = "hourly";
-  };
-
-  fileSystems = {
-    "/" = {
-      device = "/dev/disk/by-label/NIXOS_SD";
-      fsType = "ext4";
-      options = [ "noatime" ];
-    };
-    "/var/public-nix-cache" = {
-      device = "/dev/sda1";
-      fsType = "ext4";
-    };
-  };
-
-  networking = {
-    firewall = {
-      enable = true;
-      allowedTCPPorts = [ 80 443 8098 ];
-    };
-
-    hostName = "volgacache";
-    domain = "volgasprint.org";
-
-    wireless = {
-      enable = true;
-      networks.VolgaSprint.psk = "nixos-unstable";
-      interfaces = [ "wlan0" ];
-    };
-
-    wg-quick.interfaces = {
-      wg0 = {
-        address = [ "10.10.10.2/24" "fd42::1/128" ];
-        dns = [ "1.1.1.1" ];
-        privateKeyFile = "/etc/wireguard_private_key";
-
-        peers = [
-          {
-            publicKey = "2MZzEGJzA3HrwkHf91TaKJEHwCNyVvsTLWoIYHrCxhY=";
-            presharedKeyFile = "/etc/wireguard_preshared_key";
-            allowedIPs = [ "0.0.0.0/0" "::/0" ];
-            endpoint = "195.201.63.240:8098";
-            persistentKeepalive = 15;
-          }
-        ];
-      };
-    };
-  };
-
-  services.openssh.enable = true;
-
-  services.nginx = {
-    enable = true;
-    recommendedGzipSettings = true;
-    recommendedOptimisation = true;
-
-    appendHttpConfig = ''
-      proxy_cache_path /tmp/pkgcache levels=1:2 keys_zone=cachecache:100m max_size=20g inactive=365d use_temp_path=off;
-
-      # Cache only success status codes; in particular we don't want to cache 404s.
-      # See https://serverfault.com/a/690258/128321
-      map $status $cache_header {
-        200 "public";
-        302 "public";
-        default "no-cache";
-      }
-      access_log /var/log/nginx/access.log;
-    '';
-
-    virtualHosts."cache.volgasprint.org" = {
-      sslCertificate = "/etc/ssl/cache.volgasprint.org/key.pem";
-      sslCertificateKey = "/etc/ssl/cache.volgasprint.org/key.pem";
-      sslTrustedCertificate = "/etc/ssl/cache.volgasprint.org/chain.pem";
-
-      locations."/" = {
-        root = "/var/public-nix-cache";
-        extraConfig = ''
-          expires max;
-          add_header Cache-Control $cache_header always;
-          # Ask the upstream server if a file isn't available locally
-          error_page 404 = @fallback;
-        '';
-      };
-
-      extraConfig = ''
-        # Using a variable for the upstream endpoint to ensure that it is
-        # resolved at runtime as opposed to once when the config file is loaded
-        # and then cached forever (we don't want that):
-        # see https://tenzer.dk/nginx-with-dynamic-upstreams/
-        # This fixes errors like
-        # nginx: [emerg] host not found in upstream "upstream.example.com"
-        # when the upstream host is not reachable for a short time when
-        # nginx is started.
-        resolver 80.67.169.12; # fdn dns
-        set $upstream_endpoint http://cache.nixos.org;
-      '';
-
-      locations."@fallback" = {
-        proxyPass = "$upstream_endpoint";
-        extraConfig = ''
-          proxy_cache cachecache;
-          proxy_cache_valid 200 302 60d;
-          expires max;
-          add_header Cache-Control $cache_header always;
-        '';
-      };
-
-      # We always want to copy cache.nixos.org's nix-cache-info file,
-      # and ignore our own, because `nix-push` by default generates one
-      # without `Priority` field, and thus that file by default has priority
-      # 50 (compared to cache.nixos.org's `Priority: 40`), which will make
-      # download clients prefer `cache.nixos.org` over our binary cache.
-      locations."= /nix-cache-info" = {
-        # Note: This is duplicated with the `@fallback` above,
-        # would be nicer if we could redirect to the @fallback instead.
-        proxyPass = "$upstream_endpoint";
-        extraConfig = ''
-          proxy_cache cachecache;
-          proxy_cache_valid 200 302 60d;
-          expires max;
-          add_header Cache-Control $cache_header always;
-        '';
-      };
-    };
-  };
-
-  hardware.enableRedistributableFirmware = true;
-  system.stateVersion = "23.11";
-}
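
The Priority mechanism referenced in the nix-cache-info comments above works because Nix prefers substituters advertising lower Priority values. The upstream file that this config deliberately proxies through verbatim is the one served by cache.nixos.org, which (per the comment above, Priority 40) looks like:

    StoreDir: /nix/store
    WantMassQuery: 1
    Priority: 40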

@@ -11,7 +11,6 @@ in
   imports = [
     (mod "atward.nix")
     (mod "cgit.nix")
-    (mod "cheddar.nix")
     (mod "clbot.nix")
     (mod "gerrit-autosubmit.nix")
     (mod "irccat.nix")

@@ -25,6 +24,7 @@ in
     (mod "paroxysm.nix")
     (mod "restic.nix")
     (mod "smtprelay.nix")
+    (mod "sourcegraph.nix")
     (mod "teleirc.nix")
     (mod "tvl-buildkite.nix")
     (mod "tvl-slapd/default.nix")

@@ -306,9 +306,6 @@ in
     agentCount = 32;
   };
 
-  # Run Markdown/code renderer
-  services.depot.cheddar.enable = true;
-
   # Start a local SMTP relay to Gmail (used by gerrit)
   services.depot.smtprelay = {
     enable = true;

@@ -377,6 +374,9 @@ in
   };
 
   services.depot = {
+    # Run a SourceGraph code search instance
+    sourcegraph.enable = true;
+
     # Run a livegrep code search instance
     livegrep.enable = true;
 

@@ -1,29 +0,0 @@
-{ depot, config, pkgs, lib, ... }:
-
-let
-  cfg = config.services.depot.cheddar;
-  description = "cheddar - markdown/highlighting server";
-in
-{
-  options.services.depot.cheddar = with lib; {
-    enable = mkEnableOption description;
-    port = mkOption {
-      description = "Port on which cheddar should listen";
-      type = types.int;
-      default = 4238;
-    };
-  };
-
-  config = lib.mkIf cfg.enable {
-    systemd.services.cheddar-server = {
-      inherit description;
-      wantedBy = [ "multi-user.target" ];
-      script = "${depot.tools.cheddar}/bin/cheddar --listen 0.0.0.0:${toString cfg.port} --sourcegraph-server";
-
-      serviceConfig = {
-        DynamicUser = true;
-        Restart = "always";
-      };
-    };
-  };
-}

ops/modules/sourcegraph.nix (new file; 60 lines)
@@ -0,0 +1,60 @@
+# Run sourcegraph, including its entire machinery, in a container.
+# Running it outside of a container is a futile endeavour for now.
+{ depot, config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.depot.sourcegraph;
+in
+{
+  options.services.depot.sourcegraph = with lib; {
+    enable = mkEnableOption "SourceGraph code search engine";
+
+    port = mkOption {
+      description = "Port on which SourceGraph should listen";
+      type = types.int;
+      default = 3463;
+    };
+
+    cheddarPort = mkOption {
+      description = "Port on which cheddar should listen";
+      type = types.int;
+      default = 4238;
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # Run a cheddar syntax highlighting server
+    systemd.services.cheddar-server = {
+      wantedBy = [ "multi-user.target" ];
+      script = "${depot.tools.cheddar}/bin/cheddar --listen 0.0.0.0:${toString cfg.cheddarPort} --sourcegraph-server";
+
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+      };
+    };
+
+    virtualisation.oci-containers.containers.sourcegraph = {
+      image = "sourcegraph/server:3.40.0";
+
+      ports = [
+        "127.0.0.1:${toString cfg.port}:7080"
+      ];
+
+      volumes = [
+        "/var/lib/sourcegraph/etc:/etc/sourcegraph"
+        "/var/lib/sourcegraph/data:/var/opt/sourcegraph"
+      ];
+
+      # TODO(tazjin): Figure out what changed in the protocol.
+      # environment.SRC_SYNTECT_SERVER = "http://172.17.0.1:${toString cfg.cheddarPort}";
+
+      # Sourcegraph needs a higher nofile limit, it logs warnings
+      # otherwise (unclear whether it actually affects the service).
+      extraOptions = [
+        "--ulimit"
+        "nofile=10000:10000"
+      ];
+    };
+  };
+}

@@ -1,5 +1,3 @@
-# This configuration redirects from the previous Sourcegraph instance to
-# livegrep/cgit where appropriate.
 { config, ... }:
 
 {

@@ -15,50 +13,17 @@
     forceSSL = true;
 
     extraConfig = ''
-      set $lineno "";
-
-      # depot root
-      location = /depot {
-        return 301 https://code.tvl.fyi/tree/;
-      }
-
-      # folder/file on canon
-      location ~ ^/depot/-/(blob|tree)/([^\s]*)$ {
-        set $path $2;
-        if ($args ~ ^L(\d+)(-\d+)?$) {
-          set $lineno "#n$1";
-        }
-
-        return 302 https://code.tvl.fyi/tree/$path$lineno;
-      }
-
-      # folder/file on specific commit
-      location ~ ^/depot@([a-f0-9]+)/-/(blob|tree)/([^\s]*)$ {
-        set $commit $1;
-        set $path $3;
-
-        if ($args ~ ^L(\d+)(-\d+)?$) {
-          set $lineno "#n$1";
-        }
-
-        return 302 https://code.tvl.fyi/tree/$path?id=$commit$lineno;
-      }
-
-      # commit info
-      location ~ ^/depot/-/commit/([a-f0-9]+)$ {
-        set $commit $1;
-        return 302 https://code.tvl.fyi/commit/?id=$commit;
-      }
-
-      # search handler
-      # This only redirects to the new search, it doesn't try to parse and
-      # rewrite the query.
-      location /search {
-        return 302 https://grep.tvl.fyi/search;
+      location = / {
+        return 301 https://cs.tvl.fyi/depot;
       }
 
       location / {
-        return 404 "TVL code search has moved to grep.tvl.fyi and we could not figure out how to rewrite your query. Sorry!";
+        proxy_set_header X-Sg-Auth "Anonymous";
+        proxy_pass http://localhost:${toString config.services.depot.sourcegraph.port};
       }
+
+      location /users/Anonymous/settings {
+        return 301 https://cs.tvl.fyi;
+      }
     '';
   };

ops/modules/www/tazj.in.nix (new file; 54 lines)
@@ -0,0 +1,54 @@
+# serve tazjin's website & blog
+{ depot, config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ./base.nix
+  ];
+
+  config = {
+    services.nginx.virtualHosts."tazj.in" = {
+      enableACME = true;
+      forceSSL = true;
+      root = depot.users.tazjin.homepage;
+      serverAliases = [ "www.tazj.in" ];
+
+      extraConfig = ''
+        location = /en/rss.xml {
+          return 301 https://tazj.in/feed.atom;
+        }
+
+        ${depot.users.tazjin.blog.oldRedirects}
+        location /blog/ {
+          alias ${depot.users.tazjin.blog.rendered}/;
+
+          if ($request_uri ~ ^/(.*)\.html$) {
+            return 302 /$1;
+          }
+
+          try_files $uri $uri.html $uri/ =404;
+        }
+
+        location = /predlozhnik {
+          return 302 https://predlozhnik.ru;
+        }
+
+        # redirect for easier entry on a TV
+        location = /tv {
+          return 302 https://tazj.in/blobs/play.html;
+        }
+
+        # Temporary place for serving static files.
+        location /blobs/ {
+          alias /var/lib/tazjins-blobs/;
+        }
+      '';
+    };
+
+    services.nginx.virtualHosts."git.tazj.in" = {
+      enableACME = true;
+      forceSSL = true;
+      extraConfig = "return 301 https://code.tvl.fyi$request_uri;";
+    };
+  };
+}

@@ -63,6 +63,5 @@ in rec {
   whitbySystem = (nixosFor depot.ops.machines.whitby).system;
   sandunySystem = (nixosFor depot.ops.machines.sanduny).system;
   nixeryDev01System = (nixosFor depot.ops.machines.nixery-01).system;
-  volgaSprintCacheSystem = (nixosFor depot.ops.machines.volgasprint-cache).system;
   meta.ci.targets = [ "sandunySystem" "whitbySystem" "nixeryDev01System" ];
 }

@@ -88,12 +88,10 @@ steps:
     continue_on_failure: true
 
   # Exit with success or failure depending on whether any other steps
-  # failed (but not retried).
+  # failed.
   #
   # This information is checked by querying the Buildkite GraphQL API
-  # and fetching all failed steps, then filtering out the ones that were
-  # retried (retried jobs create new jobs, which would also show up in the
-  # query).
+  # and fetching the count of failed steps.
   #
   # This step must be :duck: (yes, really!) because the post-command
   # hook will inspect this name.

@@ -111,8 +109,8 @@ steps:
       readonly FAILED_JOBS=$(curl 'https://graphql.buildkite.com/v1' \
         --silent \
        -H "Authorization: Bearer $(cat ${BUILDKITE_TOKEN_PATH})" \
-        -d "{\"query\": \"query BuildStatusQuery { build(uuid: \\\"$BUILDKITE_BUILD_ID\\\") { jobs(passed: false, first: 500 ) { edges { node { ... on JobTypeCommand { retried } } } } } }\"}" | \
-        jq -r '.data.build.jobs.edges | map(select(.node.retried == false)) | length')
+        -d "{\"query\": \"query BuildStatusQuery { build(uuid: \\\"$BUILDKITE_BUILD_ID\\\") { jobs(passed: false) { count } } }\"}" | \
+        jq -r '.data.build.jobs.count')
 
       echo "$$FAILED_JOBS build jobs failed."
 
Binary file not shown.

@@ -21,11 +21,6 @@
     email = "root@gws.fyi";
     password = "{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$5NEYPJ19nDITK5sGr4bzhQ$Xzpzth6y4w+HGvioHiYgzqFiwMDx0B7HAh+PVbkRuuk";
   }
-  {
-    username = "azahi";
-    email = "azat@bahawi.net";
-    password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$BVRzgfs8YIorOTq62B00CQ$5UXHyG/Ivn5TqB7UNgfjYJMxTjun3NDvAStWFom4oas";
-  }
   {
     username = "chickadee";
     email = "matthewktromp@gmail.com";
|
@ -41,21 +36,11 @@
|
|||
email = "me@cynthia.re";
|
||||
password = "{ARGON2}$argon2id$v=19$m=65536,t=4,p=1$TxjbMGenhEmkyYLrg5uGhbr60THB86YeRZg5bPdiTJo$k9gbRlAPjmxwdUwzbavvsAVkckgQZ0jS2oTtvZBPysk";
|
||||
}
|
||||
{
|
||||
username = "domenkozar";
|
||||
email = "domen@cachix.org";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$c9WgMrTqPJZenOr5+wlnnQ$XOpRZRTkduzP2+NJBxkg2jhffurg7PDla4/RoAyclwI";
|
||||
}
|
||||
{
|
||||
username = "edef";
|
||||
email = "edef@edef.eu";
|
||||
password = "{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$OORx4ERbkgvTmuYCJA8cIw$i5qaBzHkRVw7Tl+wZsTFTDqJwF0vuZqhW3VpknMYMc0";
|
||||
}
|
||||
{
|
||||
username = "elle";
|
||||
email = "lnajt4@gmail.com";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$b5Bfq6u+fEKbtpixOl+yPw$nCyTLbSDYsw30ZiSxhJ6N99IIPQAnS2XRNlpEx9os+0";
|
||||
}
|
||||
{
|
||||
username = "ericvolp12";
|
||||
email = "ericvolp12@gmail.com";
|
||||
|
@ -126,11 +111,6 @@
|
|||
email = "lukegb@tvl.fyi";
|
||||
password = "{SSHA}7a85VNhpFElFw+N5xcjgGmt4HnBsaGp4";
|
||||
}
|
||||
{
|
||||
username = "mrflos";
|
||||
email = "mrflos@yeswiki.pro";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$/D1y+6n3+0GigG9mCMqK8A$9PseWm3+QATxN/M3Wu4JM+CnIppLD/LbQaVEKLItv9o";
|
||||
}
|
||||
{
|
||||
username = "noteed";
|
||||
email = "noteed@gmail.com";
|
||||
|
@ -173,11 +153,6 @@
|
|||
email = "tazjin@tvl.su";
|
||||
password = "{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$wOPEl9D3kSke//oLtbvqrg$j0npwwXgaXQ/emefKUwL59tH8hdmtzbgH2rQzWSmE2Y";
|
||||
}
|
||||
{
|
||||
username = "yl3dy";
|
||||
email = "aleksandr.kiselyov@gmail.com";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$vPvOa0/7RzDLuD/icQuIzQ$IVMSI7hh/frihuL11sNRj6Jz8TTn1wZZHjZZGszz3pI";
|
||||
}
|
||||
{
|
||||
username = "implr";
|
||||
email = "implr@hackerspace.pl";
|
||||
|
@ -284,24 +259,4 @@
|
|||
email = "tvix@sinavir.fr";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$5GXvoN/enVljV97yE/Zasg$OrgY9/ge2LoxNm9OOqxh/kKLxoAvU54MbQa9WWiT0jY";
|
||||
}
|
||||
{
|
||||
username = "emery";
|
||||
email = "emery@dmz.rs";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$b2k5UpTJafqM7yjHfVRjBg$zFGy/ZeI9Hb71TUfJwFp7qDKyUl8tdyFDUK1uNBYfUI";
|
||||
}
|
||||
{
|
||||
username = "aziz";
|
||||
email = "abd.aziz89@gmail.com";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$xTvdtTF+gavMfF8556CiiQ$IshnauhlEr80skpv5s6ueJLkQxlynzBt6oCp3cQrNCY";
|
||||
}
|
||||
{
|
||||
username = "nikiv";
|
||||
email = "nikita@nikiv.dev";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$79mMAD2XYa5dg7D9ueWMpw$Edf5WODrFpkNDyWaMdLKcgcErFLx4khmPIk8wzmYGUE";
|
||||
}
|
||||
{
|
||||
username = "ein-shved";
|
||||
email = "mestofel13@gmail.com";
|
||||
password = "{ARGON2}$argon2id$v=19$m=19456,t=2,p=1$D4wzfJoyFye48QNdrC66VA$aBJ/ZaL+rTgXoQa/nFdpHap3G/Oju8WlHaWTii95X8E";
|
||||
}
|
||||
]
|
||||
|
|
47 third_party/chicago95/default.nix vendored
@ -1,47 +0,0 @@
# A rendition of everyone's favourite computer theme.
{ pkgs, ... }:

let
  # Chicago95 has no GTK-4 theme (because GTK-4 removed important features that
  # it needs), but there is a project with an approximation.
  #
  # This is a bit of a hack, but I inject that project's GTK-4 theme as if it
  # was a part of Chicago95.
  #
  # This other project is GPL-3.0, under which Chicago95 is also distributed.
  gtk4ProjectSrc = pkgs.fetchFromGitHub {
    owner = "B00merang-Project";
    repo = "Windows-95";
    rev = "055abd7a3608afdcb2ef021732e07020f2b416b2";
    hash = "sha256:1li6wzyn3y09d188xki1h96pmn4xcx2lklfc4rkiq2y2r22wx7kz";
  };
in
pkgs.stdenvNoCC.mkDerivation {
  pname = "Chicago95";
  version = "master";

  src = pkgs.fetchFromGitHub {
    owner = "grassmunk";
    repo = "Chicago95";
    rev = "bdf5cf36a16102aaac297f3de887c601c2b1146f";
    hash = "sha256:11fsy3bam1rhp1292zflvzmf1432z1p0ncwy3601wl2f8rnvfdfm";
  };

  # The project has a Makefile, but it's broken in all sorts of ways, so we just
  # copy the important stuff manually.
  dontBuild = true;
  installPhase = ''
    mkdir -p $out/share/{icons,fonts,themes,sounds,qt5ct/colors}

    cp -r Theme/Chicago95 $out/share/themes
    cp -r Icons/* $out/share/icons
    cp -r Cursors/* $out/share/icons
    cp -r Fonts/* $out/share/fonts
    cp Extras/Chicago95_qt.conf $out/share/qt5ct/colors

    cp -r ${gtk4ProjectSrc}/gtk-4.0 $out/share/themes/Chicago95
  '';

  meta.license = pkgs.lib.licenses.gpl3;
}
2 third_party/gerrit_plugins/builder.nix vendored
@ -4,7 +4,7 @@
{ name
, version
, src
, depsHash ? null
, depsHash ? null
, overlayPluginCmd ? ''
    cp -R "${src}" "$out/plugins/${name}"
    echo "STABLE_BUILD_${lib.toUpper name}_LABEL v${version}-nix${if patches != [] then "-dirty" else ""}" >> $out/.version
12 third_party/gitignoreSource/default.nix vendored
@ -1,7 +1,15 @@
{ depot, lib, ... }:
{ pkgs, ... }:

let
  gitignoreNix = import depot.third_party.sources."gitignore.nix" { inherit lib; };
  gitignoreNix = import
    (pkgs.fetchFromGitHub {
      owner = "hercules-ci";
      repo = "gitignore";
      rev = "f9e996052b5af4032fe6150bba4a6fe4f7b9d698";
      sha256 = "0jrh5ghisaqdd0vldbywags20m2cxpkbbk5jjjmwaw0gr8nhsafv";
    })
    { inherit (pkgs) lib; };

in
{
  __functor = _: gitignoreNix.gitignoreSource;
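Because of the `__functor` attribute above, the imported attribute set is itself callable as a function. A minimal usage sketch, assuming the conventional readTree attribute path `depot.third_party.gitignoreSource` (the surrounding derivation is illustrative only, not part of this diff):

# Sketch: filter a local source tree through its .gitignore rules.
# `depot.third_party.gitignoreSource` is an assumed attribute path.
{ depot, pkgs, ... }:

pkgs.stdenv.mkDerivation {
  name = "example";
  src = depot.third_party.gitignoreSource ./.;
}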
3 third_party/nixpkgs/default.nix vendored
@ -58,9 +58,6 @@ let

    # the big lis package change breaks everything in //3p/lisp, undo it for now.
    lispPackages = stableNixpkgs.lispPackages;

    # mypaint is broken on stable (2024-09-05)
    mypaint = stableNixpkgs.mypaint;
  };

  # Overlay to expose the nixpkgs commits we are using to other Nix code.
140 third_party/overlays/patches/cbtemulator-uds.patch vendored Normal file
@ -0,0 +1,140 @@
commit 1397e10225d8c6fd079a86fccd58fb5d0f4200bc
Author: Florian Klink <flokli@flokli.de>
Date: Fri Mar 29 10:06:34 2024 +0100

    feat(bigtable/emulator): allow listening on Unix Domain Sockets

    cbtemulator listening on unix domain sockets is much easier than trying
    to allocate free TCP ports, especially if many cbtemulators are run at
    the same time in integration tests.

    This adds an additional flag, address, which has priority if it's set,
    rather than host:port.

    `NewServer` already takes a `laddr string`, so we simply check for it to
    contain slashes, and if so, listen on unix, rather than TCP.

diff --git a/bigtable/bttest/inmem.go b/bigtable/bttest/inmem.go
index 556abc2a85..33e4bf2667 100644
--- a/bttest/inmem.go
+++ b/bttest/inmem.go
@@ -40,6 +40,7 @@ import (
     "math"
     "math/rand"
     "net"
+    "os"
     "regexp"
     "sort"
     "strings"
@@ -106,7 +107,15 @@ type server struct {
 // The Server will be listening for gRPC connections, without TLS,
 // on the provided address. The resolved address is named by the Addr field.
 func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) {
-    l, err := net.Listen("tcp", laddr)
+    var l net.Listener
+    var err error
+
+    // If the address contains slashes, listen on a unix domain socket instead.
+    if strings.Contains(laddr, "/") {
+        l, err = net.Listen("unix", laddr)
+    } else {
+        l, err = net.Listen("tcp", laddr)
+    }
     if err != nil {
         return nil, err
     }
diff --git a/bigtable/cmd/emulator/cbtemulator.go b/bigtable/cmd/emulator/cbtemulator.go
index 144c09ffb1..deaf69b717 100644
--- a/cmd/emulator/cbtemulator.go
+++ b/cmd/emulator/cbtemulator.go
@@ -27,8 +27,9 @@ import (
 )

 var (
-    host = flag.String("host", "localhost", "the address to bind to on the local machine")
-    port = flag.Int("port", 9000, "the port number to bind to on the local machine")
+    host    = flag.String("host", "localhost", "the address to bind to on the local machine")
+    port    = flag.Int("port", 9000, "the port number to bind to on the local machine")
+    address = flag.String("address", "", "address:port number or unix socket path to listen on. Has priority over host/port")
 )

 const (
@ -42,7 +43,15 @@ func main() {
         grpc.MaxRecvMsgSize(maxMsgSize),
         grpc.MaxSendMsgSize(maxMsgSize),
     }
-    srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port), opts...)
+
+    var laddr string
+    if *address != "" {
+        laddr = *address
+    } else {
+        laddr = fmt.Sprintf("%s:%d", *host, *port)
+    }
+
+    srv, err := bttest.NewServer(laddr, opts...)
     if err != nil {
         log.Fatalf("failed to start emulator: %v", err)
     }
commit ce16f843d6c93159d86b3807c6d9ff66e43aac67
Author: Florian Klink <flokli@flokli.de>
Date: Fri Mar 29 11:53:15 2024 +0100

    feat(bigtable): clean up unix socket on close

    Call srv.Close when receiving an interrupt, and delete the unix domain
    socket in that function.

diff --git a/bigtable/bttest/inmem.go b/bigtable/bttest/inmem.go
index 33e4bf2667..0dc96024b1 100644
--- a/bttest/inmem.go
+++ b/bttest/inmem.go
@@ -148,6 +148,11 @@ func (s *Server) Close() {

     s.srv.Stop()
     s.l.Close()
+
+    // clean up unix socket
+    if strings.Contains(s.Addr, "/") {
+        _ = os.Remove(s.Addr)
+    }
 }

 func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) {
diff --git a/bigtable/cmd/emulator/cbtemulator.go b/bigtable/cmd/emulator/cbtemulator.go
index deaf69b717..5a9e8f7a8c 100644
--- a/cmd/emulator/cbtemulator.go
+++ b/cmd/emulator/cbtemulator.go
@@ -18,9 +18,12 @@ cbtemulator launches the in-memory Cloud Bigtable server on the given address.
 package main

 import (
+    "context"
     "flag"
     "fmt"
     "log"
+    "os"
+    "os/signal"

     "cloud.google.com/go/bigtable/bttest"
     "google.golang.org/grpc"
@@ -51,11 +54,18 @@ func main() {
         laddr = fmt.Sprintf("%s:%d", *host, *port)
     }

+    ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
+    defer stop()
+
     srv, err := bttest.NewServer(laddr, opts...)
     if err != nil {
         log.Fatalf("failed to start emulator: %v", err)
     }

     fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr)
-    select {}
+    select {
+    case <-ctx.Done():
+        srv.Close()
+        stop()
+    }
 }
@ -1,109 +0,0 @@
From 96f66ec32e003c6c215aa2a644281289a71dae7d Mon Sep 17 00:00:00 2001
From: Ilan Joselevich <personal@ilanjoselevich.com>
Date: Sun, 4 Aug 2024 02:35:27 +0300
Subject: [PATCH] Fix: Use mkDerivation with src instead of runCommand for test
 derivation

The problem with using runCommand and recreating the src directory with
lndir is that it changes the file types of individual files, they will
now be a symlink instead of a regular file. If you have a crate that tests
that a file is of regular type then it will fail inside the crate2nix derivation.
---
 templates/nix/crate2nix/default.nix | 81 ++++++++-----------
 1 file changed, 35 insertions(+), 46 deletions(-)

diff --git a/templates/nix/crate2nix/default.nix b/templates/nix/crate2nix/default.nix
index c53925e..90e10c6 100644
--- a/templates/nix/crate2nix/default.nix
+++ b/templates/nix/crate2nix/default.nix
@@ -120,52 +120,41 @@ rec {
           testPostRun
         ]);
       in
-      pkgs.runCommand "run-tests-${testCrate.name}"
-        {
-          inherit testCrateFlags;
-          buildInputs = testInputs;
-        } ''
-        set -e
-
-        export RUST_BACKTRACE=1
-
-        # recreate a file hierarchy as when running tests with cargo
-
-        # the source for test data
-        # It's necessary to locate the source in $NIX_BUILD_TOP/source/
-        # instead of $NIX_BUILD_TOP/
-        # because we compiled those test binaries in the former and not the latter.
-        # So all paths will expect source tree to be there and not in the build top directly.
-        # For example: $NIX_BUILD_TOP := /build in general, if you ask yourself.
-        # NOTE: There could be edge cases if `crate.sourceRoot` does exist but
-        # it's very hard to reason about them.
-        # Open a bug if you run into this!
-        mkdir -p source/
-        cd source/
-
-        ${pkgs.buildPackages.xorg.lndir}/bin/lndir ${crate.src}
-
-        # build outputs
-        testRoot=target/debug
-        mkdir -p $testRoot
-
-        # executables of the crate
-        # we copy to prevent std::env::current_exe() to resolve to a store location
-        for i in ${crate}/bin/*; do
-          cp "$i" "$testRoot"
-        done
-        chmod +w -R .
-
-        # test harness executables are suffixed with a hash, like cargo does
-        # this allows to prevent name collision with the main
-        # executables of the crate
-        hash=$(basename $out)
-        for file in ${drv}/tests/*; do
-          f=$testRoot/$(basename $file)-$hash
-          cp $file $f
-          ${testCommand}
-        done
-      '';
+      pkgs.stdenvNoCC.mkDerivation {
+        name = "run-tests-${testCrate.name}";
+
+        inherit (crate) src;
+
+        inherit testCrateFlags;
+
+        buildInputs = testInputs;
+
+        buildPhase = ''
+          set -e
+          export RUST_BACKTRACE=1
+
+          # build outputs
+          testRoot=target/debug
+          mkdir -p $testRoot
+
+          # executables of the crate
+          # we copy to prevent std::env::current_exe() to resolve to a store location
+          for i in ${crate}/bin/*; do
+            cp "$i" "$testRoot"
+          done
+          chmod +w -R .
+
+          # test harness executables are suffixed with a hash, like cargo does
+          # this allows to prevent name collision with the main
+          # executables of the crate
+          hash=$(basename $out)
+          for file in ${drv}/tests/*; do
+            f=$testRoot/$(basename $file)-$hash
+            cp $file $f
+            ${testCommand}
+          done
+        '';
+      };
     in
     pkgs.runCommand "${crate.name}-linked"
       {
--
2.44.0
38 third_party/overlays/tvl.nix vendored
@ -90,10 +90,15 @@ depot.nix.readTree.drvTargets {
    };
  }));

  # https://github.com/googleapis/google-cloud-go/pull/9665
  cbtemulator = super.cbtemulator.overrideAttrs (old: {
    patches = old.patches or [ ] ++ [
      ./patches/cbtemulator-uds.patch
    ];
  });

  crate2nix = super.crate2nix.overrideAttrs (old: {
    patches = old.patches or [ ] ++ [
      # TODO(Kranzes): Remove in next release.
      ./patches/crate2nix-0001-Fix-Use-mkDerivation-with-src-instead-of-runCommand.patch
      # https://github.com/nix-community/crate2nix/pull/301
      ./patches/crate2nix-tests-debug.patch
    ];
@ -107,25 +112,6 @@ depot.nix.readTree.drvTargets {
    ];
  });

  # https://github.com/NixOS/nixpkgs/pull/329415/files
  grpc-health-check = super.rustPlatform.buildRustPackage {
    pname = "grpc-health-check";
    version = "unstable-2022-08-19";

    src = super.fetchFromGitHub {
      owner = "paypizza";
      repo = "grpc-health-check";
      rev = "f61bb5e10beadc5ed53144cc540d66e19fc510bd";
      hash = "sha256-nKut9c1HHIacdRcmvlXe0GrtkgCWN6sxJ4ImO0CIDdo=";
    };

    cargoHash = "sha256-lz+815iE+oXBQ3PfqBO0QBpZY6x1SNR7OU7BjkRszzI=";

    nativeBuildInputs = [ super.protobuf ];
    # tests fail
    doCheck = false;
  };

  # Imports a patch that fixes usage of this package on versions
  # >=1.9. The patch has been proposed upstream, but so far with no
  # reactions from the maintainer:
@ -135,11 +121,6 @@ depot.nix.readTree.drvTargets {
    patches = (old.patches or [ ]) ++ [ ./patches/tpm2-pkcs11-190-dbupgrade.patch ];
  });

  # Dependency isn't supported by Python 3.12
  html5validator = super.html5validator.override {
    python3 = self.python311;
  };

  # macFUSE bump containing fix for https://github.com/osxfuse/osxfuse/issues/974
  # https://github.com/NixOS/nixpkgs/pull/320197
  fuse =
@ -152,4 +133,9 @@ depot.nix.readTree.drvTargets {
      hash = "sha256-ucTzO2qdN4QkowMVvC3+4pjEVjbwMsB0xFk+bvQxwtQ=";
    };
  }) else super.fuse;

  treefmt = super.treefmt.overrideAttrs (old: {
    # https://github.com/numtide/treefmt/pull/328
    patches = old.patches or [ ] ++ [ ./patches/treefmt-fix-no-cache.patch ];
  });
}
@ -1,78 +0,0 @@
From cc4718cbea1bd70de21a2be515a944802246ffc7 Mon Sep 17 00:00:00 2001
From: Vincent Ambo <mail@tazj.in>
Date: Sun, 15 Sep 2024 03:08:28 +0300
Subject: [PATCH] remove dependency on plausible

We don't need spyware, thanks.
---
 package-lock.json | 9 ---------
 package.json      | 1 -
 src/App.svelte    | 8 --------
 3 files changed, 18 deletions(-)

diff --git a/package-lock.json b/package-lock.json
index d52de6c0..d96e342f 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -29,7 +29,6 @@
         "marked-katex-extension": "^5.1.1",
         "marked-linkify-it": "^3.1.11",
         "md5": "^2.3.0",
-        "plausible-tracker": "^0.3.9",
         "svelte": "^4.2.19",
         "twemoji": "^14.0.2",
         "zod": "^3.23.8"
@@ -3697,14 +3696,6 @@
         "url": "https://github.com/sponsors/jonschlinkert"
       }
     },
-    "node_modules/plausible-tracker": {
-      "version": "0.3.9",
-      "resolved": "https://registry.npmjs.org/plausible-tracker/-/plausible-tracker-0.3.9.tgz",
-      "integrity": "sha512-hMhneYm3GCPyQon88SZrVJx+LlqhM1kZFQbuAgXPoh/Az2YvO1B6bitT9qlhpiTdJlsT5lsr3gPmzoVjb5CDXA==",
-      "engines": {
-        "node": ">=10"
-      }
-    },
     "node_modules/playwright": {
       "version": "1.46.1",
       "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.46.1.tgz",
diff --git a/package.json b/package.json
index 6d569ad9..61e8d892 100644
--- a/package.json
+++ b/package.json
@@ -73,7 +73,6 @@
     "marked-katex-extension": "^5.1.1",
     "marked-linkify-it": "^3.1.11",
     "md5": "^2.3.0",
-    "plausible-tracker": "^0.3.9",
     "svelte": "^4.2.19",
     "twemoji": "^14.0.2",
     "zod": "^3.23.8"
diff --git a/src/App.svelte b/src/App.svelte
index 8161c390..4281ba61 100644
--- a/src/App.svelte
+++ b/src/App.svelte
@@ -1,6 +1,4 @@
 <script lang="ts">
-  import Plausible from "plausible-tracker";
-
   import * as router from "@app/lib/router";
   import { unreachable } from "@app/lib/utils";

@@ -28,12 +26,6 @@

   void router.loadFromLocation();

-  if (import.meta.env.PROD) {
-    const plausible = Plausible({ domain: "app.radicle.xyz" });
-
-    plausible.enableAutoPageviews();
-  }
-
   $: document.documentElement.setAttribute("data-codefont", $codeFont);
   $: document.documentElement.setAttribute("data-theme", $theme);
 </script>
--
2.46.0
66 third_party/radicle-explorer/default.nix vendored
@ -1,66 +0,0 @@
# radicle-explorer is the web UI for Radicle.
#
# They have an upstream Nix derivation, but it only works with experimental
# features Nix and is quite messy, so this is a copy of the relevant parts.
{ lib, pkgs, ... }:

let
  twemoji-assets = pkgs.fetchFromGitHub {
    owner = "twitter";
    repo = "twemoji";
    rev = "v14.0.2";
    hash = "sha256-YoOnZ5uVukzi/6bLi22Y8U5TpplPzB7ji42l+/ys5xI=";
  };

  httpdSrc = pkgs.radicle-httpd.src;
in
lib.fix (self: pkgs.buildNpmPackage rec {
  pname = "radicle-explorer";
  version = (builtins.fromJSON (builtins.readFile "${src}/package.json")).version;

  # source should be synced with the httpd, which is already in nixpkgs
  src = pkgs.fetchgit {
    inherit (httpdSrc) url rev;
    hash = "sha256:09m13238h6j7g02r6332ihgyyzbjx90pgz14rz29pgv7936h6il8";
  };

  # This might change during nixpkgs bumps and will need updating. Need to fix
  # upstream so that there is a normal, callable derivation.
  npmDepsHash = "sha256:1hbrzfjkfc0q8qk03yi6qb9zqm57h7hnkn7fl0yxkrzbrljaljaz";

  patches = [
    ./0001-remove-dependency-on-plausible.patch
  ];

  postPatch = ''
    patchShebangs --build ./scripts
    mkdir -p "public/twemoji"
    cp -t public/twemoji -r -- ${twemoji-assets}/assets/svg/*
    : >scripts/install-twemoji-assets
  '';
  dontConfigure = true;
  doCheck = false;

  installPhase = ''
    runHook preInstall
    mkdir -p "$out"
    cp -r -t "$out" build/*
    runHook postInstall
  '';

  # Override the build-time configuration with other preferred seeds which are
  # displayed on the landing page.
  passthru.withPreferredSeeds = seeds:
    let
      originalConfig = builtins.fromJSON (builtins.readFile "${src}/config/default.json");
      config = originalConfig // {
        preferredSeeds = seeds;
      };
      newConfig = pkgs.writeText "local.json" (builtins.toJSON config);
    in
    self.overrideAttrs (_: {
      preBuild = ''
        cp ${newConfig} config/local.json
      '';
    });
})
126 third_party/rust-crates/default.nix vendored
@ -292,4 +292,130 @@ depot.nix.readTree.drvTargets rec{
    sha256 = "1kd047p8jv6mhmfzddjvfa2nwkfrb3l1wml6lfm51n1cr06cc9lz";
  };

  libz-sys = buildRustCrate {
    pname = "libz-sys";
    version = "1.1.2";
    sha256 = "1y7v6bkwr4b6yaf951p1ns7mx47b29ziwdd5wziaic14gs1gwq30";
    buildDependencies = [
      cc
      pkg-config
    ];
  };

  libgit2-sys = buildRustCrate {
    pname = "libgit2-sys";
    version = "0.16.2+1.7.2";
    sha256 = "0bs446idbmg8s13jvb0ck6qmrskcdn2mp3d4mn9ggxbmiw4ryd3g";
    dependencies = [
      libc
      libz-sys
    ];
    libPath = "lib.rs";
    libName = "libgit2_sys";
    # TODO: this should be available via `pkgs.defaultCrateOverrides`,
    # I thought that was included by default?
    nativeBuildInputs = [ pkg-config ];
    buildInputs = [ pkgs.zlib pkgs.libgit2 ];
    buildDependencies = [
      cc
      pkg-config
    ];
    env.LIBGIT2_NO_VENDOR = "1";
  };

  matches = buildRustCrate {
    pname = "matches";
    version = "0.1.8";
    sha256 = "03hl636fg6xggy0a26200xs74amk3k9n0908rga2szn68agyz3cv";
    libPath = "lib.rs";
  };

  percent-encoding = buildRustCrate {
    pname = "percent-encoding";
    version = "2.1.0";
    sha256 = "0i838f2nr81585ckmfymf8l1x1vdmx6n8xqvli0lgcy60yl2axy3";
    libPath = "lib.rs";
  };

  form_urlencoded = buildRustCrate {
    pname = "form_urlencoded";
    version = "1.0.1";
    sha256 = "0rhv2hfrzk2smdh27walkm66zlvccnnwrbd47fmf8jh6m420dhj8";
    dependencies = [
      matches
      percent-encoding
    ];
  };

  tinyvec_macros = buildRustCrate {
    pname = "tinyvec_macros";
    version = "0.1.0";
    sha256 = "0aim73hyq5g8b2hs9gjq2sv0xm4xzfbwp5fdyg1frljqzkapq682";
  };

  tinyvec = buildRustCrate {
    pname = "tinyvec";
    version = "1.2.0";
    sha256 = "1c95nma20kiyrjwfsk7hzd5ir6yy4bm63fmfbfb4dm9ahnlvdp3y";
    features = [ "alloc" ];
    dependencies = [
      tinyvec_macros
    ];
  };

  unicode-normalization = buildRustCrate {
    pname = "unicode-normalization";
    version = "0.1.17";
    sha256 = "0w4s0avzlf7pzcclhhih93aap613398sshm6jrxcwq0f9lhis11c";
    dependencies = [
      tinyvec
    ];
  };

  unicode-bidi = buildRustCrate {
    pname = "unicode-bidi";
    version = "0.3.5";
    sha256 = "193jzlxj1dfcms2381lyd45zh4ywlicj9lzcfpid1zbkmfarymkz";
    dependencies = [
      matches
    ];
  };

  idna = buildRustCrate {
    pname = "idna";
    version = "0.2.3";
    sha256 = "0hwypd0fpym9lmd4bbqpwyr5lhrlvmvzhi1vy9asc5wxwkzrh299";
    dependencies = [
      matches
      unicode-normalization
      unicode-bidi
    ];
  };

  url = buildRustCrate {
    pname = "url";
    version = "2.2.1";
    sha256 = "1ci1djafh83qhpzbmxnr9w5gcrjs3ghf8rrxdy4vklqyji6fvn5v";
    dependencies = [
      form_urlencoded
      idna
      matches
      percent-encoding
    ];
  };


  git2 = buildRustCrate {
    pname = "git2";
    edition = "2018";
    version = "0.18.1";
    sha256 = "1d1wm8cn37svyxgvzfapwilkkc9d2x7fcrgciwn8b2pv9aqz102k";
    dependencies = [
      bitflags
      libc
      libgit2-sys
      log
      url
    ];
  };
}
66 third_party/sources/sources.json vendored
@ -5,22 +5,10 @@
        "homepage": "https://matrix.to/#/#agenix:nixos.org",
        "owner": "ryantm",
        "repo": "agenix",
        "rev": "f6291c5935fdc4e0bef208cfc0dcab7e3f7a1c41",
        "sha256": "1x8nd8hvsq6mvzig122vprwigsr3z2skanig65haqswn7z7amsvg",
        "rev": "c2fc0762bbe8feb06a2e59a364fa81b3a57671c9",
        "sha256": "1lpkwinlax40b7xgzspbkm9rsi4a1x48hxhixnni4irxxwnav0ah",
        "type": "tarball",
        "url": "https://github.com/ryantm/agenix/archive/f6291c5935fdc4e0bef208cfc0dcab7e3f7a1c41.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "gitignore.nix": {
        "branch": "master",
        "description": "Nix functions for filtering local git sources",
        "homepage": "",
        "owner": "hercules-ci",
        "repo": "gitignore.nix",
        "rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
        "sha256": "02wxkdpbhlm3yk5mhkhsp3kwakc16xpmsf2baw57nz1dg459qv8w",
        "type": "tarball",
        "url": "https://github.com/hercules-ci/gitignore.nix/archive/637db329424fd7e46cf4185293b9cc8c88c95394.tar.gz",
        "url": "https://github.com/ryantm/agenix/archive/c2fc0762bbe8feb06a2e59a364fa81b3a57671c9.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "home-manager": {
@ -29,10 +17,10 @@
        "homepage": "https://nix-community.github.io/home-manager/",
        "owner": "nix-community",
        "repo": "home-manager",
        "rev": "a9c9cc6e50f7cbd2d58ccb1cd46a1e06e9e445ff",
        "sha256": "1cxp9rgczr4rhhx1klwcr7a61khizq8hv63gvmy9gfsx7fp4h60a",
        "rev": "a7117efb3725e6197dd95424136f79147aa35e5b",
        "sha256": "02q3ck1hjs8xzdhfikqxrnsfs9vh4p7rmdha3vbp6nkkdbdvhgg7",
        "type": "tarball",
        "url": "https://github.com/nix-community/home-manager/archive/a9c9cc6e50f7cbd2d58ccb1cd46a1e06e9e445ff.tar.gz",
        "url": "https://github.com/nix-community/home-manager/archive/a7117efb3725e6197dd95424136f79147aa35e5b.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "impermanence": {
@ -41,10 +29,10 @@
        "homepage": "",
        "owner": "nix-community",
        "repo": "impermanence",
        "rev": "63f4d0443e32b0dd7189001ee1894066765d18a5",
        "sha256": "0xnshgwfg834dm9l14p2w3wmhjysjpqpgfk37im0vrk1qgva19g2",
        "rev": "a33ef102a02ce77d3e39c25197664b7a636f9c30",
        "sha256": "1mig6ns8l5iynsm6pfbnx2b9hmr592s1kqbw6gq1n25czdlcniam",
        "type": "tarball",
        "url": "https://github.com/nix-community/impermanence/archive/63f4d0443e32b0dd7189001ee1894066765d18a5.tar.gz",
        "url": "https://github.com/nix-community/impermanence/archive/a33ef102a02ce77d3e39c25197664b7a636f9c30.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "naersk": {
@ -53,10 +41,10 @@
        "homepage": "",
        "owner": "nmattia",
        "repo": "naersk",
        "rev": "3fb418eaf352498f6b6c30592e3beb63df42ef11",
        "sha256": "0v6ncaqm8q2mdv1jhkjjwi1sx4firlhjxpw4wachkwkriyjnkz5g",
        "rev": "fa19d8c135e776dc97f4dcca08656a0eeb28d5c0",
        "sha256": "1mif058gcbw5d5yixsmzalqlr0h9m9mmbsgv8v4r2mmsbw83k2x0",
        "type": "tarball",
        "url": "https://github.com/nmattia/naersk/archive/3fb418eaf352498f6b6c30592e3beb63df42ef11.tar.gz",
        "url": "https://github.com/nmattia/naersk/archive/fa19d8c135e776dc97f4dcca08656a0eeb28d5c0.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "napalm": {
@ -65,10 +53,10 @@
        "homepage": "",
        "owner": "nix-community",
        "repo": "napalm",
        "rev": "e1babff744cd278b56abe8478008b4a9e23036cf",
        "sha256": "04h62p4hxw7fhclki7hcn739hhig3rh9q4njp24j7bm0dk2kj8h6",
        "rev": "edcb26c266ca37c9521f6a97f33234633cbec186",
        "sha256": "0ai1ax380nnpz0mbgbc5vdzafyjilcmdj7kgv087x2vagpprb4yy",
        "type": "tarball",
        "url": "https://github.com/nix-community/napalm/archive/e1babff744cd278b56abe8478008b4a9e23036cf.tar.gz",
        "url": "https://github.com/nix-community/napalm/archive/edcb26c266ca37c9521f6a97f33234633cbec186.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "nixpkgs": {
@ -77,10 +65,10 @@
        "homepage": "",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "99dc8785f6a0adac95f5e2ab05cc2e1bf666d172",
        "sha256": "11vz7dshwxszab91da1x98qdlmpxi0v7daz24jj3crpll68n93w0",
        "rev": "7f993cdf26ccef564eabf31fdb40d140821e12bc",
        "sha256": "0dypbvibfdmv14rqlamf451625fw2fyk11prw9bbywi0q2i313d5",
        "type": "tarball",
        "url": "https://github.com/NixOS/nixpkgs/archive/99dc8785f6a0adac95f5e2ab05cc2e1bf666d172.tar.gz",
        "url": "https://github.com/NixOS/nixpkgs/archive/7f993cdf26ccef564eabf31fdb40d140821e12bc.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "nixpkgs-stable": {
@ -89,10 +77,10 @@
        "homepage": "",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "205fd4226592cc83fd4c0885a3e4c9c400efabb5",
        "sha256": "1f5d2g1p6nfwycpmrnnmc2xmcszp804adp16knjvdkj8nz36y1fg",
        "rev": "a2e1d0414259a144ebdc048408a807e69e0565af",
        "sha256": "1jv90bz3s7j294fhpb29k735fg3xfs9z848szicqarpbz7wfg03g",
        "type": "tarball",
        "url": "https://github.com/NixOS/nixpkgs/archive/205fd4226592cc83fd4c0885a3e4c9c400efabb5.tar.gz",
        "url": "https://github.com/NixOS/nixpkgs/archive/a2e1d0414259a144ebdc048408a807e69e0565af.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "rust-overlay": {
@ -101,10 +89,10 @@
        "homepage": "",
        "owner": "oxalica",
        "repo": "rust-overlay",
        "rev": "20c8461785d8f5af32d8d4d5c128589e23d7f033",
        "sha256": "1zy2jcy2ika83dwcpxxvmimk317zimwn7hv8h3v43apqwssl0nxv",
        "rev": "6dc3e45fe4aee36efeed24d64fc68b1f989d5465",
        "sha256": "0vqgkzbfdj920lbm1dy8kylrv2gk4ard38lb3i20xvp2mp1d39n2",
        "type": "tarball",
        "url": "https://github.com/oxalica/rust-overlay/archive/20c8461785d8f5af32d8d4d5c128589e23d7f033.tar.gz",
        "url": "https://github.com/oxalica/rust-overlay/archive/6dc3e45fe4aee36efeed24d64fc68b1f989d5465.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "rustsec-advisory-db": {
@ -113,10 +101,10 @@
        "homepage": "https://rustsec.org",
        "owner": "RustSec",
        "repo": "advisory-db",
        "rev": "3cae2352cf82b5815b98aa309e0f4df6aa737cec",
        "sha256": "0bba56sk4dlrf8rm3dmy9bxf95bq4rm1g3ppk4n2vfw0wzf7v7ap",
        "rev": "af76d4423761499f954411bb3071dcc72e6b0450",
        "sha256": "167qxr66j638km3z7zk2drjdr4bgqz77hr35vkwdp0lbafmd6y1c",
        "type": "tarball",
        "url": "https://github.com/RustSec/advisory-db/archive/3cae2352cf82b5815b98aa309e0f4df6aa737cec.tar.gz",
        "url": "https://github.com/RustSec/advisory-db/archive/af76d4423761499f954411bb3071dcc72e6b0450.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    }
}
598 tools/cheddar/Cargo.lock generated
File diff suppressed because it is too large
@ -8,7 +8,6 @@ depot.nix.lazy-deps {
  depotfmt.attr = "tools.depotfmt";
  fetch-depot-inbox.attr = "tools.fetch-depot-inbox";
  git-r.attr = "tools.git-r";
  git-review.attr = "third_party.nixpkgs.git-review";
  gerrit-update.attr = "tools.gerrit-update";
  gerrit.attr = "tools.gerrit-cli";
  hash-password.attr = "tools.hash-password";
@ -1,14 +1,24 @@
# Builds treefmt for depot, with a hardcoded configuration that
# includes the right paths to formatters.
{ pkgs, ... }:
{ depot, pkgs, ... }:

let
  # terraform fmt can't handle multiple paths at once, but treefmt
  # expects this
  terraformat = pkgs.writeShellScript "terraformat" ''
    echo "$@" | xargs -n1 ${pkgs.terraform}/bin/terraform fmt
  '';

  config = pkgs.writeText "depot-treefmt-config" ''
    [formatter.go]
    command = "${pkgs.go}/bin/gofmt"
    command = "${depot.nix.buildGo.go}/bin/gofmt"
    options = [ "-w" ]
    includes = ["*.go"]

    [formatter.tf]
    command = "${terraformat}"
    includes = [ "*.tf" ]

    [formatter.nix]
    command = "${pkgs.nixpkgs-fmt}/bin/nixpkgs-fmt"
    includes = [ "*.nix" ]
@ -18,10 +28,8 @@ let

    [formatter.rust]
    command = "${pkgs.rustfmt}/bin/rustfmt"
    options = ["--edition", "2021"]
    includes = [ "*.rs" ]
    excludes = [
      "users/emery/*",
      "users/tazjin/*",
    ]
  '';
@ -45,12 +53,10 @@ let
  '';
in
depotfmt.overrideAttrs (_: {
  passthru = {
    inherit config check;
    meta.ci.extraSteps.check = {
      label = "depot formatting check";
      command = check;
      alwaysRun = true;
    };
  passthru.config = config;
  passthru.meta.ci.extraSteps.check = {
    label = "depot formatting check";
    command = check;
    alwaysRun = true;
  };
})
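Because the treefmt configuration above is generated from Nix, adding a formatter is just another block in the emitted TOML. A hedged sketch following the same [formatter.*] schema (the Python formatter entry is an illustrative assumption, not part of the depot config):

# Sketch: an additional formatter entry in the generated config.
config = pkgs.writeText "depot-treefmt-config" ''
  # ... formatters as above ...

  [formatter.python]
  command = "${pkgs.black}/bin/black"
  includes = [ "*.py" ]
'';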
@ -1,23 +0,0 @@
{ depot, pkgs, ... }:

let
  em = depot.tools.eaglemode;
in
em.mkCommand {
  name = "9 B";
  hotkey = "Ctrl+E";
  icon = "${./plan9.tga}";

  description = ''
    Plumb target to Sam or Acme
  '';

  code = ''
    ErrorIfNotSingleTarget();

    my @tgt=GetTgt();
    my $dir=$tgt[0];

    ExecOrError('${pkgs.plan9port}/bin/9', 'B', $dir);
  '';
}
@ -1,26 +0,0 @@
{ depot, pkgs, ... }:

let
  em = depot.tools.eaglemode;
  icon = em.mkTGA "emacs" "${pkgs.emacs}/share/icons/hicolor/128x128/apps/emacs.png";
in
em.mkCommand {
  name = "Emacsclient";
  hotkey = "Ctrl+E";
  icon = "${icon}";

  description = ''
    Open target in Emacsclient.

    Emacs server must be running already for this to have any effect.
  '';

  code = ''
    ErrorIfNotSingleTarget();

    my @tgt=GetTgt();
    my $dir=$tgt[0];

    ExecOrError('${pkgs.emacs}/bin/emacsclient', '-n', $dir);
  '';
}
Binary file not shown.
@ -1,146 +0,0 @@
# Helper functions for extending Eagle Mode with useful stuff.
#
# Eagle Mode's customisation usually expects people to copy the entire
# configuration into their user folder, which we can automate fairly easily
# using Nix, letting users choose whether to keep upstream config or not.
{ depot, lib, pkgs, ... }:

let
  mkDesc = d: lib.concatMapStringsSep "\n"
    (x: "# Descr =${x}")
    (builtins.filter (s: s != "") (lib.splitString "\n" d));

  configWrapper = pkgs.runCommand "eaglemode-config-wrapper" { } ''
    cp ${./wrapper.go} wrapper.go
    export HOME=$PWD
    ${pkgs.go}/bin/go build wrapper.go
    install -Dm755 wrapper $out/bin/wrapper
  '';
in
rec {
  # mkCommand creates an Eagle Mode command for the file browser.
  #
  # Commands are basically little Perl scripts with a command standard library
  # available. They receive the user's selected target from Eagle Mode.
  mkCommand = lib.makeOverridable (
    {
      # Name of the command.
      name
    , # User-facing description, displayed in Eagle Mode UI. Can be multi-line.
      description
    , # Verbatim Perl code of the command. Command library is already available.
      code
    , # Caption for the UI button (defaults to name).
      caption ? name
    , icon ? "terminal.tga"
    , # TODO: what's a good default?
      hotkey ? ""
    , order ? 1.0
    }: pkgs.writeTextDir "emFileMan/Commands/${name}.pl" (''
      #!${pkgs.perl}/bin/perl
      #[[BEGIN PROPERTIES]]
      # Type = Command
      # Interpreter = perl
      # DefaultFor = directory
      # Caption = ${caption}
      # Order = ${toString order}
      # Icon = ${icon}
    ''
    + (lib.optionalString (description != "") "${mkDesc description}\n")
    + (lib.optionalString (hotkey != "") "# Hotkey = ${hotkey}\n")
    + ''
      #[[END PROPERTIES]]

      use strict;
      use warnings;
      BEGIN { require "$ENV{'EM_DIR'}/res/emFileMan/scripts/cmd-util.pl"; }

      ${if builtins.isString code
        then code
        else (if builtins.isPath code
              then builtins.readFile code
              else throw "code must be a string (literal code) or path to file")}
    '')
  );

  # mkTGA converts the given image to a TGA image.
  mkTGA = name: path: pkgs.runCommand "${name}.tga" { } ''
    ${pkgs.imagemagick}/bin/convert ${path} $out
  '';

  buildPlugin = lib.makeOverridable (
    { name
    , src
    , version
    , eaglemode ? pkgs.eaglemode
    , target ? name
    , extraNativeBuildInputs ? [ ]
    , extraBuildInputs ? [ ]
    }:
    pkgs.stdenv.mkDerivation {
      pname = "eaglemode-plugin-${name}";
      inherit src version;
      # inherit (eaglemode.drvAttrs) dontPatchELF;

      nativeBuildInputs = eaglemode.drvAttrs.nativeBuildInputs ++ extraNativeBuildInputs;
      buildInputs = eaglemode.drvAttrs.buildInputs ++ extraBuildInputs ++ [ eaglemode ];

      buildPhase = ''
        runHook preBuild

        # merge eaglemode & plugin folders
        cp -r ${pkgs.srcOnly eaglemode} merged-src && chmod -R u+rw merged-src
        cp -r $src/* merged-src && chmod -R u+rw merged-src
        cd merged-src

        export NIX_LDFLAGS="$NIX_LDFLAGS -lXxf86vm -lXext -lXinerama"
        perl make.pl build projects=${target} continue=no

        runHook postBuild
      '';

      installPhase = ''
        runHook preInstall

        mkdir -p $out/lib
        cp -r lib/lib${target}.so $out/lib

        if [ -d "$src/etc" ]; then
          cp -r $src/etc/* $out
        fi

        runHook postInstall
      '';
    }
  );

  # etcDir creates a directory layout suitable for use in the EM_USER_CONFIG_DIR
  # environment variable.
  #
  # Note that Eagle Mode requires the value of that variable to be mutable at
  # runtime (it is the same place where it persists all of its user-controlled
  # state), so the results of this function can not be used directly.
  etcDir =
    { eaglemode ? pkgs.eaglemode
    , extraPaths ? [ ]
    }: pkgs.runCommand "eaglemode-config" { } ''
      mkdir $out

      ${
        lib.concatMapStringsSep "\n" (s: "cp -rT ${s} $out/\nchmod -R u+rw $out/\n") ([ "${eaglemode}/etc"] ++ extraPaths)
      }
    '';

  # withConfig creates an Eagle Mode wrapper that runs it with the given
  # configuration.
  withConfig = { eaglemode ? pkgs.eaglemode, config }: pkgs.writeShellScriptBin "eaglemode" ''
    ${configWrapper}/bin/wrapper --em-config "${config}"

    if [ -d "${config}/lib" ]; then
      export LD_LIBRARY_PATH="${config}/lib:$LD_LIBRARY_PATH"
      exec ${eaglemode}/bin/eaglemode "$@"
    fi

    exec ${eaglemode}/bin/eaglemode "$@"
  '';
}
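The helpers in the deleted file compose: buildPlugin compiles a plugin library, etcDir merges it with the stock configuration tree, and withConfig wraps the binary to run against that tree. A hedged sketch of tying them together (the plugin arguments mirror the avif plugin below; exact attribute paths are assumptions):

# Sketch: run Eagle Mode with a generated config bundling one plugin.
{ depot, pkgs, ... }:

let
  em = depot.tools.eaglemode;
  avifPlugin = em.buildPlugin {
    name = "avif";
    version = "canon";
    src = ./.;
    target = "PlAvif";
    extraBuildInputs = [ pkgs.libavif ];
  };
in
em.withConfig {
  config = em.etcDir { extraPaths = [ avifPlugin ]; };
}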
@ -1,10 +0,0 @@
{ depot, pkgs, ... }:

depot.tools.eaglemode.buildPlugin {
  name = "avif";
  version = "canon";
  src = ./.;
  target = "PlAvif";
  extraBuildInputs = [ pkgs.libavif ];
  extraNativeBuildInputs = [ pkgs.pkg-config ];
}
@ -1,6 +0,0 @@
#%rec:emFpPlugin%#

FileTypes = { ".avif" }
Priority = 1.0
Library = "PlAvif"
Function = "PlAvifFpPluginFunc"
@ -1,64 +0,0 @@
package PlAvif;

use strict;
use warnings;

sub GetDependencies
{
    return ('emCore');
}

sub IsEssential
{
    return 0;
}

sub GetFileHandlingrules
{
    return ();
}

sub GetExtraBuildOptions
{
    return ();
}

sub Build
{
    shift;
    my %options=@_;

    my @libAvifFlags=();
    if ($options{'avif-inc-dir'} eq '' && $options{'avif-lib-dir'} eq '') {
        @libAvifFlags=split("\n",readpipe(
            "perl \"".$options{'utils'}."/PkgConfig.pl\" libavif"
        ));
    }
    if (!@libAvifFlags) {
        if ($options{'avif-inc-dir'} ne '') {
            push(@libAvifFlags, "--inc-search-dir", $options{'avif-inc-dir'});
        }
        if ($options{'avif-lib-dir'} ne '') {
            push(@libAvifFlags, "--lib-search-dir", $options{'avif-lib-dir'});
        }
        push(@libAvifFlags, "--link", "avif");
    }

    system(
        @{$options{'unicc_call'}},
        "--math",
        "--rtti",
        "--exceptions",
        "--bin-dir" , "bin",
        "--lib-dir" , "lib",
        "--obj-dir" , "obj",
        "--inc-search-dir", "include",
        @libAvifFlags,
        "--link" , "emCore",
        "--type" , "dynlib",
        "--name" , "PlAvif",
        "src/PlAvif.cpp"
    )==0 or return 0;

    return 1;
}
@ -1,190 +0,0 @@
#include <emCore/emFpPlugin.h>
#include <emCore/emImageFile.h>

#include "avif/avif.h"

class PlAvifImageFileModel : public emImageFileModel
{
public:

    static emRef<PlAvifImageFileModel> Acquire(
        emContext & context, const emString & name, bool common=true
    );

protected:
    PlAvifImageFileModel(emContext & context, const emString & name);
    virtual ~PlAvifImageFileModel();
    virtual void TryStartLoading();
    virtual bool TryContinueLoading();
    virtual void QuitLoading();
    virtual void TryStartSaving();
    virtual bool TryContinueSaving();
    virtual void QuitSaving();
    virtual emUInt64 CalcMemoryNeed();
    virtual double CalcFileProgress();

private:
    struct LoadingState;
    LoadingState * L = NULL;
};


struct PlAvifImageFileModel::LoadingState {
    avifRGBImage rgb;
    avifDecoder * decoder;
};


emRef<PlAvifImageFileModel> PlAvifImageFileModel::Acquire(
    emContext & context, const emString & name, bool common
)
{
    EM_IMPL_ACQUIRE(PlAvifImageFileModel, context, name, common)
}


PlAvifImageFileModel::PlAvifImageFileModel(
    emContext & context, const emString & name
)
    : emImageFileModel(context, name)
{
}


PlAvifImageFileModel::~PlAvifImageFileModel()
{
    PlAvifImageFileModel::QuitLoading();
    PlAvifImageFileModel::QuitSaving();
}


void PlAvifImageFileModel::TryStartLoading()
{
    avifResult result;

    L = new LoadingState;
    memset(L, 0, sizeof(LoadingState));

    L->decoder = avifDecoderCreate();
    if (L->decoder == NULL) {
        throw emException("failed to create AVIF decoder");
    }

    result = avifDecoderSetIOFile(L->decoder, GetFilePath());
    if (result != AVIF_RESULT_OK) {
        throw emException("%s", avifResultToString(result));
    }

    result = avifDecoderParse(L->decoder);
    if (result != AVIF_RESULT_OK) {
        throw emException("%s", avifResultToString(result));
    }

    FileFormatInfo = emString::Format(
        "AVIF %s %ubpc",
        avifPixelFormatToString(L->decoder->image->yuvFormat),
        L->decoder->image->depth
    );

    Signal(ChangeSignal);
}


bool PlAvifImageFileModel::TryContinueLoading()
{
    avifResult result;

    if (!Image.GetHeight()) {
        Image.Setup(
            L->decoder->image->width,
            L->decoder->image->height,
            L->decoder->alphaPresent ? 4 : 3
        );
    }

    result = avifDecoderNextImage(L->decoder);
    if (result != AVIF_RESULT_OK) {
        throw emException("%s", avifResultToString(result));
    }

    avifRGBImageSetDefaults(&L->rgb, L->decoder->image);
    L->rgb.format = L->decoder->alphaPresent ?
        AVIF_RGB_FORMAT_RGBA : AVIF_RGB_FORMAT_RGB;
    L->rgb.pixels = Image.GetWritableMap();
    L->rgb.width = Image.GetWidth();
    L->rgb.height = Image.GetHeight();
    L->rgb.depth = 8;
    L->rgb.rowBytes = Image.GetWidth() * Image.GetChannelCount();

    result = avifImageYUVToRGB(L->decoder->image, &L->rgb);
    if (result != AVIF_RESULT_OK) {
        throw emException("%s", avifResultToString(result));
    }

    Signal(ChangeSignal);
    return true;
}


void PlAvifImageFileModel::QuitLoading()
{
    if (L) {
        if (L->decoder) avifDecoderDestroy(L->decoder);
        delete L;
        L = NULL;
    }
}


void PlAvifImageFileModel::TryStartSaving()
{
    throw emException("PlAvifImageFileModel: Saving not implemented.");
}


bool PlAvifImageFileModel::TryContinueSaving()
{
    return false;
}


void PlAvifImageFileModel::QuitSaving()
{
}


emUInt64 PlAvifImageFileModel::CalcMemoryNeed()
{
    return
        (emUInt64)
        L->decoder->image->width *
        L->decoder->image->height *
        (L->decoder->alphaPresent ? 4 : 3);
}


double PlAvifImageFileModel::CalcFileProgress()
{
    return 0.0;
}

extern "C" {
    emPanel * PlAvifFpPluginFunc(
        emPanel::ParentArg parent, const emString & name,
        const emString & path, emFpPlugin * plugin,
        emString * errorBuf
    )
    {
        if (plugin->Properties.GetCount()) {
            *errorBuf="PlAvifFpPlugin: No properties allowed.";
            return NULL;
        }
        return new emImageFilePanel(
            parent, name,
            PlAvifImageFileModel::Acquire(
                parent.GetRootContext(), path
            )
        );
    }
}
@ -1,17 +0,0 @@
{ depot, pkgs, ... }:

let
  em = depot.tools.eaglemode;
  emSrc = with pkgs; srcOnly eaglemode;
in
em.buildPlugin {
  name = "example";
  version = "canon";

  src = pkgs.runCommand "em-plugin-example-src" { } ''
    set -ux
    cp -r ${emSrc}/doc/examples/CppApiExamples/PluginExample $out
  '';

  target = "PlEx";
}
@ -1,12 +0,0 @@
{ depot, pkgs, ... }:

let
  em = depot.tools.eaglemode;
  emSrc = pkgs.srcOnly pkgs.em;
in
em.buildPlugin {
  name = "qoi";
  version = "canon";
  src = ./.;
  target = "PlQoi";
}
@ -1,6 +0,0 @@
#%rec:emFpPlugin%#

FileTypes = { ".qoi" }
Priority = 1.0
Library = "PlQoi"
Function = "PlQoiFpPluginFunc"
@ -1,47 +0,0 @@
package PlQoi;

use strict;
use warnings;

sub GetDependencies
{
    return ('emCore');
}

sub IsEssential
{
    return 0;
}

sub GetFileHandlingrules
{
    return ();
}

sub GetExtraBuildOptions
{
    return ();
}

sub Build
{
    shift;
    my %options=@_;

    system(
        @{$options{'unicc_call'}},
        "--math",
        "--rtti",
        "--exceptions",
        "--bin-dir" , "bin",
        "--lib-dir" , "lib",
        "--obj-dir" , "obj",
        "--inc-search-dir", "include",
        "--link" , "emCore",
        "--type" , "dynlib",
        "--name" , "PlQoi",
        "src/PlQoi.cpp"
    )==0 or return 0;

    return 1;
}
@ -1,273 +0,0 @@
|
|||
#include <emCore/emFpPlugin.h>
|
||||
#include <emCore/emImageFile.h>
|
||||
|
||||
/*
|
||||
QOI Utilities
|
||||
|
||||
Copyright (c) 2021, Dominic Szablewski - https://phoboslab.org
|
||||
SPDX-License-Identifier: MIT
|
||||
*/
|
||||
|
||||
#define QOI_OP_INDEX 0x00 /* 00xxxxxx */
|
||||
#define QOI_OP_DIFF 0x40 /* 01xxxxxx */
|
||||
#define QOI_OP_LUMA 0x80 /* 10xxxxxx */
|
||||
#define QOI_OP_RUN 0xc0 /* 11xxxxxx */
|
||||
#define QOI_OP_RGB 0xfe /* 11111110 */
|
||||
#define QOI_OP_RGBA 0xff /* 11111111 */
|
||||
|
||||
#define QOI_MASK_2 0xc0 /* 11000000 */
|
||||
|
||||
#define QOI_COLOR_HASH(C) (C.GetRed()*3 + C.GetGreen()*5 + C.GetBlue()*7 + C.GetAlpha()*11)
|
||||
|
||||
#define QOI_MAGIC \
|
||||
(((unsigned int)'q') << 24 | ((unsigned int)'o') << 16 | \
|
||||
((unsigned int)'i') << 8 | ((unsigned int)'f'))
|
||||
|
||||
#define QOI_HEADER_SIZE 14
|
||||
|
||||
static unsigned int qoi_read_32(const unsigned char *bytes, int *p) {
|
||||
unsigned int a = bytes[(*p)++];
|
||||
unsigned int b = bytes[(*p)++];
|
||||
unsigned int c = bytes[(*p)++];
|
||||
unsigned int d = bytes[(*p)++];
|
||||
return a << 24 | b << 16 | c << 8 | d;
|
||||
}
|
||||
|
||||
|
||||
class PlQoiImageFileModel : public emImageFileModel
|
||||
{
|
||||
public:
|
||||
|
||||
static emRef<PlQoiImageFileModel> Acquire(
|
||||
emContext & context, const emString & name, bool common=true
|
||||
);
|
||||
|
||||
protected:
|
||||
PlQoiImageFileModel(emContext & context, const emString & name);
|
||||
virtual ~PlQoiImageFileModel();
|
||||
virtual void TryStartLoading();
|
||||
virtual bool TryContinueLoading();
|
||||
virtual void QuitLoading();
|
||||
virtual void TryStartSaving();
|
||||
virtual bool TryContinueSaving();
|
||||
virtual void QuitSaving();
|
||||
virtual emUInt64 CalcMemoryNeed();
|
||||
virtual double CalcFileProgress();
|
||||
|
||||
private:
|
||||
struct LoadingState;
|
||||
LoadingState * L = NULL;
|
||||
};
|
||||
|
||||
|
||||
struct PlQoiImageFileModel::LoadingState {
|
||||
FILE * file;
|
||||
unsigned int width, height, channels;
|
||||
size_t file_len;
|
||||
};
|
||||
|
||||
|
||||
emRef<PlQoiImageFileModel> PlQoiImageFileModel::Acquire(
|
||||
emContext & context, const emString & name, bool common
|
||||
)
|
||||
{
|
||||
EM_IMPL_ACQUIRE(PlQoiImageFileModel, context, name, common)
|
||||
}
|
||||
|
||||
|
||||
PlQoiImageFileModel::PlQoiImageFileModel(
|
||||
	emContext & context, const emString & name
)
	: emImageFileModel(context, name)
{
}


PlQoiImageFileModel::~PlQoiImageFileModel()
{
	PlQoiImageFileModel::QuitLoading();
	PlQoiImageFileModel::QuitSaving();
}


void PlQoiImageFileModel::TryStartLoading()
{
	unsigned char header[QOI_HEADER_SIZE];
	unsigned int header_magic, colorspace;
	int pos = 0;

	L = new LoadingState;
	memset(L, 0, sizeof(LoadingState));
	L->file = fopen(GetFilePath(), "rb");
	if (!L->file) throw emException("%s", emGetErrorText(errno).Get());

	if (fread(header, 1, sizeof(header), L->file) != sizeof(header)) {
		if (ferror(L->file)) {
			throw emException("%s", emGetErrorText(errno).Get());
		}
		else {
			throw emException("QOI header not found");
		}
	}

	header_magic = qoi_read_32(header, &pos);
	L->width = qoi_read_32(header, &pos);
	L->height = qoi_read_32(header, &pos);
	L->channels = header[pos++];
	colorspace = header[pos++];

	if (
		L->width == 0 || L->height == 0 ||
		L->channels < 3 || L->channels > 4 ||
		colorspace > 1 ||
		header_magic != QOI_MAGIC
	) {
		throw emException("QOI header not valid");
	}

	fseek(L->file, 0, SEEK_END);
	L->file_len = ftell(L->file);

	if (L->file_len <= QOI_HEADER_SIZE || fseek(L->file, 0, SEEK_SET) != 0) {
		throw emException("QOI data incomplete");
	}

	FileFormatInfo = "QOI ";
	FileFormatInfo += (
		colorspace ? "all channels linear" : "sRGB with linear alpha"
	);

	Signal(ChangeSignal);
}


bool PlQoiImageFileModel::TryContinueLoading()
{
	emArray<unsigned char> data;
	emColor index[64];
	emColor px { 0, 0, 0, 255 };
	int pos = QOI_HEADER_SIZE;
	int run = 0;

	if (!Image.GetHeight()) {
		Image.Setup(L->width, L->height, L->channels);
	}

	data.SetCount(L->file_len);
	if (fread(data.GetWritable(), 1, L->file_len, L->file) < L->file_len) {
		if (ferror(L->file)) {
			throw emException("%s", emGetErrorText(errno).Get());
		}
		else {
			throw emException("QOI data incomplete");
		}
	}

	memset(index, 0, sizeof(index));

	for (int px_y = 0; px_y < L->height; px_y++) {
		for (int px_x = 0; px_x < L->width; px_x++) {
			if (run > 0) {
				run--;
			} else if (pos < data.GetCount()) {
				int b1 = data.Get(pos++);

				if (b1 == QOI_OP_RGB) {
					px.SetRed(  data.Get(pos++));
					px.SetGreen(data.Get(pos++));
					px.SetBlue( data.Get(pos++));
				} else if (b1 == QOI_OP_RGBA) {
					px.SetRed(  data.Get(pos++));
					px.SetGreen(data.Get(pos++));
					px.SetBlue( data.Get(pos++));
					px.SetAlpha(data.Get(pos++));
				} else if ((b1 & QOI_MASK_2) == QOI_OP_INDEX) {
					px = index[b1];
				} else if ((b1 & QOI_MASK_2) == QOI_OP_DIFF) {
					px.SetRed(  px.GetRed()   + ((b1 >> 4) & 0x03) - 2);
					px.SetGreen(px.GetGreen() + ((b1 >> 2) & 0x03) - 2);
					px.SetBlue( px.GetBlue()  + ( b1       & 0x03) - 2);
				} else if ((b1 & QOI_MASK_2) == QOI_OP_LUMA) {
					int b2 = data.Get(pos++);
					int vg = (b1 & 0x3f) - 32;
					px.SetRed(  px.GetRed()   + vg - 8 + ((b2 >> 4) & 0x0f));
					px.SetGreen(px.GetGreen() + vg);
					px.SetBlue( px.GetBlue()  + vg - 8 + ( b2       & 0x0f));
				} else if ((b1 & QOI_MASK_2) == QOI_OP_RUN) {
					run = (b1 & 0x3f);
				}
				index[QOI_COLOR_HASH(px) % 64] = px;
			}
			Image.SetPixel(px_x, px_y, px);
		}
	}

	Signal(ChangeSignal);
	return true;
}


void PlQoiImageFileModel::QuitLoading()
{
	if (L) {
		if (L->file) fclose(L->file);
		delete L;
		L = NULL;
	}
}


void PlQoiImageFileModel::TryStartSaving()
{
	throw emException("PlQoiImageFileModel: Saving not implemented.");
}


bool PlQoiImageFileModel::TryContinueSaving()
{
	return false;
}


void PlQoiImageFileModel::QuitSaving()
{
}


emUInt64 PlQoiImageFileModel::CalcMemoryNeed()
{
	return (emUInt64)L->width * L->height * L->channels + L->file_len;
}


double PlQoiImageFileModel::CalcFileProgress()
{
	return 0.0;
}


extern "C" {
	emPanel * PlQoiFpPluginFunc(
		emPanel::ParentArg parent, const emString & name,
		const emString & path, emFpPlugin * plugin,
		emString * errorBuf
	)
	{
		if (plugin->Properties.GetCount()) {
			*errorBuf = "PlQoiFpPlugin: No properties allowed.";
			return NULL;
		}
		return new emImageFilePanel(
			parent, name,
			PlQoiImageFileModel::Acquire(
				parent.GetRootContext(), path
			)
		);
	}
}
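For reference, the `QOI_COLOR_HASH` used for the color-index update in the decode loop above is fixed by the QOI specification as `(r*3 + g*5 + b*7 + a*11) % 64`. A minimal standalone sketch of that hash (in Rust for neutrality; illustrative only, not part of this plugin):

```
/// Index position of a pixel in the 64-entry QOI color table,
/// per the QOI specification: (r*3 + g*5 + b*7 + a*11) % 64.
fn qoi_color_hash(r: u8, g: u8, b: u8, a: u8) -> usize {
    (r as usize * 3 + g as usize * 5 + b as usize * 7 + a as usize * 11) % 64
}

fn main() {
    // The decoder's initial pixel, opaque black, hashes to slot 53:
    // 11 * 255 = 2805, and 2805 % 64 = 53.
    assert_eq!(qoi_color_hash(0, 0, 0, 255), 53);
}
```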
@@ -1,18 +0,0 @@
{ depot, pkgs, ... }:

let
  em = depot.tools.eaglemode;
  emSrc = with pkgs; srcOnly eaglemode;
in
(em.buildPlugin {
  name = "yatracker";
  version = "canon";
  src = ./.;
  target = "PlYaTracker";
}).overrideAttrs (_: {
  postInstall = ''
    mkdir -p $out/icons
    ${pkgs.imagemagick}/bin/convert $src/logo.webp $out/icons/yandex-tracker.tga
  '';
})
@@ -1,6 +0,0 @@
#%rec:emFpPlugin%#

FileTypes = { ".YaTracker" }
Priority = 1.0
Library = "PlYaTracker"
Function = "PlYaTrackerPluginFunc"
Binary file not shown (before: 14 KiB image).
@@ -1,47 +0,0 @@
package PlYaTracker;

use strict;
use warnings;

sub GetDependencies
{
    return ('emCore');
}

sub IsEssential
{
    return 0;
}

sub GetFileHandlingRules
{
    return ();
}

sub GetExtraBuildOptions
{
    return ();
}

sub Build
{
    shift;
    my %options=@_;

    system(
        @{$options{'unicc_call'}},
        "--math",
        "--rtti",
        "--exceptions",
        "--bin-dir" , "bin",
        "--lib-dir" , "lib",
        "--obj-dir" , "obj",
        "--inc-search-dir", "include",
        "--link" , "emCore",
        "--type" , "dynlib",
        "--name" , "PlYaTracker",
        "src/PlYaTracker/PlYaTracker.cpp"
    )==0 or return 0;

    return 1;
}
@@ -1,58 +0,0 @@
#include <emCore/emFilePanel.h>
#include <emCore/emFpPlugin.h>
#include <emCore/emRecFileModel.h>
#include <emCore/emToolkit.h>

class PlYaTrackerConfig final : public emRecFileModel, public emStructRec {
 public:
  static emRef<PlYaTrackerConfig> Acquire(emContext& context,
                                          const emString& name,
                                          bool common = true);

  virtual const char* GetFormatName() const;

  emStringRec URL;
  emStringRec Token;

 protected:
  PlYaTrackerConfig(emContext& context, const emString& name);
};

emRef<PlYaTrackerConfig> PlYaTrackerConfig::Acquire(emContext& context,
                                                    const emString& name,
                                                    bool common) {
  EM_IMPL_ACQUIRE(PlYaTrackerConfig, context, name, common)
}

const char* PlYaTrackerConfig::GetFormatName() const { return "PlYaTracker"; }

PlYaTrackerConfig::PlYaTrackerConfig(emContext& context, const emString& name)
    : emRecFileModel(context, name),
      emStructRec(),
      URL(this, "URL"),
      Token(this, "Token") {
  PostConstruct(*this);
}

class PlYaTrackerFilePanel : public emFilePanel {
 public:
  PlYaTrackerFilePanel(ParentArg parent, const emString& name,
                       emRef<PlYaTrackerConfig> config);

 private:
  emRef<PlYaTrackerConfig> Config;
};

PlYaTrackerFilePanel::PlYaTrackerFilePanel(ParentArg parent,
                                           const emString& name,
                                           emRef<PlYaTrackerConfig> config)
    : emFilePanel(parent, name, config), Config(config) {}

extern "C" {
emPanel* PlYaTrackerPluginFunc(emPanel::ParentArg parent, const emString& name,
                               const emString& path, emFpPlugin* plugin,
                               emString* errorBuf) {
  return new PlYaTrackerFilePanel(
      parent, name, PlYaTrackerConfig::Acquire(parent.GetRootContext(), path));
}
}
@@ -1,156 +0,0 @@
// Eagle Mode configuration wrapper that recreates the required directory
// structure for Eagle Mode based on the output of depot.tools.eaglemode.etcDir
//
// This will replace *all* symlinks in the Eagle Mode configuration directory,
// but it will not touch actual files. Missing folders will be created.
package main

import (
    "flag"
    "fmt"
    "io/fs"
    "log"
    "os"
    "os/user"
    "path"
    "path/filepath"
    "strings"
)

func configDir() (string, error) {
    v := os.Getenv("EM_USER_CONFIG_DIR")
    if v != "" {
        return v, nil
    }

    usr, err := user.Current()
    if err != nil {
        return "", fmt.Errorf("failed to get current user: %w", err)
    }

    return path.Join(usr.HomeDir, ".eaglemode"), nil
}

// cleanupConfig removes *all* existing symlinks in the configuration which do
// not point into the right Nix store path.
func cleanupConfig(conf string, dir string) (map[string]bool, error) {
    // In case of first launch, we might have to create the directory.
    _ = os.MkdirAll(dir, 0755)
    c := 0

    currentFiles := map[string]bool{}

    walker := func(p string, d fs.DirEntry, e error) error {
        if e != nil {
            return fmt.Errorf("could not walk %s in config directory: %w", p, e)
        }

        if d.Type()&fs.ModeSymlink != 0 {
            target, err := os.Readlink(p)
            if err != nil {
                return fmt.Errorf("could not read link for %s: %w", p, err)
            }

            if !strings.HasPrefix(target, conf) {
                err = os.Remove(p)
                c++
                if err != nil {
                    return fmt.Errorf("could not remove stale link %q: %w", p, err)
                }
                log.Printf("removed stale symlink %q", p)
            } else {
                currentFiles[p] = false
            }
        }

        if d.Type().IsRegular() {
            currentFiles[p] = true
        }

        return nil
    }

    err := filepath.WalkDir(dir, walker)
    if err != nil {
        return nil, err
    }

    if c > 0 {
        log.Printf("removed %v stale symlinks", c)
    }

    return currentFiles, nil
}

// linkConfig traverses the given Eagle Mode configuration and links everything
// to the expected location in the user's configuration directory.
//
// If the user placed actual files in the configuration directory at paths that
// would be overwritten, they will not be touched.
func linkConfig(conf string, dir string, existing map[string]bool) error {
    walker := func(p string, d fs.DirEntry, e error) error {
        if e != nil {
            return fmt.Errorf("could not walk %s in config directory: %w", p, e)
        }

        target := path.Join(dir, strings.TrimPrefix(p, conf))

        if d.Type().IsDir() {
            err := os.MkdirAll(target, 0755)
            if err != nil {
                return fmt.Errorf("could not create directory %q: %w", target, err)
            }

            return nil
        }

        if shadow, exists := existing[target]; exists {
            if shadow {
                log.Printf("WARN: file %q already exists and shadows a file from configuration", target)
            }

            return nil
        }

        err := os.Symlink(p, target)
        if err != nil {
            return fmt.Errorf("failed to link %q: %w", target, err)
        }

        return nil
    }

    return filepath.WalkDir(conf, walker)
}

func main() {
    emConfig := flag.String("em-config", "", "path to em-config dir")

    flag.Parse()
    log.Println("verifying current Eagle Mode configuration")

    if *emConfig == "" {
        log.Fatalf("Eagle Mode configuration must be given")
    }

    if !strings.HasPrefix(*emConfig, "/nix/store/") {
        log.Fatalf("Eagle Mode configuration must be in Nix store")
    }

    dir, err := configDir()
    if err != nil {
        log.Fatalf("could not determine Eagle Mode config dir: %v", err)
    }

    currentFiles, err := cleanupConfig(*emConfig, dir)
    if err != nil {
        log.Fatalf("failed to remove stale symlinks: %v", err)
    }

    err = linkConfig(*emConfig, dir, currentFiles)
    if err != nil {
        log.Fatalf("failed to link new configuration: %v", err)
    }

    log.Println("Eagle Mode configuration updated")
}
1411 tvix/Cargo.lock (generated)
File diff suppressed because it is too large

3848 tvix/Cargo.nix
File diff suppressed because it is too large

124 tvix/Cargo.toml
@@ -27,8 +27,6 @@ members = [
    "glue",
    "nar-bridge",
    "nix-compat",
    "nix-compat-derive",
    "nix-compat-derive-tests",
    "serde",
    "store",
    "tracing",

@@ -39,128 +37,6 @@ members = [
# https://github.com/rust-lang/rust-clippy/issues/12281
blocks_in_conditions = "allow"

[workspace.dependencies]
anyhow = "1.0.86"
async-compression = "0.4.12"
async-process = "2.2.4"
async-stream = "0.3.5"
async-tempfile = "0.4.0"
axum = "0.7.5"
# https://github.com/liufuyang/bigtable_rs/pull/86
bigtable_rs = { git = "https://github.com/liufuyang/bigtable_rs", rev = "1818355a5373a5bc2c84287e3a4e3807154ac8ef" }
bitflags = "2.6.0"
blake3 = "1.5.4"
bstr = "1.10.0"
bytes = "1.7.1"
clap = "4.5.16"
codemap = "0.1.3"
codemap-diagnostic = "0.1.2"
count-write = "0.1.0"
criterion = "0.5"
data-encoding = "2.6.0"
digest = "0.10.7"
dirs = "4.0.0"
ed25519 = "2.2.3"
ed25519-dalek = "2.1.1"
enum-primitive-derive = "0.3.0"
erased-serde = "0.4.5"
expect-test = "1.5.0"
fastcdc = "3.1.0"
fuse-backend-rs = "0.11.0"
futures = "0.3.30"
genawaiter = { version = "0.99.1", default-features = false }
glob = "0.3.1"
hex-literal = "0.4.1"
http = "1.1.0"
hyper-util = "0.1.7"
indicatif = "0.17.8"
itertools = "0.12.1"
lazy_static = "1.5.0"
lexical-core = "0.8.5"
libc = "0.2.158"
lru = "0.12.4"
magic = "0.16.2"
md-5 = "0.10.6"
mimalloc = "0.1.43"
nix = "0.27.1"
nohash-hasher = "0.2.0"
nom = "7.1.3"
num-traits = "0.2.19"
object_store = "0.10.2"
opentelemetry = "0.24.0"
opentelemetry-http = "0.13.0"
opentelemetry-otlp = "0.17.0"
opentelemetry_sdk = "0.24.1"
os_str_bytes = "6.6"
parking_lot = "0.12.3"
path-clean = "0.1"
petgraph = "0.6.5"
pin-project = "1.1"
pin-project-lite = "0.2.14"
pretty_assertions = "1.4.0"
proc-macro2 = "1.0.86"
proptest = { version = "1.5.0", default-features = false }
prost = "0.13.1"
prost-build = "0.13.1"
quote = "1.0.37"
redb = "2.1.2"
regex = "1.10.6"
reqwest = { version = "0.12.7", default-features = false }
reqwest-middleware = "0.3.3"
reqwest-tracing = { version = "0.5.3", default-features = false }
rnix = "0.11.0"
rowan = "*"
rstest = "0.19.0"
rstest_reuse = "0.6.0"
rustc-hash = "2.0.0"
rustyline = "10.1.1"
serde = "1.0.209"
serde_json = "1.0"
serde_qs = "0.12.0"
serde_tagged = "0.3.0"
serde_with = "3.9.0"
sha1 = "0.10.6"
sha2 = "0.10.8"
sled = "0.34.7"
smol_str = "0.2.2"
tabwriter = "1.4"
tempfile = "3.12.0"
test-strategy = "0.2.1"
thiserror = "1.0.63"
threadpool = "1.8.1"
tokio = "1.39.3"
tokio-listener = "0.4.3"
tokio-retry = "0.3.0"
tokio-stream = "0.1.15"
tokio-tar = "0.3.1"
tokio-test = "0.4.4"
tokio-util = "0.7.11"
tonic = "0.12.2"
tonic-build = "0.12.2"
tonic-health = { version = "0.12.2", default-features = false }
tonic-reflection = "0.12.2"
tower = "0.4.13"
tower-http = "0.5.2"
tracing = "0.1.40"
tracing-indicatif = "0.3.6"
tracing-opentelemetry = "0.25.0"
tracing-subscriber = "0.3.18"
tracing-tracy = "0.11.2"
trybuild = "1.0.99"
url = "2.5.2"
vhost = "0.6"
vhost-user-backend = "0.8"
virtio-bindings = "0.2.2"
virtio-queue = "0.7"
vm-memory = "0.10"
vmm-sys-util = "0.11"
vu128 = "1.1.0"
walkdir = "2.5.0"
# https://github.com/jneem/wu-manber/pull/1
wu-manber = { git = "https://github.com/tvlfyi/wu-manber.git" }
xattr = "1.3.1"
zstd = "0.13.2"

# Add a profile to all targets that enables release optimisations, but
# retains debug symbols. This is great for use with
# benchmarking/profiling tools.
@@ -61,7 +61,7 @@ This folder contains the following components:
* `//tvix/castore` - subtree storage/transfer in a content-addressed fashion
* `//tvix/cli` - preliminary REPL & CLI implementation for Tvix
* `//tvix/eval` - an implementation of the Nix programming language
* `//tvix/nar-bridge` - a HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
* `//tvix/nar-bridge-go` - a HTTP webserver providing a Nix HTTP Binary Cache interface in front of a tvix-store
* `//tvix/nix-compat` - a Rust library for compatibility with C++ Nix, features like encodings and hashing schemes and formats
* `//tvix/serde` - a Rust library for using the Nix language for app configuration
* `//tvix/store` - a "filesystem" linking Nix store paths and metadata with the content-addressed layer
@@ -31,37 +31,23 @@ the `tvix` directory:
export PATH=$PATH:$PWD/target/release-with-debug
```

Now, spin up tvix-daemon, connecting to some (local) backends:
Secondly, configure tvix to use the local backend:

```
tvix-store --otlp=false daemon \
  --blob-service-addr=objectstore+file://$PWD/blobs \
  --directory-service-addr=sled://$PWD/directories.sled \
  --path-info-service-addr=sled://$PWD/pathinfo.sled &
```

Copy some data into tvix-store (we use `nar-bridge` for this for now):

```
mg run //tvix:nar-bridge -- --otlp=false &
rm -Rf ~/.cache/nix; nix copy --to http://localhost:9000\?compression\=none $(mg build //third_party/nixpkgs:hello)
pkill nar-bridge
```

By default, the `tvix-store virtiofs` command used in the `runVM` script
connects to a running `tvix-store daemon` via gRPC - in which case you want to
keep `tvix-store daemon` running.

In case you want to have `tvix-store virtiofs` open the stores directly, kill
`tvix-store daemon` too, and export the addresses from above:

```
pkill tvix-store
export BLOB_SERVICE_ADDR=objectstore+file://$PWD/blobs
export BLOB_SERVICE_ADDR=sled://$PWD/blobs.sled
export DIRECTORY_SERVICE_ADDR=sled://$PWD/directories.sled
export PATH_INFO_SERVICE_ADDR=sled://$PWD/pathinfo.sled
```

Potentially copy some data into tvix-store (via nar-bridge):

```
mg run //tvix:store -- daemon &
$(mg build //tvix:nar-bridge-go)/bin/nar-bridge-http &
rm -Rf ~/.cache/nix; nix copy --to http://localhost:9000\?compression\=none $(mg build //third_party/nixpkgs:hello)
pkill nar-bridge-http; pkill tvix-store
```

#### Interactive shell
Run the VM like this:
@@ -114,12 +100,9 @@ Hello, world!
[    0.299422] reboot: Power down
```

#### Boot a NixOS system closure
It's also possible to boot a system closure. To do this, tvix-init honors the
init= cmdline option, and will `switch_root` to it.

Make sure to first copy that system closure into tvix-store,
using a similar `nix copy` command as above.
#### Execute a NixOS system closure
It's also possible to invoke a system closure. To do this, tvix-init honors the
init= cmdline option, and will switch_root to it.


```
@@ -1,16 +1,13 @@
{ lib, pkgs, ... }:
{ depot, pkgs, ... }:

rec {
  # A binary that sets up /nix/store from virtiofs, lists all store paths, and
  # powers off the machine.
  tvix-init = pkgs.buildGoModule rec {
  tvix-init = depot.nix.buildGo.program {
    name = "tvix-init";
    src = lib.fileset.toSource {
      root = ./.;
      fileset = ./tvix-init.go;
    };
    vendorHash = null;
    postPatch = "go mod init ${name}";
    srcs = [
      ./tvix-init.go
    ];
  };

  # A kernel with virtiofs support baked in
@@ -65,8 +65,8 @@ let
          --otlp=false \
          daemon -l $PWD/tvix-store.sock &

        # Wait for the service to report healthy.
        timeout 22 sh -c "until ${pkgs.ip2unix}/bin/ip2unix -r out,path=$PWD/tvix-store.sock ${pkgs.grpc-health-check}/bin/grpc-health-check --address 127.0.0.1 --port 8080; do sleep 1; done"
        # Wait for the socket to be created.
        while [ ! -e $PWD/tvix-store.sock ]; do sleep 1; done

        # Export env vars so that subsequent tvix-store commands will talk to
        # our tvix-store daemon over the unix socket.

@@ -89,8 +89,8 @@ let
          --otlp=false \
          -l $PWD/nar-bridge.sock &

        # Wait for nar-bridge to report healthy.
        timeout 22 sh -c "until ${pkgs.curl}/bin/curl -s --unix-socket $PWD/nar-bridge.sock http:///nix-binary-cache; do sleep 1; done"
        # Wait for the socket to be created.
        while [ ! -e $PWD/nar-bridge.sock ]; do sleep 1; done

        # Upload. We can't use nix copy --to http://…, as it wants access to the nix db.
        # However, we can use mkBinaryCache to assemble .narinfo and .nar.xz to upload,
@@ -119,20 +119,14 @@ let
      grep "${assertVMOutput}" output.txt
    '';
    requiredSystemFeatures = [ "kvm" ];
    # HACK: The boot tests are sometimes flaky, and we don't want them to
    # periodically fail other builds. Have Buildkite auto-retry them 2 times
    # on failure.
    # Logs for individual failures are still available, so it won't hinder
    # flakiness debuggability.
    meta.ci.buildkiteExtraStepArgs = {
      retry.automatic = true;
    };
  } // lib.optionalAttrs (isClosure && !useNarBridge) {
    __structuredAttrs = true;
    exportReferencesGraph.closure = [ path ];
  });

  testSystem = (pkgs.nixos {
  systemFor = sys: (depot.ops.nixos.nixosFor sys).system;

  testSystem = systemFor ({ modulesPath, pkgs, ... }: {
    # Set some options necessary to evaluate.
    boot.loader.systemd-boot.enable = true;
    # TODO: figure out how to disable this without causing eval to fail

@@ -152,10 +146,7 @@ let

    # Don't warn about stateVersion.
    system.stateVersion = "24.05";

    # Speed-up evaluation and building.
    documentation.enable = lib.mkForce false;
  }).config.system.build.toplevel;
  });

in
depot.nix.readTree.drvTargets

@@ -166,8 +157,8 @@ depot.nix.readTree.drvTargets
  });
  docs-persistent = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    directoryServiceAddr = "redb:///build/directories.redb";
    pathInfoServiceAddr = "redb:///build/pathinfo.redb";
    directoryServiceAddr = "sled:///build/directories.sled";
    pathInfoServiceAddr = "sled:///build/pathinfo.sled";
    path = ../../docs;
    importPathName = "docs";
  });

@@ -180,8 +171,6 @@ depot.nix.readTree.drvTargets

  closure-nixos = (mkBootTest {
    blobServiceAddr = "objectstore+file:///build/blobs";
    pathInfoServiceAddr = "redb:///build/pathinfo.redb";
    directoryServiceAddr = "redb:///build/directories.redb";
    path = testSystem;
    isClosure = true;
    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
@@ -3,7 +3,7 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc-gen-go v1.34.1
// protoc (unknown)
// source: tvix/build/protos/build.proto

@@ -66,8 +66,8 @@ type BuildRequest struct {

    // The list of all root nodes that should be visible in `inputs_dir` at the
    // time of the build.
    // As all references are content-addressed, no additional signatures are
    // needed to substitute / make these available in the build environment.
    // As root nodes are content-addressed, no additional signatures are needed
    // to substitute / make these available in the build environment.
    // Inputs MUST be sorted by their names.
    Inputs []*castore_go.Node `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"`
    // The command (and its args) executed as the build script.

@@ -560,7 +560,7 @@ func file_tvix_build_protos_build_proto_rawDescGZIP() []byte {
}

var file_tvix_build_protos_build_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_tvix_build_protos_build_proto_goTypes = []any{
var file_tvix_build_protos_build_proto_goTypes = []interface{}{
    (*BuildRequest)(nil),        // 0: tvix.build.v1.BuildRequest
    (*Build)(nil),               // 1: tvix.build.v1.Build
    (*BuildRequest_EnvVar)(nil), // 2: tvix.build.v1.BuildRequest.EnvVar

@@ -588,7 +588,7 @@ func file_tvix_build_protos_build_proto_init() {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_tvix_build_protos_build_proto_msgTypes[0].Exporter = func(v any, i int) any {
        file_tvix_build_protos_build_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BuildRequest); i {
            case 0:
                return &v.state

@@ -600,7 +600,7 @@ func file_tvix_build_protos_build_proto_init() {
                return nil
            }
        }
        file_tvix_build_protos_build_proto_msgTypes[1].Exporter = func(v any, i int) any {
        file_tvix_build_protos_build_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*Build); i {
            case 0:
                return &v.state

@@ -612,7 +612,7 @@ func file_tvix_build_protos_build_proto_init() {
                return nil
            }
        }
        file_tvix_build_protos_build_proto_msgTypes[2].Exporter = func(v any, i int) any {
        file_tvix_build_protos_build_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BuildRequest_EnvVar); i {
            case 0:
                return &v.state

@@ -624,7 +624,7 @@ func file_tvix_build_protos_build_proto_init() {
                return nil
            }
        }
        file_tvix_build_protos_build_proto_msgTypes[3].Exporter = func(v any, i int) any {
        file_tvix_build_protos_build_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BuildRequest_BuildConstraints); i {
            case 0:
                return &v.state

@@ -636,7 +636,7 @@ func file_tvix_build_protos_build_proto_init() {
                return nil
            }
        }
        file_tvix_build_protos_build_proto_msgTypes[4].Exporter = func(v any, i int) any {
        file_tvix_build_protos_build_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BuildRequest_AdditionalFile); i {
            case 0:
                return &v.state
@@ -3,7 +3,7 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc-gen-go v1.34.1
// protoc (unknown)
// source: tvix/build/protos/rpc_build.proto

@@ -40,7 +40,7 @@ var file_tvix_build_protos_rpc_build_proto_rawDesc = []byte{
    0x69, 0x6c, 0x64, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var file_tvix_build_protos_rpc_build_proto_goTypes = []any{
var file_tvix_build_protos_rpc_build_proto_goTypes = []interface{}{
    (*BuildRequest)(nil), // 0: tvix.build.v1.BuildRequest
    (*Build)(nil),        // 1: tvix.build.v1.Build
}
@@ -4,31 +4,33 @@ version = "0.1.0"
edition = "2021"

[dependencies]
bytes = { workspace = true }
clap = { workspace = true, features = ["derive", "env"] }
itertools = { workspace = true }
prost = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true }
tokio-listener = { workspace = true, features = ["tonic012"] }
tonic = { workspace = true, features = ["tls", "tls-roots"] }
bytes = "1.4.0"
clap = { version = "4.0", features = ["derive", "env"] }
itertools = "0.12.0"
prost = "0.13.1"
thiserror = "1.0.56"
tokio = { version = "1.32.0" }
tokio-listener = { version = "0.4.1", features = [ "tonic012" ] }
tonic = { version = "0.12.0", features = ["tls", "tls-roots"] }
tvix-castore = { path = "../castore" }
tvix-tracing = { path = "../tracing" }
tracing = { workspace = true }
url = { workspace = true }
mimalloc = { workspace = true }
tonic-reflection = { workspace = true, optional = true }
tracing = "0.1.40"
url = "2.4.0"

[dependencies.tonic-reflection]
optional = true
version = "0.12.0"

[build-dependencies]
prost-build = { workspace = true }
tonic-build = { workspace = true }
prost-build = "0.13.1"
tonic-build = "0.12.0"

[features]
default = []
tonic-reflection = ["dep:tonic-reflection", "tvix-castore/tonic-reflection"]

[dev-dependencies]
rstest = { workspace = true }
rstest = "0.19.0"

[lints]
workspace = true
@@ -20,15 +20,15 @@ fn main() -> Result<()> {
        .extern_path(".tvix.castore.v1", "::tvix_castore::proto")
        .compile(
            &[
                "build/protos/build.proto",
                "build/protos/rpc_build.proto",
                "tvix/build/protos/build.proto",
                "tvix/build/protos/rpc_build.proto",
            ],
            // If we are in running `cargo build` manually, using `../..` works fine,
            // but in case we run inside a nix build, we need to instead point PROTO_ROOT
            // to a custom tree containing that structure.
            // to a sparseTree containing that structure.
            &[match std::env::var_os("PROTO_ROOT") {
                Some(proto_root) => proto_root.to_str().unwrap().to_owned(),
                None => "..".to_string(),
                None => "../..".to_string(),
            }],
        )
}
@@ -4,8 +4,8 @@
  runTests = true;
}).overrideAttrs (old: rec {
  meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (lib.attrNames passthru);
  passthru = old.passthru // (depot.tvix.utils.mkFeaturePowerset {
  passthru = depot.tvix.utils.mkFeaturePowerset {
    inherit (old) crateName;
    features = [ "tonic-reflection" ];
  });
  };
})
@@ -5,7 +5,7 @@ syntax = "proto3";

package tvix.build.v1;

import "castore/protos/castore.proto";
import "tvix/castore/protos/castore.proto";

option go_package = "code.tvl.fyi/tvix/build-go;buildv1";


@@ -47,8 +47,8 @@ option go_package = "code.tvl.fyi/tvix/build-go;buildv1";
message BuildRequest {
  // The list of all root nodes that should be visible in `inputs_dir` at the
  // time of the build.
  // As all references are content-addressed, no additional signatures are
  // needed to substitute / make these available in the build environment.
  // As root nodes are content-addressed, no additional signatures are needed
  // to substitute / make these available in the build environment.
  // Inputs MUST be sorted by their names.
  repeated tvix.castore.v1.Node inputs = 1;
@@ -1,12 +1,17 @@
{ depot, pkgs, lib, ... }:
{ depot, pkgs, ... }:
let
  protos = lib.sourceByRegex depot.path.origSrc [
    "buf.yaml"
    "buf.gen.yaml"
    # We need to include castore.proto (only), as it's referred.
    "^tvix(/castore(/protos(/castore\.proto)?)?)?$"
    "^tvix(/build(/protos(/.*\.proto)?)?)?$"
  ];
  protos = depot.nix.sparseTree {
    name = "build-protos";
    root = depot.path.origSrc;
    paths = [
      # We need to include castore.proto (only), as it's referred.
      ../../castore/protos/castore.proto
      ./build.proto
      ./rpc_build.proto
      ../../../buf.yaml
      ../../../buf.gen.yaml
    ];
  };
in
depot.nix.readTree.drvTargets {
  inherit protos;
@@ -4,7 +4,7 @@ syntax = "proto3";

package tvix.build.v1;

import "build/protos/build.proto";
import "tvix/build/protos/build.proto";

option go_package = "code.tvl.fyi/tvix/build-go;buildv1";

@@ -17,11 +17,6 @@ use tvix_build::proto::FILE_DESCRIPTOR_SET;
#[cfg(feature = "tonic-reflection")]
use tvix_castore::proto::FILE_DESCRIPTOR_SET as CASTORE_FILE_DESCRIPTOR_SET;

use mimalloc::MiMalloc;

#[global_allocator]
static GLOBAL: MiMalloc = MiMalloc;

#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {

@@ -90,18 +85,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {

    #[cfg(feature = "tonic-reflection")]
    {
        router = router.add_service(
            tonic_reflection::server::Builder::configure()
                .register_encoded_file_descriptor_set(CASTORE_FILE_DESCRIPTOR_SET)
                .register_encoded_file_descriptor_set(FILE_DESCRIPTOR_SET)
                .build_v1alpha()?,
        );
        router = router.add_service(
            tonic_reflection::server::Builder::configure()
                .register_encoded_file_descriptor_set(CASTORE_FILE_DESCRIPTOR_SET)
                .register_encoded_file_descriptor_set(FILE_DESCRIPTOR_SET)
                .build_v1()?,
        );
        let reflection_svc = tonic_reflection::server::Builder::configure()
            .register_encoded_file_descriptor_set(CASTORE_FILE_DESCRIPTOR_SET)
            .register_encoded_file_descriptor_set(FILE_DESCRIPTOR_SET)
            .build()?;
        router = router.add_service(reflection_svc);
    }

    info!(listen_address=%listen_address, "listening");
@@ -1,7 +1,7 @@
use std::path::{Path, PathBuf};

use itertools::Itertools;
use tvix_castore::DirectoryError;
use tvix_castore::proto::{NamedNode, ValidateNodeError};

mod grpc_buildservice_wrapper;

@@ -19,7 +19,7 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = tonic::include_file_descriptor_set!("tvix
#[derive(Debug, thiserror::Error)]
pub enum ValidateBuildRequestError {
    #[error("invalid input node at position {0}: {1}")]
    InvalidInputNode(usize, DirectoryError),
    InvalidInputNode(usize, ValidateNodeError),

    #[error("input nodes are not sorted by name")]
    InputNodesNotSorted,

@@ -123,21 +123,20 @@ impl BuildRequest {
    /// and all restrictions around paths themselves (relative, clean, …) need
    // to be fulfilled.
    pub fn validate(&self) -> Result<(), ValidateBuildRequestError> {
        // validate names. Make sure they're sorted

        let mut last_name: bytes::Bytes = "".into();
        for (i, node) in self.inputs.iter().enumerate() {
            // TODO(flokli): store result somewhere
            let (name, _node) = node
                .clone()
                .into_name_and_node()
        // validate all input nodes
        for (i, n) in self.inputs.iter().enumerate() {
            // ensure the input node itself is valid
            n.validate()
                .map_err(|e| ValidateBuildRequestError::InvalidInputNode(i, e))?;
        }

            if name.as_ref() <= last_name.as_ref() {
                return Err(ValidateBuildRequestError::InputNodesNotSorted);
            } else {
                last_name = name.into()
            }
        // now we can look at the names, and make sure they're sorted.
        if !is_sorted(
            self.inputs
                .iter()
                .map(|e| e.node.as_ref().unwrap().get_name()),
        ) {
            Err(ValidateBuildRequestError::InputNodesNotSorted)?
        }

        // validate working_dir
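Both sides of this hunk enforce the same invariant from the protocol comment, "Inputs MUST be sorted by their names". A standalone sketch of just that check (illustrative, not the tvix code; like the loop above, it also rejects duplicates and an empty leading name):

```
/// Returns Ok(()) iff `names` is strictly ascending, mirroring the
/// "inputs MUST be sorted by their names" invariant checked above.
fn check_strictly_sorted<'a>(
    names: impl IntoIterator<Item = &'a [u8]>,
) -> Result<(), &'static str> {
    // Seeded with the empty name, like `last_name` in the validate() above.
    let mut last: &[u8] = b"";
    for name in names {
        if name <= last {
            return Err("input nodes are not sorted by name");
        }
        last = name;
    }
    Ok(())
}

fn main() {
    assert!(check_strictly_sorted([b"bar".as_slice(), b"foo"]).is_ok());
    assert!(check_strictly_sorted([b"foo".as_slice(), b"bar"]).is_err());
    // An empty first name fails too, matching the empty-string seed.
    assert!(check_strictly_sorted([b"".as_slice()]).is_err());
}
```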
@@ -3,7 +3,7 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc-gen-go v1.34.1
// protoc (unknown)
// source: tvix/castore/protos/castore.proto

@@ -466,7 +466,7 @@ func file_tvix_castore_protos_castore_proto_rawDescGZIP() []byte {
}

var file_tvix_castore_protos_castore_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
var file_tvix_castore_protos_castore_proto_goTypes = []any{
var file_tvix_castore_protos_castore_proto_goTypes = []interface{}{
    (*Directory)(nil),     // 0: tvix.castore.v1.Directory
    (*DirectoryNode)(nil), // 1: tvix.castore.v1.DirectoryNode
    (*FileNode)(nil),      // 2: tvix.castore.v1.FileNode

@@ -493,7 +493,7 @@ func file_tvix_castore_protos_castore_proto_init() {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_tvix_castore_protos_castore_proto_msgTypes[0].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_castore_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*Directory); i {
            case 0:
                return &v.state

@@ -505,7 +505,7 @@ func file_tvix_castore_protos_castore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_castore_proto_msgTypes[1].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_castore_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*DirectoryNode); i {
            case 0:
                return &v.state

@@ -517,7 +517,7 @@ func file_tvix_castore_protos_castore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_castore_proto_msgTypes[2].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_castore_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*FileNode); i {
            case 0:
                return &v.state

@@ -529,7 +529,7 @@ func file_tvix_castore_protos_castore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_castore_proto_msgTypes[3].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_castore_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*SymlinkNode); i {
            case 0:
                return &v.state

@@ -541,7 +541,7 @@ func file_tvix_castore_protos_castore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_castore_proto_msgTypes[4].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_castore_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*Node); i {
            case 0:
                return &v.state

@@ -554,7 +554,7 @@ func file_tvix_castore_protos_castore_proto_init() {
        }
    }
    }
    file_tvix_castore_protos_castore_proto_msgTypes[4].OneofWrappers = []any{
    file_tvix_castore_protos_castore_proto_msgTypes[4].OneofWrappers = []interface{}{
        (*Node_Directory)(nil),
        (*Node_File)(nil),
        (*Node_Symlink)(nil),
@@ -3,7 +3,7 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc-gen-go v1.34.1
// protoc (unknown)
// source: tvix/castore/protos/rpc_blobstore.proto

@@ -415,7 +415,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_rawDescGZIP() []byte {
}

var file_tvix_castore_protos_rpc_blobstore_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
var file_tvix_castore_protos_rpc_blobstore_proto_goTypes = []any{
var file_tvix_castore_protos_rpc_blobstore_proto_goTypes = []interface{}{
    (*StatBlobRequest)(nil),  // 0: tvix.castore.v1.StatBlobRequest
    (*StatBlobResponse)(nil), // 1: tvix.castore.v1.StatBlobResponse
    (*ReadBlobRequest)(nil),  // 2: tvix.castore.v1.ReadBlobRequest

@@ -444,7 +444,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_init() {
        return
    }
    if !protoimpl.UnsafeEnabled {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[0].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*StatBlobRequest); i {
            case 0:
                return &v.state

@@ -456,7 +456,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[1].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*StatBlobResponse); i {
            case 0:
                return &v.state

@@ -468,7 +468,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[2].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*ReadBlobRequest); i {
            case 0:
                return &v.state

@@ -480,7 +480,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[3].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*BlobChunk); i {
            case 0:
                return &v.state

@@ -492,7 +492,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[4].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*PutBlobResponse); i {
            case 0:
                return &v.state

@@ -504,7 +504,7 @@ func file_tvix_castore_protos_rpc_blobstore_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[5].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_blobstore_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*StatBlobResponse_ChunkMeta); i {
            case 0:
                return &v.state
@@ -3,7 +3,7 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc-gen-go v1.34.1
// protoc (unknown)
// source: tvix/castore/protos/rpc_directory.proto

@@ -199,7 +199,7 @@ func file_tvix_castore_protos_rpc_directory_proto_rawDescGZIP() []byte {
}

var file_tvix_castore_protos_rpc_directory_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_tvix_castore_protos_rpc_directory_proto_goTypes = []any{
var file_tvix_castore_protos_rpc_directory_proto_goTypes = []interface{}{
    (*GetDirectoryRequest)(nil),  // 0: tvix.castore.v1.GetDirectoryRequest
    (*PutDirectoryResponse)(nil), // 1: tvix.castore.v1.PutDirectoryResponse
    (*Directory)(nil),            // 2: tvix.castore.v1.Directory

@@ -223,7 +223,7 @@ func file_tvix_castore_protos_rpc_directory_proto_init() {
    }
    file_tvix_castore_protos_castore_proto_init()
    if !protoimpl.UnsafeEnabled {
        file_tvix_castore_protos_rpc_directory_proto_msgTypes[0].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_directory_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*GetDirectoryRequest); i {
            case 0:
                return &v.state

@@ -235,7 +235,7 @@ func file_tvix_castore_protos_rpc_directory_proto_init() {
                return nil
            }
        }
        file_tvix_castore_protos_rpc_directory_proto_msgTypes[1].Exporter = func(v any, i int) any {
        file_tvix_castore_protos_rpc_directory_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
            switch v := v.(*PutDirectoryResponse); i {
            case 0:
                return &v.state

@@ -248,7 +248,7 @@ func file_tvix_castore_protos_rpc_directory_proto_init() {
        }
    }
    }
    file_tvix_castore_protos_rpc_directory_proto_msgTypes[0].OneofWrappers = []any{
    file_tvix_castore_protos_rpc_directory_proto_msgTypes[0].OneofWrappers = []interface{}{
        (*GetDirectoryRequest_Digest)(nil),
    }
    type x struct{}
@@ -4,68 +4,100 @@ version = "0.1.0"
edition = "2021"

[dependencies]
async-compression = { workspace = true, features = ["tokio", "zstd"] }
async-stream = { workspace = true }
async-tempfile = { workspace = true }
blake3 = { workspace = true, features = ["rayon", "std", "traits-preview"] }
bstr = { workspace = true }
bytes = { workspace = true }
data-encoding = { workspace = true }
digest = { workspace = true }
fastcdc = { workspace = true, features = ["tokio"] }
futures = { workspace = true }
lazy_static = { workspace = true }
object_store = { workspace = true, features = ["http"] }
parking_lot = { workspace = true }
pin-project-lite = { workspace = true }
prost = { workspace = true }
sled = { workspace = true }
thiserror = { workspace = true }
tokio-stream = { workspace = true, features = ["fs", "net"] }
tokio-util = { workspace = true, features = ["io", "io-util", "codec"] }
tokio-tar = { workspace = true }
tokio = { workspace = true, features = ["fs", "macros", "net", "rt", "rt-multi-thread", "signal"] }
tonic = { workspace = true }
tower = { workspace = true }
tracing = { workspace = true }
tracing-indicatif = { workspace = true }
async-compression = { version = "0.4.9", features = ["tokio", "zstd"]}
async-stream = "0.3.5"
async-tempfile = "0.4.0"
blake3 = { version = "1.3.1", features = ["rayon", "std", "traits-preview"] }
bstr = "1.6.0"
bytes = "1.4.0"
data-encoding = "2.6.0"
digest = "0.10.7"
fastcdc = { version = "3.1.0", features = ["tokio"] }
futures = "0.3.30"
lazy_static = "1.4.0"
object_store = { version = "0.10.1", features = ["http"] }
parking_lot = "0.12.1"
pin-project-lite = "0.2.13"
prost = "0.13.1"
sled = { version = "0.34.7" }
thiserror = "1.0.38"
tokio-stream = { version = "0.1.14", features = ["fs", "net"] }
tokio-util = { version = "0.7.9", features = ["io", "io-util", "codec"] }
tokio-tar = "0.3.1"
tokio = { version = "1.32.0", features = ["fs", "macros", "net", "rt", "rt-multi-thread", "signal"] }
tonic = "0.12.0"
tower = "0.4.13"
tracing = "0.1.37"
tracing-indicatif = "0.3.6"
tvix-tracing = { path = "../tracing", features = ["tonic"] }
url = { workspace = true }
walkdir = { workspace = true }
zstd = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_with = { workspace = true }
serde_qs = { workspace = true }
petgraph = { workspace = true }
erased-serde = { workspace = true }
serde_tagged = { workspace = true }
hyper-util = { workspace = true }
redb = { workspace = true }
bigtable_rs = { workspace = true, optional = true }
fuse-backend-rs = { workspace = true, optional = true }
libc = { workspace = true, optional = true }
threadpool = { workspace = true, optional = true }
tonic-reflection = { workspace = true, optional = true }
vhost = { workspace = true, optional = true }
vhost-user-backend = { workspace = true, optional = true }
virtio-queue = { workspace = true, optional = true }
vm-memory = { workspace = true, optional = true }
vmm-sys-util = { workspace = true, optional = true }
virtio-bindings = { workspace = true, optional = true }
url = "2.4.0"
walkdir = "2.4.0"
zstd = "0.13.0"
serde = { version = "1.0.197", features = [ "derive" ] }
serde_with = "3.7.0"
serde_qs = "0.12.0"
petgraph = "0.6.4"
erased-serde = "0.4.5"
serde_tagged = "0.3.0"
hyper-util = "0.1.6"

[dependencies.bigtable_rs]
optional = true
version = "0.2.10"

[dependencies.fuse-backend-rs]
optional = true
version = "0.11.0"

[dependencies.libc]
optional = true
version = "0.2.144"

[dependencies.threadpool]
version = "1.8.1"
optional = true

[dependencies.tonic-reflection]
optional = true
version = "0.12.0"

[dependencies.vhost]
optional = true
version = "0.6"

[dependencies.vhost-user-backend]
optional = true
version = "0.8"

[dependencies.virtio-queue]
optional = true
version = "0.7"

[dependencies.vm-memory]
optional = true
version = "0.10"

[dependencies.vmm-sys-util]
optional = true
version = "0.11"

[dependencies.virtio-bindings]
optional = true
version = "0.2.1"

[build-dependencies]
prost-build = { workspace = true }
tonic-build = { workspace = true }
prost-build = "0.13.1"
tonic-build = "0.12.0"

[dev-dependencies]
async-process = { workspace = true }
rstest = { workspace = true }
tempfile = { workspace = true }
tokio-retry = { workspace = true }
hex-literal = { workspace = true }
rstest_reuse = { workspace = true }
xattr = { workspace = true }
serde_json = { workspace = true }
async-process = "2.1.0"
rstest = "0.19.0"
tempfile = "3.3.0"
tokio-retry = "0.3.0"
hex-literal = "0.4.1"
rstest_reuse = "0.6.0"
xattr = "1.3.1"
serde_json = "*"

[features]
default = ["cloud"]

@@ -93,3 +125,6 @@ tonic-reflection = ["dep:tonic-reflection"]
# Requires the following packages in $PATH:
# cbtemulator, google-cloud-bigtable-tool
integration = []

[lints]
workspace = true
@@ -20,16 +20,16 @@ fn main() -> Result<()> {
        .type_attribute(".", "#[derive(Eq, Hash)]")
        .compile(
            &[
                "castore/protos/castore.proto",
                "castore/protos/rpc_blobstore.proto",
                "castore/protos/rpc_directory.proto",
                "tvix/castore/protos/castore.proto",
                "tvix/castore/protos/rpc_blobstore.proto",
                "tvix/castore/protos/rpc_directory.proto",
            ],
            // If we are in running `cargo build` manually, using `../..` works fine,
            // but in case we run inside a nix build, we need to instead point PROTO_ROOT
            // to a custom tree containing that structure.
            // to a sparseTree containing that structure.
            &[match std::env::var_os("PROTO_ROOT") {
                Some(proto_root) => proto_root.to_str().unwrap().to_owned(),
                None => "..".to_string(),
                None => "../..".to_string(),
            }],
        )
}
@@ -1,10 +1,16 @@
{ depot, pkgs, lib, ... }:
{ depot, pkgs, ... }:
let
  protos = lib.sourceByRegex depot.path.origSrc [
    "buf.yaml"
    "buf.gen.yaml"
    "^tvix(/castore(/protos(/.*\.proto)?)?)?$"
  ];
  protos = depot.nix.sparseTree {
    name = "castore-protos";
    root = depot.path.origSrc;
    paths = [
      ./castore.proto
      ./rpc_blobstore.proto
      ./rpc_directory.proto
      ../../../buf.yaml
      ../../../buf.gen.yaml
    ];
  };
in
depot.nix.readTree.drvTargets {
  inherit protos;
@@ -4,7 +4,7 @@ syntax = "proto3";

package tvix.castore.v1;

import "castore/protos/castore.proto";
import "tvix/castore/protos/castore.proto";

option go_package = "code.tvl.fyi/tvix/castore-go;castorev1";

@@ -1,12 +1,14 @@
use std::sync::Arc;

use futures::{StreamExt, TryStreamExt};
use tokio_util::io::{ReaderStream, StreamReader};
use tonic::async_trait;
use tracing::instrument;
use tracing::{instrument, warn};

use crate::composition::{CompositionContext, ServiceBuilder};
use crate::{B3Digest, Error};

use super::{BlobReader, BlobService, BlobWriter, ChunkedReader};
use super::{naive_seeker::NaiveSeeker, BlobReader, BlobService, BlobWriter};

/// Combinator for a BlobService, using a "local" and "remote" blobservice.
/// Requests are tried in (and returned from) the local store first, only if

@@ -69,16 +71,19 @@ where
                // otherwise, a chunked reader, which will always try the
                // local backend first.

                let chunked_reader = ChunkedReader::from_chunks(
                    remote_chunks.into_iter().map(|chunk| {
                // map Vec<ChunkMeta> to Vec<(B3Digest, u64)>
                let chunks: Vec<(B3Digest, u64)> = remote_chunks
                    .into_iter()
                    .map(|chunk_meta| {
                        (
                            chunk.digest.try_into().expect("invalid b3 digest"),
                            chunk.size,
                            B3Digest::try_from(chunk_meta.digest)
                                .expect("invalid chunk digest"),
                            chunk_meta.size,
                        )
                    }),
                    Arc::new(self.clone()) as Arc<dyn BlobService>,
                );
                Ok(Some(Box::new(chunked_reader)))
                    })
                    .collect();

                Ok(Some(make_chunked_reader(self.clone(), chunks)))
            }
        }
    }

@@ -126,3 +131,41 @@ impl ServiceBuilder for CombinedBlobServiceConfig {
        }))
    }
}

fn make_chunked_reader<BS>(
    // This must consume, as we can't retain references to blob_service,
    // as it'd add a lifetime to BlobReader in general, which will get
    // problematic in TvixStoreFs, which is using async move closures and cloning.
    blob_service: BS,
    // A list of b3 digests for individual chunks, and their sizes.
    chunks: Vec<(B3Digest, u64)>,
) -> Box<dyn BlobReader>
where
    BS: BlobService + Clone + 'static,
{
    // TODO: offset, verified streaming

    // construct readers for each chunk
    let blob_service = blob_service.clone();
    let readers_stream = tokio_stream::iter(chunks).map(move |(digest, _)| {
        let d = digest.to_owned();
        let blob_service = blob_service.clone();
        async move {
            blob_service.open_read(&d.to_owned()).await?.ok_or_else(|| {
                warn!(chunk.digest = %digest, "chunk not found");
                std::io::Error::new(std::io::ErrorKind::NotFound, "chunk not found")
            })
        }
    });

    // convert the stream of readers to a stream of streams of byte chunks
    let bytes_streams = readers_stream.then(|elem| async { elem.await.map(ReaderStream::new) });

    // flatten into one stream of byte chunks
    let bytes_stream = bytes_streams.try_flatten();

    // convert into AsyncRead
    let blob_reader = StreamReader::new(bytes_stream);

    Box::new(NaiveSeeker::new(Box::pin(blob_reader)))
}
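The `make_chunked_reader` shown above assembles one reader by flattening a stream of per-chunk readers into a single byte stream. A self-contained sketch of that same technique with in-memory chunks standing in for blob reads (assumes the `futures`, `tokio`, `tokio-stream` and `tokio-util` crates; illustrative only, not the tvix code):

```
use futures::{StreamExt, TryStreamExt};
use tokio::io::AsyncReadExt;
use tokio_util::io::{ReaderStream, StreamReader};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Two in-memory "chunks"; &[u8] implements tokio::io::AsyncRead.
    let chunks: Vec<&[u8]> = vec![b"hello ", b"world"];

    // Stream of readers -> stream of byte-chunk streams -> one flat stream.
    let bytes = tokio_stream::iter(chunks)
        .map(|c| Ok::<_, std::io::Error>(ReaderStream::new(c)))
        .try_flatten();

    // Wrap the flat byte stream back up as a single AsyncRead.
    let mut reader = StreamReader::new(bytes);
    let mut out = String::new();
    reader.read_to_string(&mut out).await?;
    assert_eq!(out, "hello world");
    Ok(())
}
```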
@@ -11,6 +11,7 @@ mod combinator;
mod from_addr;
mod grpc;
mod memory;
mod naive_seeker;
mod object_store;

#[cfg(test)]
265 tvix/castore/src/blobservice/naive_seeker.rs (new file)
@ -0,0 +1,265 @@
|
|||
use super::BlobReader;
use futures::ready;
use pin_project_lite::pin_project;
use std::io;
use std::task::Poll;
use tokio::io::AsyncRead;
use tracing::{debug, instrument, trace, warn};

pin_project! {
    /// This implements [tokio::io::AsyncSeek] for an [tokio::io::AsyncRead] by
    /// simply skipping over some bytes, keeping track of the position.
    /// It fails whenever you try to seek backwards.
    ///
    /// ## Pinning concerns:
    ///
    /// [NaiveSeeker] is itself pinned by callers, and we do not need to concern
    /// ourselves regarding that.
    ///
    /// Though, its fields as per
    /// <https://doc.rust-lang.org/std/pin/#pinning-is-not-structural-for-field>
    /// can be pinned or unpinned.
    ///
    /// So we need to go over each field and choose our policy carefully.
    ///
    /// The obvious cases are the bookkeeping integers we keep in the structure,
    /// those are private and not shared with anyone, we never build a
    /// `Pin<&mut X>` out of them at any point, therefore, we can safely never
    /// mark them as pinned. Of course, it is expected that no developer here
    /// attempts to `pin!(self.pos)`, because it makes no sense. If
    /// they have to become pinned, they should be marked `#[pin]` and we need
    /// to discuss it.
    ///
    /// So the bookkeeping integers are in the right state with respect to their
    /// pinning status. The projection should offer direct access.
    ///
    /// On the `r` field, i.e. a `BufReader<R>`, given that
    /// <https://docs.rs/tokio/latest/tokio/io/struct.BufReader.html#impl-Unpin-for-BufReader%3CR%3E>
    /// is available, even a `Pin<&mut BufReader<R>>` can be safely moved.
    ///
    /// The only care we should have regards the internal reader itself, i.e.
    /// the `R` instance, see that Tokio decided to `#[pin]` it too:
    /// <https://docs.rs/tokio/latest/src/tokio/io/util/buf_reader.rs.html#29>
    ///
    /// In general, there's no `Unpin` instance for `R: tokio::io::AsyncRead`
    /// (see <https://docs.rs/tokio/latest/tokio/io/trait.AsyncRead.html>).
    ///
    /// Therefore, we could keep it unpinned and pin it at every call site
    /// where we need to call `poll_*`, which can be confusing to the non-
    /// expert developer, and we have a fair number of situations where the
    /// [BufReader] instance is naked, i.e. in its `&mut BufReader<R>`
    /// form. This is annoying because it could lead to exposing the naked `R`
    /// internal instance somehow and would produce a risk of making it move
    /// unexpectedly.
    ///
    /// We choose the path of least resistance: as we have no reason to have
    /// access to the raw `BufReader<R>` instance, we just `#[pin]` it too and
    /// enjoy its safe `poll_*` APIs, pushing the unpinning concerns to the
    /// internal implementations themselves, which studied the question longer
    /// than us.
    pub struct NaiveSeeker<R: tokio::io::AsyncRead> {
        #[pin]
        r: tokio::io::BufReader<R>,
        pos: u64,
        bytes_to_skip: u64,
    }
}

/// The buffer size used to discard data.
const DISCARD_BUF_SIZE: usize = 4096;

impl<R: tokio::io::AsyncRead> NaiveSeeker<R> {
    pub fn new(r: R) -> Self {
        NaiveSeeker {
            r: tokio::io::BufReader::new(r),
            pos: 0,
            bytes_to_skip: 0,
        }
    }
}

impl<R: tokio::io::AsyncRead> tokio::io::AsyncRead for NaiveSeeker<R> {
    #[instrument(level = "trace", skip_all)]
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        // The amount of data read can be determined by the increase
        // in the length of the slice returned by `ReadBuf::filled`.
        let filled_before = buf.filled().len();

        let this = self.project();
        ready!(this.r.poll_read(cx, buf))?;

        let bytes_read = buf.filled().len() - filled_before;
        *this.pos += bytes_read as u64;

        trace!(bytes_read = bytes_read, new_pos = this.pos, "poll_read");

        Ok(()).into()
    }
}

impl<R: tokio::io::AsyncRead> tokio::io::AsyncBufRead for NaiveSeeker<R> {
    fn poll_fill_buf(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<io::Result<&[u8]>> {
        self.project().r.poll_fill_buf(cx)
    }

    #[instrument(level = "trace", skip(self))]
    fn consume(self: std::pin::Pin<&mut Self>, amt: usize) {
        let this = self.project();
        this.r.consume(amt);
        *this.pos += amt as u64;

        trace!(new_pos = this.pos, "consume");
    }
}

impl<R: tokio::io::AsyncRead> tokio::io::AsyncSeek for NaiveSeeker<R> {
    #[instrument(level="trace", skip(self), fields(inner_pos=%self.pos), err(Debug))]
    fn start_seek(
        self: std::pin::Pin<&mut Self>,
        position: std::io::SeekFrom,
    ) -> std::io::Result<()> {
        let absolute_offset: u64 = match position {
            io::SeekFrom::Start(start_offset) => {
                if start_offset < self.pos {
                    return Err(io::Error::new(
                        io::ErrorKind::Unsupported,
                        format!("can't seek backwards ({} -> {})", self.pos, start_offset),
                    ));
                } else {
                    start_offset
                }
            }
            // we don't know the total size, can't support this.
            io::SeekFrom::End(_end_offset) => {
                return Err(io::Error::new(
                    io::ErrorKind::Unsupported,
                    "can't seek from end",
                ));
            }
            io::SeekFrom::Current(relative_offset) => {
                if relative_offset < 0 {
                    return Err(io::Error::new(
                        io::ErrorKind::Unsupported,
                        "can't seek backwards relative to current position",
                    ));
                } else {
                    self.pos + relative_offset as u64
                }
            }
        };

        // we already know absolute_offset is >= self.pos
        debug_assert!(
            absolute_offset >= self.pos,
            "absolute_offset {} must be >= self.pos {}",
            absolute_offset,
            self.pos
        );

        // calculate bytes to skip
        let this = self.project();
        *this.bytes_to_skip = absolute_offset - *this.pos;

        debug!(bytes_to_skip = *this.bytes_to_skip, "seek");

        Ok(())
    }

    #[instrument(skip_all)]
    fn poll_complete(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<std::io::Result<u64>> {
        if self.bytes_to_skip == 0 {
            // return the new position (from the start of the stream)
            return Poll::Ready(Ok(self.pos));
        }

        // discard some bytes, until pos is where we want it to be.
        // We create a buffer that we'll discard later on.
        let mut discard_buf = [0; DISCARD_BUF_SIZE];

        // Loop until we've reached the desired seek position. This is done by issuing repeated
        // `poll_read` calls.
        // If the data is not available yet, we will yield back to the executor
        // and wait to be polled again.
        loop {
            if self.bytes_to_skip == 0 {
                return Poll::Ready(Ok(self.pos));
            }

            // calculate the length we want to skip at most, which is either a max
            // buffer size, or the number of remaining bytes to read, whichever is
            // smaller.
            let bytes_to_skip_now = std::cmp::min(self.bytes_to_skip as usize, discard_buf.len());
            let mut discard_buf = tokio::io::ReadBuf::new(&mut discard_buf[..bytes_to_skip_now]);

            ready!(self.as_mut().poll_read(cx, &mut discard_buf))?;
            let bytes_skipped = discard_buf.filled().len();

            if bytes_skipped == 0 {
                return Poll::Ready(Err(io::Error::new(
                    io::ErrorKind::UnexpectedEof,
                    "got EOF while trying to skip bytes",
                )));
            }
            // decrement bytes to skip. The poll_read call already updated self.pos.
            *self.as_mut().project().bytes_to_skip -= bytes_skipped as u64;
        }
    }
}

impl<R: tokio::io::AsyncRead + Send + Unpin + 'static> BlobReader for NaiveSeeker<R> {}

#[cfg(test)]
mod tests {
    use super::{NaiveSeeker, DISCARD_BUF_SIZE};
    use std::io::{Cursor, SeekFrom};
    use tokio::io::{AsyncReadExt, AsyncSeekExt};

    /// This seek requires multiple `poll_read` calls, as we use a multiple of
    /// DISCARD_BUF_SIZE when doing the seek.
    /// This ensures we don't hang indefinitely.
    #[tokio::test]
    async fn seek() {
        let buf = vec![0u8; DISCARD_BUF_SIZE * 4];
        let reader = Cursor::new(&buf);
        let mut seeker = NaiveSeeker::new(reader);
        seeker.seek(SeekFrom::Start(4000)).await.unwrap();
    }

    #[tokio::test]
    async fn seek_read() {
        let mut buf = vec![0u8; DISCARD_BUF_SIZE * 2];
        buf.extend_from_slice(&[1u8; DISCARD_BUF_SIZE * 2]);
        buf.extend_from_slice(&[2u8; DISCARD_BUF_SIZE * 2]);

        let reader = Cursor::new(&buf);
        let mut seeker = NaiveSeeker::new(reader);

        let mut read_buf = vec![0u8; DISCARD_BUF_SIZE];
        seeker.read_exact(&mut read_buf).await.expect("must read");
        assert_eq!(read_buf.as_slice(), &[0u8; DISCARD_BUF_SIZE]);

        seeker
            .seek(SeekFrom::Current(DISCARD_BUF_SIZE as i64))
            .await
            .expect("must seek");
        seeker.read_exact(&mut read_buf).await.expect("must read");
        assert_eq!(read_buf.as_slice(), &[1u8; DISCARD_BUF_SIZE]);

        seeker
            .seek(SeekFrom::Start(2 * 2 * DISCARD_BUF_SIZE as u64))
            .await
            .expect("must seek");
        seeker.read_exact(&mut read_buf).await.expect("must read");
        assert_eq!(read_buf.as_slice(), &[2u8; DISCARD_BUF_SIZE]);
    }
}
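Reviewer note: the tests above only cover forward seeks. A hedged sketch of the documented failure mode (backwards seeks are rejected with `ErrorKind::Unsupported`), written in the style of the existing tests but not part of the file:

```rust
#[tokio::test]
async fn seek_backwards_fails() {
    use std::io::{Cursor, SeekFrom};
    use tokio::io::AsyncSeekExt;

    let buf = vec![0u8; 1024];
    let mut seeker = NaiveSeeker::new(Cursor::new(&buf));

    // A forward seek succeeds...
    seeker.seek(SeekFrom::Start(512)).await.expect("forward seek");

    // ...but any seek to an earlier position must fail.
    let err = seeker
        .seek(SeekFrom::Start(0))
        .await
        .expect_err("backwards seek must fail");
    assert_eq!(err.kind(), std::io::ErrorKind::Unsupported);
}
```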
@@ -149,7 +149,7 @@ impl<'r, 'de: 'r, T: 'static> SeedFactory<'de, TagString<'de>> for RegistryWithF
            .0
            .iter()
            .find(|(k, _)| *k == &(TypeId::of::<T>(), tag.as_ref()))
            .ok_or_else(|| serde::de::Error::custom(format!("Unknown type: {}", tag)))?
            .ok_or_else(|| serde::de::Error::custom("Unknown tag"))?
            .1;

        let entry: &RegistryEntry<T> = <dyn Any>::downcast_ref(&**seed).unwrap();

@@ -215,7 +215,7 @@ impl<'de, T: 'static> serde::Deserialize<'de> for DeserializeWithRegistry<T> {

#[derive(Debug, thiserror::Error)]
enum TryFromUrlError {
    #[error("Unknown type: {0}")]
    #[error("Unknown tag: {0}")]
    UnknownTag(String),
}

@@ -274,9 +274,6 @@ pub fn add_default_services(reg: &mut Registry) {
}

pub struct CompositionContext<'a> {
    // The stack used to detect recursive instantiations and prevent deadlocks
    // The TypeId of the trait object is included to distinguish e.g. the
    // BlobService "default" and the DirectoryService "default".
    stack: Vec<(TypeId, String)>,
    composition: Option<&'a Composition>,
}

@@ -434,13 +431,10 @@ impl Composition {
                new_context
                    .stack
                    .push((TypeId::of::<T>(), entrypoint.clone()));
                let res =
                    config.build(&entrypoint, &new_context).await.map_err(|e| {
                        match e.downcast() {
                            Ok(e) => *e,
                            Err(e) => CompositionError::Failed(entrypoint, e.into()),
                        }
                    });
                let res = config
                    .build(&entrypoint, &new_context)
                    .await
                    .map_err(|e| CompositionError::Failed(entrypoint, e.into()));
                tx.send(Some(res.clone())).unwrap();
                res
            })

@@ -476,66 +470,3 @@ impl Composition {
        }
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use crate::blobservice::BlobService;
    use std::sync::Arc;

    /// Test that we return a reference to the same instance of MemoryBlobService (via ptr_eq)
    /// when instantiating the same entrypoint twice. By instantiating concurrently, we also
    /// test the channels notifying the second consumer when the store has been instantiated.
    #[tokio::test]
    async fn concurrent() {
        let blob_services_configs_json = serde_json::json!({
            "default": {
                "type": "memory",
            }
        });

        let blob_services_configs =
            with_registry(&REG, || serde_json::from_value(blob_services_configs_json)).unwrap();
        let mut blob_service_composition = Composition::default();
        blob_service_composition.extend_with_configs::<dyn BlobService>(blob_services_configs);
        let (blob_service1, blob_service2) = tokio::join!(
            blob_service_composition.build::<dyn BlobService>("default"),
            blob_service_composition.build::<dyn BlobService>("default")
        );
        assert!(Arc::ptr_eq(
            &blob_service1.unwrap(),
            &blob_service2.unwrap()
        ));
    }

    /// Test that we throw the correct error when an instantiation would recurse (deadlock)
    #[tokio::test]
    async fn reject_recursion() {
        let blob_services_configs_json = serde_json::json!({
            "default": {
                "type": "combined",
                "local": "other",
                "remote": "other"
            },
            "other": {
                "type": "combined",
                "local": "default",
                "remote": "default"
            }
        });

        let blob_services_configs =
            with_registry(&REG, || serde_json::from_value(blob_services_configs_json)).unwrap();
        let mut blob_service_composition = Composition::default();
        blob_service_composition.extend_with_configs::<dyn BlobService>(blob_services_configs);
        match blob_service_composition
            .build::<dyn BlobService>("default")
            .await
        {
            Err(CompositionError::Recursion(stack)) => {
                assert_eq!(stack, vec!["default".to_string(), "other".to_string()])
            }
            other => panic!("should have returned an error, returned: {:?}", other.err()),
        }
    }
}
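Reviewer note: the `reject_recursion` test above documents the contract that instantiation tracks a stack of `(TypeId, name)` pairs and fails when an entrypoint re-enters itself. A stand-alone sketch of that bookkeeping (the function and error shape are hypothetical simplifications, not the crate's API):

```rust
use std::any::TypeId;

/// Returns the instantiation chain as an error if `next` is already being built.
fn guard_recursion(
    stack: &[(TypeId, String)],
    next: &(TypeId, String),
) -> Result<(), Vec<String>> {
    if stack.contains(next) {
        // Mirrors the idea behind CompositionError::Recursion: report the
        // names currently on the stack.
        return Err(stack.iter().map(|(_, name)| name.clone()).collect());
    }
    Ok(())
}
```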
@@ -6,7 +6,7 @@ use thiserror::Error;
pub struct B3Digest(Bytes);

// TODO: allow converting these errors to crate::Error
#[derive(Error, Debug, PartialEq)]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid digest length: {0}")]
    InvalidDigestLen(usize),

@@ -26,11 +26,6 @@ impl From<B3Digest> for bytes::Bytes {
    }
}

impl From<blake3::Hash> for B3Digest {
    fn from(value: blake3::Hash) -> Self {
        Self(Bytes::copy_from_slice(value.as_bytes()))
    }
}
impl From<digest::Output<blake3::Hasher>> for B3Digest {
    fn from(value: digest::Output<blake3::Hasher>) -> Self {
        let v = Into::<[u8; B3_LEN]>::into(value);

@@ -72,12 +67,6 @@ impl From<&[u8; B3_LEN]> for B3Digest {
    }
}

impl From<B3Digest> for [u8; B3_LEN] {
    fn from(value: B3Digest) -> Self {
        value.0.to_vec().try_into().unwrap()
    }
}

impl Clone for B3Digest {
    fn clone(&self) -> Self {
        Self(self.0.to_owned())
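Reviewer note: with the `From<blake3::Hash>` impl in flux here, the retained `From<&[u8; B3_LEN]>` conversion is the stable way in. A usage sketch, assuming `B3_LEN` is 32 (the blake3 output size) and the `blake3` crate as elsewhere in the file:

```rust
// Hash some bytes with blake3 and wrap the raw output in a B3Digest.
let hash: blake3::Hash = blake3::hash(b"some blob");
let digest = B3Digest::from(hash.as_bytes()); // via From<&[u8; B3_LEN]>
```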
@@ -9,9 +9,7 @@ use std::sync::Arc;
use tonic::async_trait;
use tracing::{instrument, trace, warn};

use super::{
    utils::traverse_directory, Directory, DirectoryPutter, DirectoryService, SimplePutter,
};
use super::{utils::traverse_directory, DirectoryPutter, DirectoryService, SimplePutter};
use crate::composition::{CompositionContext, ServiceBuilder};
use crate::{proto, B3Digest, Error};

@@ -151,7 +149,7 @@ fn derive_directory_key(digest: &B3Digest) -> String {
#[async_trait]
impl DirectoryService for BigtableDirectoryService {
    #[instrument(skip(self, digest), err, fields(directory.digest = %digest))]
    async fn get(&self, digest: &B3Digest) -> Result<Option<Directory>, Error> {
    async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
        let mut client = self.client.clone();
        let directory_key = derive_directory_key(digest);

@@ -243,20 +241,28 @@ impl DirectoryService for BigtableDirectoryService {

        // Try to parse the value into a Directory message.
        let directory = proto::Directory::decode(Bytes::from(row_cell.value))
            .map_err(|e| Error::StorageError(format!("unable to decode directory proto: {}", e)))?
            .try_into()
            .map_err(|e| Error::StorageError(format!("unable to decode directory proto: {}", e)))?;

        // validate the Directory.
        directory
            .validate()
            .map_err(|e| Error::StorageError(format!("invalid Directory message: {}", e)))?;

        Ok(Some(directory))
    }

    #[instrument(skip(self, directory), err, fields(directory.digest = %directory.digest()))]
    async fn put(&self, directory: Directory) -> Result<B3Digest, Error> {
    async fn put(&self, directory: proto::Directory) -> Result<B3Digest, Error> {
        let directory_digest = directory.digest();
        let mut client = self.client.clone();
        let directory_key = derive_directory_key(&directory_digest);

        let data = proto::Directory::from(directory).encode_to_vec();
        // Ensure the directory we're trying to upload passes validation
        directory
            .validate()
            .map_err(|e| Error::InvalidRequest(format!("directory is invalid: {}", e)))?;

        let data = directory.encode_to_vec();
        if data.len() as u64 > CELL_SIZE_LIMIT {
            return Err(Error::StorageError(
                "Directory exceeds cell limit on Bigtable".into(),

@@ -304,7 +310,7 @@ impl DirectoryService for BigtableDirectoryService {
    fn get_recursive(
        &self,
        root_directory_digest: &B3Digest,
    ) -> BoxStream<'static, Result<Directory, Error>> {
    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
        traverse_directory(self.clone(), root_directory_digest)
    }
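Reviewer note: the Bigtable read path above is a decode-then-validate pipeline before anything reaches callers. Condensed into one helper for clarity, reusing the hunk's own names; `validate()` and the error variants are assumed from the surrounding context, not a definitive API:

```rust
use prost::Message;

fn parse_directory_cell(raw: bytes::Bytes) -> Result<proto::Directory, Error> {
    // Try to parse the value into a Directory message.
    let directory = proto::Directory::decode(raw)
        .map_err(|e| Error::StorageError(format!("unable to decode directory proto: {}", e)))?;

    // Validate the Directory before handing it out.
    directory
        .validate()
        .map_err(|e| Error::StorageError(format!("invalid Directory message: {}", e)))?;

    Ok(directory)
}
```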
@@ -7,9 +7,10 @@ use futures::TryStreamExt;
use tonic::async_trait;
use tracing::{instrument, trace};

use super::{Directory, DirectoryGraph, DirectoryService, RootToLeavesValidator, SimplePutter};
use super::{DirectoryGraph, DirectoryService, RootToLeavesValidator, SimplePutter};
use crate::composition::{CompositionContext, ServiceBuilder};
use crate::directoryservice::DirectoryPutter;
use crate::proto;
use crate::B3Digest;
use crate::Error;

@@ -39,7 +40,7 @@ where
    DS2: DirectoryService + Clone + 'static,
{
    #[instrument(skip(self, digest), fields(directory.digest = %digest))]
    async fn get(&self, digest: &B3Digest) -> Result<Option<Directory>, Error> {
    async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
        match self.near.get(digest).await? {
            Some(directory) => {
                trace!("serving from cache");

@@ -81,7 +82,7 @@ where
    }

    #[instrument(skip_all)]
    async fn put(&self, _directory: Directory) -> Result<B3Digest, Error> {
    async fn put(&self, _directory: proto::Directory) -> Result<B3Digest, Error> {
        Err(Error::StorageError("unimplemented".to_string()))
    }

@@ -89,7 +90,7 @@ where
    fn get_recursive(
        &self,
        root_directory_digest: &B3Digest,
    ) -> BoxStream<'static, Result<Directory, Error>> {
    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
        let near = self.near.clone();
        let far = self.far.clone();
        let digest = root_directory_digest.clone();

@@ -178,75 +179,3 @@ impl ServiceBuilder for CacheConfig {
        }))
    }
}

#[derive(Clone)]
pub struct Router<DS1, DS2> {
    writes: DS1,
    reads: DS2,
}

#[async_trait]
impl<DS1, DS2> DirectoryService for Router<DS1, DS2>
where
    DS1: DirectoryService + Clone + 'static,
    DS2: DirectoryService + Clone + 'static,
{
    #[instrument(skip(self, digest), fields(directory.digest = %digest))]
    async fn get(&self, digest: &B3Digest) -> Result<Option<Directory>, Error> {
        self.reads.get(digest).await
    }

    #[instrument(skip_all)]
    async fn put(&self, directory: Directory) -> Result<B3Digest, Error> {
        self.writes.put(directory).await
    }

    #[instrument(skip_all, fields(directory.digest = %root_directory_digest))]
    fn get_recursive(
        &self,
        root_directory_digest: &B3Digest,
    ) -> BoxStream<'static, Result<Directory, Error>> {
        self.reads.get_recursive(root_directory_digest)
    }

    #[instrument(skip_all)]
    fn put_multiple_start(&self) -> Box<(dyn DirectoryPutter + 'static)> {
        self.writes.put_multiple_start()
    }
}

#[derive(serde::Deserialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct RouterConfig {
    writes: String,
    reads: String,
}

impl TryFrom<url::Url> for RouterConfig {
    type Error = Box<dyn std::error::Error + Send + Sync>;
    fn try_from(_url: url::Url) -> Result<Self, Self::Error> {
        Err(Error::StorageError(
            "Instantiating a CombinedDirectoryService from a url is not supported".into(),
        )
        .into())
    }
}

#[async_trait]
impl ServiceBuilder for RouterConfig {
    type Output = dyn DirectoryService;
    async fn build<'a>(
        &'a self,
        _instance_name: &str,
        context: &CompositionContext,
    ) -> Result<Arc<dyn DirectoryService>, Box<dyn std::error::Error + Send + Sync + 'static>> {
        let (writes, reads) = futures::join!(
            context.resolve(self.writes.clone()),
            context.resolve(self.reads.clone())
        );
        Ok(Arc::new(Router {
            writes: writes?,
            reads: reads?,
        }))
    }
}
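Reviewer note: for orientation, the cache combinator's `get` is a plain read-through: consult the near store, fall back to the far one. A minimal sketch of that control flow (generic bounds and the helper name are simplified, not the crate's exact code):

```rust
async fn read_through<N, F>(
    near: &N,
    far: &F,
    digest: &B3Digest,
) -> Result<Option<proto::Directory>, Error>
where
    N: DirectoryService,
    F: DirectoryService,
{
    if let Some(directory) = near.get(digest).await? {
        // cache hit, serve directly from the near store
        return Ok(Some(directory));
    }
    // miss: ask the far store (the real impl also warms the near cache here)
    far.get(digest).await
}
```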
@@ -1,5 +1,7 @@
use std::collections::HashMap;

use bstr::ByteSlice;

use petgraph::{
    graph::{DiGraph, NodeIndex},
    visit::{Bfs, DfsPostOrder, EdgeRef, IntoNodeIdentifiers, Walker},

@@ -8,7 +10,10 @@ use petgraph::{
use tracing::instrument;

use super::order_validator::{LeavesToRootValidator, OrderValidator, RootToLeavesValidator};
use crate::{path::PathComponent, B3Digest, Directory, Node};
use crate::{
    proto::{self, Directory, DirectoryNode},
    B3Digest,
};

#[derive(thiserror::Error, Debug)]
pub enum Error {

@@ -16,11 +21,6 @@ pub enum Error {
    ValidationError(String),
}

struct EdgeWeight {
    name: PathComponent,
    size: u64,
}

/// This can be used to validate and/or re-order a Directory closure (DAG of
/// connected Directories), and their insertion order.
///

@@ -58,7 +58,7 @@ pub struct DirectoryGraph<O> {
    //
    // The option in the edge weight tracks the pending validation state of the respective edge, for example if
    // the child has not been added yet.
    graph: DiGraph<Option<Directory>, Option<EdgeWeight>>,
    graph: DiGraph<Option<Directory>, Option<DirectoryNode>>,

    // A lookup table from directory digest to node index.
    digest_to_node_ix: HashMap<B3Digest, NodeIndex>,

@@ -67,18 +67,18 @@ pub struct DirectoryGraph<O> {
}

pub struct ValidatedDirectoryGraph {
    graph: DiGraph<Option<Directory>, Option<EdgeWeight>>,
    graph: DiGraph<Option<Directory>, Option<DirectoryNode>>,

    root: Option<NodeIndex>,
}

fn check_edge(edge: &EdgeWeight, child: &Directory) -> Result<(), Error> {
fn check_edge(dir: &DirectoryNode, child: &Directory) -> Result<(), Error> {
    // Ensure the size specified in the child node matches our records.
    if edge.size != child.size() {
    if dir.size != child.size() {
        return Err(Error::ValidationError(format!(
            "'{}' has wrong size, specified {}, recorded {}",
            edge.name,
            edge.size,
            dir.name.as_bstr(),
            dir.size,
            child.size(),
        )));
    }

@@ -88,7 +88,7 @@ fn check_edge(edge: &EdgeWeight, child: &Directory) -> Result<(), Error> {
impl DirectoryGraph<LeavesToRootValidator> {
    /// Insert a new Directory into the closure
    #[instrument(level = "trace", skip_all, fields(directory.digest=%directory.digest(), directory.size=%directory.size()), err)]
    pub fn add(&mut self, directory: Directory) -> Result<(), Error> {
    pub fn add(&mut self, directory: proto::Directory) -> Result<(), Error> {
        if !self.order_validator.add_directory(&directory) {
            return Err(Error::ValidationError(
                "unknown directory was referenced".into(),

@@ -108,7 +108,7 @@ impl DirectoryGraph<RootToLeavesValidator> {

    /// Insert a new Directory into the closure
    #[instrument(level = "trace", skip_all, fields(directory.digest=%directory.digest(), directory.size=%directory.size()), err)]
    pub fn add(&mut self, directory: Directory) -> Result<(), Error> {
    pub fn add(&mut self, directory: proto::Directory) -> Result<(), Error> {
        let digest = directory.digest();
        if !self.order_validator.digest_allowed(&digest) {
            return Err(Error::ValidationError("unexpected digest".into()));

@@ -129,7 +129,12 @@ impl<O: OrderValidator> DirectoryGraph<O> {
    }

    /// Adds a directory which has already been confirmed to be in-order to the graph
    pub fn add_order_unchecked(&mut self, directory: Directory) -> Result<(), Error> {
    pub fn add_order_unchecked(&mut self, directory: proto::Directory) -> Result<(), Error> {
        // Do some basic validation
        directory
            .validate()
            .map_err(|e| Error::ValidationError(e.to_string()))?;

        let digest = directory.digest();

        // Teach the graph about the existence of a node with this digest

@@ -144,32 +149,23 @@ impl<O: OrderValidator> DirectoryGraph<O> {
        }

        // set up edges to all child directories
        for (name, node) in directory.nodes() {
            if let Node::Directory { digest, size } = node {
                let child_ix = *self
                    .digest_to_node_ix
                    .entry(digest.clone())
                    .or_insert_with(|| self.graph.add_node(None));
        for subdir in &directory.directories {
            let subdir_digest: B3Digest = subdir.digest.clone().try_into().unwrap();

                let pending_edge_check = match &self.graph[child_ix] {
                    Some(child) => {
                        // child is already available, validate the edge now
                        check_edge(
                            &EdgeWeight {
                                name: name.clone(),
                                size: *size,
                            },
                            child,
                        )?;
                        None
                    }
                    None => Some(EdgeWeight {
                        name: name.clone(),
                        size: *size,
                    }), // pending validation
                };
                self.graph.add_edge(ix, child_ix, pending_edge_check);
            }
            let child_ix = *self
                .digest_to_node_ix
                .entry(subdir_digest)
                .or_insert_with(|| self.graph.add_node(None));

            let pending_edge_check = match &self.graph[child_ix] {
                Some(child) => {
                    // child is already available, validate the edge now
                    check_edge(subdir, child)?;
                    None
                }
                None => Some(subdir.clone()), // pending validation
            };
            self.graph.add_edge(ix, child_ix, pending_edge_check);
        }

        // validate the edges from parents to this node

@@ -187,7 +183,6 @@ impl<O: OrderValidator> DirectoryGraph<O> {
            .expect("edge not found")
            .take()
            .expect("edge is already validated");

        check_edge(&edge_weight, &directory)?;
    }

@@ -271,37 +266,37 @@ impl ValidatedDirectoryGraph {
            .filter_map(move |i| nodes[i.index()].weight.take())
    }
}
/*

#[cfg(test)]
mod tests {
    use crate::{
        fixtures::{DIRECTORY_A, DIRECTORY_B, DIRECTORY_C},
        proto::{self, Directory},
    };
    use lazy_static::lazy_static;
    use rstest::rstest;

    lazy_static! {
        pub static ref BROKEN_DIRECTORY : Directory = Directory {
            symlinks: vec![SymlinkNode {
            symlinks: vec![proto::SymlinkNode {
                name: "".into(), // invalid name!
                target: "doesntmatter".into(),
            }],
            ..Default::default()
        };
*/
#[cfg(test)]
mod tests {
    use crate::fixtures::{DIRECTORY_A, DIRECTORY_B, DIRECTORY_C};
    use crate::{Directory, Node};
    use lazy_static::lazy_static;
    use rstest::rstest;

        pub static ref BROKEN_PARENT_DIRECTORY: Directory = Directory {
            directories: vec![proto::DirectoryNode {
                name: "foo".into(),
                digest: DIRECTORY_A.digest().into(),
                size: DIRECTORY_A.size() + 42, // wrong!
            }],
            ..Default::default()
        };
    }

    use super::{DirectoryGraph, LeavesToRootValidator, RootToLeavesValidator};

    lazy_static! {
        pub static ref BROKEN_PARENT_DIRECTORY: Directory =
            Directory::try_from_iter([
                (
                    "foo".try_into().unwrap(),
                    Node::Directory{
                        digest: DIRECTORY_A.digest(),
                        size: DIRECTORY_A.size() + 42, // wrong!
                    }
                )
            ]).unwrap();
    }

    #[rstest]
    /// Uploading an empty directory should succeed.
    #[case::empty_directory(&[&*DIRECTORY_A], false, Some(vec![&*DIRECTORY_A]))]

@@ -317,6 +312,8 @@ mod tests {
    #[case::unconnected_node(&[&*DIRECTORY_A, &*DIRECTORY_C, &*DIRECTORY_B], false, None)]
    /// Uploading B (referring to A) should fail immediately, because A was never uploaded.
    #[case::dangling_pointer(&[&*DIRECTORY_B], true, None)]
    /// Uploading a directory failing validation should fail immediately.
    #[case::failing_validation(&[&*BROKEN_DIRECTORY], true, None)]
    /// Uploading a directory which refers to another Directory with a wrong size should fail.
    #[case::wrong_size_in_parent(&[&*DIRECTORY_A, &*BROKEN_PARENT_DIRECTORY], true, None)]
    fn test_uploads(

@@ -369,6 +366,8 @@ mod tests {
    #[case::unconnected_node(&*DIRECTORY_C, &[&*DIRECTORY_C, &*DIRECTORY_B], true, None)]
    /// Downloading B (specified as the root) but receiving A instead should fail immediately, because A has no connection to B (the root).
    #[case::dangling_pointer(&*DIRECTORY_B, &[&*DIRECTORY_A], true, None)]
    /// Downloading a directory failing validation should fail immediately.
    #[case::failing_validation(&*BROKEN_DIRECTORY, &[&*BROKEN_DIRECTORY], true, None)]
    /// Downloading a directory which refers to another Directory with a wrong size should fail.
    #[case::wrong_size_in_parent(&*BROKEN_PARENT_DIRECTORY, &[&*BROKEN_PARENT_DIRECTORY, &*DIRECTORY_A], true, None)]
    fn test_downloads(
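Reviewer note: both order validators reduce to a simple invariant over digests: in leaves-to-root order, a parent may only be accepted once every child it references is already known. A toy sketch of that check with plain 32-byte digests (a hypothetical helper, not the crate's API):

```rust
use std::collections::HashSet;

/// Accept `parent` only if all `children` digests were seen before.
fn accept_leaves_to_root(
    seen: &mut HashSet<[u8; 32]>,
    parent: [u8; 32],
    children: &[[u8; 32]],
) -> bool {
    if children.iter().all(|c| seen.contains(c)) {
        seen.insert(parent);
        true
    } else {
        false // "unknown directory was referenced"
    }
}
```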
@@ -18,11 +18,6 @@ use super::DirectoryService;
/// - `sled:///absolute/path/to/somewhere`
///   Uses sled, using a path on the disk for persistency. Can only be opened
///   from one process at the same time.
/// - `redb:`
///   Uses an in-memory redb implementation.
/// - `redb:///absolute/path/to/somewhere`
///   Uses redb, using a path on the disk for persistency. Can only be opened
///   from one process at the same time.
/// - `grpc+unix:///absolute/path/to/somewhere`
///   Connects to a local tvix-store gRPC service via Unix socket.
/// - `grpc+http://host:port`, `grpc+https://host:port`

@@ -57,8 +52,6 @@ mod tests {
    lazy_static! {
        static ref TMPDIR_SLED_1: TempDir = TempDir::new().unwrap();
        static ref TMPDIR_SLED_2: TempDir = TempDir::new().unwrap();
        static ref TMPDIR_REDB_1: TempDir = TempDir::new().unwrap();
        static ref TMPDIR_REDB_2: TempDir = TempDir::new().unwrap();
    }

    #[rstest]

@@ -82,16 +75,6 @@ mod tests {
    #[case::memory_invalid_root_path("memory:///", false)]
    /// This sets a memory url path to "/foo", which is invalid.
    #[case::memory_invalid_root_path_foo("memory:///foo", false)]
    /// This configures redb in temporary mode.
    #[case::redb_valid_temporary("redb://", true)]
    /// This configures redb with /, which should fail.
    #[case::redb_invalid_root("redb:///", false)]
    /// This configures redb with a host, not a path, which should fail.
    #[case::redb_invalid_host("redb://foo.example", false)]
    /// This configures redb with a valid path, which should succeed.
    #[case::redb_valid_path(&format!("redb://{}", &TMPDIR_REDB_1.path().join("foo").to_str().unwrap()), true)]
    /// This configures redb with a host and a valid path, which should fail.
    #[case::redb_invalid_host_with_valid_path(&format!("redb://foo.example{}", &TMPDIR_REDB_2.path().join("bar").to_str().unwrap()), false)]
    /// Correct scheme to connect to a unix socket.
    #[case::grpc_valid_unix_socket("grpc+unix:///path/to/somewhere", true)]
    /// Correct scheme for unix socket, but setting a host too, which is invalid.
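Reviewer note: a usage sketch of the URL-based construction the doc comment and test cases above describe. The exact signature is assumed from the tests (a URL string in, a fallible service out), so treat the call shape as illustrative only:

```rust
// Hypothetical call shape; see the test cases above for which URLs are valid.
let svc = from_addr("memory://").await?; // in-memory service
let grpc = from_addr("grpc+unix:///path/to/somewhere").await?; // local gRPC socket
```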
@@ -1,9 +1,9 @@
use std::collections::HashSet;

use super::{Directory, DirectoryPutter, DirectoryService};
use super::{DirectoryPutter, DirectoryService};
use crate::composition::{CompositionContext, ServiceBuilder};
use crate::proto::{self, get_directory_request::ByWhat};
use crate::{B3Digest, DirectoryError, Error};
use crate::{B3Digest, Error};
use async_stream::try_stream;
use futures::stream::BoxStream;
use std::sync::Arc;

@@ -41,7 +41,10 @@ where
    T::Future: Send,
{
    #[instrument(level = "trace", skip_all, fields(directory.digest = %digest))]
    async fn get(&self, digest: &B3Digest) -> Result<Option<Directory>, crate::Error> {
    async fn get(
        &self,
        digest: &B3Digest,
    ) -> Result<Option<crate::proto::Directory>, crate::Error> {
        // Get a new handle to the gRPC client, and copy the digest.
        let mut grpc_client = self.grpc_client.clone();
        let digest_cpy = digest.clone();

@@ -69,10 +72,15 @@ where
                        "requested directory with digest {}, but got {}",
                        digest, actual_digest
                    )))
                } else if let Err(e) = directory.validate() {
                    // Validate the Directory itself is valid.
                    warn!("directory failed validation: {}", e.to_string());
                    Err(crate::Error::StorageError(format!(
                        "directory {} failed validation: {}",
                        digest, e,
                    )))
                } else {
                    Ok(Some(directory.try_into().map_err(|_| {
                        Error::StorageError("invalid root digest length in response".to_string())
                    })?))
                    Ok(Some(directory))
                }
            }
            Ok(None) => Ok(None),

@@ -82,11 +90,11 @@ where
    }

    #[instrument(level = "trace", skip_all, fields(directory.digest = %directory.digest()))]
    async fn put(&self, directory: Directory) -> Result<B3Digest, crate::Error> {
    async fn put(&self, directory: crate::proto::Directory) -> Result<B3Digest, crate::Error> {
        let resp = self
            .grpc_client
            .clone()
            .put(tokio_stream::once(proto::Directory::from(directory)))
            .put(tokio_stream::once(directory))
            .await;

        match resp {

@@ -105,7 +113,7 @@ where
    fn get_recursive(
        &self,
        root_directory_digest: &B3Digest,
    ) -> BoxStream<'static, Result<Directory, Error>> {
    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
        let mut grpc_client = self.grpc_client.clone();
        let root_directory_digest = root_directory_digest.clone();

@@ -122,11 +130,19 @@ where
                // The Directory digests we received so far
                let mut received_directory_digests: HashSet<B3Digest> = HashSet::new();
                // The Directory digests we're still expecting to get sent.
                let mut expected_directory_digests: HashSet<B3Digest> = HashSet::from([root_directory_digest.clone()]);
                let mut expected_directory_digests: HashSet<B3Digest> = HashSet::from([root_directory_digest]);

                loop {
                    match stream.message().await {
                        Ok(Some(directory)) => {
                            // validate the directory itself.
                            if let Err(e) = directory.validate() {
                                Err(crate::Error::StorageError(format!(
                                    "directory {} failed validation: {}",
                                    directory.digest(),
                                    e,
                                )))?;
                            }
                            // validate we actually expected that directory, and move it from expected to received.
                            let directory_digest = directory.digest();
                            let was_expected = expected_directory_digests.remove(&directory_digest);

@@ -152,28 +168,14 @@ where
                                    .insert(child_directory_digest);
                            }

                            let directory = directory.try_into()
                                .map_err(|e: DirectoryError| Error::StorageError(e.to_string()))?;

                            yield directory;
                        },
                        Ok(None) if expected_directory_digests.len() == 1 && expected_directory_digests.contains(&root_directory_digest) => {
                            // The root directory of the requested closure was not found, return an
                            // empty stream
                            return
                        }
                        Ok(None) => {
                            // The stream has ended
                            let diff_len = expected_directory_digests
                                // Account for directories which have been referenced more than once,
                                // but only received once since they were deduplicated
                                .difference(&received_directory_digests)
                                .count();
                            // If this is not empty, then the closure is incomplete
                            if diff_len != 0 {
                            // If we were still expecting something, that's an error.
                            if !expected_directory_digests.is_empty() {
                                Err(crate::Error::StorageError(format!(
                                    "still expected {} directories, but got premature end of stream",
                                    diff_len
                                    expected_directory_digests.len(),
                                )))?
                            } else {
                                return

@@ -266,11 +268,11 @@ pub struct GRPCPutter {
#[async_trait]
impl DirectoryPutter for GRPCPutter {
    #[instrument(level = "trace", skip_all, fields(directory.digest=%directory.digest()), err)]
    async fn put(&mut self, directory: Directory) -> Result<(), crate::Error> {
    async fn put(&mut self, directory: proto::Directory) -> Result<(), crate::Error> {
        match self.rq {
            // If we're not already closed, send the directory to directory_sender.
            Some((_, ref directory_sender)) => {
                if directory_sender.send(directory.into()).is_err() {
                if directory_sender.send(directory).is_err() {
                    // If the channel has been prematurely closed, invoke close (so we can peek at the error code)
                    // That error code is much more helpful, because it
                    // contains the error message from the server.
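Reviewer note: the streaming loop above keeps two digest sets to verify that the closure it receives is exactly the one it asked for. Stripped of gRPC concerns, the bookkeeping looks like this (types simplified to fixed byte arrays; the helper is hypothetical):

```rust
use std::collections::HashSet;

struct ClosureCheck {
    received: HashSet<[u8; 32]>,
    expected: HashSet<[u8; 32]>, // seeded with the root digest
}

impl ClosureCheck {
    /// Each incoming directory must have been announced by a parent (or be the root).
    fn on_directory(&mut self, digest: [u8; 32], children: &[[u8; 32]]) -> Result<(), String> {
        if !self.expected.remove(&digest) {
            return Err("received unexpected directory".into());
        }
        self.received.insert(digest);
        for child in children {
            // only expect children we haven't already received (deduplication)
            if !self.received.contains(child) {
                self.expected.insert(*child);
            }
        }
        Ok(())
    }

    /// At end of stream, anything still expected means the closure is incomplete.
    fn on_end_of_stream(&self) -> Result<(), String> {
        if self.expected.is_empty() {
            Ok(())
        } else {
            Err(format!("still expected {} directories", self.expected.len()))
        }
    }
}
```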
@@ -1,4 +1,4 @@
use crate::{B3Digest, Error};
use crate::{proto, B3Digest, Error};
use futures::stream::BoxStream;
use std::collections::HashMap;
use std::sync::Arc;

@@ -7,9 +7,8 @@ use tonic::async_trait;
use tracing::{instrument, warn};

use super::utils::traverse_directory;
use super::{Directory, DirectoryPutter, DirectoryService, SimplePutter};
use super::{DirectoryPutter, DirectoryService, SimplePutter};
use crate::composition::{CompositionContext, ServiceBuilder};
use crate::proto;

#[derive(Clone, Default)]
pub struct MemoryDirectoryService {

@@ -19,7 +18,7 @@ pub struct MemoryDirectoryService {
#[async_trait]
impl DirectoryService for MemoryDirectoryService {
    #[instrument(skip(self, digest), fields(directory.digest = %digest))]
    async fn get(&self, digest: &B3Digest) -> Result<Option<Directory>, Error> {
    async fn get(&self, digest: &B3Digest) -> Result<Option<proto::Directory>, Error> {
        let db = self.db.read().await;

        match db.get(digest) {

@@ -38,20 +37,35 @@ impl DirectoryService for MemoryDirectoryService {
                    )));
                }

                Ok(Some(directory.clone().try_into().map_err(|e| {
                    crate::Error::StorageError(format!("corrupted directory: {}", e))
                })?))
                // Validate the Directory itself is valid.
                if let Err(e) = directory.validate() {
                    warn!("directory failed validation: {}", e.to_string());
                    return Err(Error::StorageError(format!(
                        "directory {} failed validation: {}",
                        actual_digest, e,
                    )));
                }

                Ok(Some(directory.clone()))
            }
        }
    }

    #[instrument(skip(self, directory), fields(directory.digest = %directory.digest()))]
    async fn put(&self, directory: Directory) -> Result<B3Digest, Error> {
    async fn put(&self, directory: proto::Directory) -> Result<B3Digest, Error> {
        let digest = directory.digest();

        // validate the directory itself.
        if let Err(e) = directory.validate() {
            return Err(Error::InvalidRequest(format!(
                "directory {} failed validation: {}",
                digest, e,
            )));
        }

        // store it
        let mut db = self.db.write().await;
        db.insert(digest.clone(), directory.into());
        db.insert(digest.clone(), directory);

        Ok(digest)
    }

@@ -60,7 +74,7 @@ impl DirectoryService for MemoryDirectoryService {
    fn get_recursive(
        &self,
        root_directory_digest: &B3Digest,
    ) -> BoxStream<'static, Result<Directory, Error>> {
    ) -> BoxStream<'static, Result<proto::Directory, Error>> {
        traverse_directory(self.clone(), root_directory_digest)
    }
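Reviewer note: the memory service boils down to a digest-keyed map behind an async `RwLock`. A self-contained sketch of that storage shape (field and method names chosen to mirror the hunk; everything else is hypothetical):

```rust
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

// Hypothetical minimal store keyed by raw digest bytes.
#[derive(Clone, Default)]
struct MemoryStore {
    db: Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>,
}

impl MemoryStore {
    async fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        self.db.read().await.get(key).cloned() // shared read lock
    }

    async fn put(&self, key: Vec<u8>, value: Vec<u8>) {
        self.db.write().await.insert(key, value); // exclusive write lock
    }
}
```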
Some files were not shown because too many files have changed in this diff.