* Sync with the trunk.
Commit 194d21f9f6

45 changed files with 928 additions and 682 deletions
configure.ac (52 changes)

@@ -5,32 +5,35 @@ AM_INIT_AUTOMAKE([dist-bzip2 foreign])
 AC_DEFINE_UNQUOTED(NIX_VERSION, ["$VERSION"], [Nix version.])
 
-AC_CANONICAL_HOST
+AC_PROG_SED
 
 # Construct a Nix system name (like "i686-linux").
+AC_CANONICAL_HOST
 AC_MSG_CHECKING([for the canonical Nix system name])
-cpu_name=$(uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
-machine_name=$(uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
-
-case $machine_name in
-    i*86)
-        machine_name=i686
-        ;;
-    x86_64)
-        machine_name=x86_64
-        ;;
-    ppc)
-        machine_name=powerpc
-        ;;
-    *)
-        if test "$cpu_name" != "unknown"; then
-            machine_name=$cpu_name
-        fi
-        ;;
-esac
+AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
+  [Platform identifier (e.g., `i686-linux').]),
+  [system=$withval],
+  [case "$host_cpu" in
+     i*86)
+        machine_name="i686";;
+     amd64)
+        machine_name="x86_64";;
+     *)
+        machine_name="$host_cpu";;
+   esac
+   case "$host_os" in
+     linux-gnu*)
+        # For backward compatibility, strip the `-gnu' part.
+        system="$machine_name-linux";;
+     *)
+        # Strip the version number from names such as `gnu0.3',
+        # `darwin10.2.0', etc.
+        system="$machine_name-`echo $host_os | "$SED" -e's/@<:@0-9.@:>@*$//g'`";;
+   esac])
 
-sys_name=$(uname -s | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ ' 'abcdefghijklmnopqrstuvwxyz_')
+sys_name=$(uname -s | tr 'A-Z ' 'a-z_')
 
 case $sys_name in
     cygwin*)
@@ -38,9 +41,6 @@ case $sys_name in
         ;;
 esac
 
-AC_ARG_WITH(system, AC_HELP_STRING([--with-system=SYSTEM],
-    [Platform identifier (e.g., `i686-linux').]),
-    system=$withval, system="${machine_name}-${sys_name}")
 AC_MSG_RESULT($system)
 AC_SUBST(system)
 AC_DEFINE_UNQUOTED(SYSTEM, ["$system"], [platform identifier (`cpu-os')])
@@ -62,7 +62,7 @@ fi
 
 # Solaris-specific stuff.
-if test "$sys_name" = "sunos"; then
+if test "$sys_name" = sunos; then
     # Solaris requires -lsocket -lnsl for network functions
     LIBS="-lsocket -lnsl $LIBS"
 fi
@@ -255,7 +255,7 @@ AC_ARG_WITH(sqlite, AC_HELP_STRING([--with-sqlite=PATH],
   [prefix of SQLite]),
   sqlite=$withval, sqlite=)
 AM_CONDITIONAL(HAVE_SQLITE, test -n "$sqlite")
-SQLITE_VERSION=3070701
+SQLITE_VERSION=3070900
 AC_SUBST(SQLITE_VERSION)
 if test -z "$sqlite"; then
   sqlite_lib='${top_builddir}/externals/sqlite-autoconf-$(SQLITE_VERSION)/libsqlite3.la'
@@ -24,8 +24,11 @@
       </group>
       <arg><option>--sign</option></arg>
       <arg><option>--gzip</option></arg>
+      <arg><option>--bzip2</option></arg>
+      <arg><option>--xz</option></arg>
+      <arg><option>--include-outputs</option></arg>
       <arg choice='plain'>
-        <arg><replaceable>user@</replaceable></arg><replaceable>machine</replaceable>
+        <replaceable>user@</replaceable><replaceable>machine</replaceable>
       </arg>
       <arg choice='plain'><replaceable>paths</replaceable></arg>
     </cmdsynopsis>
@@ -84,22 +87,33 @@ those paths.  If this bothers you, use
 
   <listitem><para>Let the sending machine cryptographically sign the
   dump of each path with the key in
-  <filename>/nix/etc/nix/signing-key.sec</filename>.  If the user on
-  the target machine does not have direct access to the Nix store
-  (i.e., if the target machine has a multi-user Nix installation),
-  then the target machine will check the dump against
-  <filename>/nix/etc/nix/signing-key.pub</filename> before unpacking
-  it in its Nix store.  This allows secure sharing of store paths
-  between untrusted users on two machines, provided that there is a
-  trust relation between the Nix installations on both machines
-  (namely, they have matching public/secret keys).</para></listitem>
+  <filename><replaceable>sysconfdir</replaceable>/nix/signing-key.sec</filename>.
+  If the user on the target machine does not have direct access to
+  the Nix store (i.e., if the target machine has a multi-user Nix
+  installation), then the target machine will check the dump against
+  <filename><replaceable>sysconfdir</replaceable>/nix/signing-key.pub</filename>
+  before unpacking it in its Nix store.  This allows secure sharing
+  of store paths between untrusted users on two machines, provided
+  that there is a trust relation between the Nix installations on
+  both machines (namely, they have matching public/secret
+  keys).</para></listitem>
 
   </varlistentry>
 
-  <varlistentry><term><option>--gzip</option></term>
+  <varlistentry><term><option>--gzip</option> / <option>--bzip2</option> / <option>--xz</option></term>
 
-  <listitem><para>Compress the dump of each path with
-  <command>gzip</command> before sending it.</para></listitem>
+  <listitem><para>Compress the dump of each path with respectively
+  <command>gzip</command>, <command>bzip2</command> or
+  <command>xz</command> before sending it.  The corresponding
+  decompression program must be installed on the target
+  machine.</para></listitem>
 
   </varlistentry>
 
+  <varlistentry><term><option>--include-outputs</option></term>
+
+  <listitem><para>Also copy the outputs of store derivations included
+  in the closure.</para></listitem>
+
+  </varlistentry>
+
@@ -256,7 +256,7 @@ number of possible ways:
   <emphasis>attribute paths</emphasis> that select attributes from the
   top-level Nix expression.  This is faster than using derivation
   names and unambiguous.  To find out the attribute paths of available
-  packages, use <literal>nix-env -qaA '*'</literal>.</para></listitem>
+  packages, use <literal>nix-env -qaP '*'</literal>.</para></listitem>
 
   <listitem><para>If <option>--from-profile</option>
   <replaceable>path</replaceable> is given,
@@ -2,10 +2,12 @@ perlversion := $(shell perl -e 'use Config; print $$Config{version};')
 perlarchname := $(shell perl -e 'use Config; print $$Config{archname};')
 perllibdir = $(libdir)/perl5/site_perl/$(perlversion)/$(perlarchname)
 
-PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/Config.pm.in
+PERL_MODULES = lib/Nix/Store.pm lib/Nix/Manifest.pm lib/Nix/GeneratePatches.pm lib/Nix/SSH.pm lib/Nix/CopyClosure.pm lib/Nix/Config.pm.in
 
 # Hack required by "make check".
 all: $(PERL_MODULES:.in=)
-	ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/Store.so
+	mkdir -p lib/auto/Nix/Store
+	ln -sfn $(abs_builddir)/.libs/libNixStore.so lib/auto/Nix/Store/Store.so
 
 install-exec-local: $(PERL_MODULES:.in=)
 	$(INSTALL) -d $(DESTDIR)$(perllibdir)/Nix
@@ -4,6 +4,7 @@ $binDir = $ENV{"NIX_BIN_DIR"} || "@bindir@";
 $libexecDir = $ENV{"NIX_LIBEXEC_DIR"} || "@libexecdir@";
 $manifestDir = $ENV{"NIX_MANIFESTS_DIR"} || "@localstatedir@/nix/manifests";
+$logDir = $ENV{"NIX_LOG_DIR"} || "@localstatedir@/log/nix";
 $confDir = $ENV{"NIX_CONF_DIR"} || "@sysconfdir@/nix";
 
 $bzip2 = $ENV{"NIX_BZIP2"} || "@bzip2@";
 $curl = "@curl@";
perl/lib/Nix/CopyClosure.pm (new file, 46 lines)

@@ -0,0 +1,46 @@
+package Nix::CopyClosure;
+
+use strict;
+use Nix::Config;
+use Nix::Store;
+
+
+sub copyTo {
+    my ($sshHost, $sshOpts, $storePaths, $compressor, $decompressor, $includeOutputs, $dryRun, $sign) = @_;
+
+    $compressor = "$compressor |" if $compressor ne "";
+    $decompressor = "$decompressor |" if $decompressor ne "";
+
+    # Get the closure of this path.
+    my @closure = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs,
+        map { followLinksToStorePath $_ } @{$storePaths})));
+
+    # Ask the remote host which paths are invalid.  Because of limits
+    # to the command line length, do this in chunks.  Eventually,
+    # we'll want to use ‘--from-stdin’, but we can't rely on the
+    # target having this option yet.
+    my @missing = ();
+    while (scalar(@closure) > 0) {
+        my @ps = splice(@closure, 0, 1500);
+        open(READ, "set -f; ssh $sshHost @{$sshOpts} nix-store --check-validity --print-invalid @ps|");
+        while (<READ>) {
+            chomp;
+            push @missing, $_;
+        }
+        close READ or die;
+    }
+
+    # Export the store paths and import them on the remote machine.
+    if (scalar @missing > 0) {
+        print STDERR "copying ", scalar @missing, " missing paths to ‘$sshHost’...\n";
+        #print STDERR "  $_\n" foreach @missing;
+        unless ($dryRun) {
+            open SSH, "| $compressor ssh $sshHost @{$sshOpts} '$decompressor nix-store --import'" or die;
+            exportPaths(fileno(SSH), $sign, @missing);
+            close SSH or die "copying store paths to remote machine `$sshHost' failed: $?";
+        }
+    }
+}
+
+
+1;
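
A minimal sketch of how a caller can drive the new module; the host and store path below are hypothetical placeholders, and the Nix Perl modules must be on @INC:

    use Nix::CopyClosure;

    # Copy the closure of one path: no compression, include-outputs off,
    # not a dry run, unsigned.
    Nix::CopyClosure::copyTo(
        "root\@builder.example.org",     # ssh host (hypothetical)
        [ ],                             # extra ssh options
        [ "/nix/store/...-hello-2.8" ],  # store paths (hypothetical)
        "", "",                          # compressor / decompressor
        0, 0, 0);                        # includeOutputs, dryRun, sign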
@@ -53,8 +53,14 @@ sub addPatch {
 sub readManifest_ {
     my ($manifest, $addNAR, $addPatch) = @_;
 
-    open MANIFEST, "<$manifest"
-        or die "cannot open `$manifest': $!";
+    # Decompress the manifest if necessary.
+    if ($manifest =~ /\.bz2$/) {
+        open MANIFEST, "$Nix::Config::bzip2 -d < $manifest |"
+            or die "cannot decompress `$manifest': $!";
+    } else {
+        open MANIFEST, "<$manifest"
+            or die "cannot open `$manifest': $!";
+    }
 
     my $inside = 0;
     my $type;
@@ -120,7 +126,6 @@ sub readManifest_ {
             elsif (/^\s*Hash:\s*(\S+)\s*$/) { $hash = $1; }
             elsif (/^\s*URL:\s*(\S+)\s*$/) { $url = $1; }
             elsif (/^\s*Size:\s*(\d+)\s*$/) { $size = $1; }
-            elsif (/^\s*SuccOf:\s*(\/\S+)\s*$/) { } # obsolete
             elsif (/^\s*BasePath:\s*(\/\S+)\s*$/) { $basePath = $1; }
             elsif (/^\s*BaseHash:\s*(\S+)\s*$/) { $baseHash = $1; }
             elsif (/^\s*Type:\s*(\S+)\s*$/) { $patchType = $1; }
@@ -286,14 +291,22 @@ EOF
     open MAINLOCK, ">>$lockFile" or die "unable to acquire lock ‘$lockFile’: $!\n";
     flock(MAINLOCK, LOCK_EX) or die;
 
+    our $insertNAR = $dbh->prepare(
+        "insert into NARs(manifest, storePath, url, hash, size, narHash, " .
+        "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)") or die;
+
+    our $insertPatch = $dbh->prepare(
+        "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
+        "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)");
+
     $dbh->begin_work;
 
     # Read each manifest in $manifestDir and add it to the database,
     # unless we've already done so on a previous run.
     my %seen;
 
-    for my $manifest (glob "$manifestDir/*.nixmanifest") {
-        $manifest = Cwd::abs_path($manifest);
+    for my $manifestLink (glob "$manifestDir/*.nixmanifest") {
+        my $manifest = Cwd::abs_path($manifestLink);
+        my $timestamp = lstat($manifest)->mtime;
         $seen{$manifest} = 1;
 
@@ -312,20 +325,16 @@ EOF
 
     sub addNARToDB {
         my ($storePath, $narFile) = @_;
-        $dbh->do(
-            "insert into NARs(manifest, storePath, url, hash, size, narHash, " .
-            "narSize, refs, deriver, system) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-            {}, $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
+        $insertNAR->execute(
+            $id, $storePath, $narFile->{url}, $narFile->{hash}, $narFile->{size},
             $narFile->{narHash}, $narFile->{narSize}, $narFile->{references},
             $narFile->{deriver}, $narFile->{system});
     };
 
     sub addPatchToDB {
         my ($storePath, $patch) = @_;
-        $dbh->do(
-            "insert into Patches(manifest, storePath, basePath, baseHash, url, hash, " .
-            "size, narHash, narSize, patchType) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
-            {}, $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
+        $insertPatch->execute(
+            $id, $storePath, $patch->{basePath}, $patch->{baseHash}, $patch->{url},
            $patch->{hash}, $patch->{size}, $patch->{narHash}, $patch->{narSize},
            $patch->{patchType});
     };
 
@@ -333,10 +342,10 @@ EOF
         my $version = readManifest_($manifest, \&addNARToDB, \&addPatchToDB);
 
         if ($version < 3) {
-            die "you have an old-style manifest `$manifest'; please delete it";
+            die "you have an old-style or corrupt manifest `$manifestLink'; please delete it";
        }
         if ($version >= 10) {
-            die "manifest `$manifest' is too new; please delete it or upgrade Nix";
+            die "manifest `$manifestLink' is too new; please delete it or upgrade Nix";
        }
    }
 
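The switch from per-row $dbh->do to prepared statement handles avoids re-parsing the SQL for every manifest entry. A minimal sketch of the same DBI pattern against a hypothetical table and row set:

    use DBI;

    my $dbh = DBI->connect("dbi:SQLite:dbname=test.sqlite", "", "",
                           { RaiseError => 1 });
    $dbh->do("create table if not exists NARs (storePath text, url text)");

    my @rows = ({ storePath => "/nix/store/aaa-example",
                  url => "http://example.org/a.nar" });

    # Prepare once, execute per row, one transaction around the loop.
    my $insert = $dbh->prepare("insert into NARs (storePath, url) values (?, ?)");
    $dbh->begin_work;
    $insert->execute($_->{storePath}, $_->{url}) foreach @rows;
    $dbh->commit;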
@@ -1,6 +1,5 @@
 package Nix::Store;
 
-use 5.010001;
 use strict;
 use warnings;
 
@@ -12,7 +11,12 @@ our %EXPORT_TAGS = ( 'all' => [ qw( ) ] );
 
 our @EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );
 
-our @EXPORT = qw(isValidPath topoSortPaths computeFSClosure followLinksToStorePath);
+our @EXPORT = qw(
+    isValidPath queryReferences queryPathInfo queryDeriver queryPathHash
+    topoSortPaths computeFSClosure followLinksToStorePath exportPaths
+    hashPath hashFile hashString
+    addToStore makeFixedOutputPath
+);
 
 our $VERSION = '0.15';
 
@@ -18,10 +18,8 @@ using namespace nix;
 void doInit()
 {
     if (!store) {
-        nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", "/nix/store")));
-        nixStateDir = canonPath(getEnv("NIX_STATE_DIR", "/nix/var/nix"));
-        nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
         try {
+            setDefaultsFromEnvironment();
             store = openStore();
         } catch (Error & e) {
             croak(e.what());
@@ -69,7 +67,7 @@ SV * queryPathHash(char * path)
     try {
         doInit();
         Hash hash = store->queryPathHash(path);
-        string s = "sha256:" + printHash(hash);
+        string s = "sha256:" + printHash32(hash);
         XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
     } catch (Error & e) {
         croak(e.what());
@@ -148,3 +146,73 @@ SV * followLinksToStorePath(char * path)
     }
     OUTPUT:
         RETVAL
+
+
+void exportPaths(int fd, int sign, ...)
+    PPCODE:
+        try {
+            doInit();
+            Paths paths;
+            for (int n = 2; n < items; ++n) paths.push_back(SvPV_nolen(ST(n)));
+            FdSink sink(fd);
+            exportPaths(*store, paths, sign, sink);
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * hashPath(char * algo, int base32, char * path)
+    PPCODE:
+        try {
+            Hash h = hashPath(parseHashType(algo), path).first;
+            string s = base32 ? printHash32(h) : printHash(h);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * hashFile(char * algo, int base32, char * path)
+    PPCODE:
+        try {
+            Hash h = hashFile(parseHashType(algo), path);
+            string s = base32 ? printHash32(h) : printHash(h);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * hashString(char * algo, int base32, char * s)
+    PPCODE:
+        try {
+            Hash h = hashString(parseHashType(algo), s);
+            string s = base32 ? printHash32(h) : printHash(h);
+            XPUSHs(sv_2mortal(newSVpv(s.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * addToStore(char * srcPath, int recursive, char * algo)
+    PPCODE:
+        try {
+            doInit();
+            Path path = store->addToStore(srcPath, recursive, parseHashType(algo));
+            XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
+
+
+SV * makeFixedOutputPath(int recursive, char * algo, char * hash, char * name)
+    PPCODE:
+        try {
+            doInit();
+            HashType ht = parseHashType(algo);
+            Path path = makeFixedOutputPath(recursive, ht,
+                parseHash16or32(ht, hash), name);
+            XPUSHs(sv_2mortal(newSVpv(path.c_str(), 0)));
+        } catch (Error & e) {
+            croak(e.what());
+        }
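A short sketch of the newly exported functions as seen from Perl; the input string and file name are arbitrary examples:

    use Nix::Store;

    # hashString(algo, base32?, s): render the hash in base-16 or base-32.
    my $h16 = hashString("sha256", 0, "hello");   # 64 hex characters
    my $h32 = hashString("sha256", 1, "hello");   # 52 base-32 characters

    # Compute the store path a fixed-output (non-recursive) addition
    # would produce, without touching the store.
    my $path = makeFixedOutputPath(0, "sha256", $h16, "hello.txt");
    print "$path\n";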
@@ -3,7 +3,9 @@
 use Fcntl ':flock';
 use English '-no_match_vars';
 use IO::Handle;
+use Nix::Config;
 use Nix::SSH qw/sshOpts openSSHConnection/;
+use Nix::CopyClosure;
 no warnings('once');
 
 
@@ -208,7 +210,7 @@ print STDERR "@ build-remote $drvPath $hostName\n" if $printBuildTrace;
 
 
 my $maybeSign = "";
-$maybeSign = "--sign" if -e "/nix/etc/nix/signing-key.sec";
+$maybeSign = "--sign" if -e "$Nix::Config::confDir/signing-key.sec";
 
 
 # Register the derivation as a temporary GC root.  Note that $PPID is
@@ -224,8 +226,7 @@ sub removeRoots {
 
 
 # Copy the derivation and its dependencies to the build machine.
-system("NIX_SSHOPTS=\"@sshOpts\" @bindir@/nix-copy-closure $hostName $maybeSign $drvPath @inputs") == 0
-    or die "cannot copy inputs to $hostName: $?";
+Nix::CopyClosure::copyTo($hostName, [ @sshOpts ], [ $drvPath, @inputs ], "", "", 0, 0, $maybeSign ne "");
 
 
 # Perform the build.
@@ -239,7 +240,7 @@ my $buildFlags = "--max-silent-time $maxSilentTime --fallback --add-root $rootsD
 # in which case every child receives SIGHUP; however, `-tt' doesn't
 # work on some platforms when connection sharing is used.)
 pipe STDIN, DUMMY; # make sure we have a readable STDIN
-if (system("ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
+if (system("exec ssh $hostName @sshOpts '(read; kill -INT -\$\$) <&0 & nix-store -r $drvPath $buildFlags > /dev/null' 2>&4") != 0) {
     # Note that if we get exit code 100 from `nix-store -r', it
     # denotes a permanent build failure (as opposed to an SSH problem
     # or a temporary Nix problem).  We propagate this to the caller to
@@ -259,7 +260,7 @@ foreach my $output (@outputs) {
     my $maybeSignRemote = "";
     $maybeSignRemote = "--sign" if $UID != 0;
 
-    system("ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
+    system("exec ssh $hostName @sshOpts 'nix-store --export $maybeSignRemote $output'" .
        "| NIX_HELD_LOCKS=$output @bindir@/nix-store --import > /dev/null") == 0
        or die "cannot copy $output from $hostName: $?";
 }
@@ -3,6 +3,7 @@
 use strict;
 use Nix::Config;
 use Nix::Manifest;
+use Nix::Store;
 use POSIX qw(strftime);
 use File::Temp qw(tempdir);
 
@@ -19,14 +20,8 @@ my $fast = 1;
 my $dbh = updateManifestDB();
 
 
-sub isValidPath {
-    my $p = shift;
-    if ($fast) {
-        return -e $p;
-    } else {
-        return system("$Nix::Config::binDir/nix-store --check-validity '$p' 2> /dev/null") == 0;
-    }
-}
+# $hashCache->{$algo}->{$path} yields the $algo-hash of $path.
+my $hashCache;
 
 
 sub parseHash {
@@ -101,15 +96,17 @@ sub computeSmallestDownload {
 
         foreach my $patch (@{$patchList}) {
             if (isValidPath($patch->{basePath})) {
-                # !!! this should be cached
                 my ($baseHashAlgo, $baseHash) = parseHash $patch->{baseHash};
-                my $format = "--base32";
-                $format = "" if $baseHashAlgo eq "md5";
-                my $hash = $fast && $baseHashAlgo eq "sha256"
-                    ? `$Nix::Config::binDir/nix-store -q --hash "$patch->{basePath}"`
-                    : `$Nix::Config::binDir/nix-hash --type '$baseHashAlgo' $format "$patch->{basePath}"`;
-                chomp $hash;
-                $hash =~ s/.*://;
+
+                my $hash = $hashCache->{$baseHashAlgo}->{$patch->{basePath}};
+                if (!defined $hash) {
+                    $hash = $fast && $baseHashAlgo eq "sha256"
+                        ? queryPathHash($patch->{basePath})
+                        : hashPath($baseHashAlgo, $baseHashAlgo ne "md5", $patch->{basePath});
+                    $hash =~ s/.*://;
+                    $hashCache->{$baseHashAlgo}->{$patch->{basePath}} = $hash;
+                }
 
                 next if $hash ne $baseHash;
             }
             push @queue, $patch->{basePath};
@@ -257,7 +254,7 @@ open LOGFILE, ">>$logFile" or die "cannot open log file $logFile";
 my $date = strftime ("%F %H:%M:%S UTC", gmtime (time));
 print LOGFILE "$$ get $targetPath $date\n";
 
-print "\n*** Trying to download/patch `$targetPath'\n";
+print STDERR "\n*** Trying to download/patch `$targetPath'\n";
 
 
 # Compute the shortest path.
@@ -281,7 +278,7 @@ sub downloadFile {
     $ENV{"PRINT_PATH"} = 1;
     $ENV{"QUIET"} = 1;
     my ($hash, $path) = `$Nix::Config::binDir/nix-prefetch-url '$url'`;
-    die "download of `$url' failed" . ($! ? ": $!" : "") unless $? == 0;
+    die "download of `$url' failed" . ($! ? ": $!" : "") . "\n" unless $? == 0;
     chomp $path;
     return $path;
 }
@@ -293,17 +290,17 @@ while (scalar @path > 0) {
     my $u = $edge->{start};
     my $v = $edge->{end};
 
-    print "\n*** Step $curStep/$maxStep: ";
+    print STDERR "\n*** Step $curStep/$maxStep: ";
 
     if ($edge->{type} eq "present") {
-        print "using already present path `$v'\n";
+        print STDERR "using already present path `$v'\n";
         print LOGFILE "$$ present $v\n";
 
         if ($curStep < $maxStep) {
             # Since this is not the last step, the path will be used
             # as a base to one or more patches.  So turn the base path
             # into a NAR archive, to which we can apply the patch.
-            print "  packing base path...\n";
+            print STDERR "  packing base path...\n";
             system("$Nix::Config::binDir/nix-store --dump $v > $tmpNar") == 0
                 or die "cannot dump `$v'";
         }
@@ -311,17 +308,17 @@ while (scalar @path > 0) {
 
     elsif ($edge->{type} eq "patch") {
         my $patch = $edge->{info};
-        print "applying patch `$patch->{url}' to `$u' to create `$v'\n";
+        print STDERR "applying patch `$patch->{url}' to `$u' to create `$v'\n";
 
         print LOGFILE "$$ patch $patch->{url} $patch->{size} $patch->{baseHash} $u $v\n";
 
         # Download the patch.
-        print "  downloading patch...\n";
+        print STDERR "  downloading patch...\n";
         my $patchPath = downloadFile "$patch->{url}";
 
         # Apply the patch to the NAR archive produced in step 1 (for
         # the already present path) or a later step (for patch sequences).
-        print "  applying patch...\n";
+        print STDERR "  applying patch...\n";
         system("$Nix::Config::libexecDir/bspatch $tmpNar $tmpNar2 $patchPath") == 0
             or die "cannot apply patch `$patchPath' to $tmpNar";
 
@@ -331,7 +328,7 @@ while (scalar @path > 0) {
         } else {
             # This was the last patch.  Unpack the final NAR archive
             # into the target path.
-            print "  unpacking patched archive...\n";
+            print STDERR "  unpacking patched archive...\n";
             system("$Nix::Config::binDir/nix-store --restore $v < $tmpNar2") == 0
                 or die "cannot unpack $tmpNar2 into `$v'";
         }
@@ -341,13 +338,13 @@ while (scalar @path > 0) {
 
     elsif ($edge->{type} eq "narfile") {
         my $narFile = $edge->{info};
-        print "downloading `$narFile->{url}' into `$v'\n";
+        print STDERR "downloading `$narFile->{url}' into `$v'\n";
 
         my $size = $narFile->{size} || -1;
         print LOGFILE "$$ narfile $narFile->{url} $size $v\n";
 
         # Download the archive.
-        print "  downloading archive...\n";
+        print STDERR "  downloading archive...\n";
         my $narFilePath = downloadFile "$narFile->{url}";
 
         if ($curStep < $maxStep) {
@@ -356,7 +353,7 @@ while (scalar @path > 0) {
                 or die "cannot unpack `$narFilePath' into `$v'";
         } else {
             # Unpack the archive into the target path.
-            print "  unpacking archive...\n";
+            print STDERR "  unpacking archive...\n";
             system("$Nix::Config::bzip2 -d < '$narFilePath' | $Nix::Config::binDir/nix-store --restore '$v'") == 0
                 or die "cannot unpack `$narFilePath' into `$v'";
         }
@@ -376,20 +373,15 @@ if (defined $finalNarHash) {
 
     # The hash in the manifest can be either in base-16 or base-32.
     # Handle both.
-    my $extraFlag =
-        ($hashAlgo eq "sha256" && length($hash) != 64)
-        ? "--base32" : "";
+    my $hash2 = hashPath($hashAlgo, $hashAlgo eq "sha256" && length($hash) != 64, $targetPath);
 
-    my $hash2 = `$Nix::Config::binDir/nix-hash --type $hashAlgo $extraFlag $targetPath`
-        or die "cannot compute hash of path `$targetPath'";
-    chomp $hash2;
-
-    die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2"
+    die "hash mismatch in downloaded path $targetPath; expected $hash, got $hash2\n"
         if $hash ne $hash2;
 } else {
-    die "cannot check integrity of the downloaded path since its hash is not known";
+    die "cannot check integrity of the downloaded path since its hash is not known\n";
 }
 
 
 print STDERR "\n";
 print LOGFILE "$$ success\n";
 close LOGFILE;
@@ -3,11 +3,12 @@
 use Nix::SSH;
 use Nix::Config;
 use Nix::Store;
+use Nix::CopyClosure;
 
 
 if (scalar @ARGV < 1) {
     print STDERR <<EOF
-Usage: nix-copy-closure [--from | --to] HOSTNAME [--sign] [--gzip] PATHS...
+Usage: nix-copy-closure [--from | --to] HOSTNAME [--sign] [--gzip] [--bzip2] [--xz] PATHS...
 EOF
     ;
     exit 1;
@@ -39,8 +40,16 @@ while (@ARGV) {
         $sign = 1;
     }
     elsif ($arg eq "--gzip") {
-        $compressor = "| gzip";
-        $decompressor = "gunzip |";
+        $compressor = "gzip";
+        $decompressor = "gunzip";
+    }
+    elsif ($arg eq "--bzip2") {
+        $compressor = "bzip2";
+        $decompressor = "bunzip2";
+    }
+    elsif ($arg eq "--xz") {
+        $compressor = "xz";
+        $decompressor = "xz -d";
     }
     elsif ($arg eq "--from") {
         $toMode = 0;
@@ -67,30 +76,7 @@ openSSHConnection $sshHost or die "$0: unable to start SSH\n";
 
 
 if ($toMode) { # Copy TO the remote machine.
-
-    # Get the closure of this path.
-    my @allStorePaths = reverse(topoSortPaths(computeFSClosure(0, $includeOutputs, map { followLinksToStorePath $_ } @storePaths)));
-
-    # Ask the remote host which paths are invalid.
-    open(READ, "set -f; ssh $sshHost @sshOpts nix-store --check-validity --print-invalid @allStorePaths|");
-    my @missing = ();
-    while (<READ>) {
-        chomp;
-        push @missing, $_;
-    }
-    close READ or die;
-
-    # Export the store paths and import them on the remote machine.
-    if (scalar @missing > 0) {
-        print STDERR "copying these missing paths:\n";
-        print STDERR "  $_\n" foreach @missing;
-        unless ($dryRun) {
-            my $extraOpts = $sign ? "--sign" : "";
-            system("set -f; nix-store --export $extraOpts @missing $compressor | ssh $sshHost @sshOpts '$decompressor nix-store --import'") == 0
-                or die "copying store paths to remote machine `$sshHost' failed: $?";
-        }
-    }
-
+    Nix::CopyClosure::copyTo($sshHost, [ @sshOpts ], [ @storePaths ], $compressor, $decompressor, $includeOutputs, $dryRun, $sign);
 }
 
 else { # Copy FROM the remote machine.
@@ -110,10 +96,12 @@ else { # Copy FROM the remote machine.
 
     close READ or die "nix-store on remote machine `$sshHost' failed: $?";
 
-    # Export the store paths on the remote machine and import them on locally.
+    # Export the store paths on the remote machine and import them locally.
     if (scalar @missing > 0) {
-        print STDERR "copying these missing paths:\n";
-        print STDERR "  $_\n" foreach @missing;
+        print STDERR "copying ", scalar @missing, " missing paths from ‘$sshHost’...\n";
+        #print STDERR "  $_\n" foreach @missing;
+        $compressor = "| $compressor" if $compressor ne "";
+        $decompressor = "$decompressor |" if $decompressor ne "";
         unless ($dryRun) {
             my $extraOpts = $sign ? "--sign" : "";
             system("set -f; ssh $sshHost @sshOpts 'nix-store --export $extraOpts @missing $compressor' | $decompressor $Nix::Config::binDir/nix-store --import") == 0
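The flags now store bare program names; the pipe glue is added where the ssh command line is assembled (as in copyTo above). A sketch of the resulting command string, with a hypothetical host:

    my ($compressor, $decompressor) = ("xz", "xz -d");
    $compressor   = "$compressor |"   if $compressor ne "";
    $decompressor = "$decompressor |" if $decompressor ne "";
    print "| $compressor ssh mach.example.org '$decompressor nix-store --import'\n";
    # prints: | xz | ssh mach.example.org 'xz -d | nix-store --import'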
@@ -1,165 +1,128 @@
-#! @shell@ -e
+#! @perl@ -w @perlFlags@
 
-url=$1
-expHash=$2
+use strict;
+use File::Basename;
+use File::Temp qw(tempdir);
+use File::stat;
+use Nix::Store;
+use Nix::Config;
 
-binDir=@bindir@
-if [ -n "$NIX_BIN_DIR" ]; then binDir="$NIX_BIN_DIR"; fi
+my $url = shift;
+my $expHash = shift;
+my $hashType = $ENV{'NIX_HASH_ALGO'} || "sha256";
+my $cacheDir = $ENV{'NIX_DOWNLOAD_CACHE'};
 
-# needed to make it work on NixOS
-export PATH=$PATH:@coreutils@
+if (!defined $url || $url eq "") {
+    print STDERR <<EOF
+Usage: nix-prefetch-url URL [EXPECTED-HASH]
+EOF
+    ;
+    exit 1;
+}
 
-hashType=$NIX_HASH_ALGO
-if test -z "$hashType"; then
-    hashType=sha256
-fi
+sub writeFile {
+    my ($fn, $s) = @_;
+    open TMP, ">$fn" or die;
+    print TMP "$s" or die;
+    close TMP or die;
+}
 
-hashFormat=
-if test "$hashType" != "md5"; then
-    hashFormat=--base32
-fi
+sub readFile {
+    local $/ = undef;
+    my ($fn) = @_;
+    open TMP, "<$fn" or die;
+    my $s = <TMP>;
+    close TMP or die;
+    return $s;
+}
 
-if test -z "$url"; then
-    echo "syntax: nix-prefetch-url URL [EXPECTED-HASH]" >&2
-    exit 1
-fi
+my $tmpDir = tempdir("nix-prefetch-url.XXXXXX", CLEANUP => 1, TMPDIR => 1)
+    or die "cannot create a temporary directory";
+
+# Hack to support the mirror:// scheme from Nixpkgs.
+if ($url =~ /^mirror:\/\//) {
+    system("$Nix::Config::binDir/nix-build '<nixpkgs>' -A resolveMirrorURLs --argstr url '$url' -o $tmpDir/urls > /dev/null") == 0
+        or die "$0: nix-build failed; maybe \$NIX_PATH is not set properly\n";
+    my @expanded = split ' ', readFile("$tmpDir/urls");
+    die "$0: cannot resolve ‘$url’" unless scalar @expanded > 0;
+    print STDERR "$url expands to $expanded[0]\n";
+    $url = $expanded[0];
+}
 
 # Handle escaped characters in the URI.  `+', `=' and `?' are the only
 # characters that are valid in Nix store path names but have a special
 # meaning in URIs.
-name=$(basename "$url" | @sed@ -e 's/%2b/+/g' -e 's/%3d/=/g' -e 's/%3f/\?/g')
-if test -z "$name"; then echo "invalid url"; exit 1; fi
+my $name = basename $url;
+die "cannot figure out file name for ‘$url’\n" if $name eq "";
+$name =~ s/%2b/+/g;
+$name =~ s/%3d/=/g;
+$name =~ s/%3f/?/g;
+
+my $finalPath;
+my $hash;
 
 # If the hash was given, a file with that hash may already be in the
 # store.
-if test -n "$expHash"; then
-    finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$expHash" "$name")
-    if ! $bindir/nix-store --check-validity "$finalPath" 2> /dev/null; then
-        finalPath=
-    fi
-    hash=$expHash
-fi
-
-
-mkTempDir() {
-    if test -n "$tmpPath"; then return; fi
-    local i=0
-    while true; do
-        if test -z "$TMPDIR"; then TMPDIR=/tmp; fi
-        tmpPath=$TMPDIR/nix-prefetch-url-$$-$i
-        if mkdir "$tmpPath"; then break; fi
-        # !!! to bad we can't check for ENOENT in mkdir, so this check
-        # is slightly racy (it bombs out if somebody just removed
-        # $tmpPath...).
-        if ! test -e "$tmpPath"; then exit 1; fi
-        i=$((i + 1))
-    done
-    trap removeTempDir EXIT SIGINT SIGQUIT
-}
-
-removeTempDir() {
-    if test -n "$tmpPath"; then
-        rm -rf "$tmpPath" || true
-    fi
-}
-
-
-doDownload() {
-    @curl@ $cacheFlags --fail --location --max-redirs 20 --disable-epsv \
-        --cookie-jar $tmpPath/cookies "$url" -o $tmpFile
-}
-
-
-# Hack to support the mirror:// scheme from Nixpkgs.
-if test "${url:0:9}" = "mirror://"; then
-    if test -z "$NIXPKGS_ALL"; then
-        echo "Resolving mirror:// URLs requires Nixpkgs.  Please point \$NIXPKGS_ALL at a Nixpkgs tree." >&2
-        exit 1
-    fi
-
-    mkTempDir
-    nix-build "$NIXPKGS_ALL" -A resolveMirrorURLs --argstr url "$url" -o $tmpPath/urls > /dev/null
-
-    expanded=($(cat $tmpPath/urls))
-    if test "${#expanded[*]}" = 0; then
-        echo "$0: cannot resolve $url." >&2
-        exit 1
-    fi
-
-    echo "$url expands to ${expanded[*]} (using ${expanded[0]})" >&2
-    url="${expanded[0]}"
-fi
+if (defined $expHash) {
+    $finalPath = makeFixedOutputPath(0, $hashType, $expHash, $name);
+    if (isValidPath($finalPath)) { $hash = $expHash; } else { $finalPath = undef; }
+}
 
 # If we don't know the hash or a file with that hash doesn't exist,
 # download the file and add it to the store.
-if test -z "$finalPath"; then
+if (!defined $finalPath) {
 
-    mkTempDir
-    tmpFile=$tmpPath/$name
+    my $tmpFile = "$tmpDir/$name";
 
     # Optionally do timestamp-based caching of the download.
     # Actually, the only thing that we cache in $NIX_DOWNLOAD_CACHE is
     # the hash and the timestamp of the file at $url.  The caching of
     # the file *contents* is done in Nix store, where it can be
    # garbage-collected independently.
-    if test -n "$NIX_DOWNLOAD_CACHE"; then
-        echo -n "$url" > $tmpPath/url
-        urlHash=$($binDir/nix-hash --type sha256 --base32 --flat $tmpPath/url)
-        echo "$url" > "$NIX_DOWNLOAD_CACHE/$urlHash.url"
-        cachedHashFN="$NIX_DOWNLOAD_CACHE/$urlHash.$hashType"
-        cachedTimestampFN="$NIX_DOWNLOAD_CACHE/$urlHash.stamp"
-        cacheFlags="--remote-time"
-        if test -e "$cachedTimestampFN" -a -e "$cachedHashFN"; then
-            # Only download the file if it is newer than the cached version.
-            cacheFlags="$cacheFlags --time-cond $cachedTimestampFN"
-        fi
-    fi
+    my ($cachedTimestampFN, $cachedHashFN, @cacheFlags);
+    if (defined $cacheDir) {
+        my $urlHash = hashString("sha256", 1, $url);
+        writeFile "$cacheDir/$urlHash.url", $url;
+        $cachedHashFN = "$cacheDir/$urlHash.$hashType";
+        $cachedTimestampFN = "$cacheDir/$urlHash.stamp";
+        @cacheFlags = ("--time-cond", $cachedTimestampFN) if -f $cachedHashFN && -f $cachedTimestampFN;
+    }
 
     # Perform the download.
-    doDownload
+    my @curlFlags = ("curl", $url, "-o", $tmpFile, "--fail", "--location", "--max-redirs", "20", "--disable-epsv", "--cookie-jar", "$tmpDir/cookies", "--remote-time", (split " ", ($ENV{NIX_CURL_FLAGS} || "")));
+    (system $Nix::Config::curl @curlFlags, @cacheFlags) == 0 or die "$0: download of ‘$url’ failed\n";
 
-    if test -n "$NIX_DOWNLOAD_CACHE" -a ! -e $tmpFile; then
+    if (defined $cacheDir && ! -e $tmpFile) {
         # Curl didn't create $tmpFile, so apparently there's no newer
         # file on the server.
-        hash=$(cat $cachedHashFN)
-        finalPath=$($binDir/nix-store --print-fixed-path "$hashType" "$hash" "$name")
-        if ! $binDir/nix-store --check-validity "$finalPath" 2> /dev/null; then
-            echo "cached contents of \`$url' disappeared, redownloading..." >&2
-            finalPath=
-            cacheFlags="--remote-time"
-            doDownload
-        fi
-    fi
+        $hash = readFile $cachedHashFN or die;
+        $finalPath = makeFixedOutputPath(0, $hashType, $hash, $name);
+        unless (isValidPath $finalPath) {
+            print STDERR "cached contents of ‘$url’ disappeared, redownloading...\n";
+            $finalPath = undef;
+            (system $Nix::Config::curl @curlFlags) == 0 or die "$0: download of ‘$url’ failed\n";
+        }
+    }
 
-    if test -z "$finalPath"; then
+    if (!defined $finalPath) {
 
         # Compute the hash.
-        hash=$($binDir/nix-hash --type "$hashType" $hashFormat --flat $tmpFile)
-        if ! test -n "$QUIET"; then echo "hash is $hash" >&2; fi
+        $hash = hashFile($hashType, $hashType ne "md5", $tmpFile);
 
-        if test -n "$NIX_DOWNLOAD_CACHE"; then
-            echo $hash > $cachedHashFN
-            touch -r $tmpFile $cachedTimestampFN
-        fi
+        if (defined $cacheDir) {
+            writeFile $cachedHashFN, $hash;
+            my $st = stat($tmpFile) or die;
+            open STAMP, ">$cachedTimestampFN" or die; close STAMP;
+            utime($st->atime, $st->mtime, $cachedTimestampFN) or die;
+        }
 
         # Add the downloaded file to the Nix store.
-        finalPath=$($binDir/nix-store --add-fixed "$hashType" $tmpFile)
+        $finalPath = addToStore($tmpFile, 0, $hashType);
 
-        if test -n "$expHash" -a "$expHash" != "$hash"; then
-            echo "hash mismatch for URL \`$url'" >&2
-            exit 1
-        fi
+        die "$0: hash mismatch for ‘$url’\n" if defined $expHash && $expHash ne $hash;
     }
-fi
+}
 
-if ! test -n "$QUIET"; then echo "path is $finalPath" >&2; fi
-
-echo $hash
-
-if test -n "$PRINT_PATH"; then
-    echo $finalPath
-fi
+print STDERR "path is ‘$finalPath’\n" unless $ENV{'QUIET'};
+print "$hash\n";
+print "$finalPath\n" if $ENV{'PRINT_PATH'};
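The rewritten script keeps the old cache layout: the key is the base-32 SHA-256 of the URL itself, and only the hash and timestamp are cached, since the file contents live in the store. A sketch of the naming scheme; the cache directory and URL here are hypothetical:

    use Nix::Store;

    my $cacheDir = "/tmp/nix-download-cache";
    my $url      = "http://example.org/foo.tar.gz";
    my $urlHash  = hashString("sha256", 1, $url);
    print "url file:   $cacheDir/$urlHash.url\n";
    print "hash file:  $cacheDir/$urlHash.sha256\n";
    print "stamp file: $cacheDir/$urlHash.stamp\n";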
@@ -33,10 +33,6 @@ if (! -l $manifestDirLink) {
 
 # Process the URLs specified on the command line.
-my %narFiles;
-my %patches;
-
-my $skipWrongStore = 0;
 
 sub downloadFile {
     my $url = shift;
@@ -59,16 +55,7 @@ sub processURL {
     # First see if a bzipped manifest is available.
     if (system("$Nix::Config::curl --fail --silent --head '$url'.bz2 > /dev/null") == 0) {
         print "fetching list of Nix archives at `$url.bz2'...\n";
-        my $bzipped = downloadFile "$url.bz2";
-
-        $manifest = "$tmpDir/MANIFEST";
-
-        system("$Nix::Config::bzip2 -d < $bzipped > $manifest") == 0
-            or die "cannot decompress manifest";
-
-        $manifest = (`$Nix::Config::binDir/nix-store --add $manifest`
-            or die "cannot copy $manifest to the store");
-        chomp $manifest;
+        $manifest = downloadFile "$url.bz2";
     }
 
     # Otherwise, just get the uncompressed manifest.
@@ -77,20 +64,6 @@ sub processURL {
         $manifest = downloadFile $url;
     }
 
-    my $version = readManifest($manifest, \%narFiles, \%patches);
-
-    die "`$url' is not a manifest or it is too old (i.e., for Nix <= 0.7)\n" if $version < 3;
-    die "manifest `$url' is too new\n" if $version >= 5;
-
-    if ($skipWrongStore) {
-        foreach my $path (keys %narFiles) {
-            if (substr($path, 0, length($storeDir) + 1) ne "$storeDir/") {
-                print STDERR "warning: manifest `$url' assumes a Nix store at a different location than $storeDir, skipping...\n";
-                exit 0;
-            }
-        }
-    }
-
     my $baseName = "unnamed";
     if ($url =~ /\/([^\/]+)\/[^\/]+$/) { # get the forelast component
         $baseName = $1;
@@ -129,12 +102,12 @@ sub processURL {
 while (@ARGV) {
     my $url = shift @ARGV;
     if ($url eq "--skip-wrong-store") {
-        $skipWrongStore = 1;
+        # No-op, no longer supported.
     } else {
         processURL $url;
     }
 }
 
-my $size = scalar (keys %narFiles);
-print "$size store paths in manifest\n";
+# Update the cache.
+updateManifestDB();
@@ -198,8 +198,8 @@ for (my $n = 0; $n < scalar @storePaths; $n++) {
     # In some exceptional cases (such as VM tests that use the Nix
     # store of the host), the database doesn't contain the hash.  So
     # compute it.
-    if ($narHash eq "sha256:0000000000000000000000000000000000000000000000000000") {
-        $narHash = `$binDir/nix-hash --type sha256 '$storePath'`;
+    if ($narHash =~ /^sha256:0*$/) {
+        $narHash = `$binDir/nix-hash --type sha256 --base32 '$storePath'`;
         die "cannot hash `$storePath'" if $? != 0;
         chomp $narHash;
         $narHash = "sha256:$narHash";
@@ -44,4 +44,15 @@ bool parseSearchPathArg(const string & arg, Strings::iterator & i,
 }
 
 
+Path lookupFileArg(EvalState & state, string s)
+{
+    if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
+        Path p = state.findFile(s.substr(1, s.size() - 2));
+        if (p == "") throw Error(format("file `%1%' was not found in the Nix search path (add it using $NIX_PATH or -I)") % p);
+        return p;
+    } else
+        return absPath(s);
+}
+
+
 }
@@ -14,6 +14,8 @@ bool parseOptionArg(const string & arg, Strings::iterator & i,
 bool parseSearchPathArg(const string & arg, Strings::iterator & i,
     const Strings::iterator & argsEnd, EvalState & state);
 
+Path lookupFileArg(EvalState & state, string s);
+
 }
@@ -363,9 +363,8 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
                 foreach (PathSet::iterator, j, refs) {
                     drv.inputSrcs.insert(*j);
                     if (isDerivation(*j))
-                        drv.inputDrvs[*j] = store -> queryDerivationOutputNames(*j);
+                        drv.inputDrvs[*j] = store->queryDerivationOutputNames(*j);
                 }
-
                 explicitlyPassed = true;
             } else if (path.at(0) == '!') {
                 size_t index;
@@ -387,7 +386,7 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
             debug(format("derivation uses `%1%'") % path);
-            if (!useDrvAsSrc && isDerivation(path))
-                drv.inputDrvs[path] = store -> queryDerivationOutputNames(path);
+            if (explicitlyPassed)
+                drv.inputDrvs[path] = store->queryDerivationOutputNames(path);
             else if (drv.inputDrvs.find(path) == drv.inputDrvs.end())
                 drv.inputDrvs[path] = singleton<StringSet>(output);
             else
@@ -416,17 +415,7 @@ static void prim_derivationStrict(EvalState & state, Value * * args, Value & v)
             HashType ht = parseHashType(outputHashAlgo);
             if (ht == htUnknown)
                 throw EvalError(format("unknown hash algorithm `%1%'") % outputHashAlgo);
-            Hash h(ht);
-            if (outputHash.size() == h.hashSize * 2)
-                /* hexadecimal representation */
-                h = parseHash(ht, outputHash);
-            else if (outputHash.size() == hashLength32(h))
-                /* base-32 representation */
-                h = parseHash32(ht, outputHash);
-            else
-                throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
-                    % outputHash % outputHashAlgo);
-            string s = outputHash;
+            Hash h = parseHash16or32(ht, outputHash);
             outputHash = printHash(h);
             if (outputHashRecursive) outputHashAlgo = "r:" + outputHashAlgo;
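parseHash16or32 folds the old length test into one helper: for a fixed algorithm the two textual encodings always have different lengths, so the length alone identifies the encoding. The arithmetic for SHA-256, mirroring the check removed above:

    my $hashSizeBytes = 32;                                 # SHA-256
    my $hexLen    = $hashSizeBytes * 2;                     # 64 characters
    my $base32Len = int(($hashSizeBytes * 8 - 1) / 5) + 1;  # 52 characters
    print "base-16: $hexLen chars, base-32: $base32Len chars\n";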
@@ -7,13 +7,6 @@ libmain_la_LIBADD = ../libstore/libstore.la @BDW_GC_LIBS@
 pkginclude_HEADERS = shared.hh
 
 AM_CXXFLAGS = \
- -DNIX_STORE_DIR=\"$(storedir)\" \
- -DNIX_DATA_DIR=\"$(datadir)\" \
- -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
- -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
- -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
- -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
- -DNIX_BIN_DIR=\"$(bindir)\" \
 -DNIX_VERSION=\"$(VERSION)\" \
 -I$(srcdir)/.. -I$(srcdir)/../libutil \
 -I$(srcdir)/../libstore
@@ -65,7 +65,7 @@ void printMissing(StoreAPI & store, const PathSet & paths)
     }
 
     if (!willSubstitute.empty()) {
-        printMsg(lvlInfo, format("these paths will be downloaded/copied (%.2f MiB download, %.2f MiB unpacked):")
+        printMsg(lvlInfo, format("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):")
             % (downloadSize / (1024.0 * 1024.0))
             % (narSize / (1024.0 * 1024.0)));
         foreach (PathSet::iterator, i, willSubstitute)
@@ -90,23 +90,6 @@ static void setLogType(string lt)
 }
 
 
-static void closeStore()
-{
-    try {
-        throw;
-    } catch (std::exception & e) {
-        printMsg(lvlError,
-            format("FATAL: unexpected exception (closing store and aborting): %1%") % e.what());
-    }
-    try {
-        store.reset((StoreAPI *) 0);
-    } catch (...) {
-        ignoreException();
-    }
-    abort();
-}
-
-
 RemoveTempRoots::~RemoveTempRoots()
 {
     removeTempRoots();
@@ -120,29 +103,7 @@ static bool showTrace = false;
    processor. */
 static void initAndRun(int argc, char * * argv)
 {
-    /* Setup Nix paths. */
-    nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
-    nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
-    nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
-    nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
-    nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
-    nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
-    nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
-    nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
-
-    string subs = getEnv("NIX_SUBSTITUTERS", "default");
-    if (subs == "default") {
-        substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
-        substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
-    } else
-        substituters = tokenizeString(subs, ":");
-
-    /* Get some settings from the configuration file. */
-    thisSystem = querySetting("system", SYSTEM);
-    maxBuildJobs = queryIntSetting("build-max-jobs", 1);
-    buildCores = queryIntSetting("build-cores", 1);
-    maxSilentTime = queryIntSetting("build-max-silent-time", 0);
-    buildTimeout = queryIntSetting("build-timeout", 0);
+    setDefaultsFromEnvironment();
 
     /* Catch SIGINT. */
     struct sigaction act;
@@ -260,12 +221,6 @@ static void initAndRun(int argc, char * * argv)
        exit. */
     RemoveTempRoots removeTempRoots __attribute__((unused));
 
-    /* Make sure that the database gets closed properly, even if
-       terminate() is called (which happens sometimes due to bugs in
-       destructor/exceptions interaction, but that needn't preclude a
-       clean shutdown of the database). */
-    std::set_terminate(closeStore);
-
     run(remaining);
 
     /* Close the Nix database. */
@@ -15,7 +15,16 @@ libstore_la_LIBADD = ../libutil/libutil.la ../boost/format/libformat.la ${aterm_
 EXTRA_DIST = schema.sql
 
 AM_CXXFLAGS = -Wall \
- ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil
+ ${sqlite_include} -I$(srcdir)/.. -I$(srcdir)/../libutil \
+ -DNIX_STORE_DIR=\"$(storedir)\" \
+ -DNIX_DATA_DIR=\"$(datadir)\" \
+ -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
+ -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
+ -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
+ -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
+ -DNIX_BIN_DIR=\"$(bindir)\" \
+ -I$(srcdir)/.. -I$(srcdir)/../libutil \
+ -I$(srcdir)/../libstore
 
 local-store.lo: schema.sql.hh
@@ -1650,6 +1650,9 @@ void DerivationGoal::startBuilder()
             (format("nixbld:!:%1%:\n")
                 % (buildUser.enabled() ? buildUser.getGID() : getgid())).str());
 
+        /* Create /etc/hosts with localhost entry. */
+        writeFile(chrootRootDir + "/etc/hosts", "127.0.0.1 localhost\n");
+
         /* Bind-mount a user-configurable set of directories from the
            host file system.  The `/dev/pts' directory must be mounted
            separately so that newly-created pseudo-terminals show
@@ -2199,9 +2202,7 @@ void SubstitutionGoal::tryNext()
     if (subs.size() == 0) {
         /* None left.  Terminate this goal and let someone else deal
            with it. */
-        printMsg(lvlError,
-            format("path `%1%' is required, but there is no substituter that can build it")
-            % storePath);
+        debug(format("path `%1%' is required, but there is no substituter that can build it") % storePath);
         amDone(ecFailed);
         return;
     }
@@ -2232,8 +2233,7 @@ void SubstitutionGoal::referencesValid()
     trace("all references realised");
 
     if (nrFailed > 0) {
-        printMsg(lvlError,
-            format("some references of path `%1%' could not be realised") % storePath);
+        debug(format("some references of path `%1%' could not be realised") % storePath);
         amDone(ecFailed);
         return;
     }
@@ -2286,9 +2286,7 @@ void SubstitutionGoal::tryToRun()
         return;
     }
 
-    printMsg(lvlInfo,
-        format("substituting path `%1%' using substituter `%2%'")
-        % storePath % sub);
+    printMsg(lvlInfo, format("fetching path `%1%'...") % storePath);
 
     logPipe.create();
 
@@ -2364,19 +2362,15 @@ void SubstitutionGoal::finished()
     try {
 
         if (!statusOk(status))
-            throw SubstError(format("builder for `%1%' %2%")
+            throw SubstError(format("fetching path `%1%' %2%")
                 % storePath % statusToString(status));
 
         if (!pathExists(storePath))
-            throw SubstError(
-                format("substitute did not produce path `%1%'")
-                % storePath);
+            throw SubstError(format("substitute did not produce path `%1%'") % storePath);
 
     } catch (SubstError & e) {
 
-        printMsg(lvlInfo,
-            format("substitution of path `%1%' using substituter `%2%' failed: %3%")
-            % storePath % sub % e.msg());
+        printMsg(lvlInfo, e.msg());
 
         if (printBuildTrace) {
             printMsg(lvlError, format("@ substituter-failed %1% %2% %3%")
@@ -1,3 +1,5 @@
+#include "config.h"
+
 #include "globals.hh"
 #include "util.hh"
 
@@ -139,4 +141,32 @@ void reloadSettings()
 }
 
 
+void setDefaultsFromEnvironment()
+{
+    /* Setup Nix paths. */
+    nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
+    nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
+    nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
+    nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
+    nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
+    nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
+    nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
+    nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
+
+    string subs = getEnv("NIX_SUBSTITUTERS", "default");
+    if (subs == "default") {
+        substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
+        substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
+    } else
+        substituters = tokenizeString(subs, ":");
+
+    /* Get some settings from the configuration file. */
+    thisSystem = querySetting("system", SYSTEM);
+    maxBuildJobs = queryIntSetting("build-max-jobs", 1);
+    buildCores = queryIntSetting("build-cores", 1);
+    maxSilentTime = queryIntSetting("build-max-silent-time", 0);
+    buildTimeout = queryIntSetting("build-timeout", 0);
+}
+
+
 }
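Because every default in setDefaultsFromEnvironment() is an environment lookup, a caller can point a whole Nix invocation at an alternative store per process. A hedged sketch; the directories are hypothetical scratch paths, and nix-store --init initialises an empty database there:

    $ENV{NIX_STORE_DIR} = "/tmp/test-store";
    $ENV{NIX_STATE_DIR} = "/tmp/test-state";
    system("nix-store", "--init") == 0 or die "nix-store --init failed: $?";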
@@ -114,6 +114,8 @@ void overrideSetting(const string & name, const Strings & value);
 
 void reloadSettings();
 
+void setDefaultsFromEnvironment();
+
 
 }
@ -327,10 +327,9 @@ void LocalStore::openDB(bool create)
|
|||
if (sqlite3_exec(db, ("pragma synchronous = " + syncMode + ";").c_str(), 0, 0, 0) != SQLITE_OK)
|
||||
throwSQLiteError(db, "setting synchronous mode");
|
||||
|
||||
/* Set the SQLite journal mode. WAL mode is fastest, but doesn't
|
||||
seem entirely stable at the moment (Oct. 2010). Thus, use
|
||||
truncate mode by default. */
|
||||
string mode = queryBoolSetting("use-sqlite-wal", false) ? "wal" : "truncate";
|
||||
/* Set the SQLite journal mode. WAL mode is fastest, so it's the
|
||||
default. */
|
||||
string mode = queryBoolSetting("use-sqlite-wal", true) ? "wal" : "truncate";
|
||||
string prevMode;
|
||||
{
|
||||
SQLiteStmt stmt;
|
||||
|
@ -367,7 +366,7 @@ void LocalStore::openDB(bool create)
|
|||
stmtRegisterValidPath.create(db,
|
||||
"insert into ValidPaths (path, hash, registrationTime, deriver, narSize) values (?, ?, ?, ?, ?);");
|
||||
stmtUpdatePathInfo.create(db,
|
||||
"update ValidPaths set narSize = ? where path = ?;");
|
||||
"update ValidPaths set narSize = ?, hash = ? where path = ?;");
|
||||
stmtAddReference.create(db,
|
||||
"insert or replace into Refs (referrer, reference) values (?, ?);");
|
||||
stmtQueryPathInfo.create(db,
|
||||
|
@ -684,7 +683,7 @@ ValidPathInfo LocalStore::queryPathInfo(const Path & path)
|
|||
}
|
||||
|
||||
|
||||
/* Update path info in the database. Currently only updated the
|
||||
/* Update path info in the database. Currently only updates the
|
||||
narSize field. */
|
||||
void LocalStore::updatePathInfo(const ValidPathInfo & info)
|
||||
{
|
||||
|
@ -693,6 +692,7 @@ void LocalStore::updatePathInfo(const ValidPathInfo & info)
|
|||
stmtUpdatePathInfo.bind64(info.narSize);
|
||||
else
|
||||
stmtUpdatePathInfo.bind(); // null
|
||||
stmtUpdatePathInfo.bind("sha256:" + printHash(info.hash));
|
||||
stmtUpdatePathInfo.bind(info.path);
|
||||
if (sqlite3_step(stmtUpdatePathInfo) != SQLITE_DONE)
|
||||
throwSQLiteError(db, format("updating info of path `%1%' in database") % info.path);
|
||||
|
@ -1125,16 +1125,14 @@ struct HashAndWriteSink : Sink
|
|||
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
|
||||
{
|
||||
}
|
||||
virtual void operator ()
|
||||
(const unsigned char * data, unsigned int len)
|
||||
virtual void operator () (const unsigned char * data, size_t len)
|
||||
{
|
||||
writeSink(data, len);
|
||||
hashSink(data, len);
|
||||
}
|
||||
Hash currentHash()
|
||||
{
|
||||
HashSink hashSinkClone(hashSink);
|
||||
return hashSinkClone.finish().first;
|
||||
return hashSink.currentHash().first;
|
||||
}
|
||||
};
@@ -1180,7 +1178,7 @@ void LocalStore::exportPath(const Path & path, bool sign,

     PathSet references;
     queryReferences(path, references);
-    writeStringSet(references, hashAndWriteSink);
+    writeStrings(references, hashAndWriteSink);

     Path deriver = queryDeriver(path);
     writeString(deriver, hashAndWriteSink);

@@ -1223,11 +1221,11 @@ struct HashAndReadSource : Source
     {
         hashing = true;
     }
-    virtual void operator ()
-        (unsigned char * data, unsigned int len)
+    size_t read(unsigned char * data, size_t len)
     {
-        readSource(data, len);
-        if (hashing) hashSink(data, len);
+        size_t n = readSource.read(data, len);
+        if (hashing) hashSink(data, n);
+        return n;
     }
 };

@@ -1267,7 +1265,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)

     Path dstPath = readStorePath(hashAndReadSource);

-    PathSet references = readStorePaths(hashAndReadSource);
+    PathSet references = readStorePaths<PathSet>(hashAndReadSource);

     Path deriver = readString(hashAndReadSource);
     if (deriver != "") assertStorePath(deriver);

@@ -1278,7 +1276,7 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
     bool haveSignature = readInt(hashAndReadSource) == 1;

     if (requireSignature && !haveSignature)
-        throw Error("imported archive lacks a signature");
+        throw Error(format("imported archive of `%1%' lacks a signature") % dstPath);

     if (haveSignature) {
         string signature = readString(hashAndReadSource);

@@ -1354,6 +1352,19 @@ Path LocalStore::importPath(bool requireSignature, Source & source)
 }


+Paths LocalStore::importPaths(bool requireSignature, Source & source)
+{
+    Paths res;
+    while (true) {
+        unsigned long long n = readLongLong(source);
+        if (n == 0) break;
+        if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'");
+        res.push_back(importPath(requireSignature, source));
+    }
+    return res;
+}
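importPaths() defines the only framing there is around the individual dumps: each exported path is preceded by a 64-bit flag word. The stream layout it consumes (and that exportPaths() in store-api.cc, later in this commit, produces) is, schematically:

    1                      // "another path follows"
    <export of path #1>    // NAR dump, references, deriver, optional signature
    1
    <export of path #2>
    ...
    0                      // end of stream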
 void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFreed,
     unsigned long long & blocksFreed)
 {

@@ -1369,7 +1380,7 @@ void LocalStore::deleteFromStore(const Path & path, unsigned long long & bytesFr
         PathSet referrers; queryReferrers(path, referrers);
         referrers.erase(path); /* ignore self-references */
         if (!referrers.empty())
-            throw PathInUse(format("cannot delete path `%1%' because it is in use by `%2%'")
+            throw PathInUse(format("cannot delete path `%1%' because it is in use by %2%")
                 % path % showPaths(referrers));
         invalidatePath(path);
     }

@@ -1409,6 +1420,8 @@ void LocalStore::verifyStore(bool checkContents)
     if (checkContents) {
         printMsg(lvlInfo, "checking hashes...");

+        Hash nullHash(htSHA256);

         foreach (PathSet::iterator, i, validPaths) {
             try {
                 ValidPathInfo info = queryPathInfo(*i);

@@ -1417,17 +1430,30 @@ void LocalStore::verifyStore(bool checkContents)
                 printMsg(lvlTalkative, format("checking contents of `%1%'") % *i);
                 HashResult current = hashPath(info.hash.type, *i);

-                if (current.first != info.hash) {
+                if (info.hash != nullHash && info.hash != current.first) {
                     printMsg(lvlError, format("path `%1%' was modified! "
                         "expected hash `%2%', got `%3%'")
                         % *i % printHash(info.hash) % printHash(current.first));
-                }
+                } else {

+                    bool update = false;

+                    /* Fill in missing hashes. */
+                    if (info.hash == nullHash) {
+                        printMsg(lvlError, format("fixing missing hash on `%1%'") % *i);
+                        info.hash = current.first;
+                        update = true;
+                    }

                     /* Fill in missing narSize fields (from old stores). */
                     if (info.narSize == 0) {
                         printMsg(lvlError, format("updating size field on `%1%' to %2%") % *i % current.second);
                         info.narSize = current.second;
-                        updatePathInfo(info);
+                        update = true;
                     }

+                    if (update) updatePathInfo(info);

+                }

             } catch (Error & e) {
@@ -148,7 +148,7 @@ public:
     void exportPath(const Path & path, bool sign,
         Sink & sink);

-    Path importPath(bool requireSignature, Source & source);
+    Paths importPaths(bool requireSignature, Source & source);

     void buildDerivations(const PathSet & drvPaths);

@@ -261,6 +261,8 @@ private:
     Path createTempDirInStore();

+    Path importPath(bool requireSignature, Source & source);
+
     void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
 };
@@ -57,11 +57,11 @@ struct RefScanSink : Sink

     RefScanSink() : hashSink(htSHA256) { }

-    void operator () (const unsigned char * data, unsigned int len);
+    void operator () (const unsigned char * data, size_t len);
 };


-void RefScanSink::operator () (const unsigned char * data, unsigned int len)
+void RefScanSink::operator () (const unsigned char * data, size_t len)
 {
     hashSink(data, len);
@@ -27,13 +27,15 @@ Path readStorePath(Source & from)
 }


-PathSet readStorePaths(Source & from)
+template<class T> T readStorePaths(Source & from)
 {
-    PathSet paths = readStringSet(from);
-    foreach (PathSet::iterator, i, paths) assertStorePath(*i);
+    T paths = readStrings<T>(from);
+    foreach (typename T::iterator, i, paths) assertStorePath(*i);
     return paths;
 }

+template PathSet readStorePaths(Source & from);
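The explicit instantiation on the last added line is what makes the template callable from other .cc files: its body is visible only in this translation unit, so the compiler has to be told to emit object code for each T that callers will link against. A self-contained illustration of the pattern, with hypothetical names:

    // twice.hh
    template<class T> T twice(T x);          // declaration only

    // twice.cc
    template<class T> T twice(T x) { return x + x; }
    template int twice<int>(int);            // emit twice<int> in this object file

    // main.cc
    // int y = twice(21);   // links only because twice.cc instantiated twice<int>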
 RemoteStore::RemoteStore()
 {

@@ -65,6 +67,7 @@ void RemoteStore::openConnection()
     /* Send the magic greeting, check for the reply. */
     try {
         writeInt(WORKER_MAGIC_1, to);
+        to.flush();
         unsigned int magic = readInt(from);
         if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");

@@ -166,6 +169,7 @@ void RemoteStore::connectToDaemon()
 RemoteStore::~RemoteStore()
 {
     try {
+        to.flush();
         fdSocket.close();
         if (child != -1)
             child.wait(true);

@@ -213,7 +217,7 @@ PathSet RemoteStore::queryValidPaths()
     openConnection();
     writeInt(wopQueryValidPaths, to);
     processStderr();
-    return readStorePaths(from);
+    return readStorePaths<PathSet>(from);
 }

@@ -240,7 +244,7 @@ bool RemoteStore::querySubstitutablePathInfo(const Path & path,
     if (reply == 0) return false;
     info.deriver = readString(from);
     if (info.deriver != "") assertStorePath(info.deriver);
-    info.references = readStorePaths(from);
+    info.references = readStorePaths<PathSet>(from);
     info.downloadSize = readLongLong(from);
     info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
     return true;

@@ -258,7 +262,7 @@ ValidPathInfo RemoteStore::queryPathInfo(const Path & path)
     info.deriver = readString(from);
     if (info.deriver != "") assertStorePath(info.deriver);
     info.hash = parseHash(htSHA256, readString(from));
-    info.references = readStorePaths(from);
+    info.references = readStorePaths<PathSet>(from);
     info.registrationTime = readInt(from);
     info.narSize = readLongLong(from);
     return info;

@@ -283,7 +287,7 @@ void RemoteStore::queryReferences(const Path & path,
     writeInt(wopQueryReferences, to);
     writeString(path, to);
     processStderr();
-    PathSet references2 = readStorePaths(from);
+    PathSet references2 = readStorePaths<PathSet>(from);
     references.insert(references2.begin(), references2.end());
 }

@@ -295,7 +299,7 @@ void RemoteStore::queryReferrers(const Path & path,
     writeInt(wopQueryReferrers, to);
     writeString(path, to);
     processStderr();
-    PathSet referrers2 = readStorePaths(from);
+    PathSet referrers2 = readStorePaths<PathSet>(from);
     referrers.insert(referrers2.begin(), referrers2.end());
 }

@@ -318,7 +322,7 @@ PathSet RemoteStore::queryDerivationOutputs(const Path & path)
     writeInt(wopQueryDerivationOutputs, to);
     writeString(path, to);
     processStderr();
-    return readStorePaths(from);
+    return readStorePaths<PathSet>(from);
 }

@@ -358,7 +362,7 @@ Path RemoteStore::addTextToStore(const string & name, const string & s,
     writeInt(wopAddTextToStore, to);
     writeString(name, to);
     writeString(s, to);
-    writeStringSet(references, to);
+    writeStrings(references, to);

     processStderr();
     return readStorePath(from);

@@ -377,14 +381,14 @@ void RemoteStore::exportPath(const Path & path, bool sign,
 }


-Path RemoteStore::importPath(bool requireSignature, Source & source)
+Paths RemoteStore::importPaths(bool requireSignature, Source & source)
 {
     openConnection();
-    writeInt(wopImportPath, to);
+    writeInt(wopImportPaths, to);
     /* We ignore requireSignature, since the worker forces it to true
        anyway. */
     processStderr(0, &source);
-    return readStorePath(from);
+    return readStorePaths<Paths>(from);
 }
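On the client side the whole bulk import collapses into one call. A hypothetical caller, mirroring what `nix-store --import' becomes later in this commit:

    FdSource source(STDIN_FILENO);               // stream produced by exportPaths()
    Paths imported = store->importPaths(true, source);
    foreach (Paths::iterator, i, imported)
        std::cout << *i << std::endl;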
@@ -392,7 +396,7 @@ void RemoteStore::buildDerivations(const PathSet & drvPaths)
 {
     openConnection();
     writeInt(wopBuildDerivations, to);
-    writeStringSet(drvPaths, to);
+    writeStrings(drvPaths, to);
     processStderr();
     readInt(from);
 }

@@ -459,7 +463,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)

     writeInt(wopCollectGarbage, to);
     writeInt(options.action, to);
-    writeStringSet(options.pathsToDelete, to);
+    writeStrings(options.pathsToDelete, to);
     writeInt(options.ignoreLiveness, to);
     writeLongLong(options.maxFreed, to);
     writeInt(options.maxLinks, to);

@@ -471,7 +475,7 @@ void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)

     processStderr();

-    results.paths = readStringSet(from);
+    results.paths = readStrings<PathSet>(from);
     results.bytesFreed = readLongLong(from);
     results.blocksFreed = readLongLong(from);
 }

@@ -482,7 +486,7 @@ PathSet RemoteStore::queryFailedPaths()
     openConnection();
     writeInt(wopQueryFailedPaths, to);
     processStderr();
-    return readStorePaths(from);
+    return readStorePaths<PathSet>(from);
 }

@@ -490,7 +494,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths)
 {
     openConnection();
     writeInt(wopClearFailedPaths, to);
-    writeStringSet(paths, to);
+    writeStrings(paths, to);
     processStderr();
     readInt(from);
 }

@@ -498,6 +502,7 @@ void RemoteStore::clearFailedPaths(const PathSet & paths)

 void RemoteStore::processStderr(Sink * sink, Source * source)
 {
+    to.flush();
     unsigned int msg;
     while ((msg = readInt(from)) == STDERR_NEXT
         || msg == STDERR_READ || msg == STDERR_WRITE) {

@@ -508,11 +513,11 @@ void RemoteStore::processStderr(Sink * sink, Source * source)
         }
         else if (msg == STDERR_READ) {
             if (!source) throw Error("no source");
-            unsigned int len = readInt(from);
+            size_t len = readInt(from);
             unsigned char * buf = new unsigned char[len];
             AutoDeleteArray<unsigned char> d(buf);
-            (*source)(buf, len);
-            writeString(string((const char *) buf, len), to);
+            writeString(buf, source->read(buf, len), to);
             to.flush();
         }
         else {
             string s = readString(from);
@@ -58,7 +58,7 @@ public:
     void exportPath(const Path & path, bool sign,
         Sink & sink);

-    Path importPath(bool requireSignature, Source & source);
+    Paths importPaths(bool requireSignature, Source & source);

     void buildDerivations(const PathSet & drvPaths);


@@ -298,6 +298,17 @@ string showPaths(const PathSet & paths)
 }


+void exportPaths(StoreAPI & store, const Paths & paths,
+    bool sign, Sink & sink)
+{
+    foreach (Paths::const_iterator, i, paths) {
+        writeInt(1, sink);
+        store.exportPath(*i, sign, sink);
+    }
+    writeInt(0, sink);
+}
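A sketch of a caller, matching what opExport in nix-store.cc is reduced to further down in this commit (the store path is a placeholder):

    FdSink sink(STDOUT_FILENO);
    Paths toExport;
    toExport.push_back("/nix/store/...-example");
    exportPaths(*store, toExport, false /* don't sign */, sink);
    sink.flush();   // FdSink is buffered after this commit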
@@ -154,9 +154,7 @@ public:

     /* Copy the contents of a path to the store and register the
        validity the resulting path.  The resulting path is returned.
-       If `fixed' is true, then the output of a fixed-output
-       derivation is pre-loaded into the Nix store.  The function
-       object `filter' can be used to exclude files (see
+       The function object `filter' can be used to exclude files (see
        libutil/archive.hh). */
     virtual Path addToStore(const Path & srcPath,
         bool recursive = true, HashType hashAlgo = htSHA256,

@@ -174,9 +172,9 @@ public:
     virtual void exportPath(const Path & path, bool sign,
         Sink & sink) = 0;

-    /* Import a NAR dump created by exportPath() into the Nix
-       store. */
-    virtual Path importPath(bool requireSignature, Source & source) = 0;
+    /* Import a sequence of NAR dumps created by exportPaths() into
+       the Nix store. */
+    virtual Paths importPaths(bool requireSignature, Source & source) = 0;

     /* Ensure that the output paths of the derivation are valid.  If
        they are already valid, this is a no-op.  Otherwise, validity

@@ -345,6 +343,12 @@ ValidPathInfo decodeValidPathInfo(std::istream & str,
     bool hashGiven = false);


+/* Export multiple paths in the format expected by ‘nix-store
+   --import’. */
+void exportPaths(StoreAPI & store, const Paths & paths,
+    bool sign, Sink & sink);

 }


@@ -8,7 +8,7 @@ namespace nix {
 #define WORKER_MAGIC_1 0x6e697863
 #define WORKER_MAGIC_2 0x6478696f

-#define PROTOCOL_VERSION 0x108
+#define PROTOCOL_VERSION 0x109
 #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
 #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
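For the record, the version word splits into a major and a minor part: GET_PROTOCOL_MAJOR(0x109) is 0x100 and GET_PROTOCOL_MINOR(0x109) is 9, so this is a minor bump from 8 to 9. The bump matters because wopImportPaths below takes over opcode 27, which in protocol 0x108 meant wopQueryDerivationOutputNames:

    assert(GET_PROTOCOL_MAJOR(0x109) == 0x100);
    assert(GET_PROTOCOL_MINOR(0x109) == 9);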
@@ -29,7 +29,6 @@ typedef enum {
     wopSyncWithGC = 13,
     wopFindRoots = 14,
     wopExportPath = 16,
-    wopImportPath = 17,
     wopQueryDeriver = 18,
     wopSetOptions = 19,
     wopCollectGarbage = 20,

@@ -39,7 +38,8 @@ typedef enum {
     wopQueryFailedPaths = 24,
     wopClearFailedPaths = 25,
     wopQueryPathInfo = 26,
-    wopQueryDerivationOutputNames = 27,
+    wopImportPaths = 27,
+    wopQueryDerivationOutputNames = 28,
 } WorkerOp;

@@ -59,7 +59,7 @@ typedef enum {

 Path readStorePath(Source & from);
-PathSet readStorePaths(Source & from);
+template<class T> T readStorePaths(Source & from);

 }


@@ -204,6 +204,22 @@ Hash parseHash32(HashType ht, const string & s)
 }


+Hash parseHash16or32(HashType ht, const string & s)
+{
+    Hash hash(ht);
+    if (s.size() == hash.hashSize * 2)
+        /* hexadecimal representation */
+        hash = parseHash(ht, s);
+    else if (s.size() == hashLength32(hash))
+        /* base-32 representation */
+        hash = parseHash32(ht, s);
+    else
+        throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
+            % s % printHashType(ht));
+    return hash;
+}
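A worked example of the length dispatch, assuming SHA-256 (hashSize = 32 bytes): the hexadecimal form is 32 * 2 = 64 characters, while base-32 packs 5 bits per character, so hashLength32() yields (32 * 8 - 1) / 5 + 1 = 52 characters. A 64-character argument is therefore parsed as hex, a 52-character one as base-32, and any other length is rejected with the error above.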
 bool isHash(const string & s)
 {
     if (s.length() != 32) return false;

@@ -290,21 +306,13 @@ HashSink::HashSink(HashType ht) : ht(ht)
     start(ht, *ctx);
 }

-HashSink::HashSink(const HashSink & h)
-{
-    ht = h.ht;
-    bytes = h.bytes;
-    ctx = new Ctx;
-    *ctx = *h.ctx;
-}

 HashSink::~HashSink()
 {
+    bufPos = 0;
     delete ctx;
 }

-void HashSink::operator ()
-    (const unsigned char * data, unsigned int len)
+void HashSink::write(const unsigned char * data, size_t len)
 {
     bytes += len;
     update(ht, *ctx, data, len);

@@ -312,11 +320,21 @@ void HashSink::operator ()

 HashResult HashSink::finish()
 {
+    flush();
     Hash hash(ht);
     nix::finish(ht, *ctx, hash.hash);
     return HashResult(hash, bytes);
 }

+HashResult HashSink::currentHash()
+{
+    flush();
+    Ctx ctx2 = *ctx;
+    Hash hash(ht);
+    nix::finish(ht, ctx2, hash.hash);
+    return HashResult(hash, bytes);
+}
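Because currentHash() finalises a copy of the context (ctx2), the sink remains usable afterwards. A small sketch with made-up input:

    HashSink sink(htSHA256);
    sink((const unsigned char *) "foo", 3);
    Hash h1 = sink.currentHash().first;   // hash of "foo"
    sink((const unsigned char *) "bar", 3);
    Hash h2 = sink.finish().first;        // hash of "foobar"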
 HashResult hashPath(
     HashType ht, const Path & path, PathFilter & filter)


@@ -58,6 +58,9 @@ string printHash32(const Hash & hash);
 /* Parse a base-32 representation of a hash code. */
 Hash parseHash32(HashType ht, const string & s);

+/* Parse a base-16 or base-32 representation of a hash code. */
+Hash parseHash16or32(HashType ht, const string & s);
+
 /* Verify that the given string is a valid hash code. */
 bool isHash(const string & s);

@@ -88,7 +91,7 @@ string printHashType(HashType ht);

 union Ctx;

-class HashSink : public Sink
+class HashSink : public BufferedSink
 {
 private:
     HashType ht;

@@ -99,8 +102,9 @@ public:
     HashSink(HashType ht);
     HashSink(const HashSink & h);
     ~HashSink();
-    virtual void operator () (const unsigned char * data, unsigned int len);
+    void write(const unsigned char * data, size_t len);
     HashResult finish();
+    HashResult currentHash();
 };
@@ -2,24 +2,117 @@
 #include "util.hh"

+#include <cstring>
+#include <cerrno>
+

 namespace nix {


-void FdSink::operator () (const unsigned char * data, unsigned int len)
+BufferedSink::~BufferedSink()
 {
+    /* We can't call flush() here, because C++ for some insane reason
+       doesn't allow you to call virtual methods from a destructor. */
+    assert(!bufPos);
+    if (buffer) delete[] buffer;
+}
+
+
+void BufferedSink::operator () (const unsigned char * data, size_t len)
+{
+    if (!buffer) buffer = new unsigned char[bufSize];
+
+    while (len) {
+        /* Optimisation: bypass the buffer if the data exceeds the
+           buffer size. */
+        if (bufPos + len >= bufSize) {
+            flush();
+            write(data, len);
+            break;
+        }
+        /* Otherwise, copy the bytes to the buffer.  Flush the buffer
+           when it's full. */
+        size_t n = bufPos + len > bufSize ? bufSize - bufPos : len;
+        memcpy(buffer + bufPos, data, n);
+        data += n; bufPos += n; len -= n;
+        if (bufPos == bufSize) flush();
+    }
+}
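With the default 32 KiB buffer this behaves sensibly at both extremes: a stream of small writes is coalesced and flushed once per 32 KiB, while a single large write (say 1 MiB) flushes whatever is pending and then goes straight to write(), skipping the copy entirely.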
+void BufferedSink::flush()
+{
+    if (bufPos == 0) return;
+    size_t n = bufPos;
+    bufPos = 0; // don't trigger the assert() in ~BufferedSink()
+    write(buffer, n);
+}
+
+
+FdSink::~FdSink()
+{
+    try { flush(); } catch (...) { ignoreException(); }
+}
+
+
+void FdSink::write(const unsigned char * data, size_t len)
 {
     writeFull(fd, data, len);
 }


-void FdSource::operator () (unsigned char * data, unsigned int len)
+void Source::operator () (unsigned char * data, size_t len)
 {
-    readFull(fd, data, len);
+    while (len) {
+        size_t n = read(data, len);
+        data += n; len -= n;
+    }
 }


-void writePadding(unsigned int len, Sink & sink)
+BufferedSource::~BufferedSource()
 {
+    if (buffer) delete[] buffer;
+}
+
+
+size_t BufferedSource::read(unsigned char * data, size_t len)
+{
+    if (!buffer) buffer = new unsigned char[bufSize];
+
+    if (!bufPosIn) bufPosIn = readUnbuffered(buffer, bufSize);
+
+    /* Copy out the data in the buffer. */
+    size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
+    memcpy(data, buffer + bufPosOut, n);
+    bufPosOut += n;
+    if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
+    return n;
+}
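Note the refill policy: the internal buffer is refilled only once it has been drained completely (bufPosIn and bufPosOut reset to 0), and read() may return fewer bytes than asked for. Callers that need exactly len bytes go through Source::operator () above, which keeps calling read() until the request is satisfied.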
+size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
+{
+    ssize_t n;
+    do {
+        checkInterrupt();
+        n = ::read(fd, (char *) data, bufSize);
+    } while (n == -1 && errno == EINTR);
+    if (n == -1) throw SysError("reading from file");
+    if (n == 0) throw EndOfFile("unexpected end-of-file");
+    return n;
+}
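One subtlety worth flagging: readUnbuffered() passes bufSize rather than len to ::read(). That is harmless in the code as committed, because its only caller, BufferedSource::read(), always hands it a buffer of exactly bufSize bytes, but any new caller with a smaller buffer would risk an overrun.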
+size_t StringSource::read(unsigned char * data, size_t len)
+{
+    if (pos == s.size()) throw EndOfFile("end of string reached");
+    size_t n = s.copy((char *) data, len, pos);
+    pos += n;
+    return n;
+}
+
+
+void writePadding(size_t len, Sink & sink)
 {
     if (len % 8) {
         unsigned char zero[8];

@@ -56,28 +149,36 @@ void writeLongLong(unsigned long long n, Sink & sink)
 }


-void writeString(const string & s, Sink & sink)
+void writeString(const unsigned char * buf, size_t len, Sink & sink)
 {
-    unsigned int len = s.length();
     writeInt(len, sink);
-    sink((const unsigned char *) s.c_str(), len);
+    sink(buf, len);
     writePadding(len, sink);
 }


-void writeStringSet(const StringSet & ss, Sink & sink)
+void writeString(const string & s, Sink & sink)
 {
-    writeInt(ss.size(), sink);
-    for (StringSet::iterator i = ss.begin(); i != ss.end(); ++i)
-        writeString(*i, sink);
+    writeString((const unsigned char *) s.c_str(), s.size(), sink);
 }


-void readPadding(unsigned int len, Source & source)
+template<class T> void writeStrings(const T & ss, Sink & sink)
 {
+    writeInt(ss.size(), sink);
+    foreach (typename T::const_iterator, i, ss)
+        writeString(*i, sink);
+}
+
+template void writeStrings(const Paths & ss, Sink & sink);
+template void writeStrings(const PathSet & ss, Sink & sink);
+
+
+void readPadding(size_t len, Source & source)
+{
     if (len % 8) {
         unsigned char zero[8];
-        unsigned int n = 8 - (len % 8);
+        size_t n = 8 - (len % 8);
         source(zero, n);
         for (unsigned int i = 0; i < n; i++)
             if (zero[i]) throw SerialisationError("non-zero padding");

@@ -115,9 +216,19 @@ unsigned long long readLongLong(Source & source)
 }


+size_t readString(unsigned char * buf, size_t max, Source & source)
+{
+    size_t len = readInt(source);
+    if (len > max) throw Error("string is too long");
+    source(buf, len);
+    readPadding(len, source);
+    return len;
+}
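Together, writeString()/readString() define the wire format of a string: a length integer followed by the raw bytes, zero-padded to an 8-byte boundary. For example, writeString(string("foo"), sink) emits the integer 3, the three bytes of "foo", and 8 - (3 % 8) = 5 zero bytes of padding, which readPadding() then verifies on the way back in.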
 string readString(Source & source)
 {
-    unsigned int len = readInt(source);
+    size_t len = readInt(source);
     unsigned char * buf = new unsigned char[len];
     AutoDeleteArray<unsigned char> d(buf);
     source(buf, len);

@@ -126,14 +237,17 @@ string readString(Source & source)
 }


-StringSet readStringSet(Source & source)
+template<class T> T readStrings(Source & source)
 {
     unsigned int count = readInt(source);
-    StringSet ss;
+    T ss;
     while (count--)
-        ss.insert(readString(source));
+        ss.insert(ss.end(), readString(source));
     return ss;
 }

+template Paths readStrings(Source & source);
+template PathSet readStrings(Source & source);
+

 }
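The positional insert is the trick that lets one template body serve both container types: for std::list (Paths), insert(end(), x) appends and so preserves wire order, while for std::set (PathSet) the iterator is merely a hint and the set stays sorted. In miniature:

    std::list<string> l;  l.insert(l.end(), "b");  l.insert(l.end(), "a");  // b, a
    std::set<string>  s;  s.insert(s.end(), "b");  s.insert(s.end(), "a");  // a, b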
@@ -11,7 +25,25 @@ namespace nix {
 struct Sink
 {
     virtual ~Sink() { }
-    virtual void operator () (const unsigned char * data, unsigned int len) = 0;
+    virtual void operator () (const unsigned char * data, size_t len) = 0;
 };


+/* A buffered abstract sink. */
+struct BufferedSink : Sink
+{
+    size_t bufSize, bufPos;
+    unsigned char * buffer;
+
+    BufferedSink(size_t bufSize = 32 * 1024)
+        : bufSize(bufSize), bufPos(0), buffer(0) { }
+    ~BufferedSink();
+
+    void operator () (const unsigned char * data, size_t len);
+
+    void flush();
+
+    virtual void write(const unsigned char * data, size_t len) = 0;
+};

@@ -20,49 +38,55 @@ struct Source
 {
     virtual ~Source() { }

-    /* The callee should store exactly *len bytes in the buffer
-       pointed to by data.  It should block if that much data is not
-       yet available, or throw an error if it is not going to be
-       available. */
-    virtual void operator () (unsigned char * data, unsigned int len) = 0;
+    /* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
+       It blocks until all the requested data is available, or throws
+       an error if it is not going to be available. */
+    void operator () (unsigned char * data, size_t len);
+
+    /* Store up to ‘len’ bytes in the buffer pointed to by ‘data’, and
+       return the number of bytes stored.  It blocks until at least
+       one byte is available. */
+    virtual size_t read(unsigned char * data, size_t len) = 0;
 };


+/* A buffered abstract source. */
+struct BufferedSource : Source
+{
+    size_t bufSize, bufPosIn, bufPosOut;
+    unsigned char * buffer;
+
+    BufferedSource(size_t bufSize = 32 * 1024)
+        : bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { }
+    ~BufferedSource();
+
+    size_t read(unsigned char * data, size_t len);
+
+    /* Underlying read call, to be overridden. */
+    virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
+};
 /* A sink that writes data to a file descriptor. */
-struct FdSink : Sink
+struct FdSink : BufferedSink
 {
     int fd;

-    FdSink()
-    {
-        fd = -1;
-    }
+    FdSink() : fd(-1) { }
+    FdSink(int fd) : fd(fd) { }
+    ~FdSink();

-    FdSink(int fd)
-    {
-        this->fd = fd;
-    }

-    void operator () (const unsigned char * data, unsigned int len);
+    void write(const unsigned char * data, size_t len);
 };


 /* A source that reads data from a file descriptor. */
-struct FdSource : Source
+struct FdSource : BufferedSource
 {
     int fd;

-    FdSource()
-    {
-        fd = -1;
-    }
-
-    FdSource(int fd)
-    {
-        this->fd = fd;
-    }
-
-    void operator () (unsigned char * data, unsigned int len);
+    FdSource() : fd(-1) { }
+    FdSource(int fd) : fd(fd) { }
+    size_t readUnbuffered(unsigned char * data, size_t len);
 };

@@ -70,7 +94,7 @@ struct FdSource : Source
 struct StringSink : Sink
 {
     string s;
-    virtual void operator () (const unsigned char * data, unsigned int len)
+    void operator () (const unsigned char * data, size_t len)
     {
         s.append((const char *) data, len);
     }

@@ -81,29 +105,25 @@ struct StringSink : Sink
 struct StringSource : Source
 {
     const string & s;
-    unsigned int pos;
+    size_t pos;
     StringSource(const string & _s) : s(_s), pos(0) { }
-    virtual void operator () (unsigned char * data, unsigned int len)
-    {
-        s.copy((char *) data, len, pos);
-        pos += len;
-        if (pos > s.size())
-            throw Error("end of string reached");
-    }
+    size_t read(unsigned char * data, size_t len);
 };


-void writePadding(unsigned int len, Sink & sink);
+void writePadding(size_t len, Sink & sink);
 void writeInt(unsigned int n, Sink & sink);
 void writeLongLong(unsigned long long n, Sink & sink);
+void writeString(const unsigned char * buf, size_t len, Sink & sink);
 void writeString(const string & s, Sink & sink);
-void writeStringSet(const StringSet & ss, Sink & sink);
+template<class T> void writeStrings(const T & ss, Sink & sink);

-void readPadding(unsigned int len, Source & source);
+void readPadding(size_t len, Source & source);
 unsigned int readInt(Source & source);
 unsigned long long readLongLong(Source & source);
+size_t readString(unsigned char * buf, size_t max, Source & source);
 string readString(Source & source);
-StringSet readStringSet(Source & source);
+template<class T> T readStrings(Source & source);


 MakeError(SerialisationError, Error)
@@ -1270,7 +1270,7 @@ void run(Strings args)
         else if (arg == "--profile" || arg == "-p")
             globals.profile = absPath(needArg(i, args, arg));
         else if (arg == "--file" || arg == "-f")
-            globals.instSource.nixExprPath = absPath(needArg(i, args, arg));
+            globals.instSource.nixExprPath = lookupFileArg(globals.state, needArg(i, args, arg));
         else if (arg == "--switch-profile" || arg == "-S")
             op = opSwitchProfile;
         else if (arg == "--switch-generation" || arg == "-G")
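lookupFileArg() makes `-f' accept angle-bracket arguments that are resolved through the Nix search path, in addition to ordinary file names; the user-envs test change at the bottom of this commit exercises exactly that, along the lines of:

    $ NIX_PATH=nixpkgs=/path/to/expr nix-env -f '<nixpkgs>' -u foo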
@@ -43,7 +43,7 @@ void run(Strings args)
     }

     if (op == opHash) {
-        for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) {
+        foreach (Strings::iterator, i, ss) {
             Hash h = flat ? hashFile(ht, *i) : hashPath(ht, *i).first;
             if (truncate && h.hashSize > 20) h = compressHash(h, 20);
             std::cout << format("%1%\n") %

@@ -52,8 +52,8 @@ void run(Strings args)
     }

     else {
-        for (Strings::iterator i = ss.begin(); i != ss.end(); ++i) {
-            Hash h = op == opTo16 ? parseHash32(ht, *i) : parseHash(ht, *i);
+        foreach (Strings::iterator, i, ss) {
+            Hash h = parseHash16or32(ht, *i);
             std::cout << format("%1%\n") %
                 (op == opTo16 ? printHash(h) : printHash32(h));
         }

@@ -138,8 +138,7 @@ void run(Strings args)
     }

     foreach (Strings::iterator, i, files) {
-        Path path = absPath(*i);
-        Expr * e = state.parseExprFromFile(path);
+        Expr * e = state.parseExprFromFile(lookupFileArg(state, *i));
         processExpr(state, attrPaths, parseOnly, strict, autoArgs,
             evalOnly, xmlOutput, xmlOutputSourceLocation, e);
     }
@@ -133,14 +133,6 @@ static void opAddFixed(Strings opFlags, Strings opArgs)
 }


-static Hash parseHash16or32(HashType ht, const string & s)
-{
-    return s.size() == Hash(ht).hashSize * 2
-        ? parseHash(ht, s)
-        : parseHash32(ht, s);
-}
-
-
 /* Hack to support caching in `nix-prefetch-url'. */
 static void opPrintFixedPath(Strings opFlags, Strings opArgs)
 {

@@ -594,11 +586,7 @@ static void opExport(Strings opFlags, Strings opArgs)
         else throw UsageError(format("unknown flag `%1%'") % *i);

     FdSink sink(STDOUT_FILENO);
-    for (Strings::iterator i = opArgs.begin(); i != opArgs.end(); ++i) {
-        writeInt(1, sink);
-        store->exportPath(*i, sign, sink);
-    }
-    writeInt(0, sink);
+    exportPaths(*store, opArgs, sign, sink);
 }


@@ -612,12 +600,10 @@ static void opImport(Strings opFlags, Strings opArgs)
     if (!opArgs.empty()) throw UsageError("no arguments expected");

     FdSource source(STDIN_FILENO);
-    while (true) {
-        unsigned long long n = readLongLong(source);
-        if (n == 0) break;
-        if (n != 1) throw Error("input doesn't look like something created by `nix-store --export'");
-        cout << format("%1%\n") % store->importPath(requireSignature, source) << std::flush;
-    }
+    Paths paths = store->importPaths(requireSignature, source);
+
+    foreach (Paths::iterator, i, paths)
+        cout << format("%1%\n") % *i << std::flush;
 }
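End to end, these two code paths are what the command-line round trip drives (the store path is a placeholder):

    $ nix-store --export $(nix-store -qR /nix/store/...-example) > closure
    $ nix-store --import < closure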
@@ -56,7 +56,8 @@ static void tunnelStderr(const unsigned char * buf, size_t count)
     if (canSendStderr && myPid == getpid()) {
         try {
             writeInt(STDERR_NEXT, to);
-            writeString(string((char *) buf, count), to);
+            writeString(buf, count, to);
             to.flush();
         } catch (...) {
             /* Write failed; that means that the other side is
                gone. */

@@ -200,26 +201,20 @@ static void stopWork(bool success = true, const string & msg = "", unsigned int
 struct TunnelSink : Sink
 {
     Sink & to;
-    TunnelSink(Sink & to) : to(to)
-    {
-    }
-    virtual void operator ()
-        (const unsigned char * data, unsigned int len)
+    TunnelSink(Sink & to) : to(to) { }
+    virtual void operator () (const unsigned char * data, size_t len)
     {
         writeInt(STDERR_WRITE, to);
-        writeString(string((const char *) data, len), to);
+        writeString(data, len, to);
     }
 };


-struct TunnelSource : Source
+struct TunnelSource : BufferedSource
 {
     Source & from;
-    TunnelSource(Source & from) : from(from)
-    {
-    }
-    virtual void operator ()
-        (unsigned char * data, unsigned int len)
+    TunnelSource(Source & from) : from(from) { }
+    size_t readUnbuffered(unsigned char * data, size_t len)
     {
         /* Careful: we're going to receive data from the client now,
            so we have to disable the SIGPOLL handler. */

@@ -228,11 +223,12 @@ struct TunnelSource : Source

         writeInt(STDERR_READ, to);
         writeInt(len, to);
-        string s = readString(from);
-        if (s.size() != len) throw Error("not enough data");
-        memcpy(data, (const unsigned char *) s.c_str(), len);
+        to.flush();
+        size_t n = readString(data, len, from);

         startWork();
+        if (n == 0) throw EndOfFile("unexpected end-of-file");
+        return n;
     }
 };
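One STDERR_READ round trip now tolerates short reads on both ends: the daemon sends STDERR_READ plus the number of bytes it wants, the client answers with a string of up to that many bytes (processStderr's writeString(buf, source->read(buf, len), to) earlier in this commit), and readString(data, len, from) hands back however many actually arrived. TunnelSource therefore guarantees only at least one byte per call; BufferedSource and Source::operator () rebuild exact-length reads on top of that guarantee.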
@@ -241,11 +237,14 @@ struct TunnelSource : Source
    the contents of the file to `s'.  Otherwise barf. */
 struct RetrieveRegularNARSink : ParseSink
 {
+    bool regular;
     string s;

+    RetrieveRegularNARSink() : regular(true) { }
+
     void createDirectory(const Path & path)
     {
-        throw Error("regular file expected");
+        regular = false;
     }

     void receiveContents(unsigned char * data, unsigned int len)

@@ -255,7 +254,7 @@ struct RetrieveRegularNARSink : ParseSink

     void createSymlink(const Path & path, const string & target)
     {
-        throw Error("regular file expected");
+        regular = false;
     }
 };

@@ -266,10 +265,11 @@ struct SavingSourceAdapter : Source
     Source & orig;
     string s;
     SavingSourceAdapter(Source & orig) : orig(orig) { }
-    void operator () (unsigned char * data, unsigned int len)
+    size_t read(unsigned char * data, size_t len)
     {
-        orig(data, len);
-        s.append((const char *) data, len);
+        size_t n = orig.read(data, len);
+        s.append((const char *) data, n);
+        return n;
     }
 };
@@ -327,7 +327,7 @@ static void performOp(unsigned int clientVersion,
         store->queryReferrers(path, paths);
     else paths = store->queryDerivationOutputs(path);
     stopWork();
-    writeStringSet(paths, to);
+    writeStrings(paths, to);
     break;
 }

@@ -371,11 +371,11 @@ static void performOp(unsigned int clientVersion,
            addToStoreFromDump(). */
         ParseSink sink; /* null sink; just parse the NAR */
         parseDump(sink, savedNAR);
-    } else {
+    } else
         parseDump(savedRegular, from);
-    }

     startWork();
+    if (!savedRegular.regular) throw Error("regular file expected");
     Path path = dynamic_cast<LocalStore *>(store.get())
         ->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
     stopWork();

@@ -387,7 +387,7 @@ static void performOp(unsigned int clientVersion,
 case wopAddTextToStore: {
     string suffix = readString(from);
     string s = readString(from);
-    PathSet refs = readStorePaths(from);
+    PathSet refs = readStorePaths<PathSet>(from);
     startWork();
     Path path = store->addTextToStore(suffix, s, refs);
     stopWork();

@@ -406,17 +406,17 @@ static void performOp(unsigned int clientVersion,
     break;
 }

-case wopImportPath: {
+case wopImportPaths: {
     startWork();
     TunnelSource source(from);
-    Path path = store->importPath(true, source);
+    Paths paths = store->importPaths(true, source);
     stopWork();
-    writeString(path, to);
+    writeStrings(paths, to);
     break;
 }

 case wopBuildDerivations: {
-    PathSet drvs = readStorePaths(from);
+    PathSet drvs = readStorePaths<PathSet>(from);
     startWork();
     store->buildDerivations(drvs);
     stopWork();

@@ -474,7 +474,7 @@ static void performOp(unsigned int clientVersion,
 case wopCollectGarbage: {
     GCOptions options;
     options.action = (GCOptions::GCAction) readInt(from);
-    options.pathsToDelete = readStorePaths(from);
+    options.pathsToDelete = readStorePaths<PathSet>(from);
     options.ignoreLiveness = readInt(from);
     options.maxFreed = readLongLong(from);
     options.maxLinks = readInt(from);

@@ -492,7 +492,7 @@ static void performOp(unsigned int clientVersion,
     store->collectGarbage(options, results);
     stopWork();

-    writeStringSet(results.paths, to);
+    writeStrings(results.paths, to);
     writeLongLong(results.bytesFreed, to);
     writeLongLong(results.blocksFreed, to);

@@ -530,7 +530,7 @@ static void performOp(unsigned int clientVersion,
     writeInt(res ? 1 : 0, to);
     if (res) {
         writeString(info.deriver, to);
-        writeStringSet(info.references, to);
+        writeStrings(info.references, to);
         writeLongLong(info.downloadSize, to);
         if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
             writeLongLong(info.narSize, to);

@@ -542,7 +542,7 @@ static void performOp(unsigned int clientVersion,
     startWork();
     PathSet paths = store->queryValidPaths();
     stopWork();
-    writeStringSet(paths, to);
+    writeStrings(paths, to);
     break;
 }

@@ -550,12 +550,12 @@ static void performOp(unsigned int clientVersion,
     startWork();
     PathSet paths = store->queryFailedPaths();
     stopWork();
-    writeStringSet(paths, to);
+    writeStrings(paths, to);
     break;
 }

 case wopClearFailedPaths: {
-    PathSet paths = readStringSet(from);
+    PathSet paths = readStrings<PathSet>(from);
     startWork();
     store->clearFailedPaths(paths);
     stopWork();

@@ -570,7 +570,7 @@ static void performOp(unsigned int clientVersion,
     stopWork();
     writeString(info.deriver, to);
     writeString(printHash(info.hash), to);
-    writeStringSet(info.references, to);
+    writeStrings(info.references, to);
     writeInt(info.registrationTime, to);
     writeLongLong(info.narSize, to);
     break;

@@ -603,8 +603,8 @@ static void processConnection()
     unsigned int magic = readInt(from);
     if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
     writeInt(WORKER_MAGIC_2, to);

     writeInt(PROTOCOL_VERSION, to);
+    to.flush();
     unsigned int clientVersion = readInt(from);

     /* Send startup error messages to the client. */

@@ -626,9 +626,11 @@ static void processConnection()
         store = boost::shared_ptr<StoreAPI>(new LocalStore());

         stopWork();
+        to.flush();

     } catch (Error & e) {
         stopWork(false, e.msg());
+        to.flush();
         return;
     }

@@ -648,9 +650,19 @@ static void processConnection()
         try {
             performOp(clientVersion, from, to, op);
         } catch (Error & e) {
+            /* If we're not in a state where we can send replies, then
+               something went wrong processing the input of the
+               client.  This can happen especially if I/O errors occur
+               during addTextToStore() / importPath().  If that
+               happens, just send the error message and exit. */
+            bool errorAllowed = canSendStderr;
+            if (!errorAllowed) printMsg(lvlError, format("error processing client input: %1%") % e.msg());
             stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0);
+            if (!errorAllowed) break;
         }

+        to.flush();

         assert(!canSendStderr);
     };

@@ -1,4 +1,4 @@
-TESTS_ENVIRONMENT = $(bash) -e
+TESTS_ENVIRONMENT = NIX_REMOTE= $(bash) -e

 extra1 = $(shell pwd)/test-tmp/shared

@@ -23,8 +23,6 @@ export SHARED=$TEST_ROOT/shared

 export PATH=$NIX_BIN_DIR:$TOP/scripts:$PATH

-export NIX_REMOTE=

 export REAL_BIN_DIR=@bindir@
 export REAL_LIBEXEC_DIR=@libexecdir@
 export REAL_LOCALSTATE_DIR=@localstatedir@

@@ -72,6 +72,7 @@ in
     $client->succeed("chmod 600 /root/.ssh/id_dsa");

     # Install the SSH key on the slaves.
+    $client->waitForJob("network-interfaces");
     foreach my $slave ($slave1, $slave2) {
         $slave->succeed("mkdir -m 700 /root/.ssh");
         $slave->copyFileFromHost("key.pub", "/root/.ssh/authorized_keys");

@@ -36,7 +36,7 @@ nix-env -p $profiles/test -q '*' | grep -q foo-2.0pre1
 test "$($profiles/test/bin/foo)" = "foo-2.0pre1"

 # Upgrade "foo": should install foo-2.0.
-nix-env -p $profiles/test -f ./user-envs.nix -u foo
+NIX_PATH=nixpkgs=./user-envs.nix nix-env -p $profiles/test -f '<nixpkgs>' -u foo

 # Query installed: should contain foo-2.0 now.
 test "$(nix-env -p $profiles/test -q '*' | wc -l)" -eq 1