style(3p/nix): Reformat project in Google C++ style

Reformatted with:

    fd . -e hh -e cc | xargs clang-format -i
This commit is contained in:
Vincent Ambo 2020-05-17 16:31:57 +01:00
parent 65a1aae98c
commit 0f2cf531f7
175 changed files with 32126 additions and 34689 deletions

View file

@@ -1,361 +1,364 @@
#include "archive.hh"
#include "binary-cache-store.hh"
#include <chrono>
#include <future>
#include "archive.hh"
#include "compression.hh"
#include "derivations.hh"
#include "fs-accessor.hh"
#include "globals.hh"
#include "nar-info.hh"
#include "sync.hh"
#include "remote-fs-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include <chrono>
#include <future>
#include "nar-accessor.hh"
#include "nar-info-disk-cache.hh"
#include "nar-info.hh"
#include "remote-fs-accessor.hh"
#include "sync.hh"
namespace nix {
BinaryCacheStore::BinaryCacheStore(const Params & params)
: Store(params)
{
if (secretKeyFile != "")
secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
BinaryCacheStore::BinaryCacheStore(const Params& params) : Store(params) {
if (secretKeyFile != "")
secretKey =
std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
StringSink sink;
sink << narVersionMagic1;
narMagic = *sink.s;
StringSink sink;
sink << narVersionMagic1;
narMagic = *sink.s;
}
void BinaryCacheStore::init()
{
std::string cacheInfoFile = "nix-cache-info";
void BinaryCacheStore::init() {
std::string cacheInfoFile = "nix-cache-info";
auto cacheInfo = getFile(cacheInfoFile);
if (!cacheInfo) {
upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
} else {
for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
size_t colon = line.find(':');
if (colon == std::string::npos) continue;
auto name = line.substr(0, colon);
auto value = trim(line.substr(colon + 1, std::string::npos));
if (name == "StoreDir") {
if (value != storeDir)
throw Error(format("binary cache '%s' is for Nix stores with prefix '%s', not '%s'")
% getUri() % value % storeDir);
} else if (name == "WantMassQuery") {
wantMassQuery_ = value == "1";
} else if (name == "Priority") {
string2Int(value, priority);
}
}
auto cacheInfo = getFile(cacheInfoFile);
if (!cacheInfo) {
upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n",
"text/x-nix-cache-info");
} else {
for (auto& line : tokenizeString<Strings>(*cacheInfo, "\n")) {
size_t colon = line.find(':');
if (colon == std::string::npos) continue;
auto name = line.substr(0, colon);
auto value = trim(line.substr(colon + 1, std::string::npos));
if (name == "StoreDir") {
if (value != storeDir)
throw Error(format("binary cache '%s' is for Nix stores with prefix "
"'%s', not '%s'") %
getUri() % value % storeDir);
} else if (name == "WantMassQuery") {
wantMassQuery_ = value == "1";
} else if (name == "Priority") {
string2Int(value, priority);
}
}
}
}
void BinaryCacheStore::getFile(const std::string & path,
Callback<std::shared_ptr<std::string>> callback) noexcept
{
try {
callback(getFile(path));
} catch (...) { callback.rethrow(); }
void BinaryCacheStore::getFile(
    const std::string& path,
    Callback<std::shared_ptr<std::string>> callback) noexcept {
  // Synchronous default implementation: fetch the file eagerly and hand the
  // result (or any exception) to the callback. Subclasses may override this
  // with a truly asynchronous implementation.
  try {
    auto data = getFile(path);
    callback(data);
  } catch (...) {
    callback.rethrow();
  }
}
void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
{
std::promise<std::shared_ptr<std::string>> promise;
getFile(path,
{[&](std::future<std::shared_ptr<std::string>> result) {
void BinaryCacheStore::getFile(const std::string& path, Sink& sink) {
std::promise<std::shared_ptr<std::string>> promise;
getFile(path, {[&](std::future<std::shared_ptr<std::string>> result) {
try {
promise.set_value(result.get());
promise.set_value(result.get());
} catch (...) {
promise.set_exception(std::current_exception());
promise.set_exception(std::current_exception());
}
}});
auto data = promise.get_future().get();
sink((unsigned char *) data->data(), data->size());
}});
auto data = promise.get_future().get();
sink((unsigned char*)data->data(), data->size());
}
std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
{
StringSink sink;
try {
getFile(path, sink);
} catch (NoSuchBinaryCacheFile &) {
return nullptr;
std::shared_ptr<std::string> BinaryCacheStore::getFile(
const std::string& path) {
StringSink sink;
try {
getFile(path, sink);
} catch (NoSuchBinaryCacheFile&) {
return nullptr;
}
return sink.s;
}
Path BinaryCacheStore::narInfoFileFor(const Path& storePath) {
  // The .narinfo key is derived from the hash part of the store path,
  // i.e. /nix/store/<hash>-<name> maps to "<hash>.narinfo".
  assertStorePath(storePath);
  auto hashPart = storePathToHash(storePath);
  return hashPart + ".narinfo";
}
void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo) {
  // Upload the .narinfo file, then refresh both the in-memory and the
  // on-disk narinfo caches so subsequent queries see the new metadata.
  upsertFile(narInfoFileFor(narInfo->path), narInfo->to_string(),
             "text/x-nix-narinfo");

  auto hashPart = storePathToHash(narInfo->path);

  {
    // Scope the lock to the in-memory cache update only.
    auto state_(state.lock());
    state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo));
  }

  if (diskCache)
    diskCache->upsertNarInfo(getUri(), hashPart,
                             std::shared_ptr<NarInfo>(narInfo));
}
void BinaryCacheStore::addToStore(const ValidPathInfo& info,
const ref<std::string>& nar,
RepairFlag repair, CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) {
if (!repair && isValidPath(info.path)) return;
/* Verify that all references are valid. This may do some .narinfo
reads, but typically they'll already be cached. */
for (auto& ref : info.references) try {
if (ref != info.path) queryPathInfo(ref);
} catch (InvalidPath&) {
throw Error(format("cannot add '%s' to the binary cache because the "
"reference '%s' is not valid") %
info.path % ref);
}
return sink.s;
}
Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
{
assertStorePath(storePath);
return storePathToHash(storePath) + ".narinfo";
}
assert(nar->compare(0, narMagic.size(), narMagic) == 0);
void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
{
auto narInfoFile = narInfoFileFor(narInfo->path);
auto narInfo = make_ref<NarInfo>(info);
upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo");
narInfo->narSize = nar->size();
narInfo->narHash = hashString(htSHA256, *nar);
auto hashPart = storePathToHash(narInfo->path);
if (info.narHash && info.narHash != narInfo->narHash)
throw Error(
format("refusing to copy corrupted path '%1%' to binary cache") %
info.path);
auto accessor_ = std::dynamic_pointer_cast<RemoteFSAccessor>(accessor);
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
std::ostringstream jsonOut;
{
auto state_(state.lock());
state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo));
JSONObject jsonRoot(jsonOut);
jsonRoot.attr("version", 1);
auto narAccessor = makeNarAccessor(nar);
if (accessor_) accessor_->addToCache(info.path, *nar, narAccessor);
{
auto res = jsonRoot.placeholder("root");
listNar(res, narAccessor, "", true);
}
}
if (diskCache)
diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(),
"application/json");
}
else {
if (accessor_) accessor_->addToCache(info.path, *nar, makeNarAccessor(nar));
}
/* Compress the NAR. */
narInfo->compression = compression;
auto now1 = std::chrono::steady_clock::now();
auto narCompressed = compress(compression, *nar, parallelCompression);
auto now2 = std::chrono::steady_clock::now();
narInfo->fileHash = hashString(htSHA256, *narCompressed);
narInfo->fileSize = narCompressed->size();
auto duration =
std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
.count();
printMsg(lvlTalkative,
format("copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% "
"ms) to binary cache") %
narInfo->path % narInfo->narSize %
((1.0 - (double)narCompressed->size() / nar->size()) * 100.0) %
duration);
/* Atomically write the NAR file. */
narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar" +
(compression == "xz" ? ".xz"
: compression == "bzip2"
? ".bz2"
: compression == "br" ? ".br" : "");
if (repair || !fileExists(narInfo->url)) {
stats.narWrite++;
upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
} else
stats.narWriteAverted++;
stats.narWriteBytes += nar->size();
stats.narWriteCompressedBytes += narCompressed->size();
stats.narWriteCompressionTimeMs += duration;
/* Atomically write the NAR info file.*/
if (secretKey) narInfo->sign(*secretKey);
writeNarInfo(narInfo);
stats.narInfoWrite++;
}
void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
{
if (!repair && isValidPath(info.path)) return;
bool BinaryCacheStore::isValidPathUncached(const Path& storePath) {
// FIXME: this only checks whether a .narinfo with a matching hash
// part exists. So f4kb...-foo matches f4kb...-bar, even
// though they shouldn't. Not easily fixed.
return fileExists(narInfoFileFor(storePath));
}
/* Verify that all references are valid. This may do some .narinfo
reads, but typically they'll already be cached. */
for (auto & ref : info.references)
void BinaryCacheStore::narFromPath(const Path& storePath, Sink& sink) {
auto info = queryPathInfo(storePath).cast<const NarInfo>();
uint64_t narSize = 0;
LambdaSink wrapperSink([&](const unsigned char* data, size_t len) {
sink(data, len);
narSize += len;
});
auto decompressor = makeDecompressionSink(info->compression, wrapperSink);
try {
getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile& e) {
throw SubstituteGone(e.what());
}
decompressor->finish();
stats.narRead++;
// stats.narReadCompressedBytes += nar->size(); // FIXME
stats.narReadBytes += narSize;
}
void BinaryCacheStore::queryPathInfoUncached(
const Path& storePath,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept {
auto uri = getUri();
auto act = std::make_shared<Activity>(
*logger, lvlTalkative, actQueryPathInfo,
fmt("querying info about '%s' on '%s'", storePath, uri),
Logger::Fields{storePath, uri});
PushActivity pact(act->id);
auto narInfoFile = narInfoFileFor(storePath);
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getFile(
narInfoFile, {[=](std::future<std::shared_ptr<std::string>> fut) {
try {
if (ref != info.path)
queryPathInfo(ref);
} catch (InvalidPath &) {
throw Error(format("cannot add '%s' to the binary cache because the reference '%s' is not valid")
% info.path % ref);
auto data = fut.get();
if (!data) return (*callbackPtr)(nullptr);
stats.narInfoRead++;
(*callbackPtr)(
(std::shared_ptr<ValidPathInfo>)std::make_shared<NarInfo>(
*this, *data, narInfoFile));
(void)
act; // force Activity into this lambda to ensure it stays alive
} catch (...) {
callbackPtr->rethrow();
}
assert(nar->compare(0, narMagic.size(), narMagic) == 0);
auto narInfo = make_ref<NarInfo>(info);
narInfo->narSize = nar->size();
narInfo->narHash = hashString(htSHA256, *nar);
if (info.narHash && info.narHash != narInfo->narHash)
throw Error(format("refusing to copy corrupted path '%1%' to binary cache") % info.path);
auto accessor_ = std::dynamic_pointer_cast<RemoteFSAccessor>(accessor);
/* Optionally write a JSON file containing a listing of the
contents of the NAR. */
if (writeNARListing) {
std::ostringstream jsonOut;
{
JSONObject jsonRoot(jsonOut);
jsonRoot.attr("version", 1);
auto narAccessor = makeNarAccessor(nar);
if (accessor_)
accessor_->addToCache(info.path, *nar, narAccessor);
{
auto res = jsonRoot.placeholder("root");
listNar(res, narAccessor, "", true);
}
}
upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
}
else {
if (accessor_)
accessor_->addToCache(info.path, *nar, makeNarAccessor(nar));
}
/* Compress the NAR. */
narInfo->compression = compression;
auto now1 = std::chrono::steady_clock::now();
auto narCompressed = compress(compression, *nar, parallelCompression);
auto now2 = std::chrono::steady_clock::now();
narInfo->fileHash = hashString(htSHA256, *narCompressed);
narInfo->fileSize = narCompressed->size();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
printMsg(lvlTalkative, format("copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache")
% narInfo->path % narInfo->narSize
% ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0)
% duration);
/* Atomically write the NAR file. */
narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar"
+ (compression == "xz" ? ".xz" :
compression == "bzip2" ? ".bz2" :
compression == "br" ? ".br" :
"");
if (repair || !fileExists(narInfo->url)) {
stats.narWrite++;
upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
} else
stats.narWriteAverted++;
stats.narWriteBytes += nar->size();
stats.narWriteCompressedBytes += narCompressed->size();
stats.narWriteCompressionTimeMs += duration;
/* Atomically write the NAR info file.*/
if (secretKey) narInfo->sign(*secretKey);
writeNarInfo(narInfo);
stats.narInfoWrite++;
}});
}
bool BinaryCacheStore::isValidPathUncached(const Path & storePath)
{
// FIXME: this only checks whether a .narinfo with a matching hash
// part exists. So f4kb...-foo matches f4kb...-bar, even
// though they shouldn't. Not easily fixed.
return fileExists(narInfoFileFor(storePath));
Path BinaryCacheStore::addToStore(const string& name, const Path& srcPath,
                                  bool recursive, HashType hashAlgo,
                                  PathFilter& filter, RepairFlag repair) {
  // FIXME: some cut&paste from LocalStore::addToStore().

  /* Read the whole path into memory. This is not a very scalable
     method for very large paths, but `copyPath' is mainly used for
     small files. */
  StringSink narSink;
  Hash contentHash;
  if (!recursive) {
    // Flat mode: hash the raw file contents, but still store them as a NAR.
    auto contents = readFile(srcPath);
    dumpString(contents, narSink);
    contentHash = hashString(hashAlgo, contents);
  } else {
    // Recursive mode: hash the NAR serialisation itself.
    dumpPath(srcPath, narSink, filter);
    contentHash = hashString(hashAlgo, *narSink.s);
  }

  ValidPathInfo info;
  info.path = makeFixedOutputPath(recursive, contentHash, name);

  addToStore(info, narSink.s, repair, CheckSigs, nullptr);

  return info.path;
}
void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
{
auto info = queryPathInfo(storePath).cast<const NarInfo>();
Path BinaryCacheStore::addTextToStore(const string& name, const string& s,
const PathSet& references,
RepairFlag repair) {
ValidPathInfo info;
info.path = computeStorePathForText(name, s, references);
info.references = references;
uint64_t narSize = 0;
LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
sink(data, len);
narSize += len;
});
auto decompressor = makeDecompressionSink(info->compression, wrapperSink);
try {
getFile(info->url, *decompressor);
} catch (NoSuchBinaryCacheFile & e) {
throw SubstituteGone(e.what());
}
decompressor->finish();
stats.narRead++;
//stats.narReadCompressedBytes += nar->size(); // FIXME
stats.narReadBytes += narSize;
}
void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
{
auto uri = getUri();
auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
fmt("querying info about '%s' on '%s'", storePath, uri), Logger::Fields{storePath, uri});
PushActivity pact(act->id);
auto narInfoFile = narInfoFileFor(storePath);
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getFile(narInfoFile,
{[=](std::future<std::shared_ptr<std::string>> fut) {
try {
auto data = fut.get();
if (!data) return (*callbackPtr)(nullptr);
stats.narInfoRead++;
(*callbackPtr)((std::shared_ptr<ValidPathInfo>)
std::make_shared<NarInfo>(*this, *data, narInfoFile));
(void) act; // force Activity into this lambda to ensure it stays alive
} catch (...) {
callbackPtr->rethrow();
}
}});
}
Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
{
// FIXME: some cut&paste from LocalStore::addToStore().
/* Read the whole path into memory. This is not a very scalable
method for very large paths, but `copyPath' is mainly used for
small files. */
if (repair || !isValidPath(info.path)) {
StringSink sink;
Hash h;
if (recursive) {
dumpPath(srcPath, sink, filter);
h = hashString(hashAlgo, *sink.s);
} else {
auto s = readFile(srcPath);
dumpString(s, sink);
h = hashString(hashAlgo, s);
}
ValidPathInfo info;
info.path = makeFixedOutputPath(recursive, h, name);
dumpString(s, sink);
addToStore(info, sink.s, repair, CheckSigs, nullptr);
}
return info.path;
return info.path;
}
Path BinaryCacheStore::addTextToStore(const string & name, const string & s,
const PathSet & references, RepairFlag repair)
{
ValidPathInfo info;
info.path = computeStorePathForText(name, s, references);
info.references = references;
ref<FSAccessor> BinaryCacheStore::getFSAccessor() {
  // Browse the cache's NARs through a remote accessor, optionally backed by
  // a local NAR cache directory (the 'local-nar-cache' setting).
  auto self = ref<Store>(shared_from_this());
  return make_ref<RemoteFSAccessor>(self, localNarCache);
}
if (repair || !isValidPath(info.path)) {
StringSink sink;
dumpString(s, sink);
addToStore(info, sink.s, repair, CheckSigs, nullptr);
void BinaryCacheStore::addSignatures(const Path& storePath,
                                     const StringSet& sigs) {
  /* Note: this is inherently racy since there is no locking on
     binary caches. In particular, with S3 this is unreliable, even
     when addSignatures() is called sequentially on a path, because
     S3 might return an outdated cached version. */

  // Copy the current path info, merge in the new signatures, and rewrite
  // the .narinfo. writeNarInfo() derives the file name itself, so there is
  // no need to compute it here (the previous unused local was removed).
  auto narInfo = make_ref<NarInfo>((NarInfo&)*queryPathInfo(storePath));

  narInfo->sigs.insert(sigs.begin(), sigs.end());

  writeNarInfo(narInfo);
}
std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path& path) {
  // Build logs are stored under the *deriver's* base name, so first resolve
  // 'path' to the derivation that built it.
  // (Removed a stray 'return info.path;' line that referenced 'info' outside
  // its scope — leftover interleaved diff residue that would not compile.)
  Path drvPath;

  if (isDerivation(path))
    drvPath = path;
  else {
    try {
      auto info = queryPathInfo(path);
      // FIXME: add a "Log" field to .narinfo
      if (info->deriver == "") return nullptr;
      drvPath = info->deriver;
    } catch (InvalidPath&) {
      return nullptr;  // unknown path -> no log available
    }
  }

  auto logPath = "log/" + baseNameOf(drvPath);

  debug("fetching build log from binary cache '%s/%s'", getUri(), logPath);

  return getFile(logPath);
}
ref<FSAccessor> BinaryCacheStore::getFSAccessor()
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), localNarCache);
}
void BinaryCacheStore::addSignatures(const Path & storePath, const StringSet & sigs)
{
/* Note: this is inherently racy since there is no locking on
binary caches. In particular, with S3 this unreliable, even
when addSignatures() is called sequentially on a path, because
S3 might return an outdated cached version. */
auto narInfo = make_ref<NarInfo>((NarInfo &) *queryPathInfo(storePath));
narInfo->sigs.insert(sigs.begin(), sigs.end());
auto narInfoFile = narInfoFileFor(narInfo->path);
writeNarInfo(narInfo);
}
std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path)
{
Path drvPath;
if (isDerivation(path))
drvPath = path;
else {
try {
auto info = queryPathInfo(path);
// FIXME: add a "Log" field to .narinfo
if (info->deriver == "") return nullptr;
drvPath = info->deriver;
} catch (InvalidPath &) {
return nullptr;
}
}
auto logPath = "log/" + baseNameOf(drvPath);
debug("fetching build log from binary cache '%s/%s'", getUri(), logPath);
return getFile(logPath);
}
}
} // namespace nix

View file

@@ -1,115 +1,113 @@
#pragma once
#include "crypto.hh"
#include "store-api.hh"
#include "pool.hh"
#include <atomic>
#include "crypto.hh"
#include "pool.hh"
#include "store-api.hh"
namespace nix {
struct NarInfo;
class BinaryCacheStore : public Store
{
public:
class BinaryCacheStore : public Store {
public:
const Setting<std::string> compression{
this, "xz", "compression",
"NAR compression method ('xz', 'bzip2', or 'none')"};
const Setting<bool> writeNARListing{
this, false, "write-nar-listing",
"whether to write a JSON file listing the files in each NAR"};
const Setting<Path> secretKeyFile{
this, "", "secret-key",
"path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{this, "", "local-nar-cache",
"path to a local cache of NARs"};
const Setting<bool> parallelCompression{
this, false, "parallel-compression",
"enable multi-threading compression, available for xz only currently"};
const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"};
const Setting<bool> parallelCompression{this, false, "parallel-compression",
"enable multi-threading compression, available for xz only currently"};
private:
std::unique_ptr<SecretKey> secretKey;
private:
protected:
BinaryCacheStore(const Params& params);
std::unique_ptr<SecretKey> secretKey;
public:
virtual bool fileExists(const std::string& path) = 0;
protected:
virtual void upsertFile(const std::string& path, const std::string& data,
const std::string& mimeType) = 0;
BinaryCacheStore(const Params & params);
/* Note: subclasses must implement at least one of the two
following getFile() methods. */
public:
/* Dump the contents of the specified file to a sink. */
virtual void getFile(const std::string& path, Sink& sink);
virtual bool fileExists(const std::string & path) = 0;
/* Fetch the specified file and call the specified callback with
the result. A subclass may implement this asynchronously. */
virtual void getFile(
const std::string& path,
Callback<std::shared_ptr<std::string>> callback) noexcept;
virtual void upsertFile(const std::string & path,
const std::string & data,
const std::string & mimeType) = 0;
std::shared_ptr<std::string> getFile(const std::string& path);
/* Note: subclasses must implement at least one of the two
following getFile() methods. */
protected:
bool wantMassQuery_ = false;
int priority = 50;
/* Dump the contents of the specified file to a sink. */
virtual void getFile(const std::string & path, Sink & sink);
public:
virtual void init();
/* Fetch the specified file and call the specified callback with
the result. A subclass may implement this asynchronously. */
virtual void getFile(const std::string & path,
Callback<std::shared_ptr<std::string>> callback) noexcept;
private:
std::string narMagic;
std::shared_ptr<std::string> getFile(const std::string & path);
std::string narInfoFileFor(const Path& storePath);
protected:
void writeNarInfo(ref<NarInfo> narInfo);
bool wantMassQuery_ = false;
int priority = 50;
public:
bool isValidPathUncached(const Path& path) override;
public:
void queryPathInfoUncached(
const Path& path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
virtual void init();
Path queryPathFromHashPart(const string& hashPart) override {
unsupported("queryPathFromHashPart");
}
private:
bool wantMassQuery() override { return wantMassQuery_; }
std::string narMagic;
void addToStore(const ValidPathInfo& info, const ref<std::string>& nar,
RepairFlag repair, CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override;
std::string narInfoFileFor(const Path & storePath);
Path addToStore(const string& name, const Path& srcPath, bool recursive,
HashType hashAlgo, PathFilter& filter,
RepairFlag repair) override;
void writeNarInfo(ref<NarInfo> narInfo);
Path addTextToStore(const string& name, const string& s,
const PathSet& references, RepairFlag repair) override;
public:
void narFromPath(const Path& path, Sink& sink) override;
bool isValidPathUncached(const Path & path) override;
BuildResult buildDerivation(const Path& drvPath, const BasicDerivation& drv,
BuildMode buildMode) override {
unsupported("buildDerivation");
}
void queryPathInfoUncached(const Path & path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
void ensurePath(const Path& path) override { unsupported("ensurePath"); }
Path queryPathFromHashPart(const string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
ref<FSAccessor> getFSAccessor() override;
bool wantMassQuery() override { return wantMassQuery_; }
void addSignatures(const Path& storePath, const StringSet& sigs) override;
void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
RepairFlag repair, CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override;
Path addToStore(const string & name, const Path & srcPath,
bool recursive, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override;
Path addTextToStore(const string & name, const string & s,
const PathSet & references, RepairFlag repair) override;
void narFromPath(const Path & path, Sink & sink) override;
BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override
{ unsupported("buildDerivation"); }
void ensurePath(const Path & path) override
{ unsupported("ensurePath"); }
ref<FSAccessor> getFSAccessor() override;
void addSignatures(const Path & storePath, const StringSet & sigs) override;
std::shared_ptr<std::string> getBuildLog(const Path & path) override;
int getPriority() override { return priority; }
std::shared_ptr<std::string> getBuildLog(const Path& path) override;
int getPriority() override { return priority; }
};
MakeError(NoSuchBinaryCacheFile, Error);
}
} // namespace nix

File diff suppressed because it is too large Load diff

View file

@@ -5,7 +5,7 @@
namespace nix {
// TODO: make pluggable.
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
void builtinBuildenv(const BasicDerivation & drv);
void builtinFetchurl(const BasicDerivation& drv, const std::string& netrcData);
void builtinBuildenv(const BasicDerivation& drv);
}
} // namespace nix

View file

@@ -1,13 +1,12 @@
#include "builtins.hh"
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#include <algorithm>
#include "builtins.hh"
namespace nix {
typedef std::map<Path,int> Priorities;
typedef std::map<Path, int> Priorities;
// FIXME: change into local variables.
@@ -16,102 +15,104 @@ static Priorities priorities;
static unsigned long symlinks;
/* For each activated package, create symlinks */
static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
{
DirEntries srcFiles;
static void createLinks(const Path& srcDir, const Path& dstDir, int priority) {
DirEntries srcFiles;
try {
srcFiles = readDirectory(srcDir);
} catch (SysError& e) {
if (e.errNo == ENOTDIR) {
printError(
"warning: not including '%s' in the user environment because it's "
"not a directory",
srcDir);
return;
}
throw;
}
for (const auto& ent : srcFiles) {
if (ent.name[0] == '.') /* not matched by glob */
continue;
auto srcFile = srcDir + "/" + ent.name;
auto dstFile = dstDir + "/" + ent.name;
struct stat srcSt;
try {
srcFiles = readDirectory(srcDir);
} catch (SysError & e) {
if (e.errNo == ENOTDIR) {
printError("warning: not including '%s' in the user environment because it's not a directory", srcDir);
return;
}
throw;
if (stat(srcFile.c_str(), &srcSt) == -1)
throw SysError("getting status of '%1%'", srcFile);
} catch (SysError& e) {
if (e.errNo == ENOENT || e.errNo == ENOTDIR) {
printError("warning: skipping dangling symlink '%s'", dstFile);
continue;
}
throw;
}
for (const auto & ent : srcFiles) {
if (ent.name[0] == '.')
/* not matched by glob */
continue;
auto srcFile = srcDir + "/" + ent.name;
auto dstFile = dstDir + "/" + ent.name;
/* The files below are special-cased to that they don't show up
* in user profiles, either because they are useless, or
* because they would cauase pointless collisions (e.g., each
* Python package brings its own
* `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
*/
if (hasSuffix(srcFile, "/propagated-build-inputs") ||
hasSuffix(srcFile, "/nix-support") ||
hasSuffix(srcFile, "/perllocal.pod") ||
hasSuffix(srcFile, "/info/dir") || hasSuffix(srcFile, "/log"))
continue;
struct stat srcSt;
try {
if (stat(srcFile.c_str(), &srcSt) == -1)
throw SysError("getting status of '%1%'", srcFile);
} catch (SysError & e) {
if (e.errNo == ENOENT || e.errNo == ENOTDIR) {
printError("warning: skipping dangling symlink '%s'", dstFile);
continue;
}
throw;
else if (S_ISDIR(srcSt.st_mode)) {
struct stat dstSt;
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
if (S_ISDIR(dstSt.st_mode)) {
createLinks(srcFile, dstFile, priority);
continue;
} else if (S_ISLNK(dstSt.st_mode)) {
auto target = canonPath(dstFile, true);
if (!S_ISDIR(lstat(target).st_mode))
throw Error("collision between '%1%' and non-directory '%2%'",
srcFile, target);
if (unlink(dstFile.c_str()) == -1)
throw SysError(format("unlinking '%1%'") % dstFile);
if (mkdir(dstFile.c_str(), 0755) == -1)
throw SysError(format("creating directory '%1%'"));
createLinks(target, dstFile, priorities[dstFile]);
createLinks(srcFile, dstFile, priority);
continue;
}
/* The files below are special-cased to that they don't show up
* in user profiles, either because they are useless, or
* because they would cauase pointless collisions (e.g., each
* Python package brings its own
* `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
*/
if (hasSuffix(srcFile, "/propagated-build-inputs") ||
hasSuffix(srcFile, "/nix-support") ||
hasSuffix(srcFile, "/perllocal.pod") ||
hasSuffix(srcFile, "/info/dir") ||
hasSuffix(srcFile, "/log"))
continue;
else if (S_ISDIR(srcSt.st_mode)) {
struct stat dstSt;
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
if (S_ISDIR(dstSt.st_mode)) {
createLinks(srcFile, dstFile, priority);
continue;
} else if (S_ISLNK(dstSt.st_mode)) {
auto target = canonPath(dstFile, true);
if (!S_ISDIR(lstat(target).st_mode))
throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target);
if (unlink(dstFile.c_str()) == -1)
throw SysError(format("unlinking '%1%'") % dstFile);
if (mkdir(dstFile.c_str(), 0755) == -1)
throw SysError(format("creating directory '%1%'"));
createLinks(target, dstFile, priorities[dstFile]);
createLinks(srcFile, dstFile, priority);
continue;
}
} else if (errno != ENOENT)
throw SysError(format("getting status of '%1%'") % dstFile);
}
else {
struct stat dstSt;
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
if (S_ISLNK(dstSt.st_mode)) {
auto prevPriority = priorities[dstFile];
if (prevPriority == priority)
throw Error(
"packages '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);
if (prevPriority < priority)
continue;
if (unlink(dstFile.c_str()) == -1)
throw SysError(format("unlinking '%1%'") % dstFile);
} else if (S_ISDIR(dstSt.st_mode))
throw Error("collision between non-directory '%1%' and directory '%2%'", srcFile, dstFile);
} else if (errno != ENOENT)
throw SysError(format("getting status of '%1%'") % dstFile);
}
createSymlink(srcFile, dstFile);
priorities[dstFile] = priority;
symlinks++;
} else if (errno != ENOENT)
throw SysError(format("getting status of '%1%'") % dstFile);
}
else {
struct stat dstSt;
auto res = lstat(dstFile.c_str(), &dstSt);
if (res == 0) {
if (S_ISLNK(dstSt.st_mode)) {
auto prevPriority = priorities[dstFile];
if (prevPriority == priority)
throw Error(
"packages '%1%' and '%2%' have the same priority %3%; "
"use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
"to change the priority of one of the conflicting packages"
" (0 being the highest priority)",
srcFile, readLink(dstFile), priority);
if (prevPriority < priority) continue;
if (unlink(dstFile.c_str()) == -1)
throw SysError(format("unlinking '%1%'") % dstFile);
} else if (S_ISDIR(dstSt.st_mode))
throw Error(
"collision between non-directory '%1%' and directory '%2%'",
srcFile, dstFile);
} else if (errno != ENOENT)
throw SysError(format("getting status of '%1%'") % dstFile);
}
createSymlink(srcFile, dstFile);
priorities[dstFile] = priority;
symlinks++;
}
}
typedef std::set<Path> FileProp;
@@ -121,84 +122,87 @@ static FileProp postponed = FileProp{};
static Path out;
static void addPkg(const Path & pkgDir, int priority)
{
if (done.count(pkgDir)) return;
done.insert(pkgDir);
createLinks(pkgDir, out, priority);
static void addPkg(const Path& pkgDir, int priority) {
if (done.count(pkgDir)) return;
done.insert(pkgDir);
createLinks(pkgDir, out, priority);
try {
for (const auto & p : tokenizeString<std::vector<string>>(
readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
if (!done.count(p))
postponed.insert(p);
} catch (SysError & e) {
if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
}
try {
for (const auto& p : tokenizeString<std::vector<string>>(
readFile(pkgDir + "/nix-support/propagated-user-env-packages"),
" \n"))
if (!done.count(p)) postponed.insert(p);
} catch (SysError& e) {
if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
}
}
struct Package {
Path path;
bool active;
int priority;
Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
Path path;
bool active;
int priority;
Package(Path path, bool active, int priority)
: path{path}, active{active}, priority{priority} {}
};
typedef std::vector<Package> Packages;
void builtinBuildenv(const BasicDerivation & drv)
{
auto getAttr = [&](const string & name) {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
return i->second;
};
void builtinBuildenv(const BasicDerivation& drv) {
auto getAttr = [&](const string& name) {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
return i->second;
};
out = getAttr("out");
createDirs(out);
out = getAttr("out");
createDirs(out);
/* Convert the stuff we get from the environment back into a
* coherent data type. */
Packages pkgs;
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
while (!derivations.empty()) {
/* !!! We're trusting the caller to structure derivations env var correctly */
auto active = derivations.front(); derivations.pop_front();
auto priority = stoi(derivations.front()); derivations.pop_front();
auto outputs = stoi(derivations.front()); derivations.pop_front();
for (auto n = 0; n < outputs; n++) {
auto path = derivations.front(); derivations.pop_front();
pkgs.emplace_back(path, active != "false", priority);
}
}
/* Symlink to the packages that have been installed explicitly by the
* user. Process in priority order to reduce unnecessary
* symlink/unlink steps.
/* Convert the stuff we get from the environment back into a
* coherent data type. */
Packages pkgs;
auto derivations = tokenizeString<Strings>(getAttr("derivations"));
while (!derivations.empty()) {
/* !!! We're trusting the caller to structure derivations env var correctly
*/
std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) {
return a.priority < b.priority || (a.priority == b.priority && a.path < b.path);
});
for (const auto & pkg : pkgs)
if (pkg.active)
addPkg(pkg.path, pkg.priority);
/* Symlink to the packages that have been "propagated" by packages
* installed by the user (i.e., package X declares that it wants Y
* installed as well). We do these later because they have a lower
* priority in case of collisions.
*/
auto priorityCounter = 1000;
while (!postponed.empty()) {
auto pkgDirs = postponed;
postponed = FileProp{};
for (const auto & pkgDir : pkgDirs)
addPkg(pkgDir, priorityCounter++);
auto active = derivations.front();
derivations.pop_front();
auto priority = stoi(derivations.front());
derivations.pop_front();
auto outputs = stoi(derivations.front());
derivations.pop_front();
for (auto n = 0; n < outputs; n++) {
auto path = derivations.front();
derivations.pop_front();
pkgs.emplace_back(path, active != "false", priority);
}
}
printError("created %d symlinks in user environment", symlinks);
/* Symlink to the packages that have been installed explicitly by the
* user. Process in priority order to reduce unnecessary
* symlink/unlink steps.
*/
std::sort(pkgs.begin(), pkgs.end(), [](const Package& a, const Package& b) {
return a.priority < b.priority ||
(a.priority == b.priority && a.path < b.path);
});
for (const auto& pkg : pkgs)
if (pkg.active) addPkg(pkg.path, pkg.priority);
createSymlink(getAttr("manifest"), out + "/manifest.nix");
/* Symlink to the packages that have been "propagated" by packages
* installed by the user (i.e., package X declares that it wants Y
* installed as well). We do these later because they have a lower
* priority in case of collisions.
*/
auto priorityCounter = 1000;
while (!postponed.empty()) {
auto pkgDirs = postponed;
postponed = FileProp{};
for (const auto& pkgDir : pkgDirs) addPkg(pkgDir, priorityCounter++);
}
printError("created %d symlinks in user environment", symlinks);
createSymlink(getAttr("manifest"), out + "/manifest.nix");
}
}
} // namespace nix

View file

@ -1,78 +1,76 @@
#include "archive.hh"
#include "builtins.hh"
#include "compression.hh"
#include "download.hh"
#include "store-api.hh"
#include "archive.hh"
#include "compression.hh"
namespace nix {
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
{
/* Make the host's netrc data available. Too bad curl requires
this to be stored in a file. It would be nice if we could just
pass a pointer to the data. */
if (netrcData != "") {
settings.netrcFile = "netrc";
writeFile(settings.netrcFile, netrcData, 0600);
void builtinFetchurl(const BasicDerivation& drv, const std::string& netrcData) {
/* Make the host's netrc data available. Too bad curl requires
this to be stored in a file. It would be nice if we could just
pass a pointer to the data. */
if (netrcData != "") {
settings.netrcFile = "netrc";
writeFile(settings.netrcFile, netrcData, 0600);
}
auto getAttr = [&](const string& name) {
auto i = drv.env.find(name);
if (i == drv.env.end())
throw Error(format("attribute '%s' missing") % name);
return i->second;
};
Path storePath = getAttr("out");
auto mainUrl = getAttr("url");
bool unpack = get(drv.env, "unpack", "") == "1";
/* Note: have to use a fresh downloader here because we're in
a forked process. */
auto downloader = makeDownloader();
auto fetch = [&](const std::string& url) {
auto source = sinkToSource([&](Sink& sink) {
/* No need to do TLS verification, because we check the hash of
the result anyway. */
DownloadRequest request(url);
request.verifyTLS = false;
request.decompress = false;
auto decompressor = makeDecompressionSink(
unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
downloader->download(std::move(request), *decompressor);
decompressor->finish();
});
if (unpack)
restorePath(storePath, *source);
else
writeFile(storePath, *source);
auto executable = drv.env.find("executable");
if (executable != drv.env.end() && executable->second == "1") {
if (chmod(storePath.c_str(), 0755) == -1)
throw SysError(format("making '%1%' executable") % storePath);
}
};
auto getAttr = [&](const string & name) {
auto i = drv.env.find(name);
if (i == drv.env.end()) throw Error(format("attribute '%s' missing") % name);
return i->second;
};
/* Try the hashed mirrors first. */
if (getAttr("outputHashMode") == "flat")
for (auto hashedMirror : settings.hashedMirrors.get()) try {
if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
auto ht = parseHashType(getAttr("outputHashAlgo"));
auto h = Hash(getAttr("outputHash"), ht);
fetch(hashedMirror + printHashType(h.type) + "/" +
h.to_string(Base16, false));
return;
} catch (Error& e) {
debug(e.what());
}
Path storePath = getAttr("out");
auto mainUrl = getAttr("url");
bool unpack = get(drv.env, "unpack", "") == "1";
/* Note: have to use a fresh downloader here because we're in
a forked process. */
auto downloader = makeDownloader();
auto fetch = [&](const std::string & url) {
auto source = sinkToSource([&](Sink & sink) {
/* No need to do TLS verification, because we check the hash of
the result anyway. */
DownloadRequest request(url);
request.verifyTLS = false;
request.decompress = false;
auto decompressor = makeDecompressionSink(
unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
downloader->download(std::move(request), *decompressor);
decompressor->finish();
});
if (unpack)
restorePath(storePath, *source);
else
writeFile(storePath, *source);
auto executable = drv.env.find("executable");
if (executable != drv.env.end() && executable->second == "1") {
if (chmod(storePath.c_str(), 0755) == -1)
throw SysError(format("making '%1%' executable") % storePath);
}
};
/* Try the hashed mirrors first. */
if (getAttr("outputHashMode") == "flat")
for (auto hashedMirror : settings.hashedMirrors.get())
try {
if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
auto ht = parseHashType(getAttr("outputHashAlgo"));
auto h = Hash(getAttr("outputHash"), ht);
fetch(hashedMirror + printHashType(h.type) + "/" + h.to_string(Base16, false));
return;
} catch (Error & e) {
debug(e.what());
}
/* Otherwise try the specified URL. */
fetch(mainUrl);
/* Otherwise try the specified URL. */
fetch(mainUrl);
}
}
} // namespace nix

View file

@ -1,6 +1,6 @@
#include "crypto.hh"
#include "util.hh"
#include "globals.hh"
#include "util.hh"
#if HAVE_SODIUM
#include <sodium.h>
@ -8,119 +8,107 @@
namespace nix {
static std::pair<std::string, std::string> split(const string & s)
{
size_t colon = s.find(':');
if (colon == std::string::npos || colon == 0)
return {"", ""};
return {std::string(s, 0, colon), std::string(s, colon + 1)};
static std::pair<std::string, std::string> split(const string& s) {
size_t colon = s.find(':');
if (colon == std::string::npos || colon == 0) return {"", ""};
return {std::string(s, 0, colon), std::string(s, colon + 1)};
}
Key::Key(const string & s)
{
auto ss = split(s);
Key::Key(const string& s) {
auto ss = split(s);
name = ss.first;
key = ss.second;
name = ss.first;
key = ss.second;
if (name == "" || key == "")
throw Error("secret key is corrupt");
if (name == "" || key == "") throw Error("secret key is corrupt");
key = base64Decode(key);
key = base64Decode(key);
}
SecretKey::SecretKey(const string & s)
: Key(s)
{
SecretKey::SecretKey(const string& s) : Key(s) {
#if HAVE_SODIUM
if (key.size() != crypto_sign_SECRETKEYBYTES)
throw Error("secret key is not valid");
if (key.size() != crypto_sign_SECRETKEYBYTES)
throw Error("secret key is not valid");
#endif
}
#if !HAVE_SODIUM
[[noreturn]] static void noSodium()
{
throw Error("Nix was not compiled with libsodium, required for signed binary cache support");
[[noreturn]] static void noSodium() {
throw Error(
"Nix was not compiled with libsodium, required for signed binary cache "
"support");
}
#endif
std::string SecretKey::signDetached(const std::string & data) const
{
std::string SecretKey::signDetached(const std::string& data) const {
#if HAVE_SODIUM
unsigned char sig[crypto_sign_BYTES];
unsigned long long sigLen;
crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(),
(unsigned char *) key.data());
return name + ":" + base64Encode(std::string((char *) sig, sigLen));
unsigned char sig[crypto_sign_BYTES];
unsigned long long sigLen;
crypto_sign_detached(sig, &sigLen, (unsigned char*)data.data(), data.size(),
(unsigned char*)key.data());
return name + ":" + base64Encode(std::string((char*)sig, sigLen));
#else
noSodium();
noSodium();
#endif
}
PublicKey SecretKey::toPublicKey() const
{
PublicKey SecretKey::toPublicKey() const {
#if HAVE_SODIUM
unsigned char pk[crypto_sign_PUBLICKEYBYTES];
crypto_sign_ed25519_sk_to_pk(pk, (unsigned char *) key.data());
return PublicKey(name, std::string((char *) pk, crypto_sign_PUBLICKEYBYTES));
unsigned char pk[crypto_sign_PUBLICKEYBYTES];
crypto_sign_ed25519_sk_to_pk(pk, (unsigned char*)key.data());
return PublicKey(name, std::string((char*)pk, crypto_sign_PUBLICKEYBYTES));
#else
noSodium();
noSodium();
#endif
}
PublicKey::PublicKey(const string & s)
: Key(s)
{
PublicKey::PublicKey(const string& s) : Key(s) {
#if HAVE_SODIUM
if (key.size() != crypto_sign_PUBLICKEYBYTES)
throw Error("public key is not valid");
if (key.size() != crypto_sign_PUBLICKEYBYTES)
throw Error("public key is not valid");
#endif
}
bool verifyDetached(const std::string & data, const std::string & sig,
const PublicKeys & publicKeys)
{
bool verifyDetached(const std::string& data, const std::string& sig,
const PublicKeys& publicKeys) {
#if HAVE_SODIUM
auto ss = split(sig);
auto ss = split(sig);
auto key = publicKeys.find(ss.first);
if (key == publicKeys.end()) return false;
auto key = publicKeys.find(ss.first);
if (key == publicKeys.end()) return false;
auto sig2 = base64Decode(ss.second);
if (sig2.size() != crypto_sign_BYTES)
throw Error("signature is not valid");
auto sig2 = base64Decode(ss.second);
if (sig2.size() != crypto_sign_BYTES) throw Error("signature is not valid");
return crypto_sign_verify_detached((unsigned char *) sig2.data(),
(unsigned char *) data.data(), data.size(),
(unsigned char *) key->second.key.data()) == 0;
return crypto_sign_verify_detached(
(unsigned char*)sig2.data(), (unsigned char*)data.data(),
data.size(), (unsigned char*)key->second.key.data()) == 0;
#else
noSodium();
noSodium();
#endif
}
PublicKeys getDefaultPublicKeys()
{
PublicKeys publicKeys;
PublicKeys getDefaultPublicKeys() {
PublicKeys publicKeys;
// FIXME: filter duplicates
// FIXME: filter duplicates
for (auto s : settings.trustedPublicKeys.get()) {
PublicKey key(s);
publicKeys.emplace(key.name, key);
for (auto s : settings.trustedPublicKeys.get()) {
PublicKey key(s);
publicKeys.emplace(key.name, key);
}
for (auto secretKeyFile : settings.secretKeyFiles.get()) {
try {
SecretKey secretKey(readFile(secretKeyFile));
publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
} catch (SysError& e) {
/* Ignore unreadable key files. That's normal in a
multi-user installation. */
}
}
for (auto secretKeyFile : settings.secretKeyFiles.get()) {
try {
SecretKey secretKey(readFile(secretKeyFile));
publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
} catch (SysError & e) {
/* Ignore unreadable key files. That's normal in a
multi-user installation. */
}
}
return publicKeys;
return publicKeys;
}
}
} // namespace nix

View file

@ -1,54 +1,48 @@
#pragma once
#include "types.hh"
#include <map>
#include "types.hh"
namespace nix {
struct Key
{
std::string name;
std::string key;
struct Key {
std::string name;
std::string key;
/* Construct Key from a string in the format
<name>:<key-in-base64>. */
Key(const std::string & s);
/* Construct Key from a string in the format
<name>:<key-in-base64>. */
Key(const std::string& s);
protected:
Key(const std::string & name, const std::string & key)
: name(name), key(key) { }
protected:
Key(const std::string& name, const std::string& key) : name(name), key(key) {}
};
struct PublicKey;
struct SecretKey : Key
{
SecretKey(const std::string & s);
struct SecretKey : Key {
SecretKey(const std::string& s);
/* Return a detached signature of the given string. */
std::string signDetached(const std::string & s) const;
/* Return a detached signature of the given string. */
std::string signDetached(const std::string& s) const;
PublicKey toPublicKey() const;
PublicKey toPublicKey() const;
};
struct PublicKey : Key
{
PublicKey(const std::string & data);
struct PublicKey : Key {
PublicKey(const std::string& data);
private:
PublicKey(const std::string & name, const std::string & key)
: Key(name, key) { }
friend struct SecretKey;
private:
PublicKey(const std::string& name, const std::string& key) : Key(name, key) {}
friend struct SecretKey;
};
typedef std::map<std::string, PublicKey> PublicKeys;
/* Return true iff sig is a correct signature over data using one
of the given public keys. */
bool verifyDetached(const std::string & data, const std::string & sig,
const PublicKeys & publicKeys);
bool verifyDetached(const std::string& data, const std::string& sig,
const PublicKeys& publicKeys);
PublicKeys getDefaultPublicKeys();
}
} // namespace nix

View file

@ -1,289 +1,294 @@
#include "derivations.hh"
#include "store-api.hh"
#include "fs-accessor.hh"
#include "globals.hh"
#include "istringstream_nocopy.hh"
#include "store-api.hh"
#include "util.hh"
#include "worker-protocol.hh"
#include "fs-accessor.hh"
#include "istringstream_nocopy.hh"
namespace nix {
void DerivationOutput::parseHashInfo(bool& recursive, Hash& hash) const {
recursive = false;
string algo = hashAlgo;
void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
{
recursive = false;
string algo = hashAlgo;
if (string(algo, 0, 2) == "r:") {
recursive = true;
algo = string(algo, 2);
}
if (string(algo, 0, 2) == "r:") {
recursive = true;
algo = string(algo, 2);
}
HashType hashType = parseHashType(algo);
if (hashType == htUnknown)
throw Error(format("unknown hash algorithm '%1%'") % algo);
HashType hashType = parseHashType(algo);
if (hashType == htUnknown)
throw Error(format("unknown hash algorithm '%1%'") % algo);
hash = Hash(this->hash, hashType);
hash = Hash(this->hash, hashType);
}
Path BasicDerivation::findOutput(const string & id) const
{
auto i = outputs.find(id);
if (i == outputs.end())
throw Error(format("derivation has no output '%1%'") % id);
return i->second.path;
Path BasicDerivation::findOutput(const string& id) const {
auto i = outputs.find(id);
if (i == outputs.end())
throw Error(format("derivation has no output '%1%'") % id);
return i->second.path;
}
bool BasicDerivation::isBuiltin() const
{
return string(builder, 0, 8) == "builtin:";
bool BasicDerivation::isBuiltin() const {
return string(builder, 0, 8) == "builtin:";
}
Path writeDerivation(ref<Store> store,
const Derivation & drv, const string & name, RepairFlag repair)
{
PathSet references;
references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
for (auto & i : drv.inputDrvs)
references.insert(i.first);
/* Note that the outputs of a derivation are *not* references
(that can be missing (of course) and should not necessarily be
held during a garbage collection). */
string suffix = name + drvExtension;
string contents = drv.unparse();
return settings.readOnlyMode
? store->computeStorePathForText(suffix, contents, references)
: store->addTextToStore(suffix, contents, references, repair);
Path writeDerivation(ref<Store> store, const Derivation& drv,
const string& name, RepairFlag repair) {
PathSet references;
references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
for (auto& i : drv.inputDrvs) references.insert(i.first);
/* Note that the outputs of a derivation are *not* references
(that can be missing (of course) and should not necessarily be
held during a garbage collection). */
string suffix = name + drvExtension;
string contents = drv.unparse();
return settings.readOnlyMode
? store->computeStorePathForText(suffix, contents, references)
: store->addTextToStore(suffix, contents, references, repair);
}
/* Read string `s' from stream `str'. */
static void expect(std::istream & str, const string & s)
{
char s2[s.size()];
str.read(s2, s.size());
if (string(s2, s.size()) != s)
throw FormatError(format("expected string '%1%'") % s);
static void expect(std::istream& str, const string& s) {
char s2[s.size()];
str.read(s2, s.size());
if (string(s2, s.size()) != s)
throw FormatError(format("expected string '%1%'") % s);
}
/* Read a C-style string from stream `str'. */
static string parseString(std::istream & str)
{
string res;
expect(str, "\"");
int c;
while ((c = str.get()) != '"')
if (c == '\\') {
c = str.get();
if (c == 'n') res += '\n';
else if (c == 'r') res += '\r';
else if (c == 't') res += '\t';
else res += c;
}
else res += c;
return res;
static string parseString(std::istream& str) {
string res;
expect(str, "\"");
int c;
while ((c = str.get()) != '"')
if (c == '\\') {
c = str.get();
if (c == 'n')
res += '\n';
else if (c == 'r')
res += '\r';
else if (c == 't')
res += '\t';
else
res += c;
} else
res += c;
return res;
}
static Path parsePath(std::istream & str)
{
string s = parseString(str);
if (s.size() == 0 || s[0] != '/')
throw FormatError(format("bad path '%1%' in derivation") % s);
return s;
static Path parsePath(std::istream& str) {
string s = parseString(str);
if (s.size() == 0 || s[0] != '/')
throw FormatError(format("bad path '%1%' in derivation") % s);
return s;
}
static bool endOfList(std::istream & str)
{
if (str.peek() == ',') {
str.get();
return false;
}
if (str.peek() == ']') {
str.get();
return true;
}
static bool endOfList(std::istream& str) {
if (str.peek() == ',') {
str.get();
return false;
}
if (str.peek() == ']') {
str.get();
return true;
}
return false;
}
static StringSet parseStrings(std::istream & str, bool arePaths)
{
StringSet res;
while (!endOfList(str))
res.insert(arePaths ? parsePath(str) : parseString(str));
return res;
static StringSet parseStrings(std::istream& str, bool arePaths) {
StringSet res;
while (!endOfList(str))
res.insert(arePaths ? parsePath(str) : parseString(str));
return res;
}
static Derivation parseDerivation(const string& s) {
Derivation drv;
istringstream_nocopy str(s);
expect(str, "Derive([");
static Derivation parseDerivation(const string & s)
{
Derivation drv;
istringstream_nocopy str(s);
expect(str, "Derive([");
/* Parse the list of outputs. */
while (!endOfList(str)) {
DerivationOutput out;
expect(str, "("); string id = parseString(str);
expect(str, ","); out.path = parsePath(str);
expect(str, ","); out.hashAlgo = parseString(str);
expect(str, ","); out.hash = parseString(str);
expect(str, ")");
drv.outputs[id] = out;
}
/* Parse the list of input derivations. */
expect(str, ",[");
while (!endOfList(str)) {
expect(str, "(");
Path drvPath = parsePath(str);
expect(str, ",[");
drv.inputDrvs[drvPath] = parseStrings(str, false);
expect(str, ")");
}
expect(str, ",["); drv.inputSrcs = parseStrings(str, true);
expect(str, ","); drv.platform = parseString(str);
expect(str, ","); drv.builder = parseString(str);
/* Parse the builder arguments. */
expect(str, ",[");
while (!endOfList(str))
drv.args.push_back(parseString(str));
/* Parse the environment variables. */
expect(str, ",[");
while (!endOfList(str)) {
expect(str, "("); string name = parseString(str);
expect(str, ","); string value = parseString(str);
expect(str, ")");
drv.env[name] = value;
}
/* Parse the list of outputs. */
while (!endOfList(str)) {
DerivationOutput out;
expect(str, "(");
string id = parseString(str);
expect(str, ",");
out.path = parsePath(str);
expect(str, ",");
out.hashAlgo = parseString(str);
expect(str, ",");
out.hash = parseString(str);
expect(str, ")");
return drv;
drv.outputs[id] = out;
}
/* Parse the list of input derivations. */
expect(str, ",[");
while (!endOfList(str)) {
expect(str, "(");
Path drvPath = parsePath(str);
expect(str, ",[");
drv.inputDrvs[drvPath] = parseStrings(str, false);
expect(str, ")");
}
expect(str, ",[");
drv.inputSrcs = parseStrings(str, true);
expect(str, ",");
drv.platform = parseString(str);
expect(str, ",");
drv.builder = parseString(str);
/* Parse the builder arguments. */
expect(str, ",[");
while (!endOfList(str)) drv.args.push_back(parseString(str));
/* Parse the environment variables. */
expect(str, ",[");
while (!endOfList(str)) {
expect(str, "(");
string name = parseString(str);
expect(str, ",");
string value = parseString(str);
expect(str, ")");
drv.env[name] = value;
}
expect(str, ")");
return drv;
}
Derivation readDerivation(const Path & drvPath)
{
try {
return parseDerivation(readFile(drvPath));
} catch (FormatError & e) {
throw Error(format("error parsing derivation '%1%': %2%") % drvPath % e.msg());
}
Derivation readDerivation(const Path& drvPath) {
try {
return parseDerivation(readFile(drvPath));
} catch (FormatError& e) {
throw Error(format("error parsing derivation '%1%': %2%") % drvPath %
e.msg());
}
}
Derivation Store::derivationFromPath(const Path & drvPath)
{
assertStorePath(drvPath);
ensurePath(drvPath);
auto accessor = getFSAccessor();
try {
return parseDerivation(accessor->readFile(drvPath));
} catch (FormatError & e) {
throw Error(format("error parsing derivation '%1%': %2%") % drvPath % e.msg());
}
Derivation Store::derivationFromPath(const Path& drvPath) {
assertStorePath(drvPath);
ensurePath(drvPath);
auto accessor = getFSAccessor();
try {
return parseDerivation(accessor->readFile(drvPath));
} catch (FormatError& e) {
throw Error(format("error parsing derivation '%1%': %2%") % drvPath %
e.msg());
}
}
static void printString(string & res, const string & s)
{
res += '"';
for (const char * i = s.c_str(); *i; i++)
if (*i == '\"' || *i == '\\') { res += "\\"; res += *i; }
else if (*i == '\n') res += "\\n";
else if (*i == '\r') res += "\\r";
else if (*i == '\t') res += "\\t";
else res += *i;
res += '"';
static void printString(string& res, const string& s) {
res += '"';
for (const char* i = s.c_str(); *i; i++)
if (*i == '\"' || *i == '\\') {
res += "\\";
res += *i;
} else if (*i == '\n')
res += "\\n";
else if (*i == '\r')
res += "\\r";
else if (*i == '\t')
res += "\\t";
else
res += *i;
res += '"';
}
template<class ForwardIterator>
static void printStrings(string & res, ForwardIterator i, ForwardIterator j)
{
res += '[';
bool first = true;
for ( ; i != j; ++i) {
if (first) first = false; else res += ',';
printString(res, *i);
}
res += ']';
template <class ForwardIterator>
static void printStrings(string& res, ForwardIterator i, ForwardIterator j) {
res += '[';
bool first = true;
for (; i != j; ++i) {
if (first)
first = false;
else
res += ',';
printString(res, *i);
}
res += ']';
}
string Derivation::unparse() const {
string s;
s.reserve(65536);
s += "Derive([";
string Derivation::unparse() const
{
string s;
s.reserve(65536);
s += "Derive([";
bool first = true;
for (auto& i : outputs) {
if (first)
first = false;
else
s += ',';
s += '(';
printString(s, i.first);
s += ',';
printString(s, i.second.path);
s += ',';
printString(s, i.second.hashAlgo);
s += ',';
printString(s, i.second.hash);
s += ')';
}
bool first = true;
for (auto & i : outputs) {
if (first) first = false; else s += ',';
s += '('; printString(s, i.first);
s += ','; printString(s, i.second.path);
s += ','; printString(s, i.second.hashAlgo);
s += ','; printString(s, i.second.hash);
s += ')';
}
s += "],[";
first = true;
for (auto& i : inputDrvs) {
if (first)
first = false;
else
s += ',';
s += '(';
printString(s, i.first);
s += ',';
printStrings(s, i.second.begin(), i.second.end());
s += ')';
}
s += "],[";
first = true;
for (auto & i : inputDrvs) {
if (first) first = false; else s += ',';
s += '('; printString(s, i.first);
s += ','; printStrings(s, i.second.begin(), i.second.end());
s += ')';
}
s += "],";
printStrings(s, inputSrcs.begin(), inputSrcs.end());
s += "],";
printStrings(s, inputSrcs.begin(), inputSrcs.end());
s += ',';
printString(s, platform);
s += ',';
printString(s, builder);
s += ',';
printStrings(s, args.begin(), args.end());
s += ','; printString(s, platform);
s += ','; printString(s, builder);
s += ','; printStrings(s, args.begin(), args.end());
s += ",[";
first = true;
for (auto& i : env) {
if (first)
first = false;
else
s += ',';
s += '(';
printString(s, i.first);
s += ',';
printString(s, i.second);
s += ')';
}
s += ",[";
first = true;
for (auto & i : env) {
if (first) first = false; else s += ',';
s += '('; printString(s, i.first);
s += ','; printString(s, i.second);
s += ')';
}
s += "])";
s += "])";
return s;
return s;
}
bool isDerivation(const string & fileName)
{
return hasSuffix(fileName, drvExtension);
bool isDerivation(const string& fileName) {
return hasSuffix(fileName, drvExtension);
}
bool BasicDerivation::isFixedOutput() const
{
return outputs.size() == 1 &&
outputs.begin()->first == "out" &&
outputs.begin()->second.hash != "";
bool BasicDerivation::isFixedOutput() const {
return outputs.size() == 1 && outputs.begin()->first == "out" &&
outputs.begin()->second.hash != "";
}
DrvHashes drvHashes;
/* Returns the hash of a derivation modulo fixed-output
subderivations. A fixed-output derivation is a derivation with one
output (`out') for which an expected hash and hash algorithm are
@ -304,113 +309,95 @@ DrvHashes drvHashes;
paths have been replaced by the result of a recursive call to this
function, and that for fixed-output derivations we return a hash of
its output path. */
Hash hashDerivationModulo(Store & store, Derivation drv)
{
/* Return a fixed hash for fixed-output derivations. */
if (drv.isFixedOutput()) {
DerivationOutputs::const_iterator i = drv.outputs.begin();
return hashString(htSHA256, "fixed:out:"
+ i->second.hashAlgo + ":"
+ i->second.hash + ":"
+ i->second.path);
Hash hashDerivationModulo(Store& store, Derivation drv) {
/* Return a fixed hash for fixed-output derivations. */
if (drv.isFixedOutput()) {
DerivationOutputs::const_iterator i = drv.outputs.begin();
return hashString(htSHA256, "fixed:out:" + i->second.hashAlgo + ":" +
i->second.hash + ":" + i->second.path);
}
/* For other derivations, replace the inputs paths with recursive
calls to this function.*/
DerivationInputs inputs2;
for (auto& i : drv.inputDrvs) {
Hash h = drvHashes[i.first];
if (!h) {
assert(store.isValidPath(i.first));
Derivation drv2 = readDerivation(store.toRealPath(i.first));
h = hashDerivationModulo(store, drv2);
drvHashes[i.first] = h;
}
inputs2[h.to_string(Base16, false)] = i.second;
}
drv.inputDrvs = inputs2;
/* For other derivations, replace the inputs paths with recursive
calls to this function.*/
DerivationInputs inputs2;
for (auto & i : drv.inputDrvs) {
Hash h = drvHashes[i.first];
if (!h) {
assert(store.isValidPath(i.first));
Derivation drv2 = readDerivation(store.toRealPath(i.first));
h = hashDerivationModulo(store, drv2);
drvHashes[i.first] = h;
}
inputs2[h.to_string(Base16, false)] = i.second;
}
drv.inputDrvs = inputs2;
return hashString(htSHA256, drv.unparse());
return hashString(htSHA256, drv.unparse());
}
DrvPathWithOutputs parseDrvPathWithOutputs(const string & s)
{
size_t n = s.find("!");
return n == s.npos
? DrvPathWithOutputs(s, std::set<string>())
: DrvPathWithOutputs(string(s, 0, n), tokenizeString<std::set<string> >(string(s, n + 1), ","));
DrvPathWithOutputs parseDrvPathWithOutputs(const string& s) {
size_t n = s.find("!");
return n == s.npos ? DrvPathWithOutputs(s, std::set<string>())
: DrvPathWithOutputs(string(s, 0, n),
tokenizeString<std::set<string> >(
string(s, n + 1), ","));
}
Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs)
{
return outputs.empty()
? drvPath
: drvPath + "!" + concatStringsSep(",", outputs);
Path makeDrvPathWithOutputs(const Path& drvPath,
const std::set<string>& outputs) {
return outputs.empty() ? drvPath
: drvPath + "!" + concatStringsSep(",", outputs);
}
bool wantOutput(const string & output, const std::set<string> & wanted)
{
return wanted.empty() || wanted.find(output) != wanted.end();
bool wantOutput(const string& output, const std::set<string>& wanted) {
return wanted.empty() || wanted.find(output) != wanted.end();
}
PathSet BasicDerivation::outputPaths() const
{
PathSet paths;
for (auto & i : outputs)
paths.insert(i.second.path);
return paths;
PathSet BasicDerivation::outputPaths() const {
PathSet paths;
for (auto& i : outputs) paths.insert(i.second.path);
return paths;
}
Source& readDerivation(Source& in, Store& store, BasicDerivation& drv) {
drv.outputs.clear();
auto nr = readNum<size_t>(in);
for (size_t n = 0; n < nr; n++) {
auto name = readString(in);
DerivationOutput o;
in >> o.path >> o.hashAlgo >> o.hash;
store.assertStorePath(o.path);
drv.outputs[name] = o;
}
Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
{
drv.outputs.clear();
auto nr = readNum<size_t>(in);
for (size_t n = 0; n < nr; n++) {
auto name = readString(in);
DerivationOutput o;
in >> o.path >> o.hashAlgo >> o.hash;
store.assertStorePath(o.path);
drv.outputs[name] = o;
}
drv.inputSrcs = readStorePaths<PathSet>(store, in);
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
drv.inputSrcs = readStorePaths<PathSet>(store, in);
in >> drv.platform >> drv.builder;
drv.args = readStrings<Strings>(in);
nr = readNum<size_t>(in);
for (size_t n = 0; n < nr; n++) {
auto key = readString(in);
auto value = readString(in);
drv.env[key] = value;
}
nr = readNum<size_t>(in);
for (size_t n = 0; n < nr; n++) {
auto key = readString(in);
auto value = readString(in);
drv.env[key] = value;
}
return in;
return in;
}
Sink & operator << (Sink & out, const BasicDerivation & drv)
{
out << drv.outputs.size();
for (auto & i : drv.outputs)
out << i.first << i.second.path << i.second.hashAlgo << i.second.hash;
out << drv.inputSrcs << drv.platform << drv.builder << drv.args;
out << drv.env.size();
for (auto & i : drv.env)
out << i.first << i.second;
return out;
Sink& operator<<(Sink& out, const BasicDerivation& drv) {
out << drv.outputs.size();
for (auto& i : drv.outputs)
out << i.first << i.second.path << i.second.hashAlgo << i.second.hash;
out << drv.inputSrcs << drv.platform << drv.builder << drv.args;
out << drv.env.size();
for (auto& i : drv.env) out << i.first << i.second;
return out;
}
std::string hashPlaceholder(const std::string & outputName)
{
// FIXME: memoize?
return "/" + hashString(htSHA256, "nix-output:" + outputName).to_string(Base32, false);
std::string hashPlaceholder(const std::string& outputName) {
// FIXME: memoize?
return "/" + hashString(htSHA256, "nix-output:" + outputName)
.to_string(Base32, false);
}
}
} // namespace nix

View file

@ -1,36 +1,28 @@
#pragma once
#include "types.hh"
#include <map>
#include "hash.hh"
#include "store-api.hh"
#include <map>
#include "types.hh"
namespace nix {
/* Extension of derivations in the Nix store. */
const string drvExtension = ".drv";
/* Abstract syntax of derivations. */
struct DerivationOutput
{
Path path;
string hashAlgo; /* hash used for expected hash computation */
string hash; /* expected hash, may be null */
DerivationOutput()
{
}
DerivationOutput(Path path, string hashAlgo, string hash)
{
this->path = path;
this->hashAlgo = hashAlgo;
this->hash = hash;
}
void parseHashInfo(bool & recursive, Hash & hash) const;
struct DerivationOutput {
Path path;
string hashAlgo; /* hash used for expected hash computation */
string hash; /* expected hash, may be null */
DerivationOutput() {}
DerivationOutput(Path path, string hashAlgo, string hash) {
this->path = path;
this->hashAlgo = hashAlgo;
this->hash = hash;
}
void parseHashInfo(bool& recursive, Hash& hash) const;
};
typedef std::map<string, DerivationOutput> DerivationOutputs;
@ -41,77 +33,73 @@ typedef std::map<Path, StringSet> DerivationInputs;
typedef std::map<string, string> StringPairs;
struct BasicDerivation
{
DerivationOutputs outputs; /* keyed on symbolic IDs */
PathSet inputSrcs; /* inputs that are sources */
string platform;
Path builder;
Strings args;
StringPairs env;
struct BasicDerivation {
DerivationOutputs outputs; /* keyed on symbolic IDs */
PathSet inputSrcs; /* inputs that are sources */
string platform;
Path builder;
Strings args;
StringPairs env;
virtual ~BasicDerivation() { };
virtual ~BasicDerivation(){};
/* Return the path corresponding to the output identifier `id' in
the given derivation. */
Path findOutput(const string & id) const;
/* Return the path corresponding to the output identifier `id' in
the given derivation. */
Path findOutput(const string& id) const;
bool isBuiltin() const;
bool isBuiltin() const;
/* Return true iff this is a fixed-output derivation. */
bool isFixedOutput() const;
/* Return the output paths of a derivation. */
PathSet outputPaths() const;
/* Return true iff this is a fixed-output derivation. */
bool isFixedOutput() const;
/* Return the output paths of a derivation. */
PathSet outputPaths() const;
};
struct Derivation : BasicDerivation
{
DerivationInputs inputDrvs; /* inputs that are sub-derivations */
struct Derivation : BasicDerivation {
DerivationInputs inputDrvs; /* inputs that are sub-derivations */
/* Print a derivation. */
std::string unparse() const;
/* Print a derivation. */
std::string unparse() const;
};
class Store;
/* Write a derivation to the Nix store, and return its path. */
Path writeDerivation(ref<Store> store,
const Derivation & drv, const string & name, RepairFlag repair = NoRepair);
Path writeDerivation(ref<Store> store, const Derivation& drv,
const string& name, RepairFlag repair = NoRepair);
/* Read a derivation from a file. */
Derivation readDerivation(const Path & drvPath);
Derivation readDerivation(const Path& drvPath);
/* Check whether a file name ends with the extension for
derivations. */
bool isDerivation(const string & fileName);
bool isDerivation(const string& fileName);
Hash hashDerivationModulo(Store & store, Derivation drv);
Hash hashDerivationModulo(Store& store, Derivation drv);
/* Memoisation of hashDerivationModulo(). */
typedef std::map<Path, Hash> DrvHashes;
extern DrvHashes drvHashes; // FIXME: global, not thread-safe
extern DrvHashes drvHashes; // FIXME: global, not thread-safe
/* Split a string specifying a derivation and a set of outputs
(/nix/store/hash-foo!out1,out2,...) into the derivation path and
the outputs. */
typedef std::pair<string, std::set<string> > DrvPathWithOutputs;
DrvPathWithOutputs parseDrvPathWithOutputs(const string & s);
DrvPathWithOutputs parseDrvPathWithOutputs(const string& s);
Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs);
Path makeDrvPathWithOutputs(const Path& drvPath,
const std::set<string>& outputs);
bool wantOutput(const string & output, const std::set<string> & wanted);
bool wantOutput(const string& output, const std::set<string>& wanted);
struct Source;
struct Sink;
Source & readDerivation(Source & in, Store & store, BasicDerivation & drv);
Sink & operator << (Sink & out, const BasicDerivation & drv);
Source& readDerivation(Source& in, Store& store, BasicDerivation& drv);
Sink& operator<<(Sink& out, const BasicDerivation& drv);
std::string hashPlaceholder(const std::string & outputName);
std::string hashPlaceholder(const std::string& outputName);
}
} // namespace nix

File diff suppressed because it is too large Load diff

View file

@ -1,120 +1,118 @@
#pragma once
#include "types.hh"
#include "hash.hh"
#include "globals.hh"
#include <string>
#include <future>
#include <string>
#include "globals.hh"
#include "hash.hh"
#include "types.hh"
namespace nix {
struct DownloadSettings : Config
{
Setting<bool> enableHttp2{this, true, "http2",
"Whether to enable HTTP/2 support."};
struct DownloadSettings : Config {
Setting<bool> enableHttp2{this, true, "http2",
"Whether to enable HTTP/2 support."};
Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
"String appended to the user agent in HTTP requests."};
Setting<std::string> userAgentSuffix{
this, "", "user-agent-suffix",
"String appended to the user agent in HTTP requests."};
Setting<size_t> httpConnections{this, 25, "http-connections",
"Number of parallel HTTP connections.",
{"binary-caches-parallel-connections"}};
Setting<size_t> httpConnections{this,
25,
"http-connections",
"Number of parallel HTTP connections.",
{"binary-caches-parallel-connections"}};
Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
"Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};
Setting<unsigned long> connectTimeout{
this, 0, "connect-timeout",
"Timeout for connecting to servers during downloads. 0 means use curl's "
"builtin default."};
Setting<unsigned long> stalledDownloadTimeout{this, 300, "stalled-download-timeout",
"Timeout (in seconds) for receiving data from servers during download. Nix cancels idle downloads after this timeout's duration."};
Setting<unsigned long> stalledDownloadTimeout{
this, 300, "stalled-download-timeout",
"Timeout (in seconds) for receiving data from servers during download. "
"Nix cancels idle downloads after this timeout's duration."};
Setting<unsigned int> tries{this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};
Setting<unsigned int> tries{
this, 5, "download-attempts",
"How often Nix will attempt to download a file before giving up."};
};
extern DownloadSettings downloadSettings;
struct DownloadRequest
{
std::string uri;
std::string expectedETag;
bool verifyTLS = true;
bool head = false;
size_t tries = downloadSettings.tries;
unsigned int baseRetryTimeMs = 250;
ActivityId parentAct;
bool decompress = true;
std::shared_ptr<std::string> data;
std::string mimeType;
std::function<void(char *, size_t)> dataCallback;
struct DownloadRequest {
std::string uri;
std::string expectedETag;
bool verifyTLS = true;
bool head = false;
size_t tries = downloadSettings.tries;
unsigned int baseRetryTimeMs = 250;
ActivityId parentAct;
bool decompress = true;
std::shared_ptr<std::string> data;
std::string mimeType;
std::function<void(char*, size_t)> dataCallback;
DownloadRequest(const std::string & uri)
: uri(uri), parentAct(getCurActivity()) { }
DownloadRequest(const std::string& uri)
: uri(uri), parentAct(getCurActivity()) {}
std::string verb()
{
return data ? "upload" : "download";
}
std::string verb() { return data ? "upload" : "download"; }
};
struct DownloadResult
{
bool cached = false;
std::string etag;
std::string effectiveUri;
std::shared_ptr<std::string> data;
uint64_t bodySize = 0;
struct DownloadResult {
bool cached = false;
std::string etag;
std::string effectiveUri;
std::shared_ptr<std::string> data;
uint64_t bodySize = 0;
};
struct CachedDownloadRequest
{
std::string uri;
bool unpack = false;
std::string name;
Hash expectedHash;
unsigned int ttl = settings.tarballTtl;
struct CachedDownloadRequest {
std::string uri;
bool unpack = false;
std::string name;
Hash expectedHash;
unsigned int ttl = settings.tarballTtl;
CachedDownloadRequest(const std::string & uri)
: uri(uri) { }
CachedDownloadRequest(const std::string& uri) : uri(uri) {}
};
struct CachedDownloadResult
{
// Note: 'storePath' may be different from 'path' when using a
// chroot store.
Path storePath;
Path path;
std::optional<std::string> etag;
std::string effectiveUri;
struct CachedDownloadResult {
// Note: 'storePath' may be different from 'path' when using a
// chroot store.
Path storePath;
Path path;
std::optional<std::string> etag;
std::string effectiveUri;
};
class Store;
struct Downloader
{
virtual ~Downloader() { }
struct Downloader {
virtual ~Downloader() {}
/* Enqueue a download request, returning a future to the result of
the download. The future may throw a DownloadError
exception. */
virtual void enqueueDownload(const DownloadRequest & request,
Callback<DownloadResult> callback) = 0;
/* Enqueue a download request, returning a future to the result of
the download. The future may throw a DownloadError
exception. */
virtual void enqueueDownload(const DownloadRequest& request,
Callback<DownloadResult> callback) = 0;
std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);
std::future<DownloadResult> enqueueDownload(const DownloadRequest& request);
/* Synchronously download a file. */
DownloadResult download(const DownloadRequest & request);
/* Synchronously download a file. */
DownloadResult download(const DownloadRequest& request);
/* Download a file, writing its data to a sink. The sink will be
invoked on the thread of the caller. */
void download(DownloadRequest && request, Sink & sink);
/* Download a file, writing its data to a sink. The sink will be
invoked on the thread of the caller. */
void download(DownloadRequest&& request, Sink& sink);
/* Check if the specified file is already in ~/.cache/nix/tarballs
and is more recent than tarball-ttl seconds. Otherwise,
use the recorded ETag to verify if the server has a more
recent version, and if so, download it to the Nix store. */
CachedDownloadResult downloadCached(ref<Store> store, const CachedDownloadRequest & request);
/* Check if the specified file is already in ~/.cache/nix/tarballs
and is more recent than tarball-ttl seconds. Otherwise,
use the recorded ETag to verify if the server has a more
recent version, and if so, download it to the Nix store. */
CachedDownloadResult downloadCached(ref<Store> store,
const CachedDownloadRequest& request);
enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};
/* Return a shared Downloader object. Using this object is preferred
@ -124,15 +122,13 @@ ref<Downloader> getDownloader();
/* Return a new Downloader object. */
ref<Downloader> makeDownloader();
class DownloadError : public Error
{
public:
Downloader::Error error;
DownloadError(Downloader::Error error, const FormatOrString & fs)
: Error(fs), error(error)
{ }
class DownloadError : public Error {
public:
Downloader::Error error;
DownloadError(Downloader::Error error, const FormatOrString& fs)
: Error(fs), error(error) {}
};
bool isUri(const string & s);
bool isUri(const string& s);
}
} // namespace nix

View file

@ -1,106 +1,100 @@
#include "store-api.hh"
#include "archive.hh"
#include "worker-protocol.hh"
#include <algorithm>
#include "archive.hh"
#include "store-api.hh"
#include "worker-protocol.hh"
namespace nix {
struct HashAndWriteSink : Sink
{
Sink & writeSink;
HashSink hashSink;
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
{
}
virtual void operator () (const unsigned char * data, size_t len)
{
writeSink(data, len);
hashSink(data, len);
}
Hash currentHash()
{
return hashSink.currentHash().first;
}
struct HashAndWriteSink : Sink {
Sink& writeSink;
HashSink hashSink;
HashAndWriteSink(Sink& writeSink)
: writeSink(writeSink), hashSink(htSHA256) {}
virtual void operator()(const unsigned char* data, size_t len) {
writeSink(data, len);
hashSink(data, len);
}
Hash currentHash() { return hashSink.currentHash().first; }
};
void Store::exportPaths(const Paths & paths, Sink & sink)
{
Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end()));
std::reverse(sorted.begin(), sorted.end());
void Store::exportPaths(const Paths& paths, Sink& sink) {
Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end()));
std::reverse(sorted.begin(), sorted.end());
std::string doneLabel("paths exported");
//logger->incExpected(doneLabel, sorted.size());
std::string doneLabel("paths exported");
// logger->incExpected(doneLabel, sorted.size());
for (auto & path : sorted) {
//Activity act(*logger, lvlInfo, format("exporting path '%s'") % path);
sink << 1;
exportPath(path, sink);
//logger->incProgress(doneLabel);
}
for (auto& path : sorted) {
// Activity act(*logger, lvlInfo, format("exporting path '%s'") % path);
sink << 1;
exportPath(path, sink);
// logger->incProgress(doneLabel);
}
sink << 0;
sink << 0;
}
void Store::exportPath(const Path & path, Sink & sink)
{
auto info = queryPathInfo(path);
void Store::exportPath(const Path& path, Sink& sink) {
auto info = queryPathInfo(path);
HashAndWriteSink hashAndWriteSink(sink);
HashAndWriteSink hashAndWriteSink(sink);
narFromPath(path, hashAndWriteSink);
narFromPath(path, hashAndWriteSink);
/* Refuse to export paths that have changed. This prevents
filesystem corruption from spreading to other machines.
Don't complain if the stored hash is zero (unknown). */
Hash hash = hashAndWriteSink.currentHash();
if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
throw Error(format("hash of path '%1%' has changed from '%2%' to '%3%'!") % path
% info->narHash.to_string() % hash.to_string());
/* Refuse to export paths that have changed. This prevents
filesystem corruption from spreading to other machines.
Don't complain if the stored hash is zero (unknown). */
Hash hash = hashAndWriteSink.currentHash();
if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
throw Error(format("hash of path '%1%' has changed from '%2%' to '%3%'!") %
path % info->narHash.to_string() % hash.to_string());
hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0;
hashAndWriteSink << exportMagic << path << info->references << info->deriver
<< 0;
}
Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, CheckSigsFlag checkSigs)
{
Paths res;
while (true) {
auto n = readNum<uint64_t>(source);
if (n == 0) break;
if (n != 1) throw Error("input doesn't look like something created by 'nix-store --export'");
Paths Store::importPaths(Source& source, std::shared_ptr<FSAccessor> accessor,
CheckSigsFlag checkSigs) {
Paths res;
while (true) {
auto n = readNum<uint64_t>(source);
if (n == 0) break;
if (n != 1)
throw Error(
"input doesn't look like something created by 'nix-store --export'");
/* Extract the NAR from the source. */
TeeSink tee(source);
parseDump(tee, tee.source);
/* Extract the NAR from the source. */
TeeSink tee(source);
parseDump(tee, tee.source);
uint32_t magic = readInt(source);
if (magic != exportMagic)
throw Error("Nix archive cannot be imported; wrong format");
uint32_t magic = readInt(source);
if (magic != exportMagic)
throw Error("Nix archive cannot be imported; wrong format");
ValidPathInfo info;
ValidPathInfo info;
info.path = readStorePath(*this, source);
info.path = readStorePath(*this, source);
//Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path);
// Activity act(*logger, lvlInfo, format("importing path '%s'") %
// info.path);
info.references = readStorePaths<PathSet>(*this, source);
info.references = readStorePaths<PathSet>(*this, source);
info.deriver = readString(source);
if (info.deriver != "") assertStorePath(info.deriver);
info.deriver = readString(source);
if (info.deriver != "") assertStorePath(info.deriver);
info.narHash = hashString(htSHA256, *tee.source.data);
info.narSize = tee.source.data->size();
info.narHash = hashString(htSHA256, *tee.source.data);
info.narSize = tee.source.data->size();
// Ignore optional legacy signature.
if (readInt(source) == 1)
readString(source);
// Ignore optional legacy signature.
if (readInt(source) == 1) readString(source);
addToStore(info, tee.source.data, NoRepair, checkSigs, accessor);
addToStore(info, tee.source.data, NoRepair, checkSigs, accessor);
res.push_back(info.path);
}
res.push_back(info.path);
}
return res;
return res;
}
}
} // namespace nix

View file

@ -6,28 +6,26 @@ namespace nix {
/* An abstract class for accessing a filesystem-like structure, such
as a (possibly remote) Nix store or the contents of a NAR file. */
class FSAccessor
{
public:
enum Type { tMissing, tRegular, tSymlink, tDirectory };
class FSAccessor {
public:
enum Type { tMissing, tRegular, tSymlink, tDirectory };
struct Stat
{
Type type = tMissing;
uint64_t fileSize = 0; // regular files only
bool isExecutable = false; // regular files only
uint64_t narOffset = 0; // regular files only
};
struct Stat {
Type type = tMissing;
uint64_t fileSize = 0; // regular files only
bool isExecutable = false; // regular files only
uint64_t narOffset = 0; // regular files only
};
virtual ~FSAccessor() { }
virtual ~FSAccessor() {}
virtual Stat stat(const Path & path) = 0;
virtual Stat stat(const Path& path) = 0;
virtual StringSet readDirectory(const Path & path) = 0;
virtual StringSet readDirectory(const Path& path) = 0;
virtual std::string readFile(const Path & path) = 0;
virtual std::string readFile(const Path& path) = 0;
virtual std::string readLink(const Path & path) = 0;
virtual std::string readLink(const Path& path) = 0;
};
}
} // namespace nix

File diff suppressed because it is too large Load diff

View file

@ -1,17 +1,14 @@
#include "globals.hh"
#include "util.hh"
#include "archive.hh"
#include "args.hh"
#include <dlfcn.h>
#include <algorithm>
#include <map>
#include <thread>
#include <dlfcn.h>
#include "archive.hh"
#include "args.hh"
#include "util.hh"
namespace nix {
/* The default location of the daemon socket, relative to nixStateDir.
The socket is in a directory to allow you to control access to the
Nix daemon by setting the mode/ownership of the directory
@ -21,9 +18,9 @@ namespace nix {
/* chroot-like behavior from Apple's sandbox */
#if __APPLE__
#define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
#define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
#else
#define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
#define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
#endif
Settings settings;
@ -31,157 +28,163 @@ Settings settings;
static GlobalConfig::Register r1(&settings);
Settings::Settings()
: nixPrefix(NIX_PREFIX)
, nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
, nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
, nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)))
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)))
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)))
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)))
, nixManDir(canonPath(NIX_MAN_DIR))
, nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
{
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
: nixPrefix(NIX_PREFIX),
nixStore(canonPath(
getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)))),
nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR))),
nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR))),
nixStateDir(canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR))),
nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR))),
nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR))),
nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR))),
nixManDir(canonPath(NIX_MAN_DIR)),
nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH)) {
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", ""));
if (caFile == "") {
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathExists(fn)) {
caFile = fn;
break;
}
}
caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", ""));
if (caFile == "") {
for (auto& fn :
{"/etc/ssl/certs/ca-certificates.crt",
"/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
if (pathExists(fn)) {
caFile = fn;
break;
}
}
/* Backwards compatibility. */
auto s = getEnv("NIX_REMOTE_SYSTEMS");
if (s != "") {
Strings ss;
for (auto & p : tokenizeString<Strings>(s, ":"))
ss.push_back("@" + p);
builders = concatStringsSep(" ", ss);
}
/* Backwards compatibility. */
auto s = getEnv("NIX_REMOTE_SYSTEMS");
if (s != "") {
Strings ss;
for (auto& p : tokenizeString<Strings>(s, ":")) ss.push_back("@" + p);
builders = concatStringsSep(" ", ss);
}
#if defined(__linux__) && defined(SANDBOX_SHELL)
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
#endif
allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
allowedImpureHostPrefixes =
tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
}
void loadConfFile()
{
globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf");
void loadConfFile() {
globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf");
/* We only want to send overrides to the daemon, i.e. stuff from
~/.nix/nix.conf or the command line. */
globalConfig.resetOverriden();
/* We only want to send overrides to the daemon, i.e. stuff from
~/.nix/nix.conf or the command line. */
globalConfig.resetOverriden();
auto dirs = getConfigDirs();
// Iterate over them in reverse so that the ones appearing first in the path take priority
for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) {
globalConfig.applyConfigFile(*dir + "/nix/nix.conf");
}
auto dirs = getConfigDirs();
// Iterate over them in reverse so that the ones appearing first in the path
// take priority
for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) {
globalConfig.applyConfigFile(*dir + "/nix/nix.conf");
}
}
unsigned int Settings::getDefaultCores()
{
return std::max(1U, std::thread::hardware_concurrency());
unsigned int Settings::getDefaultCores() {
return std::max(1U, std::thread::hardware_concurrency());
}
StringSet Settings::getDefaultSystemFeatures()
{
/* For backwards compatibility, accept some "features" that are
used in Nixpkgs to route builds to certain machines but don't
actually require anything special on the machines. */
StringSet features{"nixos-test", "benchmark", "big-parallel"};
StringSet Settings::getDefaultSystemFeatures() {
/* For backwards compatibility, accept some "features" that are
used in Nixpkgs to route builds to certain machines but don't
actually require anything special on the machines. */
StringSet features{"nixos-test", "benchmark", "big-parallel"};
#if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0)
features.insert("kvm");
#endif
#if __linux__
if (access("/dev/kvm", R_OK | W_OK) == 0) features.insert("kvm");
#endif
return features;
return features;
}
const string nixVersion = PACKAGE_VERSION;
template<> void BaseSetting<SandboxMode>::set(const std::string & str)
{
if (str == "true") value = smEnabled;
else if (str == "relaxed") value = smRelaxed;
else if (str == "false") value = smDisabled;
else throw UsageError("option '%s' has invalid value '%s'", name, str);
template <>
void BaseSetting<SandboxMode>::set(const std::string& str) {
if (str == "true")
value = smEnabled;
else if (str == "relaxed")
value = smRelaxed;
else if (str == "false")
value = smDisabled;
else
throw UsageError("option '%s' has invalid value '%s'", name, str);
}
template<> std::string BaseSetting<SandboxMode>::to_string()
{
if (value == smEnabled) return "true";
else if (value == smRelaxed) return "relaxed";
else if (value == smDisabled) return "false";
else abort();
template <>
std::string BaseSetting<SandboxMode>::to_string() {
if (value == smEnabled)
return "true";
else if (value == smRelaxed)
return "relaxed";
else if (value == smDisabled)
return "false";
else
abort();
}
template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
{
AbstractSetting::toJSON(out);
template <>
void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder& out) {
AbstractSetting::toJSON(out);
}
template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
{
args.mkFlag()
.longName(name)
.description("Enable sandboxing.")
.handler([=](std::vector<std::string> ss) { override(smEnabled); })
.category(category);
args.mkFlag()
.longName("no-" + name)
.description("Disable sandboxing.")
.handler([=](std::vector<std::string> ss) { override(smDisabled); })
.category(category);
args.mkFlag()
.longName("relaxed-" + name)
.description("Enable sandboxing, but allow builds to disable it.")
.handler([=](std::vector<std::string> ss) { override(smRelaxed); })
.category(category);
template <>
void BaseSetting<SandboxMode>::convertToArg(Args& args,
const std::string& category) {
args.mkFlag()
.longName(name)
.description("Enable sandboxing.")
.handler([=](std::vector<std::string> ss) { override(smEnabled); })
.category(category);
args.mkFlag()
.longName("no-" + name)
.description("Disable sandboxing.")
.handler([=](std::vector<std::string> ss) { override(smDisabled); })
.category(category);
args.mkFlag()
.longName("relaxed-" + name)
.description("Enable sandboxing, but allow builds to disable it.")
.handler([=](std::vector<std::string> ss) { override(smRelaxed); })
.category(category);
}
void MaxBuildJobsSetting::set(const std::string & str)
{
if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
else if (!string2Int(str, value))
throw UsageError("configuration setting '%s' should be 'auto' or an integer", name);
void MaxBuildJobsSetting::set(const std::string& str) {
if (str == "auto")
value = std::max(1U, std::thread::hardware_concurrency());
else if (!string2Int(str, value))
throw UsageError(
"configuration setting '%s' should be 'auto' or an integer", name);
}
void initPlugins()
{
for (const auto & pluginFile : settings.pluginFiles.get()) {
Paths pluginFiles;
try {
auto ents = readDirectory(pluginFile);
for (const auto & ent : ents)
pluginFiles.emplace_back(pluginFile + "/" + ent.name);
} catch (SysError & e) {
if (e.errNo != ENOTDIR)
throw;
pluginFiles.emplace_back(pluginFile);
}
for (const auto & file : pluginFiles) {
/* handle is purposefully leaked as there may be state in the
DSO needed by the action of the plugin. */
void *handle =
dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!handle)
throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
}
void initPlugins() {
for (const auto& pluginFile : settings.pluginFiles.get()) {
Paths pluginFiles;
try {
auto ents = readDirectory(pluginFile);
for (const auto& ent : ents)
pluginFiles.emplace_back(pluginFile + "/" + ent.name);
} catch (SysError& e) {
if (e.errNo != ENOTDIR) throw;
pluginFiles.emplace_back(pluginFile);
}
for (const auto& file : pluginFiles) {
/* handle is purposefully leaked as there may be state in the
DSO needed by the action of the plugin. */
void* handle = dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);
if (!handle)
throw Error("could not dynamically open plugin file '%s': %s", file,
dlerror());
}
}
/* Since plugins can add settings, try to re-apply previously
unknown settings. */
globalConfig.reapplyUnknownSettings();
globalConfig.warnUnknownSettings();
/* Since plugins can add settings, try to re-apply previously
unknown settings. */
globalConfig.reapplyUnknownSettings();
globalConfig.warnUnknownSettings();
}
}
} // namespace nix

View file

@ -1,361 +1,475 @@
#pragma once
#include "types.hh"
#include "config.hh"
#include "util.hh"
#include <map>
#include <limits>
#include <sys/types.h>
#include <limits>
#include <map>
#include "config.hh"
#include "types.hh"
#include "util.hh"
namespace nix {
typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;
struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
{
MaxBuildJobsSetting(Config * options,
unsigned int def,
const std::string & name,
const std::string & description,
const std::set<std::string> & aliases = {})
: BaseSetting<unsigned int>(def, name, description, aliases)
{
options->addSetting(this);
}
struct MaxBuildJobsSetting : public BaseSetting<unsigned int> {
MaxBuildJobsSetting(Config* options, unsigned int def,
const std::string& name, const std::string& description,
const std::set<std::string>& aliases = {})
: BaseSetting<unsigned int>(def, name, description, aliases) {
options->addSetting(this);
}
void set(const std::string & str) override;
void set(const std::string& str) override;
};
class Settings : public Config {
unsigned int getDefaultCores();
unsigned int getDefaultCores();
StringSet getDefaultSystemFeatures();
StringSet getDefaultSystemFeatures();
public:
Settings();
public:
Path nixPrefix;
Settings();
/* The directory where we store sources and derived files. */
Path nixStore;
Path nixPrefix;
Path nixDataDir; /* !!! fix */
/* The directory where we store sources and derived files. */
Path nixStore;
/* The directory where we log various operations. */
Path nixLogDir;
Path nixDataDir; /* !!! fix */
/* The directory where state is stored. */
Path nixStateDir;
/* The directory where we log various operations. */
Path nixLogDir;
/* The directory where configuration files are stored. */
Path nixConfDir;
/* The directory where state is stored. */
Path nixStateDir;
/* The directory where internal helper programs are stored. */
Path nixLibexecDir;
/* The directory where configuration files are stored. */
Path nixConfDir;
/* The directory where the main programs are stored. */
Path nixBinDir;
/* The directory where internal helper programs are stored. */
Path nixLibexecDir;
/* The directory where the man pages are stored. */
Path nixManDir;
/* The directory where the main programs are stored. */
Path nixBinDir;
/* File name of the socket the daemon listens to. */
Path nixDaemonSocketFile;
/* The directory where the man pages are stored. */
Path nixManDir;
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE", "auto"), "store",
"The default Nix store to use."};
/* File name of the socket the daemon listens to. */
Path nixDaemonSocketFile;
Setting<bool> keepFailed{
this, false, "keep-failed",
"Whether to keep temporary directories of failed builds."};
Setting<std::string> storeUri{this, getEnv("NIX_REMOTE", "auto"), "store",
"The default Nix store to use."};
Setting<bool> keepGoing{
this, false, "keep-going",
"Whether to keep building derivations when another build fails."};
Setting<bool> keepFailed{this, false, "keep-failed",
"Whether to keep temporary directories of failed builds."};
Setting<bool> tryFallback{
this,
false,
"fallback",
"Whether to fall back to building when substitution fails.",
{"build-fallback"}};
Setting<bool> keepGoing{this, false, "keep-going",
"Whether to keep building derivations when another build fails."};
/* Whether to show build log output in real time. */
bool verboseBuild = true;
Setting<bool> tryFallback{this, false, "fallback",
"Whether to fall back to building when substitution fails.",
{"build-fallback"}};
Setting<size_t> logLines{
this, 10, "log-lines",
"If verbose-build is false, the number of lines of the tail of "
"the log to show if a build fails."};
/* Whether to show build log output in real time. */
bool verboseBuild = true;
MaxBuildJobsSetting maxBuildJobs{this,
1,
"max-jobs",
"Maximum number of parallel build jobs. "
"\"auto\" means use number of cores.",
{"build-max-jobs"}};
Setting<size_t> logLines{this, 10, "log-lines",
"If verbose-build is false, the number of lines of the tail of "
"the log to show if a build fails."};
Setting<unsigned int> buildCores{
this,
getDefaultCores(),
"cores",
"Number of CPU cores to utilize in parallel within a build, "
"i.e. by passing this number to Make via '-j'. 0 means that the "
"number of actual CPU cores on the local host ought to be "
"auto-detected.",
{"build-cores"}};
MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
"Maximum number of parallel build jobs. \"auto\" means use number of cores.",
{"build-max-jobs"}};
/* Read-only mode. Don't copy stuff to the store, don't change
the database. */
bool readOnlyMode = false;
Setting<unsigned int> buildCores{this, getDefaultCores(), "cores",
"Number of CPU cores to utilize in parallel within a build, "
"i.e. by passing this number to Make via '-j'. 0 means that the "
"number of actual CPU cores on the local host ought to be "
"auto-detected.", {"build-cores"}};
Setting<std::string> thisSystem{this, SYSTEM, "system",
"The canonical Nix system name."};
/* Read-only mode. Don't copy stuff to the store, don't change
the database. */
bool readOnlyMode = false;
Setting<time_t> maxSilentTime{
this,
0,
"max-silent-time",
"The maximum time in seconds that a builer can go without "
"producing any output on stdout/stderr before it is killed. "
"0 means infinity.",
{"build-max-silent-time"}};
Setting<std::string> thisSystem{this, SYSTEM, "system",
"The canonical Nix system name."};
Setting<time_t> buildTimeout{
this,
0,
"timeout",
"The maximum duration in seconds that a builder can run. "
"0 means infinity.",
{"build-timeout"}};
Setting<time_t> maxSilentTime{this, 0, "max-silent-time",
"The maximum time in seconds that a builer can go without "
"producing any output on stdout/stderr before it is killed. "
"0 means infinity.",
{"build-max-silent-time"}};
PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote",
"build-hook",
"The path of the helper program that executes builds "
"to remote machines."};
Setting<time_t> buildTimeout{this, 0, "timeout",
"The maximum duration in seconds that a builder can run. "
"0 means infinity.", {"build-timeout"}};
Setting<std::string> builders{this, "@" + nixConfDir + "/machines",
"builders",
"A semicolon-separated list of build machines, "
"in the format of nix.machines."};
PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", "build-hook",
"The path of the helper program that executes builds to remote machines."};
Setting<bool> buildersUseSubstitutes{
this, false, "builders-use-substitutes",
"Whether build machines should use their own substitutes for obtaining "
"build dependencies if possible, rather than waiting for this host to "
"upload them."};
Setting<std::string> builders{this, "@" + nixConfDir + "/machines", "builders",
"A semicolon-separated list of build machines, in the format of nix.machines."};
Setting<off_t> reservedSize{
this, 8 * 1024 * 1024, "gc-reserved-space",
"Amount of reserved disk space for the garbage collector."};
Setting<bool> buildersUseSubstitutes{this, false, "builders-use-substitutes",
"Whether build machines should use their own substitutes for obtaining "
"build dependencies if possible, rather than waiting for this host to "
"upload them."};
Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
"Whether SQLite should use fsync()."};
Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
"Amount of reserved disk space for the garbage collector."};
Setting<bool> useSQLiteWAL{this, true, "use-sqlite-wal",
"Whether SQLite should use WAL mode."};
Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
"Whether SQLite should use fsync()."};
Setting<bool> syncBeforeRegistering{
this, false, "sync-before-registering",
"Whether to call sync() before registering a path as valid."};
Setting<bool> useSQLiteWAL{this, true, "use-sqlite-wal",
"Whether SQLite should use WAL mode."};
Setting<bool> useSubstitutes{this,
true,
"substitute",
"Whether to use substitutes.",
{"build-use-substitutes"}};
Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
"Whether to call sync() before registering a path as valid."};
Setting<std::string> buildUsersGroup{
this, "", "build-users-group",
"The Unix group that contains the build users."};
Setting<bool> useSubstitutes{this, true, "substitute",
"Whether to use substitutes.",
{"build-use-substitutes"}};
Setting<bool> impersonateLinux26{
this,
false,
"impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};
Setting<std::string> buildUsersGroup{this, "", "build-users-group",
"The Unix group that contains the build users."};
Setting<bool> keepLog{this,
true,
"keep-build-log",
"Whether to store build logs.",
{"build-keep-log"}};
Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
"Whether to impersonate a Linux 2.6 machine on newer kernels.",
{"build-impersonate-linux-26"}};
Setting<bool> compressLog{this,
true,
"compress-build-log",
"Whether to compress logs.",
{"build-compress-log"}};
Setting<bool> keepLog{this, true, "keep-build-log",
"Whether to store build logs.",
{"build-keep-log"}};
Setting<unsigned long> maxLogSize{
this,
0,
"max-build-log-size",
"Maximum number of bytes a builder can write to stdout/stderr "
"before being killed (0 means no limit).",
{"build-max-log-size"}};
Setting<bool> compressLog{this, true, "compress-build-log",
"Whether to compress logs.",
{"build-compress-log"}};
/* When buildRepeat > 0 and verboseBuild == true, whether to print
repeated builds (i.e. builds other than the first one) to
stderr. Hack to prevent Hydra logs from being polluted. */
bool printRepeatedBuilds = true;
Setting<unsigned long> maxLogSize{this, 0, "max-build-log-size",
"Maximum number of bytes a builder can write to stdout/stderr "
"before being killed (0 means no limit).",
{"build-max-log-size"}};
Setting<unsigned int> pollInterval{
this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
/* When buildRepeat > 0 and verboseBuild == true, whether to print
repeated builds (i.e. builds other than the first one) to
stderr. Hack to prevent Hydra logs from being polluted. */
bool printRepeatedBuilds = true;
Setting<bool> checkRootReachability{
this, false, "gc-check-reachability",
"Whether to check if new GC roots can in fact be found by the "
"garbage collector."};
Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
"How often (in seconds) to poll for locks."};
Setting<bool> gcKeepOutputs{
this,
false,
"keep-outputs",
"Whether the garbage collector should keep outputs of live derivations.",
{"gc-keep-outputs"}};
Setting<bool> checkRootReachability{this, false, "gc-check-reachability",
"Whether to check if new GC roots can in fact be found by the "
"garbage collector."};
Setting<bool> gcKeepDerivations{
this,
true,
"keep-derivations",
"Whether the garbage collector should keep derivers of live paths.",
{"gc-keep-derivations"}};
Setting<bool> gcKeepOutputs{this, false, "keep-outputs",
"Whether the garbage collector should keep outputs of live derivations.",
{"gc-keep-outputs"}};
Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
"Whether to automatically replace files with "
"identical contents with hard links."};
Setting<bool> gcKeepDerivations{this, true, "keep-derivations",
"Whether the garbage collector should keep derivers of live paths.",
{"gc-keep-derivations"}};
Setting<bool> envKeepDerivations{
this,
false,
"keep-env-derivations",
"Whether to add derivations as a dependency of user environments "
"(to prevent them from being GCed).",
{"env-keep-derivations"}};
Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
"Whether to automatically replace files with identical contents with hard links."};
/* Whether to lock the Nix client and worker to the same CPU. */
bool lockCPU;
Setting<bool> envKeepDerivations{this, false, "keep-env-derivations",
"Whether to add derivations as a dependency of user environments "
"(to prevent them from being GCed).",
{"env-keep-derivations"}};
/* Whether to show a stack trace if Nix evaluation fails. */
Setting<bool> showTrace{
this, false, "show-trace",
"Whether to show a stack trace on evaluation errors."};
/* Whether to lock the Nix client and worker to the same CPU. */
bool lockCPU;
Setting<SandboxMode> sandboxMode {
this,
#if __linux__
smEnabled
#else
smDisabled
#endif
,
"sandbox",
"Whether to enable sandboxed builds. Can be \"true\", \"false\" or "
"\"relaxed\".",
{
"build-use-chroot", "build-use-sandbox"
}
};
/* Whether to show a stack trace if Nix evaluation fails. */
Setting<bool> showTrace{this, false, "show-trace",
"Whether to show a stack trace on evaluation errors."};
Setting<PathSet> sandboxPaths{
this,
{},
"sandbox-paths",
"The paths to make available inside the build sandbox.",
{"build-chroot-dirs", "build-sandbox-paths"}};
Setting<SandboxMode> sandboxMode{this,
#if __linux__
smEnabled
#else
smDisabled
#endif
, "sandbox",
"Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
{"build-use-chroot", "build-use-sandbox"}};
Setting<bool> sandboxFallback{
this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
Setting<PathSet> sandboxPaths{this, {}, "sandbox-paths",
"The paths to make available inside the build sandbox.",
{"build-chroot-dirs", "build-sandbox-paths"}};
Setting<PathSet> extraSandboxPaths{
this,
{},
"extra-sandbox-paths",
"Additional paths to make available inside the build sandbox.",
{"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
"Whether to disable sandboxing when the kernel doesn't allow it."};
Setting<PathSet> extraSandboxPaths{this, {}, "extra-sandbox-paths",
"Additional paths to make available inside the build sandbox.",
{"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};
Setting<size_t> buildRepeat{this, 0, "repeat",
"The number of times to repeat a build in order to verify determinism.",
{"build-repeat"}};
Setting<size_t> buildRepeat{
this,
0,
"repeat",
"The number of times to repeat a build in order to verify determinism.",
{"build-repeat"}};
#if __linux__
Setting<std::string> sandboxShmSize{this, "50%", "sandbox-dev-shm-size",
"The size of /dev/shm in the build sandbox."};
Setting<std::string> sandboxShmSize{
this, "50%", "sandbox-dev-shm-size",
"The size of /dev/shm in the build sandbox."};
Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
"The build directory inside the sandbox."};
Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
"The build directory inside the sandbox."};
#endif
Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
"Which prefixes to allow derivations to ask for access to (primarily for Darwin)."};
Setting<PathSet> allowedImpureHostPrefixes{
this,
{},
"allowed-impure-host-deps",
"Which prefixes to allow derivations to ask for access to (primarily for "
"Darwin)."};
#if __APPLE__
Setting<bool> darwinLogSandboxViolations{this, false, "darwin-log-sandbox-violations",
"Whether to log Darwin sandbox access violations to the system log."};
Setting<bool> darwinLogSandboxViolations{
this, false, "darwin-log-sandbox-violations",
"Whether to log Darwin sandbox access violations to the system log."};
#endif
Setting<bool> runDiffHook{this, false, "run-diff-hook",
"Whether to run the program specified by the diff-hook setting "
"repeated builds produce a different result. Typically used to "
"plug in diffoscope."};
Setting<bool> runDiffHook{
this, false, "run-diff-hook",
"Whether to run the program specified by the diff-hook setting "
"repeated builds produce a different result. Typically used to "
"plug in diffoscope."};
PathSetting diffHook{this, true, "", "diff-hook",
"A program that prints out the differences between the two paths "
"specified on its command line."};
PathSetting diffHook{
this, true, "", "diff-hook",
"A program that prints out the differences between the two paths "
"specified on its command line."};
Setting<bool> enforceDeterminism{this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output."};
Setting<bool> enforceDeterminism{
this, true, "enforce-determinism",
"Whether to fail if repeated builds produce different output."};
Setting<Strings> trustedPublicKeys{this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
"trusted-public-keys",
"Trusted public keys for secure substitution.",
{"binary-cache-public-keys"}};
Setting<Strings> trustedPublicKeys{
this,
{"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
"trusted-public-keys",
"Trusted public keys for secure substitution.",
{"binary-cache-public-keys"}};
Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
"Secret keys with which to sign local builds."};
Setting<Strings> secretKeyFiles{
this,
{},
"secret-key-files",
"Secret keys with which to sign local builds."};
Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
"How long downloaded files are considered up-to-date."};
Setting<unsigned int> tarballTtl{
this, 60 * 60, "tarball-ttl",
"How long downloaded files are considered up-to-date."};
Setting<bool> requireSigs{this, true, "require-sigs",
"Whether to check that any non-content-addressed path added to the "
"Nix store has a valid signature (that is, one signed using a key "
"listed in 'trusted-public-keys'."};
Setting<bool> requireSigs{
this, true, "require-sigs",
"Whether to check that any non-content-addressed path added to the "
"Nix store has a valid signature (that is, one signed using a key "
"listed in 'trusted-public-keys'."};
Setting<StringSet> extraPlatforms{this,
std::string{SYSTEM} == "x86_64-linux" ? StringSet{"i686-linux"} : StringSet{},
"extra-platforms",
"Additional platforms that can be built on the local system. "
"These may be supported natively (e.g. armv7 on some aarch64 CPUs "
"or using hacks like qemu-user."};
Setting<StringSet> extraPlatforms{
this,
std::string{SYSTEM} == "x86_64-linux" ? StringSet{"i686-linux"}
: StringSet{},
"extra-platforms",
"Additional platforms that can be built on the local system. "
"These may be supported natively (e.g. armv7 on some aarch64 CPUs "
"or using hacks like qemu-user."};
Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
"system-features",
"Optional features that this system implements (like \"kvm\")."};
Setting<StringSet> systemFeatures{
this, getDefaultSystemFeatures(), "system-features",
"Optional features that this system implements (like \"kvm\")."};
Setting<Strings> substituters{this,
nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
"substituters",
"The URIs of substituters (such as https://cache.nixos.org/).",
{"binary-caches"}};
Setting<Strings> substituters{
this,
nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"}
: Strings(),
"substituters",
"The URIs of substituters (such as https://cache.nixos.org/).",
{"binary-caches"}};
// FIXME: provide a way to add to option values.
Setting<Strings> extraSubstituters{this, {}, "extra-substituters",
"Additional URIs of substituters.",
{"extra-binary-caches"}};
// FIXME: provide a way to add to option values.
Setting<Strings> extraSubstituters{this,
{},
"extra-substituters",
"Additional URIs of substituters.",
{"extra-binary-caches"}};
Setting<StringSet> trustedSubstituters{this, {}, "trusted-substituters",
"Disabled substituters that may be enabled via the substituters option by untrusted users.",
{"trusted-binary-caches"}};
Setting<StringSet> trustedSubstituters{
this,
{},
"trusted-substituters",
"Disabled substituters that may be enabled via the substituters option "
"by untrusted users.",
{"trusted-binary-caches"}};
Setting<Strings> trustedUsers{this, {"root"}, "trusted-users",
"Which users or groups are trusted to ask the daemon to do unsafe things."};
Setting<Strings> trustedUsers{this,
{"root"},
"trusted-users",
"Which users or groups are trusted to ask the "
"daemon to do unsafe things."};
Setting<unsigned int> ttlNegativeNarInfoCache{this, 3600, "narinfo-cache-negative-ttl",
"The TTL in seconds for negative lookups in the disk cache i.e binary cache lookups that "
"return an invalid path result"};
Setting<unsigned int> ttlNegativeNarInfoCache{
this, 3600, "narinfo-cache-negative-ttl",
"The TTL in seconds for negative lookups in the disk cache i.e binary "
"cache lookups that "
"return an invalid path result"};
Setting<unsigned int> ttlPositiveNarInfoCache{this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
"The TTL in seconds for positive lookups in the disk cache i.e binary cache lookups that "
"return a valid path result."};
Setting<unsigned int> ttlPositiveNarInfoCache{
this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
"The TTL in seconds for positive lookups in the disk cache i.e binary "
"cache lookups that "
"return a valid path result."};
/* ?Who we trust to use the daemon in safe ways */
Setting<Strings> allowedUsers{this, {"*"}, "allowed-users",
"Which users or groups are allowed to connect to the daemon."};
/* ?Who we trust to use the daemon in safe ways */
Setting<Strings> allowedUsers{
this,
{"*"},
"allowed-users",
"Which users or groups are allowed to connect to the daemon."};
Setting<bool> printMissing{this, true, "print-missing",
"Whether to print what paths need to be built or downloaded."};
Setting<bool> printMissing{
this, true, "print-missing",
"Whether to print what paths need to be built or downloaded."};
Setting<std::string> preBuildHook{this,
Setting<std::string> preBuildHook {
this,
#if __APPLE__
nixLibexecDir + "/nix/resolve-system-dependencies",
#else
"",
#endif
"pre-build-hook",
"A program to run just before a build to set derivation-specific build settings."};
"A program to run just before a build to set derivation-specific build "
"settings."
};
Setting<std::string> postBuildHook{this, "", "post-build-hook",
"A program to run just after each successful build."};
Setting<std::string> postBuildHook{
this, "", "post-build-hook",
"A program to run just after each successful build."};
Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
"Path to the netrc file used to obtain usernames/passwords for downloads."};
Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"),
"netrc-file",
"Path to the netrc file used to obtain "
"usernames/passwords for downloads."};
/* Path to the SSL CA file used */
Path caFile;
/* Path to the SSL CA file used */
Path caFile;
#if __linux__
Setting<bool> filterSyscalls{this, true, "filter-syscalls",
"Whether to prevent certain dangerous system calls, such as "
"creation of setuid/setgid files or adding ACLs or extended "
"attributes. Only disable this if you're aware of the "
"security implications."};
Setting<bool> filterSyscalls{
this, true, "filter-syscalls",
"Whether to prevent certain dangerous system calls, such as "
"creation of setuid/setgid files or adding ACLs or extended "
"attributes. Only disable this if you're aware of the "
"security implications."};
Setting<bool> allowNewPrivileges{this, false, "allow-new-privileges",
"Whether builders can acquire new privileges by calling programs with "
"setuid/setgid bits or with file capabilities."};
Setting<bool> allowNewPrivileges{
this, false, "allow-new-privileges",
"Whether builders can acquire new privileges by calling programs with "
"setuid/setgid bits or with file capabilities."};
#endif
Setting<Strings> hashedMirrors{this, {"http://tarballs.nixos.org/"}, "hashed-mirrors",
"A list of servers used by builtins.fetchurl to fetch files by hash."};
Setting<Strings> hashedMirrors{
this,
{"http://tarballs.nixos.org/"},
"hashed-mirrors",
"A list of servers used by builtins.fetchurl to fetch files by hash."};
Setting<uint64_t> minFree{this, 0, "min-free",
"Automatically run the garbage collector when free disk space drops below the specified amount."};
Setting<uint64_t> minFree{this, 0, "min-free",
"Automatically run the garbage collector when free "
"disk space drops below the specified amount."};
Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
"Stop deleting garbage when free disk space is above the specified amount."};
Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(),
"max-free",
"Stop deleting garbage when free disk space is "
"above the specified amount."};
Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
"Number of seconds between checking free disk space."};
Setting<uint64_t> minFreeCheckInterval{
this, 5, "min-free-check-interval",
"Number of seconds between checking free disk space."};
Setting<Paths> pluginFiles{this, {}, "plugin-files",
"Plugins to dynamically load at nix initialization time."};
Setting<Paths> pluginFiles{
this,
{},
"plugin-files",
"Plugins to dynamically load at nix initialization time."};
};
// FIXME: don't use a global variable.
extern Settings settings;
@ -367,4 +481,4 @@ void loadConfFile();
extern const string nixVersion;
}
} // namespace nix

View file

@ -7,167 +7,150 @@ namespace nix {
MakeError(UploadToHTTP, Error);
class HttpBinaryCacheStore : public BinaryCacheStore
{
private:
class HttpBinaryCacheStore : public BinaryCacheStore {
private:
Path cacheUri;
Path cacheUri;
struct State {
bool enabled = true;
std::chrono::steady_clock::time_point disabledUntil;
};
struct State
{
bool enabled = true;
std::chrono::steady_clock::time_point disabledUntil;
};
Sync<State> _state;
Sync<State> _state;
public:
HttpBinaryCacheStore(const Params& params, const Path& _cacheUri)
: BinaryCacheStore(params), cacheUri(_cacheUri) {
if (cacheUri.back() == '/') cacheUri.pop_back();
public:
diskCache = getNarInfoDiskCache();
}
HttpBinaryCacheStore(
const Params & params, const Path & _cacheUri)
: BinaryCacheStore(params)
, cacheUri(_cacheUri)
{
if (cacheUri.back() == '/')
cacheUri.pop_back();
std::string getUri() override { return cacheUri; }
diskCache = getNarInfoDiskCache();
void init() override {
// FIXME: do this lazily?
if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) {
try {
BinaryCacheStore::init();
} catch (UploadToHTTP&) {
throw Error("'%s' does not appear to be a binary cache", cacheUri);
}
diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority);
}
}
std::string getUri() override
{
return cacheUri;
protected:
void maybeDisable() {
auto state(_state.lock());
if (state->enabled && settings.tryFallback) {
int t = 60;
printError("disabling binary cache '%s' for %s seconds", getUri(), t);
state->enabled = false;
state->disabledUntil =
std::chrono::steady_clock::now() + std::chrono::seconds(t);
}
}
void init() override
{
// FIXME: do this lazily?
if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) {
try {
BinaryCacheStore::init();
} catch (UploadToHTTP &) {
throw Error("'%s' does not appear to be a binary cache", cacheUri);
}
diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority);
}
void checkEnabled() {
auto state(_state.lock());
if (state->enabled) return;
if (std::chrono::steady_clock::now() > state->disabledUntil) {
state->enabled = true;
debug("re-enabling binary cache '%s'", getUri());
return;
}
throw SubstituterDisabled("substituter '%s' is disabled", getUri());
}
protected:
bool fileExists(const std::string& path) override {
checkEnabled();
void maybeDisable()
{
auto state(_state.lock());
if (state->enabled && settings.tryFallback) {
int t = 60;
printError("disabling binary cache '%s' for %s seconds", getUri(), t);
state->enabled = false;
state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t);
}
try {
DownloadRequest request(cacheUri + "/" + path);
request.head = true;
getDownloader()->download(request);
return true;
} catch (DownloadError& e) {
/* S3 buckets return 403 if a file doesn't exist and the
bucket is unlistable, so treat 403 as 404. */
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
return false;
maybeDisable();
throw;
}
}
void checkEnabled()
{
auto state(_state.lock());
if (state->enabled) return;
if (std::chrono::steady_clock::now() > state->disabledUntil) {
state->enabled = true;
debug("re-enabling binary cache '%s'", getUri());
return;
}
throw SubstituterDisabled("substituter '%s' is disabled", getUri());
void upsertFile(const std::string& path, const std::string& data,
const std::string& mimeType) override {
auto req = DownloadRequest(cacheUri + "/" + path);
req.data = std::make_shared<string>(data); // FIXME: inefficient
req.mimeType = mimeType;
try {
getDownloader()->download(req);
} catch (DownloadError& e) {
throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s",
cacheUri, e.msg());
}
}
bool fileExists(const std::string & path) override
{
checkEnabled();
DownloadRequest makeRequest(const std::string& path) {
DownloadRequest request(cacheUri + "/" + path);
return request;
}
try {
DownloadRequest request(cacheUri + "/" + path);
request.head = true;
getDownloader()->download(request);
return true;
} catch (DownloadError & e) {
/* S3 buckets return 403 if a file doesn't exist and the
bucket is unlistable, so treat 403 as 404. */
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
return false;
void getFile(const std::string& path, Sink& sink) override {
checkEnabled();
auto request(makeRequest(path));
try {
getDownloader()->download(std::move(request), sink);
} catch (DownloadError& e) {
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
throw NoSuchBinaryCacheFile(
"file '%s' does not exist in binary cache '%s'", path, getUri());
maybeDisable();
throw;
}
}
void getFile(
const std::string& path,
Callback<std::shared_ptr<std::string>> callback) noexcept override {
checkEnabled();
auto request(makeRequest(path));
auto callbackPtr =
std::make_shared<decltype(callback)>(std::move(callback));
getDownloader()->enqueueDownload(
request, {[callbackPtr, this](std::future<DownloadResult> result) {
try {
(*callbackPtr)(result.get().data);
} catch (DownloadError& e) {
if (e.error == Downloader::NotFound ||
e.error == Downloader::Forbidden)
return (*callbackPtr)(std::shared_ptr<std::string>());
maybeDisable();
throw;
}
}
void upsertFile(const std::string & path,
const std::string & data,
const std::string & mimeType) override
{
auto req = DownloadRequest(cacheUri + "/" + path);
req.data = std::make_shared<string>(data); // FIXME: inefficient
req.mimeType = mimeType;
try {
getDownloader()->download(req);
} catch (DownloadError & e) {
throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg());
}
}
DownloadRequest makeRequest(const std::string & path)
{
DownloadRequest request(cacheUri + "/" + path);
return request;
}
void getFile(const std::string & path, Sink & sink) override
{
checkEnabled();
auto request(makeRequest(path));
try {
getDownloader()->download(std::move(request), sink);
} catch (DownloadError & e) {
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
maybeDisable();
throw;
}
}
void getFile(const std::string & path,
Callback<std::shared_ptr<std::string>> callback) noexcept override
{
checkEnabled();
auto request(makeRequest(path));
auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));
getDownloader()->enqueueDownload(request,
{[callbackPtr, this](std::future<DownloadResult> result) {
try {
(*callbackPtr)(result.get().data);
} catch (DownloadError & e) {
if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
return (*callbackPtr)(std::shared_ptr<std::string>());
maybeDisable();
callbackPtr->rethrow();
} catch (...) {
callbackPtr->rethrow();
}
}});
}
callbackPtr->rethrow();
} catch (...) {
callbackPtr->rethrow();
}
}});
}
};
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, 7) != "http://" &&
std::string(uri, 0, 8) != "https://" &&
(getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || std::string(uri, 0, 7) != "file://")
) return 0;
auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
store->init();
return store;
});
}
static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, 7) != "http://" &&
std::string(uri, 0, 8) != "https://" &&
(getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" ||
std::string(uri, 0, 7) != "file://"))
return 0;
auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
store->init();
return store;
});
} // namespace nix

View file

@ -1,293 +1,258 @@
#include "archive.hh"
#include "derivations.hh"
#include "pool.hh"
#include "remote-store.hh"
#include "serve-protocol.hh"
#include "ssh.hh"
#include "store-api.hh"
#include "worker-protocol.hh"
#include "ssh.hh"
#include "derivations.hh"
namespace nix {
static std::string uriScheme = "ssh://";
struct LegacySSHStore : public Store
{
const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"};
const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
const Setting<std::string> remoteStore{this, "", "remote-store", "URI of the store on the remote system"};
struct LegacySSHStore : public Store {
const Setting<int> maxConnections{
this, 1, "max-connections",
"maximum number of concurrent SSH connections"};
const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{this, false, "compress",
"whether to compress the connection"};
const Setting<Path> remoteProgram{
this, "nix-store", "remote-program",
"path to the nix-store executable on the remote system"};
const Setting<std::string> remoteStore{
this, "", "remote-store", "URI of the store on the remote system"};
// Hack for getting remote build log output.
const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
// Hack for getting remote build log output.
const Setting<int> logFD{
this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
struct Connection
{
std::unique_ptr<SSHMaster::Connection> sshConn;
FdSink to;
FdSource from;
int remoteVersion;
bool good = true;
};
struct Connection {
std::unique_ptr<SSHMaster::Connection> sshConn;
FdSink to;
FdSource from;
int remoteVersion;
bool good = true;
};
std::string host;
std::string host;
ref<Pool<Connection>> connections;
ref<Pool<Connection>> connections;
SSHMaster master;
SSHMaster master;
LegacySSHStore(const string & host, const Params & params)
: Store(params)
, host(host)
, connections(make_ref<Pool<Connection>>(
std::max(1, (int) maxConnections),
LegacySSHStore(const string& host, const Params& params)
: Store(params),
host(host),
connections(make_ref<Pool<Connection>>(
std::max(1, (int)maxConnections),
[this]() { return openConnection(); },
[](const ref<Connection> & r) { return r->good; }
))
, master(
host,
sshKey,
// Use SSH master only if using more than 1 connection.
connections->capacity() > 1,
compress,
logFD)
{
[](const ref<Connection>& r) { return r->good; })),
master(host, sshKey,
// Use SSH master only if using more than 1 connection.
connections->capacity() > 1, compress, logFD) {}
ref<Connection> openConnection() {
auto conn = make_ref<Connection>();
conn->sshConn = master.startCommand(
fmt("%s --serve --write", remoteProgram) +
(remoteStore.get() == ""
? ""
: " --store " + shellEscape(remoteStore.get())));
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
try {
conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
conn->to.flush();
unsigned int magic = readInt(conn->from);
if (magic != SERVE_MAGIC_2)
throw Error("protocol mismatch with 'nix-store --serve' on '%s'", host);
conn->remoteVersion = readInt(conn->from);
if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200)
throw Error("unsupported 'nix-store --serve' protocol version on '%s'",
host);
} catch (EndOfFile& e) {
throw Error("cannot connect to '%1%'", host);
}
ref<Connection> openConnection()
{
auto conn = make_ref<Connection>();
conn->sshConn = master.startCommand(
fmt("%s --serve --write", remoteProgram)
+ (remoteStore.get() == "" ? "" : " --store " + shellEscape(remoteStore.get())));
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
return conn;
};
try {
conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
conn->to.flush();
string getUri() override { return uriScheme + host; }
unsigned int magic = readInt(conn->from);
if (magic != SERVE_MAGIC_2)
throw Error("protocol mismatch with 'nix-store --serve' on '%s'", host);
conn->remoteVersion = readInt(conn->from);
if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200)
throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host);
void queryPathInfoUncached(
const Path& path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override {
try {
auto conn(connections->get());
} catch (EndOfFile & e) {
throw Error("cannot connect to '%1%'", host);
}
debug("querying remote host '%s' for info on '%s'", host, path);
return conn;
};
conn->to << cmdQueryPathInfos << PathSet{path};
conn->to.flush();
string getUri() override
{
return uriScheme + host;
auto info = std::make_shared<ValidPathInfo>();
conn->from >> info->path;
if (info->path.empty()) return callback(nullptr);
assert(path == info->path);
PathSet references;
conn->from >> info->deriver;
info->references = readStorePaths<PathSet>(*this, conn->from);
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4) {
auto s = readString(conn->from);
info->narHash = s.empty() ? Hash() : Hash(s);
conn->from >> info->ca;
info->sigs = readStrings<StringSet>(conn->from);
}
auto s = readString(conn->from);
assert(s == "");
callback(std::move(info));
} catch (...) {
callback.rethrow();
}
}
void addToStore(const ValidPathInfo& info, Source& source, RepairFlag repair,
CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override {
debug("adding path '%s' to remote host '%s'", info.path, host);
auto conn(connections->get());
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
conn->to << cmdAddToStoreNar << info.path << info.deriver
<< info.narHash.to_string(Base16, false) << info.references
<< info.registrationTime << info.narSize << info.ultimate
<< info.sigs << info.ca;
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to.flush();
} else {
conn->to << cmdImportPaths << 1;
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to << exportMagic << info.path << info.references << info.deriver
<< 0 << 0;
conn->to.flush();
}
void queryPathInfoUncached(const Path & path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override
{
try {
auto conn(connections->get());
if (readInt(conn->from) != 1)
throw Error(
"failed to add path '%s' to remote host '%s', info.path, host");
}
debug("querying remote host '%s' for info on '%s'", host, path);
void narFromPath(const Path& path, Sink& sink) override {
auto conn(connections->get());
conn->to << cmdQueryPathInfos << PathSet{path};
conn->to.flush();
conn->to << cmdDumpStorePath << path;
conn->to.flush();
copyNAR(conn->from, sink);
}
auto info = std::make_shared<ValidPathInfo>();
conn->from >> info->path;
if (info->path.empty()) return callback(nullptr);
assert(path == info->path);
Path queryPathFromHashPart(const string& hashPart) override {
unsupported("queryPathFromHashPart");
}
PathSet references;
conn->from >> info->deriver;
info->references = readStorePaths<PathSet>(*this, conn->from);
readLongLong(conn->from); // download size
info->narSize = readLongLong(conn->from);
Path addToStore(const string& name, const Path& srcPath, bool recursive,
HashType hashAlgo, PathFilter& filter,
RepairFlag repair) override {
unsupported("addToStore");
}
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4) {
auto s = readString(conn->from);
info->narHash = s.empty() ? Hash() : Hash(s);
conn->from >> info->ca;
info->sigs = readStrings<StringSet>(conn->from);
}
Path addTextToStore(const string& name, const string& s,
const PathSet& references, RepairFlag repair) override {
unsupported("addTextToStore");
}
auto s = readString(conn->from);
assert(s == "");
BuildResult buildDerivation(const Path& drvPath, const BasicDerivation& drv,
BuildMode buildMode) override {
auto conn(connections->get());
callback(std::move(info));
} catch (...) { callback.rethrow(); }
conn->to << cmdBuildDerivation << drvPath << drv << settings.maxSilentTime
<< settings.buildTimeout;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 2)
conn->to << settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->to << settings.buildRepeat << settings.enforceDeterminism;
conn->to.flush();
BuildResult status;
status.status = (BuildResult::Status)readInt(conn->from);
conn->from >> status.errorMsg;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >>
status.startTime >> status.stopTime;
return status;
}
void ensurePath(const Path& path) override { unsupported("ensurePath"); }
void computeFSClosure(const PathSet& paths, PathSet& out,
bool flipDirection = false, bool includeOutputs = false,
bool includeDerivers = false) override {
if (flipDirection || includeDerivers) {
Store::computeFSClosure(paths, out, flipDirection, includeOutputs,
includeDerivers);
return;
}
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override
{
debug("adding path '%s' to remote host '%s'", info.path, host);
auto conn(connections->get());
auto conn(connections->get());
conn->to << cmdQueryClosure << includeOutputs << paths;
conn->to.flush();
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
auto res = readStorePaths<PathSet>(*this, conn->from);
conn->to
<< cmdAddToStoreNar
<< info.path
<< info.deriver
<< info.narHash.to_string(Base16, false)
<< info.references
<< info.registrationTime
<< info.narSize
<< info.ultimate
<< info.sigs
<< info.ca;
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to.flush();
out.insert(res.begin(), res.end());
}
} else {
PathSet queryValidPaths(const PathSet& paths, SubstituteFlag maybeSubstitute =
NoSubstitute) override {
auto conn(connections->get());
conn->to
<< cmdImportPaths
<< 1;
try {
copyNAR(source, conn->to);
} catch (...) {
conn->good = false;
throw;
}
conn->to
<< exportMagic
<< info.path
<< info.references
<< info.deriver
<< 0
<< 0;
conn->to.flush();
conn->to << cmdQueryValidPaths << false // lock
<< maybeSubstitute << paths;
conn->to.flush();
}
return readStorePaths<PathSet>(*this, conn->from);
}
if (readInt(conn->from) != 1)
throw Error("failed to add path '%s' to remote host '%s', info.path, host");
}
void connect() override { auto conn(connections->get()); }
void narFromPath(const Path & path, Sink & sink) override
{
auto conn(connections->get());
conn->to << cmdDumpStorePath << path;
conn->to.flush();
copyNAR(conn->from, sink);
}
Path queryPathFromHashPart(const string & hashPart) override
{ unsupported("queryPathFromHashPart"); }
Path addToStore(const string & name, const Path & srcPath,
bool recursive, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override
{ unsupported("addToStore"); }
Path addTextToStore(const string & name, const string & s,
const PathSet & references, RepairFlag repair) override
{ unsupported("addTextToStore"); }
BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override
{
auto conn(connections->get());
conn->to
<< cmdBuildDerivation
<< drvPath
<< drv
<< settings.maxSilentTime
<< settings.buildTimeout;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 2)
conn->to
<< settings.maxLogSize;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->to
<< settings.buildRepeat
<< settings.enforceDeterminism;
conn->to.flush();
BuildResult status;
status.status = (BuildResult::Status) readInt(conn->from);
conn->from >> status.errorMsg;
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;
return status;
}
void ensurePath(const Path & path) override
{ unsupported("ensurePath"); }
void computeFSClosure(const PathSet & paths,
PathSet & out, bool flipDirection = false,
bool includeOutputs = false, bool includeDerivers = false) override
{
if (flipDirection || includeDerivers) {
Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
return;
}
auto conn(connections->get());
conn->to
<< cmdQueryClosure
<< includeOutputs
<< paths;
conn->to.flush();
auto res = readStorePaths<PathSet>(*this, conn->from);
out.insert(res.begin(), res.end());
}
PathSet queryValidPaths(const PathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override
{
auto conn(connections->get());
conn->to
<< cmdQueryValidPaths
<< false // lock
<< maybeSubstitute
<< paths;
conn->to.flush();
return readStorePaths<PathSet>(*this, conn->from);
}
void connect() override
{
auto conn(connections->get());
}
unsigned int getProtocol() override
{
auto conn(connections->get());
return conn->remoteVersion;
}
unsigned int getProtocol() override {
auto conn(connections->get());
return conn->remoteVersion;
}
};
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params);
});
static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<LegacySSHStore>(
std::string(uri, uriScheme.size()), params);
});
}
} // namespace nix

View file

@ -4,100 +4,82 @@
namespace nix {
class LocalBinaryCacheStore : public BinaryCacheStore
{
private:
class LocalBinaryCacheStore : public BinaryCacheStore {
private:
Path binaryCacheDir;
Path binaryCacheDir;
public:
LocalBinaryCacheStore(const Params& params, const Path& binaryCacheDir)
: BinaryCacheStore(params), binaryCacheDir(binaryCacheDir) {}
public:
void init() override;
LocalBinaryCacheStore(
const Params & params, const Path & binaryCacheDir)
: BinaryCacheStore(params)
, binaryCacheDir(binaryCacheDir)
{
}
void init() override;
std::string getUri() override
{
return "file://" + binaryCacheDir;
}
protected:
bool fileExists(const std::string & path) override;
void upsertFile(const std::string & path,
const std::string & data,
const std::string & mimeType) override;
void getFile(const std::string & path, Sink & sink) override
{
try {
readFile(binaryCacheDir + "/" + path, sink);
} catch (SysError & e) {
if (e.errNo == ENOENT)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
}
}
PathSet queryAllValidPaths() override
{
PathSet paths;
for (auto & entry : readDirectory(binaryCacheDir)) {
if (entry.name.size() != 40 ||
!hasSuffix(entry.name, ".narinfo"))
continue;
paths.insert(storeDir + "/" + entry.name.substr(0, entry.name.size() - 8));
}
return paths;
std::string getUri() override { return "file://" + binaryCacheDir; }
protected:
bool fileExists(const std::string& path) override;
void upsertFile(const std::string& path, const std::string& data,
const std::string& mimeType) override;
void getFile(const std::string& path, Sink& sink) override {
try {
readFile(binaryCacheDir + "/" + path, sink);
} catch (SysError& e) {
if (e.errNo == ENOENT)
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache",
path);
}
}
PathSet queryAllValidPaths() override {
PathSet paths;
for (auto& entry : readDirectory(binaryCacheDir)) {
if (entry.name.size() != 40 || !hasSuffix(entry.name, ".narinfo"))
continue;
paths.insert(storeDir + "/" +
entry.name.substr(0, entry.name.size() - 8));
}
return paths;
}
};
void LocalBinaryCacheStore::init()
{
createDirs(binaryCacheDir + "/nar");
BinaryCacheStore::init();
void LocalBinaryCacheStore::init() {
createDirs(binaryCacheDir + "/nar");
BinaryCacheStore::init();
}
static void atomicWrite(const Path & path, const std::string & s)
{
Path tmp = path + ".tmp." + std::to_string(getpid());
AutoDelete del(tmp, false);
writeFile(tmp, s);
if (rename(tmp.c_str(), path.c_str()))
throw SysError(format("renaming '%1%' to '%2%'") % tmp % path);
del.cancel();
static void atomicWrite(const Path& path, const std::string& s) {
Path tmp = path + ".tmp." + std::to_string(getpid());
AutoDelete del(tmp, false);
writeFile(tmp, s);
if (rename(tmp.c_str(), path.c_str()))
throw SysError(format("renaming '%1%' to '%2%'") % tmp % path);
del.cancel();
}
bool LocalBinaryCacheStore::fileExists(const std::string & path)
{
return pathExists(binaryCacheDir + "/" + path);
bool LocalBinaryCacheStore::fileExists(const std::string& path) {
return pathExists(binaryCacheDir + "/" + path);
}
void LocalBinaryCacheStore::upsertFile(const std::string & path,
const std::string & data,
const std::string & mimeType)
{
atomicWrite(binaryCacheDir + "/" + path, data);
void LocalBinaryCacheStore::upsertFile(const std::string& path,
const std::string& data,
const std::string& mimeType) {
atomicWrite(binaryCacheDir + "/" + path, data);
}
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
std::string(uri, 0, 7) != "file://")
static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
std::string(uri, 0, 7) != "file://")
return 0;
auto store = std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
store->init();
return store;
});
auto store =
std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
store->init();
return store;
});
}
} // namespace nix

View file

@ -1,131 +1,113 @@
#include "archive.hh"
#include "fs-accessor.hh"
#include "store-api.hh"
#include "globals.hh"
#include "compression.hh"
#include "derivations.hh"
#include "fs-accessor.hh"
#include "globals.hh"
#include "store-api.hh"
namespace nix {
LocalFSStore::LocalFSStore(const Params & params)
: Store(params)
{
}
LocalFSStore::LocalFSStore(const Params& params) : Store(params) {}
struct LocalStoreAccessor : public FSAccessor
{
ref<LocalFSStore> store;
struct LocalStoreAccessor : public FSAccessor {
ref<LocalFSStore> store;
LocalStoreAccessor(ref<LocalFSStore> store) : store(store) { }
LocalStoreAccessor(ref<LocalFSStore> store) : store(store) {}
Path toRealPath(const Path & path)
{
Path storePath = store->toStorePath(path);
if (!store->isValidPath(storePath))
throw InvalidPath(format("path '%1%' is not a valid store path") % storePath);
return store->getRealStoreDir() + std::string(path, store->storeDir.size());
Path toRealPath(const Path& path) {
Path storePath = store->toStorePath(path);
if (!store->isValidPath(storePath))
throw InvalidPath(format("path '%1%' is not a valid store path") %
storePath);
return store->getRealStoreDir() + std::string(path, store->storeDir.size());
}
FSAccessor::Stat stat(const Path& path) override {
auto realPath = toRealPath(path);
struct stat st;
if (lstat(realPath.c_str(), &st)) {
if (errno == ENOENT || errno == ENOTDIR)
return {Type::tMissing, 0, false};
throw SysError(format("getting status of '%1%'") % path);
}
FSAccessor::Stat stat(const Path & path) override
{
auto realPath = toRealPath(path);
if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
throw Error(format("file '%1%' has unsupported type") % path);
struct stat st;
if (lstat(realPath.c_str(), &st)) {
if (errno == ENOENT || errno == ENOTDIR) return {Type::tMissing, 0, false};
throw SysError(format("getting status of '%1%'") % path);
}
if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
throw Error(format("file '%1%' has unsupported type") % path);
return {
S_ISREG(st.st_mode) ? Type::tRegular :
S_ISLNK(st.st_mode) ? Type::tSymlink :
Type::tDirectory,
S_ISREG(st.st_mode) ? (uint64_t) st.st_size : 0,
return {S_ISREG(st.st_mode)
? Type::tRegular
: S_ISLNK(st.st_mode) ? Type::tSymlink : Type::tDirectory,
S_ISREG(st.st_mode) ? (uint64_t)st.st_size : 0,
S_ISREG(st.st_mode) && st.st_mode & S_IXUSR};
}
}
StringSet readDirectory(const Path & path) override
{
auto realPath = toRealPath(path);
StringSet readDirectory(const Path& path) override {
auto realPath = toRealPath(path);
auto entries = nix::readDirectory(realPath);
auto entries = nix::readDirectory(realPath);
StringSet res;
for (auto & entry : entries)
res.insert(entry.name);
StringSet res;
for (auto& entry : entries) res.insert(entry.name);
return res;
}
return res;
}
std::string readFile(const Path & path) override
{
return nix::readFile(toRealPath(path));
}
std::string readFile(const Path& path) override {
return nix::readFile(toRealPath(path));
}
std::string readLink(const Path & path) override
{
return nix::readLink(toRealPath(path));
}
std::string readLink(const Path& path) override {
return nix::readLink(toRealPath(path));
}
};
ref<FSAccessor> LocalFSStore::getFSAccessor()
{
return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(
std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())));
ref<FSAccessor> LocalFSStore::getFSAccessor() {
return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(
std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())));
}
void LocalFSStore::narFromPath(const Path & path, Sink & sink)
{
if (!isValidPath(path))
throw Error(format("path '%s' is not valid") % path);
dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
void LocalFSStore::narFromPath(const Path& path, Sink& sink) {
if (!isValidPath(path)) throw Error(format("path '%s' is not valid") % path);
dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
}
const string LocalFSStore::drvsLogDir = "drvs";
std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path& path_) {
auto path(path_);
assertStorePath(path);
std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_)
{
auto path(path_);
assertStorePath(path);
if (!isDerivation(path)) {
try {
path = queryPathInfo(path)->deriver;
} catch (InvalidPath &) {
return nullptr;
}
if (path == "") return nullptr;
if (!isDerivation(path)) {
try {
path = queryPathInfo(path)->deriver;
} catch (InvalidPath&) {
return nullptr;
}
if (path == "") return nullptr;
}
string baseName = baseNameOf(path);
string baseName = baseNameOf(path);
for (int j = 0; j < 2; j++) {
for (int j = 0; j < 2; j++) {
Path logPath = j == 0 ? fmt("%s/%s/%s/%s", logDir, drvsLogDir,
string(baseName, 0, 2), string(baseName, 2))
: fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
Path logBz2Path = logPath + ".bz2";
Path logPath =
j == 0
? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2))
: fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
Path logBz2Path = logPath + ".bz2";
if (pathExists(logPath))
return std::make_shared<std::string>(readFile(logPath));
else if (pathExists(logBz2Path)) {
try {
return decompress("bzip2", readFile(logBz2Path));
} catch (Error &) { }
}
if (pathExists(logPath))
return std::make_shared<std::string>(readFile(logPath));
else if (pathExists(logBz2Path)) {
try {
return decompress("bzip2", readFile(logBz2Path));
} catch (Error&) {
}
}
}
return nullptr;
return nullptr;
}
}
} // namespace nix

File diff suppressed because it is too large Load diff

View file

@ -1,309 +1,295 @@
#pragma once
#include "sqlite.hh"
#include "pathlocks.hh"
#include "store-api.hh"
#include "sync.hh"
#include "util.hh"
#include <chrono>
#include <future>
#include <string>
#include <unordered_set>
#include "pathlocks.hh"
#include "sqlite.hh"
#include "store-api.hh"
#include "sync.hh"
#include "util.hh"
namespace nix {
/* Nix store and database schema version. Version 1 (or 0) was Nix <=
0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */
const int nixSchemaVersion = 10;
struct Derivation;
struct OptimiseStats
{
unsigned long filesLinked = 0;
unsigned long long bytesFreed = 0;
unsigned long long blocksFreed = 0;
struct OptimiseStats {
unsigned long filesLinked = 0;
unsigned long long bytesFreed = 0;
unsigned long long blocksFreed = 0;
};
class LocalStore : public LocalFSStore {
private:
/* Lock file used for upgrading. */
AutoCloseFD globalLock;
class LocalStore : public LocalFSStore
{
private:
struct State {
/* The SQLite database object. */
SQLite db;
/* Lock file used for upgrading. */
AutoCloseFD globalLock;
/* Some precompiled SQLite statements. */
SQLiteStmt stmtRegisterValidPath;
SQLiteStmt stmtUpdatePathInfo;
SQLiteStmt stmtAddReference;
SQLiteStmt stmtQueryPathInfo;
SQLiteStmt stmtQueryReferences;
SQLiteStmt stmtQueryReferrers;
SQLiteStmt stmtInvalidatePath;
SQLiteStmt stmtAddDerivationOutput;
SQLiteStmt stmtQueryValidDerivers;
SQLiteStmt stmtQueryDerivationOutputs;
SQLiteStmt stmtQueryPathFromHashPart;
SQLiteStmt stmtQueryValidPaths;
struct State
{
/* The SQLite database object. */
SQLite db;
/* The file to which we write our temporary roots. */
AutoCloseFD fdTempRoots;
/* Some precompiled SQLite statements. */
SQLiteStmt stmtRegisterValidPath;
SQLiteStmt stmtUpdatePathInfo;
SQLiteStmt stmtAddReference;
SQLiteStmt stmtQueryPathInfo;
SQLiteStmt stmtQueryReferences;
SQLiteStmt stmtQueryReferrers;
SQLiteStmt stmtInvalidatePath;
SQLiteStmt stmtAddDerivationOutput;
SQLiteStmt stmtQueryValidDerivers;
SQLiteStmt stmtQueryDerivationOutputs;
SQLiteStmt stmtQueryPathFromHashPart;
SQLiteStmt stmtQueryValidPaths;
/* The last time we checked whether to do an auto-GC, or an
auto-GC finished. */
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
/* The file to which we write our temporary roots. */
AutoCloseFD fdTempRoots;
/* Whether auto-GC is running. If so, get gcFuture to wait for
the GC to finish. */
bool gcRunning = false;
std::shared_future<void> gcFuture;
/* The last time we checked whether to do an auto-GC, or an
auto-GC finished. */
std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;
/* How much disk space was available after the previous
auto-GC. If the current available disk space is below
minFree but not much below availAfterGC, then there is no
point in starting a new GC. */
uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();
/* Whether auto-GC is running. If so, get gcFuture to wait for
the GC to finish. */
bool gcRunning = false;
std::shared_future<void> gcFuture;
std::unique_ptr<PublicKeys> publicKeys;
};
/* How much disk space was available after the previous
auto-GC. If the current available disk space is below
minFree but not much below availAfterGC, then there is no
point in starting a new GC. */
uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();
Sync<State, std::recursive_mutex> _state;
std::unique_ptr<PublicKeys> publicKeys;
};
public:
PathSetting realStoreDir_;
Sync<State, std::recursive_mutex> _state;
const Path realStoreDir;
const Path dbDir;
const Path linksDir;
const Path reservedPath;
const Path schemaPath;
const Path trashDir;
const Path tempRootsDir;
const Path fnTempRoots;
public:
private:
Setting<bool> requireSigs{
(Store*)this, settings.requireSigs, "require-sigs",
"whether store paths should have a trusted signature on import"};
PathSetting realStoreDir_;
const PublicKeys& getPublicKeys();
const Path realStoreDir;
const Path dbDir;
const Path linksDir;
const Path reservedPath;
const Path schemaPath;
const Path trashDir;
const Path tempRootsDir;
const Path fnTempRoots;
public:
// Hack for build-remote.cc.
PathSet locksHeld = tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS"));
private:
/* Initialise the local store, upgrading the schema if
necessary. */
LocalStore(const Params& params);
Setting<bool> requireSigs{(Store*) this,
settings.requireSigs,
"require-sigs", "whether store paths should have a trusted signature on import"};
~LocalStore();
const PublicKeys & getPublicKeys();
/* Implementations of abstract store API methods. */
public:
std::string getUri() override;
// Hack for build-remote.cc.
PathSet locksHeld = tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS"));
bool isValidPathUncached(const Path& path) override;
/* Initialise the local store, upgrading the schema if
necessary. */
LocalStore(const Params & params);
PathSet queryValidPaths(const PathSet& paths, SubstituteFlag maybeSubstitute =
NoSubstitute) override;
~LocalStore();
PathSet queryAllValidPaths() override;
/* Implementations of abstract store API methods. */
void queryPathInfoUncached(
const Path& path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
std::string getUri() override;
void queryReferrers(const Path& path, PathSet& referrers) override;
bool isValidPathUncached(const Path & path) override;
PathSet queryValidDerivers(const Path& path) override;
PathSet queryValidPaths(const PathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;
PathSet queryDerivationOutputs(const Path& path) override;
PathSet queryAllValidPaths() override;
StringSet queryDerivationOutputNames(const Path& path) override;
void queryPathInfoUncached(const Path & path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
Path queryPathFromHashPart(const string& hashPart) override;
void queryReferrers(const Path & path, PathSet & referrers) override;
PathSet querySubstitutablePaths(const PathSet& paths) override;
PathSet queryValidDerivers(const Path & path) override;
void querySubstitutablePathInfos(const PathSet& paths,
SubstitutablePathInfos& infos) override;
PathSet queryDerivationOutputs(const Path & path) override;
void addToStore(const ValidPathInfo& info, Source& source, RepairFlag repair,
CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override;
StringSet queryDerivationOutputNames(const Path & path) override;
Path addToStore(const string& name, const Path& srcPath, bool recursive,
HashType hashAlgo, PathFilter& filter,
RepairFlag repair) override;
Path queryPathFromHashPart(const string & hashPart) override;
/* Like addToStore(), but the contents of the path are contained
in `dump', which is either a NAR serialisation (if recursive ==
true) or simply the contents of a regular file (if recursive ==
false). */
Path addToStoreFromDump(const string& dump, const string& name,
bool recursive = true, HashType hashAlgo = htSHA256,
RepairFlag repair = NoRepair);
PathSet querySubstitutablePaths(const PathSet & paths) override;
Path addTextToStore(const string& name, const string& s,
const PathSet& references, RepairFlag repair) override;
void querySubstitutablePathInfos(const PathSet & paths,
SubstitutablePathInfos & infos) override;
void buildPaths(const PathSet& paths, BuildMode buildMode) override;
void addToStore(const ValidPathInfo & info, Source & source,
RepairFlag repair, CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override;
BuildResult buildDerivation(const Path& drvPath, const BasicDerivation& drv,
BuildMode buildMode) override;
Path addToStore(const string & name, const Path & srcPath,
bool recursive, HashType hashAlgo,
PathFilter & filter, RepairFlag repair) override;
void ensurePath(const Path& path) override;
/* Like addToStore(), but the contents of the path are contained
in `dump', which is either a NAR serialisation (if recursive ==
true) or simply the contents of a regular file (if recursive ==
false). */
Path addToStoreFromDump(const string & dump, const string & name,
bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair);
void addTempRoot(const Path& path) override;
Path addTextToStore(const string & name, const string & s,
const PathSet & references, RepairFlag repair) override;
void addIndirectRoot(const Path& path) override;
void buildPaths(const PathSet & paths, BuildMode buildMode) override;
void syncWithGC() override;
BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
private:
typedef std::shared_ptr<AutoCloseFD> FDPtr;
typedef list<FDPtr> FDs;
void ensurePath(const Path & path) override;
void findTempRoots(FDs& fds, Roots& roots, bool censor);
void addTempRoot(const Path & path) override;
public:
Roots findRoots(bool censor) override;
void addIndirectRoot(const Path & path) override;
void collectGarbage(const GCOptions& options, GCResults& results) override;
void syncWithGC() override;
/* Optimise the disk space usage of the Nix store by hard-linking
files with the same contents. */
void optimiseStore(OptimiseStats& stats);
private:
void optimiseStore() override;
typedef std::shared_ptr<AutoCloseFD> FDPtr;
typedef list<FDPtr> FDs;
/* Optimise a single store path. */
void optimisePath(const Path& path);
void findTempRoots(FDs & fds, Roots & roots, bool censor);
bool verifyStore(bool checkContents, RepairFlag repair) override;
public:
/* Register the validity of a path, i.e., that `path' exists, that
the paths referenced by it exists, and in the case of an output
path of a derivation, that it has been produced by a successful
execution of the derivation (or something equivalent). Also
register the hash of the file system contents of the path. The
hash must be a SHA-256 hash. */
void registerValidPath(const ValidPathInfo& info);
Roots findRoots(bool censor) override;
void registerValidPaths(const ValidPathInfos& infos);
void collectGarbage(const GCOptions & options, GCResults & results) override;
unsigned int getProtocol() override;
/* Optimise the disk space usage of the Nix store by hard-linking
files with the same contents. */
void optimiseStore(OptimiseStats & stats);
void vacuumDB();
void optimiseStore() override;
/* Repair the contents of the given path by redownloading it using
a substituter (if available). */
void repairPath(const Path& path);
/* Optimise a single store path. */
void optimisePath(const Path & path);
void addSignatures(const Path& storePath, const StringSet& sigs) override;
bool verifyStore(bool checkContents, RepairFlag repair) override;
/* If free disk space in /nix/store if below minFree, delete
garbage until it exceeds maxFree. */
void autoGC(bool sync = true);
/* Register the validity of a path, i.e., that `path' exists, that
the paths referenced by it exists, and in the case of an output
path of a derivation, that it has been produced by a successful
execution of the derivation (or something equivalent). Also
register the hash of the file system contents of the path. The
hash must be a SHA-256 hash. */
void registerValidPath(const ValidPathInfo & info);
private:
int getSchema();
void registerValidPaths(const ValidPathInfos & infos);
void openDB(State& state, bool create);
unsigned int getProtocol() override;
void makeStoreWritable();
void vacuumDB();
uint64_t queryValidPathId(State& state, const Path& path);
/* Repair the contents of the given path by redownloading it using
a substituter (if available). */
void repairPath(const Path & path);
uint64_t addValidPath(State& state, const ValidPathInfo& info,
bool checkOutputs = true);
void addSignatures(const Path & storePath, const StringSet & sigs) override;
void invalidatePath(State& state, const Path& path);
/* If free disk space in /nix/store if below minFree, delete
garbage until it exceeds maxFree. */
void autoGC(bool sync = true);
/* Delete a path from the Nix store. */
void invalidatePathChecked(const Path& path);
private:
void verifyPath(const Path& path, const PathSet& store, PathSet& done,
PathSet& validPaths, RepairFlag repair, bool& errors);
int getSchema();
void updatePathInfo(State& state, const ValidPathInfo& info);
void openDB(State & state, bool create);
void upgradeStore6();
void upgradeStore7();
PathSet queryValidPathsOld();
ValidPathInfo queryPathInfoOld(const Path& path);
void makeStoreWritable();
struct GCState;
uint64_t queryValidPathId(State & state, const Path & path);
void deleteGarbage(GCState& state, const Path& path);
uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);
void tryToDelete(GCState& state, const Path& path);
void invalidatePath(State & state, const Path & path);
bool canReachRoot(GCState& state, PathSet& visited, const Path& path);
/* Delete a path from the Nix store. */
void invalidatePathChecked(const Path & path);
void deletePathRecursive(GCState& state, const Path& path);
void verifyPath(const Path & path, const PathSet & store,
PathSet & done, PathSet & validPaths, RepairFlag repair, bool & errors);
bool isActiveTempFile(const GCState& state, const Path& path,
const string& suffix);
void updatePathInfo(State & state, const ValidPathInfo & info);
AutoCloseFD openGCLock(LockType lockType);
void upgradeStore6();
void upgradeStore7();
PathSet queryValidPathsOld();
ValidPathInfo queryPathInfoOld(const Path & path);
void findRoots(const Path& path, unsigned char type, Roots& roots);
struct GCState;
void findRootsNoTemp(Roots& roots, bool censor);
void deleteGarbage(GCState & state, const Path & path);
void findRuntimeRoots(Roots& roots, bool censor);
void tryToDelete(GCState & state, const Path & path);
void removeUnusedLinks(const GCState& state);
bool canReachRoot(GCState & state, PathSet & visited, const Path & path);
Path createTempDirInStore();
void deletePathRecursive(GCState & state, const Path & path);
void checkDerivationOutputs(const Path& drvPath, const Derivation& drv);
bool isActiveTempFile(const GCState & state,
const Path & path, const string & suffix);
typedef std::unordered_set<ino_t> InodeHash;
AutoCloseFD openGCLock(LockType lockType);
InodeHash loadInodeHash();
Strings readDirectoryIgnoringInodes(const Path& path,
const InodeHash& inodeHash);
void optimisePath_(Activity* act, OptimiseStats& stats, const Path& path,
InodeHash& inodeHash);
void findRoots(const Path & path, unsigned char type, Roots & roots);
// Internal versions that are not wrapped in retry_sqlite.
bool isValidPath_(State& state, const Path& path);
void queryReferrers(State& state, const Path& path, PathSet& referrers);
void findRootsNoTemp(Roots & roots, bool censor);
/* Add signatures to a ValidPathInfo using the secret keys
specified by the secret-key-files option. */
void signPathInfo(ValidPathInfo& info);
void findRuntimeRoots(Roots & roots, bool censor);
Path getRealStoreDir() override { return realStoreDir; }
void removeUnusedLinks(const GCState & state);
void createUser(const std::string& userName, uid_t userId) override;
Path createTempDirInStore();
void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
typedef std::unordered_set<ino_t> InodeHash;
InodeHash loadInodeHash();
Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash);
// Internal versions that are not wrapped in retry_sqlite.
bool isValidPath_(State & state, const Path & path);
void queryReferrers(State & state, const Path & path, PathSet & referrers);
/* Add signatures to a ValidPathInfo using the secret keys
specified by the secret-key-files option. */
void signPathInfo(ValidPathInfo & info);
Path getRealStoreDir() override { return realStoreDir; }
void createUser(const std::string & userName, uid_t userId) override;
friend class DerivationGoal;
friend class SubstitutionGoal;
friend class DerivationGoal;
friend class SubstitutionGoal;
};
typedef std::pair<dev_t, ino_t> Inode;
typedef set<Inode> InodesSeen;
/* "Fix", or canonicalise, the meta-data of the files in a store path
after it has been built. In particular:
- the last modification date on each file is set to 1 (i.e.,
@ -312,11 +298,12 @@ typedef set<Inode> InodesSeen;
without execute permission; setuid bits etc. are cleared)
- the owner and group are set to the Nix user and group, if we're
running as root. */
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
void canonicalisePathMetaData(const Path & path, uid_t fromUid);
void canonicalisePathMetaData(const Path& path, uid_t fromUid,
InodesSeen& inodesSeen);
void canonicalisePathMetaData(const Path& path, uid_t fromUid);
void canonicaliseTimestampAndPermissions(const Path & path);
void canonicaliseTimestampAndPermissions(const Path& path);
MakeError(PathInUse, Error);
}
} // namespace nix

View file

@ -1,100 +1,93 @@
#include "machines.hh"
#include "util.hh"
#include "globals.hh"
#include <algorithm>
#include "globals.hh"
#include "util.hh"
namespace nix {
Machine::Machine(decltype(storeUri) storeUri,
decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey,
decltype(maxJobs) maxJobs,
decltype(speedFactor) speedFactor,
decltype(supportedFeatures) supportedFeatures,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey) :
storeUri(
// Backwards compatibility: if the URI is a hostname,
// prepend ssh://.
storeUri.find("://") != std::string::npos
|| hasPrefix(storeUri, "local")
|| hasPrefix(storeUri, "remote")
|| hasPrefix(storeUri, "auto")
|| hasPrefix(storeUri, "/")
? storeUri
: "ssh://" + storeUri),
systemTypes(systemTypes),
sshKey(sshKey),
maxJobs(maxJobs),
speedFactor(std::max(1U, speedFactor)),
supportedFeatures(supportedFeatures),
mandatoryFeatures(mandatoryFeatures),
sshPublicHostKey(sshPublicHostKey)
{}
Machine::Machine(decltype(storeUri) storeUri, decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey, decltype(maxJobs) maxJobs,
decltype(speedFactor) speedFactor,
decltype(supportedFeatures) supportedFeatures,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey)
: storeUri(
// Backwards compatibility: if the URI is a hostname,
// prepend ssh://.
storeUri.find("://") != std::string::npos ||
hasPrefix(storeUri, "local") ||
hasPrefix(storeUri, "remote") ||
hasPrefix(storeUri, "auto") || hasPrefix(storeUri, "/")
? storeUri
: "ssh://" + storeUri),
systemTypes(systemTypes),
sshKey(sshKey),
maxJobs(maxJobs),
speedFactor(std::max(1U, speedFactor)),
supportedFeatures(supportedFeatures),
mandatoryFeatures(mandatoryFeatures),
sshPublicHostKey(sshPublicHostKey) {}
bool Machine::allSupported(const std::set<string> & features) const {
return std::all_of(features.begin(), features.end(),
[&](const string & feature) {
return supportedFeatures.count(feature) ||
mandatoryFeatures.count(feature);
});
bool Machine::allSupported(const std::set<string>& features) const {
return std::all_of(features.begin(), features.end(),
[&](const string& feature) {
return supportedFeatures.count(feature) ||
mandatoryFeatures.count(feature);
});
}
bool Machine::mandatoryMet(const std::set<string> & features) const {
return std::all_of(mandatoryFeatures.begin(), mandatoryFeatures.end(),
[&](const string & feature) {
return features.count(feature);
});
bool Machine::mandatoryMet(const std::set<string>& features) const {
return std::all_of(
mandatoryFeatures.begin(), mandatoryFeatures.end(),
[&](const string& feature) { return features.count(feature); });
}
void parseMachines(const std::string & s, Machines & machines)
{
for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {
trim(line);
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
if (line.empty()) continue;
void parseMachines(const std::string& s, Machines& machines) {
for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {
trim(line);
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
if (line.empty()) continue;
if (line[0] == '@') {
auto file = trim(std::string(line, 1));
try {
parseMachines(readFile(file), machines);
} catch (const SysError & e) {
if (e.errNo != ENOENT)
throw;
debug("cannot find machines file '%s'", file);
}
continue;
}
auto tokens = tokenizeString<std::vector<string>>(line);
auto sz = tokens.size();
if (sz < 1)
throw FormatError("bad machine specification '%s'", line);
auto isSet = [&](size_t n) {
return tokens.size() > n && tokens[n] != "" && tokens[n] != "-";
};
machines.emplace_back(tokens[0],
isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",") : std::vector<string>{settings.thisSystem},
isSet(2) ? tokens[2] : "",
isSet(3) ? std::stoull(tokens[3]) : 1LL,
isSet(4) ? std::stoull(tokens[4]) : 1LL,
isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",") : std::set<string>{},
isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",") : std::set<string>{},
isSet(7) ? tokens[7] : "");
if (line[0] == '@') {
auto file = trim(std::string(line, 1));
try {
parseMachines(readFile(file), machines);
} catch (const SysError& e) {
if (e.errNo != ENOENT) throw;
debug("cannot find machines file '%s'", file);
}
continue;
}
auto tokens = tokenizeString<std::vector<string>>(line);
auto sz = tokens.size();
if (sz < 1) throw FormatError("bad machine specification '%s'", line);
auto isSet = [&](size_t n) {
return tokens.size() > n && tokens[n] != "" && tokens[n] != "-";
};
machines.emplace_back(
tokens[0],
isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",")
: std::vector<string>{settings.thisSystem},
isSet(2) ? tokens[2] : "", isSet(3) ? std::stoull(tokens[3]) : 1LL,
isSet(4) ? std::stoull(tokens[4]) : 1LL,
isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",")
: std::set<string>{},
isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",")
: std::set<string>{},
isSet(7) ? tokens[7] : "");
}
}
Machines getMachines()
{
static auto machines = [&]() {
Machines machines;
parseMachines(settings.builders, machines);
return machines;
}();
Machines getMachines() {
static auto machines = [&]() {
Machines machines;
parseMachines(settings.builders, machines);
return machines;
}();
return machines;
}
}
} // namespace nix

View file

@ -5,35 +5,32 @@
namespace nix {
struct Machine {
const string storeUri;
const std::vector<string> systemTypes;
const string sshKey;
const unsigned int maxJobs;
const unsigned int speedFactor;
const std::set<string> supportedFeatures;
const std::set<string> mandatoryFeatures;
const std::string sshPublicHostKey;
bool enabled = true;
const string storeUri;
const std::vector<string> systemTypes;
const string sshKey;
const unsigned int maxJobs;
const unsigned int speedFactor;
const std::set<string> supportedFeatures;
const std::set<string> mandatoryFeatures;
const std::string sshPublicHostKey;
bool enabled = true;
bool allSupported(const std::set<string>& features) const;
bool allSupported(const std::set<string> & features) const;
bool mandatoryMet(const std::set<string>& features) const;
bool mandatoryMet(const std::set<string> & features) const;
Machine(decltype(storeUri) storeUri,
decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey,
decltype(maxJobs) maxJobs,
decltype(speedFactor) speedFactor,
decltype(supportedFeatures) supportedFeatures,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey);
Machine(decltype(storeUri) storeUri, decltype(systemTypes) systemTypes,
decltype(sshKey) sshKey, decltype(maxJobs) maxJobs,
decltype(speedFactor) speedFactor,
decltype(supportedFeatures) supportedFeatures,
decltype(mandatoryFeatures) mandatoryFeatures,
decltype(sshPublicHostKey) sshPublicHostKey);
};
typedef std::vector<Machine> Machines;
void parseMachines(const std::string & s, Machines & machines);
void parseMachines(const std::string& s, Machines& machines);
Machines getMachines();
}
} // namespace nix

View file

@ -1,282 +1,266 @@
#include "derivations.hh"
#include "parsed-derivations.hh"
#include "globals.hh"
#include "local-store.hh"
#include "parsed-derivations.hh"
#include "store-api.hh"
#include "thread-pool.hh"
namespace nix {
void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,
bool flipDirection, bool includeOutputs,
bool includeDerivers) {
struct State {
size_t pending;
PathSet& paths;
std::exception_ptr exc;
};
void Store::computeFSClosure(const PathSet & startPaths,
PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
struct State
Sync<State> state_(State{0, paths_, 0});
std::function<void(const Path&)> enqueue;
std::condition_variable done;
enqueue = [&](const Path& path) -> void {
{
size_t pending;
PathSet & paths;
std::exception_ptr exc;
};
Sync<State> state_(State{0, paths_, 0});
std::function<void(const Path &)> enqueue;
std::condition_variable done;
enqueue = [&](const Path & path) -> void {
{
auto state(state_.lock());
if (state->exc) return;
if (state->paths.count(path)) return;
state->paths.insert(path);
state->pending++;
}
queryPathInfo(path, {[&, path](std::future<ref<ValidPathInfo>> fut) {
// FIXME: calls to isValidPath() should be async
try {
auto info = fut.get();
if (flipDirection) {
PathSet referrers;
queryReferrers(path, referrers);
for (auto & ref : referrers)
if (ref != path)
enqueue(ref);
if (includeOutputs)
for (auto & i : queryValidDerivers(path))
enqueue(i);
if (includeDerivers && isDerivation(path))
for (auto & i : queryDerivationOutputs(path))
if (isValidPath(i) && queryPathInfo(i)->deriver == path)
enqueue(i);
} else {
for (auto & ref : info->references)
if (ref != path)
enqueue(ref);
if (includeOutputs && isDerivation(path))
for (auto & i : queryDerivationOutputs(path))
if (isValidPath(i)) enqueue(i);
if (includeDerivers && isValidPath(info->deriver))
enqueue(info->deriver);
}
{
auto state(state_.lock());
assert(state->pending);
if (!--state->pending) done.notify_one();
}
} catch (...) {
auto state(state_.lock());
if (!state->exc) state->exc = std::current_exception();
assert(state->pending);
if (!--state->pending) done.notify_one();
};
}});
};
for (auto & startPath : startPaths)
enqueue(startPath);
{
auto state(state_.lock());
while (state->pending) state.wait(done);
if (state->exc) std::rethrow_exception(state->exc);
auto state(state_.lock());
if (state->exc) return;
if (state->paths.count(path)) return;
state->paths.insert(path);
state->pending++;
}
}
queryPathInfo(
path, {[&, path](std::future<ref<ValidPathInfo>> fut) {
// FIXME: calls to isValidPath() should be async
void Store::computeFSClosure(const Path & startPath,
PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
{
computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs, includeDerivers);
}
try {
auto info = fut.get();
if (flipDirection) {
PathSet referrers;
queryReferrers(path, referrers);
for (auto& ref : referrers)
if (ref != path) enqueue(ref);
void Store::queryMissing(const PathSet & targets,
PathSet & willBuild_, PathSet & willSubstitute_, PathSet & unknown_,
unsigned long long & downloadSize_, unsigned long long & narSize_)
{
Activity act(*logger, lvlDebug, actUnknown, "querying info about missing paths");
if (includeOutputs)
for (auto& i : queryValidDerivers(path)) enqueue(i);
downloadSize_ = narSize_ = 0;
if (includeDerivers && isDerivation(path))
for (auto& i : queryDerivationOutputs(path))
if (isValidPath(i) && queryPathInfo(i)->deriver == path)
enqueue(i);
ThreadPool pool;
} else {
for (auto& ref : info->references)
if (ref != path) enqueue(ref);
struct State
{
PathSet done;
PathSet & unknown, & willSubstitute, & willBuild;
unsigned long long & downloadSize;
unsigned long long & narSize;
};
if (includeOutputs && isDerivation(path))
for (auto& i : queryDerivationOutputs(path))
if (isValidPath(i)) enqueue(i);
struct DrvState
{
size_t left;
bool done = false;
PathSet outPaths;
DrvState(size_t left) : left(left) { }
};
Sync<State> state_(State{PathSet(), unknown_, willSubstitute_, willBuild_, downloadSize_, narSize_});
std::function<void(Path)> doPath;
auto mustBuildDrv = [&](const Path & drvPath, const Derivation & drv) {
{
auto state(state_.lock());
state->willBuild.insert(drvPath);
}
for (auto & i : drv.inputDrvs)
pool.enqueue(std::bind(doPath, makeDrvPathWithOutputs(i.first, i.second)));
};
auto checkOutput = [&](
const Path & drvPath, ref<Derivation> drv, const Path & outPath, ref<Sync<DrvState>> drvState_)
{
if (drvState_->lock()->done) return;
SubstitutablePathInfos infos;
querySubstitutablePathInfos({outPath}, infos);
if (infos.empty()) {
drvState_->lock()->done = true;
mustBuildDrv(drvPath, *drv);
} else {
{
auto drvState(drvState_->lock());
if (drvState->done) return;
assert(drvState->left);
drvState->left--;
drvState->outPaths.insert(outPath);
if (!drvState->left) {
for (auto & path : drvState->outPaths)
pool.enqueue(std::bind(doPath, path));
}
if (includeDerivers && isValidPath(info->deriver))
enqueue(info->deriver);
}
}
};
doPath = [&](const Path & path) {
{
auto state(state_.lock());
if (state->done.count(path)) return;
state->done.insert(path);
}
DrvPathWithOutputs i2 = parseDrvPathWithOutputs(path);
if (isDerivation(i2.first)) {
if (!isValidPath(i2.first)) {
// FIXME: we could try to substitute the derivation.
auto state(state_.lock());
state->unknown.insert(path);
return;
}
Derivation drv = derivationFromPath(i2.first);
ParsedDerivation parsedDrv(i2.first, drv);
PathSet invalid;
for (auto & j : drv.outputs)
if (wantOutput(j.first, i2.second)
&& !isValidPath(j.second.path))
invalid.insert(j.second.path);
if (invalid.empty()) return;
if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
for (auto & output : invalid)
pool.enqueue(std::bind(checkOutput, i2.first, make_ref<Derivation>(drv), output, drvState));
} else
mustBuildDrv(i2.first, drv);
} else {
if (isValidPath(path)) return;
SubstitutablePathInfos infos;
querySubstitutablePathInfos({path}, infos);
if (infos.empty()) {
auto state(state_.lock());
state->unknown.insert(path);
return;
}
auto info = infos.find(path);
assert(info != infos.end());
{
auto state(state_.lock());
state->willSubstitute.insert(path);
state->downloadSize += info->second.downloadSize;
state->narSize += info->second.narSize;
auto state(state_.lock());
assert(state->pending);
if (!--state->pending) done.notify_one();
}
for (auto & ref : info->second.references)
pool.enqueue(std::bind(doPath, ref));
} catch (...) {
auto state(state_.lock());
if (!state->exc) state->exc = std::current_exception();
assert(state->pending);
if (!--state->pending) done.notify_one();
};
}});
};
for (auto& startPath : startPaths) enqueue(startPath);
{
auto state(state_.lock());
while (state->pending) state.wait(done);
if (state->exc) std::rethrow_exception(state->exc);
}
}
void Store::computeFSClosure(const Path& startPath, PathSet& paths_,
bool flipDirection, bool includeOutputs,
bool includeDerivers) {
computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs,
includeDerivers);
}
void Store::queryMissing(const PathSet& targets, PathSet& willBuild_,
PathSet& willSubstitute_, PathSet& unknown_,
unsigned long long& downloadSize_,
unsigned long long& narSize_) {
Activity act(*logger, lvlDebug, actUnknown,
"querying info about missing paths");
downloadSize_ = narSize_ = 0;
ThreadPool pool;
struct State {
PathSet done;
PathSet &unknown, &willSubstitute, &willBuild;
unsigned long long& downloadSize;
unsigned long long& narSize;
};
struct DrvState {
size_t left;
bool done = false;
PathSet outPaths;
DrvState(size_t left) : left(left) {}
};
Sync<State> state_(State{PathSet(), unknown_, willSubstitute_, willBuild_,
downloadSize_, narSize_});
std::function<void(Path)> doPath;
auto mustBuildDrv = [&](const Path& drvPath, const Derivation& drv) {
{
auto state(state_.lock());
state->willBuild.insert(drvPath);
}
for (auto& i : drv.inputDrvs)
pool.enqueue(
std::bind(doPath, makeDrvPathWithOutputs(i.first, i.second)));
};
auto checkOutput = [&](const Path& drvPath, ref<Derivation> drv,
const Path& outPath, ref<Sync<DrvState>> drvState_) {
if (drvState_->lock()->done) return;
SubstitutablePathInfos infos;
querySubstitutablePathInfos({outPath}, infos);
if (infos.empty()) {
drvState_->lock()->done = true;
mustBuildDrv(drvPath, *drv);
} else {
{
auto drvState(drvState_->lock());
if (drvState->done) return;
assert(drvState->left);
drvState->left--;
drvState->outPaths.insert(outPath);
if (!drvState->left) {
for (auto& path : drvState->outPaths)
pool.enqueue(std::bind(doPath, path));
}
};
}
}
};
for (auto & path : targets)
pool.enqueue(std::bind(doPath, path));
doPath = [&](const Path& path) {
{
auto state(state_.lock());
if (state->done.count(path)) return;
state->done.insert(path);
}
pool.process();
DrvPathWithOutputs i2 = parseDrvPathWithOutputs(path);
if (isDerivation(i2.first)) {
if (!isValidPath(i2.first)) {
// FIXME: we could try to substitute the derivation.
auto state(state_.lock());
state->unknown.insert(path);
return;
}
Derivation drv = derivationFromPath(i2.first);
ParsedDerivation parsedDrv(i2.first, drv);
PathSet invalid;
for (auto& j : drv.outputs)
if (wantOutput(j.first, i2.second) && !isValidPath(j.second.path))
invalid.insert(j.second.path);
if (invalid.empty()) return;
if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
for (auto& output : invalid)
pool.enqueue(std::bind(checkOutput, i2.first,
make_ref<Derivation>(drv), output, drvState));
} else
mustBuildDrv(i2.first, drv);
} else {
if (isValidPath(path)) return;
SubstitutablePathInfos infos;
querySubstitutablePathInfos({path}, infos);
if (infos.empty()) {
auto state(state_.lock());
state->unknown.insert(path);
return;
}
auto info = infos.find(path);
assert(info != infos.end());
{
auto state(state_.lock());
state->willSubstitute.insert(path);
state->downloadSize += info->second.downloadSize;
state->narSize += info->second.narSize;
}
for (auto& ref : info->second.references)
pool.enqueue(std::bind(doPath, ref));
}
};
for (auto& path : targets) pool.enqueue(std::bind(doPath, path));
pool.process();
}
Paths Store::topoSortPaths(const PathSet& paths) {
Paths sorted;
PathSet visited, parents;
Paths Store::topoSortPaths(const PathSet & paths)
{
Paths sorted;
PathSet visited, parents;
std::function<void(const Path& path, const Path* parent)> dfsVisit;
std::function<void(const Path & path, const Path * parent)> dfsVisit;
dfsVisit = [&](const Path& path, const Path* parent) {
if (parents.find(path) != parents.end())
throw BuildError(
format("cycle detected in the references of '%1%' from '%2%'") %
path % *parent);
dfsVisit = [&](const Path & path, const Path * parent) {
if (parents.find(path) != parents.end())
throw BuildError(format("cycle detected in the references of '%1%' from '%2%'") % path % *parent);
if (visited.find(path) != visited.end()) return;
visited.insert(path);
parents.insert(path);
if (visited.find(path) != visited.end()) return;
visited.insert(path);
parents.insert(path);
PathSet references;
try {
references = queryPathInfo(path)->references;
} catch (InvalidPath&) {
}
PathSet references;
try {
references = queryPathInfo(path)->references;
} catch (InvalidPath &) {
}
for (auto& i : references)
/* Don't traverse into paths that don't exist. That can
happen due to substitutes for non-existent paths. */
if (i != path && paths.find(i) != paths.end()) dfsVisit(i, &path);
for (auto & i : references)
/* Don't traverse into paths that don't exist. That can
happen due to substitutes for non-existent paths. */
if (i != path && paths.find(i) != paths.end())
dfsVisit(i, &path);
sorted.push_front(path);
parents.erase(path);
};
sorted.push_front(path);
parents.erase(path);
};
for (auto& i : paths) dfsVisit(i, nullptr);
for (auto & i : paths)
dfsVisit(i, nullptr);
return sorted;
return sorted;
}
}
} // namespace nix

View file

@ -1,266 +1,242 @@
#include "nar-accessor.hh"
#include <algorithm>
#include <map>
#include <nlohmann/json.hpp>
#include <stack>
#include "archive.hh"
#include "json.hh"
#include <map>
#include <stack>
#include <algorithm>
#include <nlohmann/json.hpp>
namespace nix {
struct NarMember
{
FSAccessor::Type type = FSAccessor::Type::tMissing;
struct NarMember {
FSAccessor::Type type = FSAccessor::Type::tMissing;
bool isExecutable = false;
bool isExecutable = false;
/* If this is a regular file, position of the contents of this
file in the NAR. */
size_t start = 0, size = 0;
/* If this is a regular file, position of the contents of this
file in the NAR. */
size_t start = 0, size = 0;
std::string target;
std::string target;
/* If this is a directory, all the children of the directory. */
std::map<std::string, NarMember> children;
/* If this is a directory, all the children of the directory. */
std::map<std::string, NarMember> children;
};
struct NarAccessor : public FSAccessor
{
std::shared_ptr<const std::string> nar;
struct NarAccessor : public FSAccessor {
std::shared_ptr<const std::string> nar;
GetNarBytes getNarBytes;
GetNarBytes getNarBytes;
NarMember root;
NarMember root;
struct NarIndexer : ParseSink, StringSource
{
NarAccessor & acc;
struct NarIndexer : ParseSink, StringSource {
NarAccessor& acc;
std::stack<NarMember *> parents;
std::stack<NarMember*> parents;
std::string currentStart;
bool isExec = false;
std::string currentStart;
bool isExec = false;
NarIndexer(NarAccessor & acc, const std::string & nar)
: StringSource(nar), acc(acc)
{ }
NarIndexer(NarAccessor& acc, const std::string& nar)
: StringSource(nar), acc(acc) {}
void createMember(const Path & path, NarMember member) {
size_t level = std::count(path.begin(), path.end(), '/');
while (parents.size() > level) parents.pop();
void createMember(const Path& path, NarMember member) {
size_t level = std::count(path.begin(), path.end(), '/');
while (parents.size() > level) parents.pop();
if (parents.empty()) {
acc.root = std::move(member);
parents.push(&acc.root);
} else {
if (parents.top()->type != FSAccessor::Type::tDirectory)
throw Error("NAR file missing parent directory of path '%s'", path);
auto result = parents.top()->children.emplace(baseNameOf(path), std::move(member));
parents.push(&result.first->second);
}
}
void createDirectory(const Path & path) override
{
createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0});
}
void createRegularFile(const Path & path) override
{
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
}
void isExecutable() override
{
parents.top()->isExecutable = true;
}
void preallocateContents(unsigned long long size) override
{
currentStart = string(s, pos, 16);
assert(size <= std::numeric_limits<size_t>::max());
parents.top()->size = (size_t)size;
parents.top()->start = pos;
}
void receiveContents(unsigned char * data, unsigned int len) override
{
// Sanity check
if (!currentStart.empty()) {
assert(len < 16 || currentStart == string((char *) data, 16));
currentStart.clear();
}
}
void createSymlink(const Path & path, const string & target) override
{
createMember(path,
NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
if (parents.empty()) {
acc.root = std::move(member);
parents.push(&acc.root);
} else {
if (parents.top()->type != FSAccessor::Type::tDirectory)
throw Error("NAR file missing parent directory of path '%s'", path);
auto result = parents.top()->children.emplace(baseNameOf(path),
std::move(member));
parents.push(&result.first->second);
}
}
void createDirectory(const Path& path) override {
createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0});
}
void createRegularFile(const Path& path) override {
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
}
void isExecutable() override { parents.top()->isExecutable = true; }
void preallocateContents(unsigned long long size) override {
currentStart = string(s, pos, 16);
assert(size <= std::numeric_limits<size_t>::max());
parents.top()->size = (size_t)size;
parents.top()->start = pos;
}
void receiveContents(unsigned char* data, unsigned int len) override {
// Sanity check
if (!currentStart.empty()) {
assert(len < 16 || currentStart == string((char*)data, 16));
currentStart.clear();
}
}
void createSymlink(const Path& path, const string& target) override {
createMember(path,
NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
}
};
NarAccessor(ref<const std::string> nar) : nar(nar) {
NarIndexer indexer(*this, *nar);
parseDump(indexer, indexer);
}
NarAccessor(const std::string& listing, GetNarBytes getNarBytes)
: getNarBytes(getNarBytes) {
using json = nlohmann::json;
std::function<void(NarMember&, json&)> recurse;
recurse = [&](NarMember& member, json& v) {
std::string type = v["type"];
if (type == "directory") {
member.type = FSAccessor::Type::tDirectory;
for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) {
std::string name = i.key();
recurse(member.children[name], i.value());
}
} else if (type == "regular") {
member.type = FSAccessor::Type::tRegular;
member.size = v["size"];
member.isExecutable = v.value("executable", false);
member.start = v["narOffset"];
} else if (type == "symlink") {
member.type = FSAccessor::Type::tSymlink;
member.target = v.value("target", "");
} else
return;
};
NarAccessor(ref<const std::string> nar) : nar(nar)
{
NarIndexer indexer(*this, *nar);
parseDump(indexer, indexer);
json v = json::parse(listing);
recurse(root, v);
}
NarMember* find(const Path& path) {
Path canon = path == "" ? "" : canonPath(path);
NarMember* current = &root;
auto end = path.end();
for (auto it = path.begin(); it != end;) {
// because it != end, the remaining component is non-empty so we need
// a directory
if (current->type != FSAccessor::Type::tDirectory) return nullptr;
// skip slash (canonPath above ensures that this is always a slash)
assert(*it == '/');
it += 1;
// lookup current component
auto next = std::find(it, end, '/');
auto child = current->children.find(std::string(it, next));
if (child == current->children.end()) return nullptr;
current = &child->second;
it = next;
}
NarAccessor(const std::string & listing, GetNarBytes getNarBytes)
: getNarBytes(getNarBytes)
{
using json = nlohmann::json;
return current;
}
std::function<void(NarMember &, json &)> recurse;
NarMember& get(const Path& path) {
auto result = find(path);
if (result == nullptr)
throw Error("NAR file does not contain path '%1%'", path);
return *result;
}
recurse = [&](NarMember & member, json & v) {
std::string type = v["type"];
Stat stat(const Path& path) override {
auto i = find(path);
if (i == nullptr) return {FSAccessor::Type::tMissing, 0, false};
return {i->type, i->size, i->isExecutable, i->start};
}
if (type == "directory") {
member.type = FSAccessor::Type::tDirectory;
for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) {
std::string name = i.key();
recurse(member.children[name], i.value());
}
} else if (type == "regular") {
member.type = FSAccessor::Type::tRegular;
member.size = v["size"];
member.isExecutable = v.value("executable", false);
member.start = v["narOffset"];
} else if (type == "symlink") {
member.type = FSAccessor::Type::tSymlink;
member.target = v.value("target", "");
} else return;
};
StringSet readDirectory(const Path& path) override {
auto i = get(path);
json v = json::parse(listing);
recurse(root, v);
}
if (i.type != FSAccessor::Type::tDirectory)
throw Error(format("path '%1%' inside NAR file is not a directory") %
path);
NarMember * find(const Path & path)
{
Path canon = path == "" ? "" : canonPath(path);
NarMember * current = &root;
auto end = path.end();
for (auto it = path.begin(); it != end; ) {
// because it != end, the remaining component is non-empty so we need
// a directory
if (current->type != FSAccessor::Type::tDirectory) return nullptr;
StringSet res;
for (auto& child : i.children) res.insert(child.first);
// skip slash (canonPath above ensures that this is always a slash)
assert(*it == '/');
it += 1;
return res;
}
// lookup current component
auto next = std::find(it, end, '/');
auto child = current->children.find(std::string(it, next));
if (child == current->children.end()) return nullptr;
current = &child->second;
std::string readFile(const Path& path) override {
auto i = get(path);
if (i.type != FSAccessor::Type::tRegular)
throw Error(format("path '%1%' inside NAR file is not a regular file") %
path);
it = next;
}
if (getNarBytes) return getNarBytes(i.start, i.size);
return current;
}
assert(nar);
return std::string(*nar, i.start, i.size);
}
NarMember & get(const Path & path) {
auto result = find(path);
if (result == nullptr)
throw Error("NAR file does not contain path '%1%'", path);
return *result;
}
Stat stat(const Path & path) override
{
auto i = find(path);
if (i == nullptr)
return {FSAccessor::Type::tMissing, 0, false};
return {i->type, i->size, i->isExecutable, i->start};
}
StringSet readDirectory(const Path & path) override
{
auto i = get(path);
if (i.type != FSAccessor::Type::tDirectory)
throw Error(format("path '%1%' inside NAR file is not a directory") % path);
StringSet res;
for (auto & child : i.children)
res.insert(child.first);
return res;
}
std::string readFile(const Path & path) override
{
auto i = get(path);
if (i.type != FSAccessor::Type::tRegular)
throw Error(format("path '%1%' inside NAR file is not a regular file") % path);
if (getNarBytes) return getNarBytes(i.start, i.size);
assert(nar);
return std::string(*nar, i.start, i.size);
}
std::string readLink(const Path & path) override
{
auto i = get(path);
if (i.type != FSAccessor::Type::tSymlink)
throw Error(format("path '%1%' inside NAR file is not a symlink") % path);
return i.target;
}
std::string readLink(const Path& path) override {
auto i = get(path);
if (i.type != FSAccessor::Type::tSymlink)
throw Error(format("path '%1%' inside NAR file is not a symlink") % path);
return i.target;
}
};
ref<FSAccessor> makeNarAccessor(ref<const std::string> nar)
{
return make_ref<NarAccessor>(nar);
ref<FSAccessor> makeNarAccessor(ref<const std::string> nar) {
return make_ref<NarAccessor>(nar);
}
ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
GetNarBytes getNarBytes)
{
return make_ref<NarAccessor>(listing, getNarBytes);
ref<FSAccessor> makeLazyNarAccessor(const std::string& listing,
GetNarBytes getNarBytes) {
return make_ref<NarAccessor>(listing, getNarBytes);
}
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse)
{
auto st = accessor->stat(path);
void listNar(JSONPlaceholder& res, ref<FSAccessor> accessor, const Path& path,
bool recurse) {
auto st = accessor->stat(path);
auto obj = res.object();
auto obj = res.object();
switch (st.type) {
switch (st.type) {
case FSAccessor::Type::tRegular:
obj.attr("type", "regular");
obj.attr("size", st.fileSize);
if (st.isExecutable)
obj.attr("executable", true);
if (st.narOffset)
obj.attr("narOffset", st.narOffset);
break;
obj.attr("type", "regular");
obj.attr("size", st.fileSize);
if (st.isExecutable) obj.attr("executable", true);
if (st.narOffset) obj.attr("narOffset", st.narOffset);
break;
case FSAccessor::Type::tDirectory:
obj.attr("type", "directory");
{
auto res2 = obj.object("entries");
for (auto & name : accessor->readDirectory(path)) {
if (recurse) {
auto res3 = res2.placeholder(name);
listNar(res3, accessor, path + "/" + name, true);
} else
res2.object(name);
}
obj.attr("type", "directory");
{
auto res2 = obj.object("entries");
for (auto& name : accessor->readDirectory(path)) {
if (recurse) {
auto res3 = res2.placeholder(name);
listNar(res3, accessor, path + "/" + name, true);
} else
res2.object(name);
}
break;
}
break;
case FSAccessor::Type::tSymlink:
obj.attr("type", "symlink");
obj.attr("target", accessor->readLink(path));
break;
obj.attr("type", "symlink");
obj.attr("target", accessor->readLink(path));
break;
default:
throw Error("path '%s' does not exist in NAR", path);
}
throw Error("path '%s' does not exist in NAR", path);
}
}
}
} // namespace nix

View file

@ -1,7 +1,6 @@
#pragma once
#include <functional>
#include "fs-accessor.hh"
namespace nix {
@ -16,15 +15,14 @@ ref<FSAccessor> makeNarAccessor(ref<const std::string> nar);
inside the NAR. */
typedef std::function<std::string(uint64_t, uint64_t)> GetNarBytes;
ref<FSAccessor> makeLazyNarAccessor(
const std::string & listing,
GetNarBytes getNarBytes);
ref<FSAccessor> makeLazyNarAccessor(const std::string& listing,
GetNarBytes getNarBytes);
class JSONPlaceholder;
/* Write a JSON representation of the contents of a NAR (except file
contents). */
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
const Path & path, bool recurse);
void listNar(JSONPlaceholder& res, ref<FSAccessor> accessor, const Path& path,
bool recurse);
}
} // namespace nix

View file

@ -1,13 +1,12 @@
#include "nar-info-disk-cache.hh"
#include "sync.hh"
#include "sqlite.hh"
#include "globals.hh"
#include <sqlite3.h>
#include "globals.hh"
#include "sqlite.hh"
#include "sync.hh"
namespace nix {
static const char * schema = R"sql(
static const char* schema = R"sql(
create table if not exists BinaryCaches (
id integer primary key autoincrement not null,
@ -45,223 +44,222 @@ create table if not exists LastPurge (
)sql";
class NarInfoDiskCacheImpl : public NarInfoDiskCache
{
public:
class NarInfoDiskCacheImpl : public NarInfoDiskCache {
public:
/* How often to purge expired entries from the cache. */
const int purgeInterval = 24 * 3600;
/* How often to purge expired entries from the cache. */
const int purgeInterval = 24 * 3600;
struct Cache {
int id;
Path storeDir;
bool wantMassQuery;
int priority;
};
struct Cache
{
int id;
Path storeDir;
bool wantMassQuery;
int priority;
};
struct State {
SQLite db;
SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR,
purgeCache;
std::map<std::string, Cache> caches;
};
struct State
{
SQLite db;
SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache;
std::map<std::string, Cache> caches;
};
Sync<State> _state;
Sync<State> _state;
NarInfoDiskCacheImpl() {
auto state(_state.lock());
NarInfoDiskCacheImpl()
{
auto state(_state.lock());
Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
createDirs(dirOf(dbPath));
Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
createDirs(dirOf(dbPath));
state->db = SQLite(dbPath);
state->db = SQLite(dbPath);
if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
throwSQLiteError(state->db, "setting timeout");
if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
throwSQLiteError(state->db, "setting timeout");
// We can always reproduce the cache.
state->db.exec("pragma synchronous = off");
state->db.exec("pragma main.journal_mode = truncate");
// We can always reproduce the cache.
state->db.exec("pragma synchronous = off");
state->db.exec("pragma main.journal_mode = truncate");
state->db.exec(schema);
state->db.exec(schema);
state->insertCache.create(
state->db,
"insert or replace into BinaryCaches(url, timestamp, storeDir, "
"wantMassQuery, priority) values (?, ?, ?, ?, ?)");
state->insertCache.create(state->db,
"insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
state->queryCache.create(state->db,
"select id, storeDir, wantMassQuery, priority "
"from BinaryCaches where url = ?");
state->queryCache.create(state->db,
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");
state->insertNAR.create(
state->db,
"insert or replace into NARs(cache, hashPart, namePart, url, "
"compression, fileHash, fileSize, narHash, "
"narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, "
"?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)");
state->insertNAR.create(state->db,
"insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
"narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)");
state->insertMissingNAR.create(
state->db,
"insert or replace into NARs(cache, hashPart, timestamp, present) "
"values (?, ?, ?, 0)");
state->insertMissingNAR.create(state->db,
"insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)");
state->queryNAR.create(
state->db,
"select present, namePart, url, compression, fileHash, fileSize, "
"narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? "
"and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 "
"and timestamp > ?))");
state->queryNAR.create(state->db,
"select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
/* Periodically purge expired entries from the database. */
retrySQLite<void>([&]() {
auto now = time(0);
/* Periodically purge expired entries from the database. */
retrySQLite<void>([&]() {
auto now = time(0);
SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
auto queryLastPurge_(queryLastPurge.use());
SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
auto queryLastPurge_(queryLastPurge.use());
if (!queryLastPurge_.next() ||
queryLastPurge_.getInt(0) < now - purgeInterval) {
SQLiteStmt(state->db,
"delete from NARs where ((present = 0 and timestamp < ?) or "
"(present = 1 and timestamp < ?))")
.use()(now - settings.ttlNegativeNarInfoCache)(
now - settings.ttlPositiveNarInfoCache)
.exec();
if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
SQLiteStmt(state->db,
"delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
.use()
(now - settings.ttlNegativeNarInfoCache)
(now - settings.ttlPositiveNarInfoCache)
.exec();
debug("deleted %d entries from the NAR info disk cache",
sqlite3_changes(state->db));
debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
SQLiteStmt(
state->db,
"insert or replace into LastPurge(dummy, value) values ('', ?)")
.use()(now)
.exec();
}
});
}
SQLiteStmt(state->db,
"insert or replace into LastPurge(dummy, value) values ('', ?)")
.use()(now).exec();
}
Cache& getCache(State& state, const std::string& uri) {
auto i = state.caches.find(uri);
if (i == state.caches.end()) abort();
return i->second;
}
void createCache(const std::string& uri, const Path& storeDir,
bool wantMassQuery, int priority) override {
retrySQLite<void>([&]() {
auto state(_state.lock());
// FIXME: race
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority)
.exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int)sqlite3_last_insert_rowid(state->db),
storeDir, wantMassQuery, priority};
});
}
bool cacheExists(const std::string& uri, bool& wantMassQuery,
int& priority) override {
return retrySQLite<bool>([&]() {
auto state(_state.lock());
auto i = state->caches.find(uri);
if (i == state->caches.end()) {
auto queryCache(state->queryCache.use()(uri));
if (!queryCache.next()) return false;
state->caches.emplace(
uri, Cache{(int)queryCache.getInt(0), queryCache.getStr(1),
queryCache.getInt(2) != 0, (int)queryCache.getInt(3)});
}
auto& cache(getCache(*state, uri));
wantMassQuery = cache.wantMassQuery;
priority = cache.priority;
return true;
});
}
std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string& uri, const std::string& hashPart) override {
return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
[&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
auto state(_state.lock());
auto& cache(getCache(*state, uri));
auto now = time(0);
auto queryNAR(state->queryNAR.use()(cache.id)(hashPart)(
now - settings.ttlNegativeNarInfoCache)(
now - settings.ttlPositiveNarInfoCache));
if (!queryNAR.next()) return {oUnknown, 0};
if (!queryNAR.getInt(0)) return {oInvalid, 0};
auto narInfo = make_ref<NarInfo>();
auto namePart = queryNAR.getStr(1);
narInfo->path = cache.storeDir + "/" + hashPart +
(namePart.empty() ? "" : "-" + namePart);
narInfo->url = queryNAR.getStr(2);
narInfo->compression = queryNAR.getStr(3);
if (!queryNAR.isNull(4)) narInfo->fileHash = Hash(queryNAR.getStr(4));
narInfo->fileSize = queryNAR.getInt(5);
narInfo->narHash = Hash(queryNAR.getStr(6));
narInfo->narSize = queryNAR.getInt(7);
for (auto& r : tokenizeString<Strings>(queryNAR.getStr(8), " "))
narInfo->references.insert(cache.storeDir + "/" + r);
if (!queryNAR.isNull(9))
narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(9);
for (auto& sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
narInfo->sigs.insert(sig);
narInfo->ca = queryNAR.getStr(11);
return {oValid, narInfo};
});
}
}
Cache & getCache(State & state, const std::string & uri)
{
auto i = state.caches.find(uri);
if (i == state.caches.end()) abort();
return i->second;
}
void upsertNarInfo(const std::string& uri, const std::string& hashPart,
std::shared_ptr<ValidPathInfo> info) override {
retrySQLite<void>([&]() {
auto state(_state.lock());
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
{
retrySQLite<void>([&]() {
auto state(_state.lock());
auto& cache(getCache(*state, uri));
// FIXME: race
if (info) {
auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
});
}
assert(hashPart == storePathToHash(info->path));
bool cacheExists(const std::string & uri,
bool & wantMassQuery, int & priority) override
{
return retrySQLite<bool>([&]() {
auto state(_state.lock());
state->insertNAR
.use()(cache.id)(hashPart)(storePathToName(info->path))(
narInfo ? narInfo->url : "", narInfo != 0)(
narInfo ? narInfo->compression : "", narInfo != 0)(
narInfo && narInfo->fileHash ? narInfo->fileHash.to_string()
: "",
narInfo && narInfo->fileHash)(
narInfo ? narInfo->fileSize : 0,
narInfo != 0 && narInfo->fileSize)(info->narHash.to_string())(
info->narSize)(concatStringsSep(" ", info->shortRefs()))(
info->deriver != "" ? baseNameOf(info->deriver) : "",
info->deriver !=
"")(concatStringsSep(" ", info->sigs))(info->ca)(time(0))
.exec();
auto i = state->caches.find(uri);
if (i == state->caches.end()) {
auto queryCache(state->queryCache.use()(uri));
if (!queryCache.next()) return false;
state->caches.emplace(uri,
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
}
auto & cache(getCache(*state, uri));
wantMassQuery = cache.wantMassQuery;
priority = cache.priority;
return true;
});
}
std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string & uri, const std::string & hashPart) override
{
return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
[&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
auto state(_state.lock());
auto & cache(getCache(*state, uri));
auto now = time(0);
auto queryNAR(state->queryNAR.use()
(cache.id)
(hashPart)
(now - settings.ttlNegativeNarInfoCache)
(now - settings.ttlPositiveNarInfoCache));
if (!queryNAR.next())
return {oUnknown, 0};
if (!queryNAR.getInt(0))
return {oInvalid, 0};
auto narInfo = make_ref<NarInfo>();
auto namePart = queryNAR.getStr(1);
narInfo->path = cache.storeDir + "/" +
hashPart + (namePart.empty() ? "" : "-" + namePart);
narInfo->url = queryNAR.getStr(2);
narInfo->compression = queryNAR.getStr(3);
if (!queryNAR.isNull(4))
narInfo->fileHash = Hash(queryNAR.getStr(4));
narInfo->fileSize = queryNAR.getInt(5);
narInfo->narHash = Hash(queryNAR.getStr(6));
narInfo->narSize = queryNAR.getInt(7);
for (auto & r : tokenizeString<Strings>(queryNAR.getStr(8), " "))
narInfo->references.insert(cache.storeDir + "/" + r);
if (!queryNAR.isNull(9))
narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(9);
for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
narInfo->sigs.insert(sig);
narInfo->ca = queryNAR.getStr(11);
return {oValid, narInfo};
});
}
void upsertNarInfo(
const std::string & uri, const std::string & hashPart,
std::shared_ptr<ValidPathInfo> info) override
{
retrySQLite<void>([&]() {
auto state(_state.lock());
auto & cache(getCache(*state, uri));
if (info) {
auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
assert(hashPart == storePathToHash(info->path));
state->insertNAR.use()
(cache.id)
(hashPart)
(storePathToName(info->path))
(narInfo ? narInfo->url : "", narInfo != 0)
(narInfo ? narInfo->compression : "", narInfo != 0)
(narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
(narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
(info->narHash.to_string())
(info->narSize)
(concatStringsSep(" ", info->shortRefs()))
(info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
(concatStringsSep(" ", info->sigs))
(info->ca)
(time(0)).exec();
} else {
state->insertMissingNAR.use()
(cache.id)
(hashPart)
(time(0)).exec();
}
});
}
} else {
state->insertMissingNAR.use()(cache.id)(hashPart)(time(0)).exec();
}
});
}
};
ref<NarInfoDiskCache> getNarInfoDiskCache()
{
static ref<NarInfoDiskCache> cache = make_ref<NarInfoDiskCacheImpl>();
return cache;
ref<NarInfoDiskCache> getNarInfoDiskCache() {
static ref<NarInfoDiskCache> cache = make_ref<NarInfoDiskCacheImpl>();
return cache;
}
}
} // namespace nix

View file

@ -1,31 +1,30 @@
#pragma once
#include "ref.hh"
#include "nar-info.hh"
#include "ref.hh"
namespace nix {
class NarInfoDiskCache
{
public:
typedef enum { oValid, oInvalid, oUnknown } Outcome;
class NarInfoDiskCache {
public:
typedef enum { oValid, oInvalid, oUnknown } Outcome;
virtual void createCache(const std::string & uri, const Path & storeDir,
bool wantMassQuery, int priority) = 0;
virtual void createCache(const std::string& uri, const Path& storeDir,
bool wantMassQuery, int priority) = 0;
virtual bool cacheExists(const std::string & uri,
bool & wantMassQuery, int & priority) = 0;
virtual bool cacheExists(const std::string& uri, bool& wantMassQuery,
int& priority) = 0;
virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string & uri, const std::string & hashPart) = 0;
virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
const std::string& uri, const std::string& hashPart) = 0;
virtual void upsertNarInfo(
const std::string & uri, const std::string & hashPart,
std::shared_ptr<ValidPathInfo> info) = 0;
virtual void upsertNarInfo(const std::string& uri,
const std::string& hashPart,
std::shared_ptr<ValidPathInfo> info) = 0;
};
/* Return a singleton cache object that can be used concurrently by
multiple threads. */
ref<NarInfoDiskCache> getNarInfoDiskCache();
}
} // namespace nix

View file

@ -1,116 +1,105 @@
#include "globals.hh"
#include "nar-info.hh"
#include "globals.hh"
namespace nix {
NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
{
auto corrupt = [&]() {
throw Error(format("NAR info file '%1%' is corrupt") % whence);
};
NarInfo::NarInfo(const Store& store, const std::string& s,
const std::string& whence) {
auto corrupt = [&]() {
throw Error(format("NAR info file '%1%' is corrupt") % whence);
};
auto parseHashField = [&](const string & s) {
try {
return Hash(s);
} catch (BadHash &) {
corrupt();
return Hash(); // never reached
}
};
auto parseHashField = [&](const string& s) {
try {
return Hash(s);
} catch (BadHash&) {
corrupt();
return Hash(); // never reached
}
};
size_t pos = 0;
while (pos < s.size()) {
size_t pos = 0;
while (pos < s.size()) {
size_t colon = s.find(':', pos);
if (colon == std::string::npos) corrupt();
size_t colon = s.find(':', pos);
if (colon == std::string::npos) corrupt();
std::string name(s, pos, colon - pos);
std::string name(s, pos, colon - pos);
size_t eol = s.find('\n', colon + 2);
if (eol == std::string::npos) corrupt();
size_t eol = s.find('\n', colon + 2);
if (eol == std::string::npos) corrupt();
std::string value(s, colon + 2, eol - colon - 2);
std::string value(s, colon + 2, eol - colon - 2);
if (name == "StorePath") {
if (!store.isStorePath(value)) corrupt();
path = value;
}
else if (name == "URL")
url = value;
else if (name == "Compression")
compression = value;
else if (name == "FileHash")
fileHash = parseHashField(value);
else if (name == "FileSize") {
if (!string2Int(value, fileSize)) corrupt();
}
else if (name == "NarHash")
narHash = parseHashField(value);
else if (name == "NarSize") {
if (!string2Int(value, narSize)) corrupt();
}
else if (name == "References") {
auto refs = tokenizeString<Strings>(value, " ");
if (!references.empty()) corrupt();
for (auto & r : refs) {
auto r2 = store.storeDir + "/" + r;
if (!store.isStorePath(r2)) corrupt();
references.insert(r2);
}
}
else if (name == "Deriver") {
if (value != "unknown-deriver") {
auto p = store.storeDir + "/" + value;
if (!store.isStorePath(p)) corrupt();
deriver = p;
}
}
else if (name == "System")
system = value;
else if (name == "Sig")
sigs.insert(value);
else if (name == "CA") {
if (!ca.empty()) corrupt();
ca = value;
}
pos = eol + 1;
if (name == "StorePath") {
if (!store.isStorePath(value)) corrupt();
path = value;
} else if (name == "URL")
url = value;
else if (name == "Compression")
compression = value;
else if (name == "FileHash")
fileHash = parseHashField(value);
else if (name == "FileSize") {
if (!string2Int(value, fileSize)) corrupt();
} else if (name == "NarHash")
narHash = parseHashField(value);
else if (name == "NarSize") {
if (!string2Int(value, narSize)) corrupt();
} else if (name == "References") {
auto refs = tokenizeString<Strings>(value, " ");
if (!references.empty()) corrupt();
for (auto& r : refs) {
auto r2 = store.storeDir + "/" + r;
if (!store.isStorePath(r2)) corrupt();
references.insert(r2);
}
} else if (name == "Deriver") {
if (value != "unknown-deriver") {
auto p = store.storeDir + "/" + value;
if (!store.isStorePath(p)) corrupt();
deriver = p;
}
} else if (name == "System")
system = value;
else if (name == "Sig")
sigs.insert(value);
else if (name == "CA") {
if (!ca.empty()) corrupt();
ca = value;
}
if (compression == "") compression = "bzip2";
pos = eol + 1;
}
if (path.empty() || url.empty() || narSize == 0 || !narHash) corrupt();
if (compression == "") compression = "bzip2";
if (path.empty() || url.empty() || narSize == 0 || !narHash) corrupt();
}
std::string NarInfo::to_string() const
{
std::string res;
res += "StorePath: " + path + "\n";
res += "URL: " + url + "\n";
assert(compression != "");
res += "Compression: " + compression + "\n";
assert(fileHash.type == htSHA256);
res += "FileHash: " + fileHash.to_string(Base32) + "\n";
res += "FileSize: " + std::to_string(fileSize) + "\n";
assert(narHash.type == htSHA256);
res += "NarHash: " + narHash.to_string(Base32) + "\n";
res += "NarSize: " + std::to_string(narSize) + "\n";
std::string NarInfo::to_string() const {
std::string res;
res += "StorePath: " + path + "\n";
res += "URL: " + url + "\n";
assert(compression != "");
res += "Compression: " + compression + "\n";
assert(fileHash.type == htSHA256);
res += "FileHash: " + fileHash.to_string(Base32) + "\n";
res += "FileSize: " + std::to_string(fileSize) + "\n";
assert(narHash.type == htSHA256);
res += "NarHash: " + narHash.to_string(Base32) + "\n";
res += "NarSize: " + std::to_string(narSize) + "\n";
res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";
res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";
if (!deriver.empty())
res += "Deriver: " + baseNameOf(deriver) + "\n";
if (!deriver.empty()) res += "Deriver: " + baseNameOf(deriver) + "\n";
if (!system.empty())
res += "System: " + system + "\n";
if (!system.empty()) res += "System: " + system + "\n";
for (auto sig : sigs)
res += "Sig: " + sig + "\n";
for (auto sig : sigs) res += "Sig: " + sig + "\n";
if (!ca.empty())
res += "CA: " + ca + "\n";
if (!ca.empty()) res += "CA: " + ca + "\n";
return res;
return res;
}
}
} // namespace nix

View file

@ -1,24 +1,23 @@
#pragma once
#include "types.hh"
#include "hash.hh"
#include "store-api.hh"
#include "types.hh"
namespace nix {
struct NarInfo : ValidPathInfo
{
std::string url;
std::string compression;
Hash fileHash;
uint64_t fileSize = 0;
std::string system;
struct NarInfo : ValidPathInfo {
std::string url;
std::string compression;
Hash fileHash;
uint64_t fileSize = 0;
std::string system;
NarInfo() { }
NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
NarInfo(const Store & store, const std::string & s, const std::string & whence);
NarInfo() {}
NarInfo(const ValidPathInfo& info) : ValidPathInfo(info) {}
NarInfo(const Store& store, const std::string& s, const std::string& whence);
std::string to_string() const;
std::string to_string() const;
};
}
} // namespace nix

View file

@ -1,302 +1,285 @@
#include "util.hh"
#include "local-store.hh"
#include "globals.hh"
#include <cstdlib>
#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstdlib>
#include <cstring>
#include <regex>
#include "globals.hh"
#include "local-store.hh"
#include "util.hh"
namespace nix {
static void makeWritable(const Path & path)
{
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError(format("getting attributes of path '%1%'") % path);
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
throw SysError(format("changing writability of '%1%'") % path);
static void makeWritable(const Path& path) {
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError(format("getting attributes of path '%1%'") % path);
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
throw SysError(format("changing writability of '%1%'") % path);
}
struct MakeReadOnly
{
Path path;
MakeReadOnly(const Path & path) : path(path) { }
~MakeReadOnly()
{
try {
/* This will make the path read-only. */
if (path != "") canonicaliseTimestampAndPermissions(path);
} catch (...) {
ignoreException();
}
struct MakeReadOnly {
Path path;
MakeReadOnly(const Path& path) : path(path) {}
~MakeReadOnly() {
try {
/* This will make the path read-only. */
if (path != "") canonicaliseTimestampAndPermissions(path);
} catch (...) {
ignoreException();
}
}
};
LocalStore::InodeHash LocalStore::loadInodeHash() {
debug("loading hash inodes in memory");
InodeHash inodeHash;
LocalStore::InodeHash LocalStore::loadInodeHash()
{
debug("loading hash inodes in memory");
InodeHash inodeHash;
AutoCloseDir dir(opendir(linksDir.c_str()));
if (!dir) throw SysError(format("opening directory '%1%'") % linksDir);
AutoCloseDir dir(opendir(linksDir.c_str()));
if (!dir) throw SysError(format("opening directory '%1%'") % linksDir);
struct dirent* dirent;
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
checkInterrupt();
// We don't care if we hit non-hash files, anything goes
inodeHash.insert(dirent->d_ino);
}
if (errno) throw SysError(format("reading directory '%1%'") % linksDir);
struct dirent * dirent;
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
checkInterrupt();
// We don't care if we hit non-hash files, anything goes
inodeHash.insert(dirent->d_ino);
}
if (errno) throw SysError(format("reading directory '%1%'") % linksDir);
printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());
return inodeHash;
return inodeHash;
}
Strings LocalStore::readDirectoryIgnoringInodes(const Path& path,
const InodeHash& inodeHash) {
Strings names;
Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
{
Strings names;
AutoCloseDir dir(opendir(path.c_str()));
if (!dir) throw SysError(format("opening directory '%1%'") % path);
AutoCloseDir dir(opendir(path.c_str()));
if (!dir) throw SysError(format("opening directory '%1%'") % path);
struct dirent * dirent;
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
checkInterrupt();
if (inodeHash.count(dirent->d_ino)) {
debug(format("'%1%' is already linked") % dirent->d_name);
continue;
}
string name = dirent->d_name;
if (name == "." || name == "..") continue;
names.push_back(name);
}
if (errno) throw SysError(format("reading directory '%1%'") % path);
return names;
}
void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
const Path & path, InodeHash & inodeHash)
{
struct dirent* dirent;
while (errno = 0, dirent = readdir(dir.get())) { /* sic */
checkInterrupt();
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError(format("getting attributes of path '%1%'") % path);
if (inodeHash.count(dirent->d_ino)) {
debug(format("'%1%' is already linked") % dirent->d_name);
continue;
}
string name = dirent->d_name;
if (name == "." || name == "..") continue;
names.push_back(name);
}
if (errno) throw SysError(format("reading directory '%1%'") % path);
return names;
}
void LocalStore::optimisePath_(Activity* act, OptimiseStats& stats,
const Path& path, InodeHash& inodeHash) {
checkInterrupt();
struct stat st;
if (lstat(path.c_str(), &st))
throw SysError(format("getting attributes of path '%1%'") % path);
#if __APPLE__
/* HFS/macOS has some undocumented security feature disabling hardlinking for
special files within .app dirs. *.app/Contents/PkgInfo and
*.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
https://github.com/NixOS/nix/issues/1443 for more discussion. */
/* HFS/macOS has some undocumented security feature disabling hardlinking for
special files within .app dirs. *.app/Contents/PkgInfo and
*.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
https://github.com/NixOS/nix/issues/1443 for more discussion. */
if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
{
debug(format("'%1%' is not allowed to be linked in macOS") % path);
return;
}
if (std::regex_search(path, std::regex("\\.app/Contents/.+$"))) {
debug(format("'%1%' is not allowed to be linked in macOS") % path);
return;
}
#endif
if (S_ISDIR(st.st_mode)) {
Strings names = readDirectoryIgnoringInodes(path, inodeHash);
for (auto & i : names)
optimisePath_(act, stats, path + "/" + i, inodeHash);
return;
}
if (S_ISDIR(st.st_mode)) {
Strings names = readDirectoryIgnoringInodes(path, inodeHash);
for (auto& i : names) optimisePath_(act, stats, path + "/" + i, inodeHash);
return;
}
/* We can hard link regular files and maybe symlinks. */
if (!S_ISREG(st.st_mode)
/* We can hard link regular files and maybe symlinks. */
if (!S_ISREG(st.st_mode)
#if CAN_LINK_SYMLINK
&& !S_ISLNK(st.st_mode)
&& !S_ISLNK(st.st_mode)
#endif
) return;
)
return;
/* Sometimes SNAFUs can cause files in the Nix store to be
modified, in particular when running programs as root under
NixOS (example: $fontconfig/var/cache being modified). Skip
those files. FIXME: check the modification time. */
if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
printError(format("skipping suspicious writable file '%1%'") % path);
/* Sometimes SNAFUs can cause files in the Nix store to be
modified, in particular when running programs as root under
NixOS (example: $fontconfig/var/cache being modified). Skip
those files. FIXME: check the modification time. */
if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
printError(format("skipping suspicious writable file '%1%'") % path);
return;
}
/* This can still happen on top-level files. */
if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
debug(format("'%1%' is already linked, with %2% other file(s)") % path %
(st.st_nlink - 2));
return;
}
/* Hash the file. Note that hashPath() returns the hash over the
NAR serialisation, which includes the execute bit on the file.
Thus, executable and non-executable files with the same
contents *won't* be linked (which is good because otherwise the
permissions would be screwed up).
Also note that if `path' is a symlink, then we're hashing the
contents of the symlink (i.e. the result of readlink()), not
the contents of the target (which may not even exist). */
Hash hash = hashPath(htSHA256, path).first;
debug(format("'%1%' has hash '%2%'") % path % hash.to_string());
/* Check if this is a known hash. */
Path linkPath = linksDir + "/" + hash.to_string(Base32, false);
retry:
if (!pathExists(linkPath)) {
/* Nope, create a hard link in the links directory. */
if (link(path.c_str(), linkPath.c_str()) == 0) {
inodeHash.insert(st.st_ino);
return;
}
switch (errno) {
case EEXIST:
/* Fall through if another process created linkPath before
we did. */
break;
case ENOSPC:
/* On ext4, that probably means the directory index is
full. When that happens, it's fine to ignore it: we
just effectively disable deduplication of this
file. */
printInfo("cannot link '%s' to '%s': %s", linkPath, path,
strerror(errno));
return;
default:
throw SysError("cannot link '%1%' to '%2%'", linkPath, path);
}
}
/* This can still happen on top-level files. */
if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
debug(format("'%1%' is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
return;
/* Yes! We've seen a file with the same contents. Replace the
current file with a hard link to that file. */
struct stat stLink;
if (lstat(linkPath.c_str(), &stLink))
throw SysError(format("getting attributes of path '%1%'") % linkPath);
if (st.st_ino == stLink.st_ino) {
debug(format("'%1%' is already linked to '%2%'") % path % linkPath);
return;
}
if (st.st_size != stLink.st_size) {
printError(format("removing corrupted link '%1%'") % linkPath);
unlink(linkPath.c_str());
goto retry;
}
printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath);
/* Make the containing directory writable, but only if it's not
the store itself (we don't want or need to mess with its
permissions). */
bool mustToggle = dirOf(path) != realStoreDir;
if (mustToggle) makeWritable(dirOf(path));
/* When we're done, make the directory read-only again and reset
its timestamp back to 0. */
MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
Path tempLink =
(format("%1%/.tmp-link-%2%-%3%") % realStoreDir % getpid() % random())
.str();
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
if (errno == EMLINK) {
/* Too many links to the same file (>= 32000 on most file
systems). This is likely to happen with empty files.
Just shrug and ignore. */
if (st.st_size)
printInfo(format("'%1%' has maximum number of links") % linkPath);
return;
}
throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
}
/* Hash the file. Note that hashPath() returns the hash over the
NAR serialisation, which includes the execute bit on the file.
Thus, executable and non-executable files with the same
contents *won't* be linked (which is good because otherwise the
permissions would be screwed up).
Also note that if `path' is a symlink, then we're hashing the
contents of the symlink (i.e. the result of readlink()), not
the contents of the target (which may not even exist). */
Hash hash = hashPath(htSHA256, path).first;
debug(format("'%1%' has hash '%2%'") % path % hash.to_string());
/* Check if this is a known hash. */
Path linkPath = linksDir + "/" + hash.to_string(Base32, false);
retry:
if (!pathExists(linkPath)) {
/* Nope, create a hard link in the links directory. */
if (link(path.c_str(), linkPath.c_str()) == 0) {
inodeHash.insert(st.st_ino);
return;
}
switch (errno) {
case EEXIST:
/* Fall through if another process created linkPath before
we did. */
break;
case ENOSPC:
/* On ext4, that probably means the directory index is
full. When that happens, it's fine to ignore it: we
just effectively disable deduplication of this
file. */
printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
return;
default:
throw SysError("cannot link '%1%' to '%2%'", linkPath, path);
}
/* Atomically replace the old file with the new hard link. */
if (rename(tempLink.c_str(), path.c_str()) == -1) {
if (unlink(tempLink.c_str()) == -1)
printError(format("unable to unlink '%1%'") % tempLink);
if (errno == EMLINK) {
/* Some filesystems generate too many links on the rename,
rather than on the original link. (Probably it
temporarily increases the st_nlink field before
decreasing it again.) */
debug("'%s' has reached maximum number of links", linkPath);
return;
}
throw SysError(format("cannot rename '%1%' to '%2%'") % tempLink % path);
}
/* Yes! We've seen a file with the same contents. Replace the
current file with a hard link to that file. */
struct stat stLink;
if (lstat(linkPath.c_str(), &stLink))
throw SysError(format("getting attributes of path '%1%'") % linkPath);
stats.filesLinked++;
stats.bytesFreed += st.st_size;
stats.blocksFreed += st.st_blocks;
if (st.st_ino == stLink.st_ino) {
debug(format("'%1%' is already linked to '%2%'") % path % linkPath);
return;
}
if (st.st_size != stLink.st_size) {
printError(format("removing corrupted link '%1%'") % linkPath);
unlink(linkPath.c_str());
goto retry;
}
printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath);
/* Make the containing directory writable, but only if it's not
the store itself (we don't want or need to mess with its
permissions). */
bool mustToggle = dirOf(path) != realStoreDir;
if (mustToggle) makeWritable(dirOf(path));
/* When we're done, make the directory read-only again and reset
its timestamp back to 0. */
MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
% realStoreDir % getpid() % random()).str();
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
if (errno == EMLINK) {
/* Too many links to the same file (>= 32000 on most file
systems). This is likely to happen with empty files.
Just shrug and ignore. */
if (st.st_size)
printInfo(format("'%1%' has maximum number of links") % linkPath);
return;
}
throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
}
/* Atomically replace the old file with the new hard link. */
if (rename(tempLink.c_str(), path.c_str()) == -1) {
if (unlink(tempLink.c_str()) == -1)
printError(format("unable to unlink '%1%'") % tempLink);
if (errno == EMLINK) {
/* Some filesystems generate too many links on the rename,
rather than on the original link. (Probably it
temporarily increases the st_nlink field before
decreasing it again.) */
debug("'%s' has reached maximum number of links", linkPath);
return;
}
throw SysError(format("cannot rename '%1%' to '%2%'") % tempLink % path);
}
stats.filesLinked++;
stats.bytesFreed += st.st_size;
stats.blocksFreed += st.st_blocks;
if (act)
act->result(resFileLinked, st.st_size, st.st_blocks);
if (act) act->result(resFileLinked, st.st_size, st.st_blocks);
}
void LocalStore::optimiseStore(OptimiseStats& stats) {
Activity act(*logger, actOptimiseStore);
void LocalStore::optimiseStore(OptimiseStats & stats)
{
Activity act(*logger, actOptimiseStore);
PathSet paths = queryAllValidPaths();
InodeHash inodeHash = loadInodeHash();
PathSet paths = queryAllValidPaths();
InodeHash inodeHash = loadInodeHash();
act.progress(0, paths.size());
act.progress(0, paths.size());
uint64_t done = 0;
uint64_t done = 0;
for (auto & i : paths) {
addTempRoot(i);
if (!isValidPath(i)) continue; /* path was GC'ed, probably */
{
Activity act(*logger, lvlTalkative, actUnknown, fmt("optimising path '%s'", i));
optimisePath_(&act, stats, realStoreDir + "/" + baseNameOf(i), inodeHash);
}
done++;
act.progress(done, paths.size());
for (auto& i : paths) {
addTempRoot(i);
if (!isValidPath(i)) continue; /* path was GC'ed, probably */
{
Activity act(*logger, lvlTalkative, actUnknown,
fmt("optimising path '%s'", i));
optimisePath_(&act, stats, realStoreDir + "/" + baseNameOf(i), inodeHash);
}
done++;
act.progress(done, paths.size());
}
}
static string showBytes(unsigned long long bytes)
{
return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
/* Render a byte count as a human-readable mebibyte string,
   e.g. "12.34 MiB". */
static string showBytes(unsigned long long bytes) {
  double mebibytes = bytes / (1024.0 * 1024.0);
  return (format("%.2f MiB") % mebibytes).str();
}
void LocalStore::optimiseStore()
{
OptimiseStats stats;
void LocalStore::optimiseStore() {
OptimiseStats stats;
optimiseStore(stats);
optimiseStore(stats);
printInfo(
format("%1% freed by hard-linking %2% files")
% showBytes(stats.bytesFreed)
% stats.filesLinked);
printInfo(format("%1% freed by hard-linking %2% files") %
showBytes(stats.bytesFreed) % stats.filesLinked);
}
void LocalStore::optimisePath(const Path & path)
{
OptimiseStats stats;
InodeHash inodeHash;
void LocalStore::optimisePath(const Path& path) {
OptimiseStats stats;
InodeHash inodeHash;
if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash);
if (settings.autoOptimiseStore)
optimisePath_(nullptr, stats, path, inodeHash);
}
}
} // namespace nix

View file

@ -2,115 +2,115 @@
namespace nix {
ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv)
: drvPath(drvPath), drv(drv)
{
/* Parse the __json attribute, if any. */
auto jsonAttr = drv.env.find("__json");
if (jsonAttr != drv.env.end()) {
try {
structuredAttrs = nlohmann::json::parse(jsonAttr->second);
} catch (std::exception & e) {
throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what());
}
/* Wrap a derivation so its meta-attributes can be queried uniformly.
   If the derivation's environment carries a "__json" attribute
   (structured attrs), it is parsed eagerly here; a malformed value is
   reported as an Error naming the derivation path.  Otherwise the
   accessors below fall back to the plain strings in drv.env. */
ParsedDerivation::ParsedDerivation(const Path& drvPath, BasicDerivation& drv)
    : drvPath(drvPath), drv(drv) {
  /* Parse the __json attribute, if any. */
  auto jsonAttr = drv.env.find("__json");
  if (jsonAttr != drv.env.end()) {
    try {
      structuredAttrs = nlohmann::json::parse(jsonAttr->second);
    } catch (std::exception& e) {
      /* Re-throw with context: which derivation had the bad __json. */
      throw Error("cannot process __json attribute of '%s': %s", drvPath,
                  e.what());
    }
  }
}
std::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const
{
if (structuredAttrs) {
auto i = structuredAttrs->find(name);
if (i == structuredAttrs->end())
return {};
else {
if (!i->is_string())
throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath);
return i->get<std::string>();
}
} else {
auto i = drv.env.find(name);
if (i == drv.env.end())
return {};
else
return i->second;
/* Look up attribute `name' as a string.  Returns std::nullopt when the
   attribute is absent.  With structured attrs, a non-string JSON value
   raises an Error; without them, the raw environment string is used. */
std::optional<std::string> ParsedDerivation::getStringAttr(
    const std::string& name) const {
  if (!structuredAttrs) {
    /* Plain derivation: consult the environment map directly. */
    auto envIt = drv.env.find(name);
    if (envIt == drv.env.end()) return {};
    return envIt->second;
  }
  auto attrIt = structuredAttrs->find(name);
  if (attrIt == structuredAttrs->end()) return {};
  if (!attrIt->is_string())
    throw Error("attribute '%s' of derivation '%s' must be a string", name,
                drvPath);
  return attrIt->get<std::string>();
}
bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const
{
if (structuredAttrs) {
auto i = structuredAttrs->find(name);
if (i == structuredAttrs->end())
return def;
else {
if (!i->is_boolean())
throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath);
return i->get<bool>();
}
} else {
auto i = drv.env.find(name);
if (i == drv.env.end())
return def;
else
return i->second == "1";
/* Look up attribute `name' as a Boolean, returning `def' when absent.
   Plain-env derivations encode true as the string "1"; structured
   attrs must hold a genuine JSON boolean or an Error is thrown. */
bool ParsedDerivation::getBoolAttr(const std::string& name, bool def) const {
  if (!structuredAttrs) {
    auto envIt = drv.env.find(name);
    return envIt == drv.env.end() ? def : envIt->second == "1";
  }
  auto attrIt = structuredAttrs->find(name);
  if (attrIt == structuredAttrs->end()) return def;
  if (!attrIt->is_boolean())
    throw Error("attribute '%s' of derivation '%s' must be a Boolean", name,
                drvPath);
  return attrIt->get<bool>();
}
std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name) const
{
if (structuredAttrs) {
auto i = structuredAttrs->find(name);
if (i == structuredAttrs->end())
return {};
else {
if (!i->is_array())
throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
Strings res;
for (auto j = i->begin(); j != i->end(); ++j) {
if (!j->is_string())
throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
res.push_back(j->get<std::string>());
}
return res;
}
} else {
auto i = drv.env.find(name);
if (i == drv.env.end())
return {};
else
return tokenizeString<Strings>(i->second);
/* Look up attribute `name' as a list of strings.  Returns std::nullopt
   when the attribute is absent.  With structured attrs the value must
   be a JSON array of strings (anything else throws); without them the
   environment string is split on whitespace via tokenizeString. */
std::optional<Strings> ParsedDerivation::getStringsAttr(
    const std::string& name) const {
  if (structuredAttrs) {
    auto i = structuredAttrs->find(name);
    if (i == structuredAttrs->end())
      return {};
    else {
      if (!i->is_array())
        throw Error(
            "attribute '%s' of derivation '%s' must be a list of strings", name,
            drvPath);
      Strings res;
      /* Every element must itself be a string; reject mixed arrays. */
      for (auto j = i->begin(); j != i->end(); ++j) {
        if (!j->is_string())
          throw Error(
              "attribute '%s' of derivation '%s' must be a list of strings",
              name, drvPath);
        res.push_back(j->get<std::string>());
      }
      return res;
    }
  } else {
    auto i = drv.env.find(name);
    if (i == drv.env.end())
      return {};
    else
      return tokenizeString<Strings>(i->second);
  }
}
StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
StringSet res;
for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
res.insert(i);
return res;
/* Return the derivation's "requiredSystemFeatures" attribute as a
   (deduplicated) set; empty when the attribute is absent. */
StringSet ParsedDerivation::getRequiredSystemFeatures() const {
  Strings features =
      getStringsAttr("requiredSystemFeatures").value_or(Strings());
  return StringSet(features.begin(), features.end());
}
bool ParsedDerivation::canBuildLocally() const
{
if (drv.platform != settings.thisSystem.get()
&& !settings.extraPlatforms.get().count(drv.platform)
&& !drv.isBuiltin())
return false;
bool ParsedDerivation::canBuildLocally() const {
if (drv.platform != settings.thisSystem.get() &&
!settings.extraPlatforms.get().count(drv.platform) && !drv.isBuiltin())
return false;
for (auto & feature : getRequiredSystemFeatures())
if (!settings.systemFeatures.get().count(feature)) return false;
for (auto& feature : getRequiredSystemFeatures())
if (!settings.systemFeatures.get().count(feature)) return false;
return true;
return true;
}
bool ParsedDerivation::willBuildLocally() const
{
return getBoolAttr("preferLocalBuild") && canBuildLocally();
/* Whether this build should be performed locally: the derivation opts
   in via `preferLocalBuild' AND the local machine can actually build
   it (see canBuildLocally()). */
bool ParsedDerivation::willBuildLocally() const {
  return getBoolAttr("preferLocalBuild") && canBuildLocally();
}
bool ParsedDerivation::substitutesAllowed() const
{
return getBoolAttr("allowSubstitutes", true);
/* Whether substitutes may be used for this derivation.  Defaults to
   true unless the derivation sets `allowSubstitutes' to false. */
bool ParsedDerivation::substitutesAllowed() const {
  return getBoolAttr("allowSubstitutes", true);
}
}
} // namespace nix

View file

@ -1,37 +1,33 @@
#include "derivations.hh"
#include <nlohmann/json.hpp>
#include "derivations.hh"
namespace nix {
class ParsedDerivation
{
Path drvPath;
BasicDerivation & drv;
std::optional<nlohmann::json> structuredAttrs;
class ParsedDerivation {
Path drvPath;
BasicDerivation& drv;
std::optional<nlohmann::json> structuredAttrs;
public:
public:
ParsedDerivation(const Path& drvPath, BasicDerivation& drv);
ParsedDerivation(const Path & drvPath, BasicDerivation & drv);
const std::optional<nlohmann::json>& getStructuredAttrs() const {
return structuredAttrs;
}
const std::optional<nlohmann::json> & getStructuredAttrs() const
{
return structuredAttrs;
}
std::optional<std::string> getStringAttr(const std::string& name) const;
std::optional<std::string> getStringAttr(const std::string & name) const;
bool getBoolAttr(const std::string& name, bool def = false) const;
bool getBoolAttr(const std::string & name, bool def = false) const;
std::optional<Strings> getStringsAttr(const std::string& name) const;
std::optional<Strings> getStringsAttr(const std::string & name) const;
StringSet getRequiredSystemFeatures() const;
StringSet getRequiredSystemFeatures() const;
bool canBuildLocally() const;
bool canBuildLocally() const;
bool willBuildLocally() const;
bool willBuildLocally() const;
bool substitutesAllowed() const;
bool substitutesAllowed() const;
};
}
} // namespace nix

View file

@ -1,178 +1,156 @@
#include "pathlocks.hh"
#include "util.hh"
#include "sync.hh"
#include <fcntl.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cerrno>
#include <cstdlib>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/file.h>
#include "sync.hh"
#include "util.hh"
namespace nix {
/* Open (and, if `create', create) the lock file at `path'.  Returns an
   invalid descriptor when `create' is false and the file does not
   exist; any other open(2) failure throws SysError.  O_CLOEXEC keeps
   the lock fd from leaking into child processes. */
AutoCloseFD openLockFile(const Path& path, bool create) {
  AutoCloseFD fd;
  fd = open(path.c_str(), O_CLOEXEC | O_RDWR | (create ? O_CREAT : 0), 0600);
  /* errno is inspected immediately after open(); keep these adjacent. */
  if (!fd && (create || errno != ENOENT))
    throw SysError(format("opening lock file '%1%'") % path);
  return fd;
}
/* Delete the lock file at `path' and mark the still-open descriptor
   `fd' as stale for any process currently blocked on it. */
void deleteLockFile(const Path& path, int fd) {
  /* Get rid of the lock file. Have to be careful not to introduce
     races. Write a (meaningless) token to the file to indicate to
     other processes waiting on this lock that the lock is stale
     (deleted). */
  unlink(path.c_str());
  writeFull(fd, "d");
  /* Note that the result of unlink() is ignored; removing the lock
     file is an optimisation, not a necessity. */
}
/* Acquire (ltRead -> LOCK_SH, ltWrite -> LOCK_EX) or release (ltNone
   -> LOCK_UN) a flock(2) lock on `fd'.  With wait == false the call
   returns false immediately if a conflicting lock is held
   (EWOULDBLOCK); otherwise it blocks until the lock is granted. */
bool lockFile(int fd, LockType lockType, bool wait) {
  int type;
  if (lockType == ltRead)
    type = LOCK_SH;
  else if (lockType == ltWrite)
    type = LOCK_EX;
  else if (lockType == ltNone)
    type = LOCK_UN;
  else
    /* Unreachable: all LockType values are handled above. */
    abort();
  if (wait) {
    while (flock(fd, type) != 0) {
      checkInterrupt();
      if (errno != EINTR)
        throw SysError(format("acquiring/releasing lock"));
      else
        /* NOTE(review): on EINTR this returns false rather than
           retrying; checkInterrupt() above throws if the user
           interrupted, so a plain signal yields `false' here —
           confirm callers expect that. */
        return false;
    }
  } else {
    while (flock(fd, type | LOCK_NB) != 0) {
      checkInterrupt();
      if (errno == EWOULDBLOCK) return false;
      if (errno != EINTR) throw SysError(format("acquiring/releasing lock"));
    }
  }
  return true;
}
/* Create an empty lock set; paths can be locked later via lockPaths(). */
PathLocks::PathLocks() : deletePaths(false) {}

/* Convenience constructor: immediately lock `paths' (blocking, since
   lockPaths() defaults wait to true), printing `waitMsg' if somebody
   else currently holds one of the locks. */
PathLocks::PathLocks(const PathSet& paths, const string& waitMsg)
    : deletePaths(false) {
  lockPaths(paths, waitMsg);
}
bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg,
bool wait) {
assert(fds.empty());
/* Note that `fds' is built incrementally so that the destructor
will only release those locks that we have already acquired. */
/* Acquire the lock for each path in sorted order. This ensures
that locks are always acquired in the same order, thus
preventing deadlocks. */
for (auto& path : paths) {
checkInterrupt();
Path lockPath = path + ".lock";
debug(format("locking path '%1%'") % path);
AutoCloseFD openLockFile(const Path & path, bool create)
{
AutoCloseFD fd;
fd = open(path.c_str(), O_CLOEXEC | O_RDWR | (create ? O_CREAT : 0), 0600);
if (!fd && (create || errno != ENOENT))
throw SysError(format("opening lock file '%1%'") % path);
while (1) {
/* Open/create the lock file. */
fd = openLockFile(lockPath, true);
return fd;
}
void deleteLockFile(const Path & path, int fd)
{
/* Get rid of the lock file. Have to be careful not to introduce
races. Write a (meaningless) token to the file to indicate to
other processes waiting on this lock that the lock is stale
(deleted). */
unlink(path.c_str());
writeFull(fd, "d");
/* Note that the result of unlink() is ignored; removing the lock
file is an optimisation, not a necessity. */
}
bool lockFile(int fd, LockType lockType, bool wait)
{
int type;
if (lockType == ltRead) type = LOCK_SH;
else if (lockType == ltWrite) type = LOCK_EX;
else if (lockType == ltNone) type = LOCK_UN;
else abort();
if (wait) {
while (flock(fd, type) != 0) {
checkInterrupt();
if (errno != EINTR)
throw SysError(format("acquiring/releasing lock"));
else
return false;
}
} else {
while (flock(fd, type | LOCK_NB) != 0) {
checkInterrupt();
if (errno == EWOULDBLOCK) return false;
if (errno != EINTR)
throw SysError(format("acquiring/releasing lock"));
/* Acquire an exclusive lock. */
if (!lockFile(fd.get(), ltWrite, false)) {
if (wait) {
if (waitMsg != "") printError(waitMsg);
lockFile(fd.get(), ltWrite, true);
} else {
/* Failed to lock this path; release all other
locks. */
unlock();
return false;
}
}
debug(format("lock acquired on '%1%'") % lockPath);
/* Check that the lock file hasn't become stale (i.e.,
hasn't been unlinked). */
struct stat st;
if (fstat(fd.get(), &st) == -1)
throw SysError(format("statting lock file '%1%'") % lockPath);
if (st.st_size != 0)
/* This lock file has been unlinked, so we're holding
a lock on a deleted file. This means that other
processes may create and acquire a lock on
`lockPath', and proceed. So we must retry. */
debug(format("open lock file '%1%' has become stale") % lockPath);
else
break;
}
return true;
/* Use borrow so that the descriptor isn't closed. */
fds.push_back(FDPair(fd.release(), lockPath));
}
return true;
}
PathLocks::PathLocks()
: deletePaths(false)
{
/* Release all held locks.  Any exception from unlock() is swallowed:
   destructors must not throw (e.g. during stack unwinding). */
PathLocks::~PathLocks() {
  try {
    unlock();
  } catch (...) {
    ignoreException();
  }
}
void PathLocks::unlock() {
for (auto& i : fds) {
if (deletePaths) deleteLockFile(i.second, i.first);
PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
: deletePaths(false)
{
lockPaths(paths, waitMsg);
if (close(i.first) == -1)
printError(format("error (ignored): cannot close lock file on '%1%'") %
i.second);
debug(format("lock released on '%1%'") % i.second);
}
fds.clear();
}
bool PathLocks::lockPaths(const PathSet & paths,
const string & waitMsg, bool wait)
{
assert(fds.empty());
/* Note that `fds' is built incrementally so that the destructor
will only release those locks that we have already acquired. */
/* Acquire the lock for each path in sorted order. This ensures
that locks are always acquired in the same order, thus
preventing deadlocks. */
for (auto & path : paths) {
checkInterrupt();
Path lockPath = path + ".lock";
debug(format("locking path '%1%'") % path);
AutoCloseFD fd;
while (1) {
/* Open/create the lock file. */
fd = openLockFile(lockPath, true);
/* Acquire an exclusive lock. */
if (!lockFile(fd.get(), ltWrite, false)) {
if (wait) {
if (waitMsg != "") printError(waitMsg);
lockFile(fd.get(), ltWrite, true);
} else {
/* Failed to lock this path; release all other
locks. */
unlock();
return false;
}
}
debug(format("lock acquired on '%1%'") % lockPath);
/* Check that the lock file hasn't become stale (i.e.,
hasn't been unlinked). */
struct stat st;
if (fstat(fd.get(), &st) == -1)
throw SysError(format("statting lock file '%1%'") % lockPath);
if (st.st_size != 0)
/* This lock file has been unlinked, so we're holding
a lock on a deleted file. This means that other
processes may create and acquire a lock on
`lockPath', and proceed. So we must retry. */
debug(format("open lock file '%1%' has become stale") % lockPath);
else
break;
}
/* Use borrow so that the descriptor isn't closed. */
fds.push_back(FDPair(fd.release(), lockPath));
}
return true;
/* Control whether the lock files themselves are deleted (and marked
   stale for other waiters via deleteLockFile) when unlock() runs. */
void PathLocks::setDeletion(bool deletePaths) {
  this->deletePaths = deletePaths;
}
PathLocks::~PathLocks()
{
try {
unlock();
} catch (...) {
ignoreException();
}
}
void PathLocks::unlock()
{
for (auto & i : fds) {
if (deletePaths) deleteLockFile(i.second, i.first);
if (close(i.first) == -1)
printError(
format("error (ignored): cannot close lock file on '%1%'") % i.second);
debug(format("lock released on '%1%'") % i.second);
}
fds.clear();
}
void PathLocks::setDeletion(bool deletePaths)
{
this->deletePaths = deletePaths;
}
}
} // namespace nix

View file

@ -7,32 +7,29 @@ namespace nix {
/* Open (possibly create) a lock file and return the file descriptor.
-1 is returned if create is false and the lock could not be opened
because it doesn't exist. Any other error throws an exception. */
AutoCloseFD openLockFile(const Path & path, bool create);
AutoCloseFD openLockFile(const Path& path, bool create);
/* Delete an open lock file. */
void deleteLockFile(const Path & path, int fd);
void deleteLockFile(const Path& path, int fd);
enum LockType { ltRead, ltWrite, ltNone };
bool lockFile(int fd, LockType lockType, bool wait);
class PathLocks
{
private:
typedef std::pair<int, Path> FDPair;
list<FDPair> fds;
bool deletePaths;
class PathLocks {
private:
typedef std::pair<int, Path> FDPair;
list<FDPair> fds;
bool deletePaths;
public:
PathLocks();
PathLocks(const PathSet & paths,
const string & waitMsg = "");
bool lockPaths(const PathSet & _paths,
const string & waitMsg = "",
bool wait = true);
~PathLocks();
void unlock();
void setDeletion(bool deletePaths);
public:
PathLocks();
PathLocks(const PathSet& paths, const string& waitMsg = "");
bool lockPaths(const PathSet& _paths, const string& waitMsg = "",
bool wait = true);
~PathLocks();
void unlock();
void setDeletion(bool deletePaths);
};
}
} // namespace nix

View file

@ -1,259 +1,226 @@
#include "profiles.hh"
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "store-api.hh"
#include "util.hh"
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
namespace nix {
static bool cmpGensByNumber(const Generation & a, const Generation & b)
{
return a.number < b.number;
/* Ordering predicate: sort generations by ascending generation number. */
static bool cmpGensByNumber(const Generation& a, const Generation& b) {
  return a.number < b.number;
}
/* Parse a generation name of the format
`<profilename>-<number>-link'. */
static int parseName(const string & profileName, const string & name)
{
if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
string s = string(name, profileName.size() + 1);
string::size_type p = s.find("-link");
if (p == string::npos) return -1;
static int parseName(const string& profileName, const string& name) {
if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
string s = string(name, profileName.size() + 1);
string::size_type p = s.find("-link");
if (p == string::npos) return -1;
int n;
if (string2Int(string(s, 0, p), n) && n >= 0)
return n;
else
return -1;
}
Generations findGenerations(Path profile, int& curGen) {
Generations gens;
Path profileDir = dirOf(profile);
string profileName = baseNameOf(profile);
for (auto& i : readDirectory(profileDir)) {
int n;
if (string2Int(string(s, 0, p), n) && n >= 0)
return n;
else
return -1;
if ((n = parseName(profileName, i.name)) != -1) {
Generation gen;
gen.path = profileDir + "/" + i.name;
gen.number = n;
struct stat st;
if (lstat(gen.path.c_str(), &st) != 0)
throw SysError(format("statting '%1%'") % gen.path);
gen.creationTime = st.st_mtime;
gens.push_back(gen);
}
}
gens.sort(cmpGensByNumber);
curGen = pathExists(profile) ? parseName(profileName, readLink(profile)) : -1;
return gens;
}
/* Compute the symlink name for generation `num' of `profile':
   "<profile>-<num>-link" (the inverse of parseName()). */
static void makeName(const Path& profile, unsigned int num, Path& outLink) {
  Path prefix = (format("%1%-%2%") % profile % num).str();
  outLink = prefix + "-link";
}
Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath) {
/* The new generation number should be higher than old the
previous ones. */
int dummy;
Generations gens = findGenerations(profile, dummy);
Generations findGenerations(Path profile, int & curGen)
{
Generations gens;
unsigned int num;
if (gens.size() > 0) {
Generation last = gens.back();
Path profileDir = dirOf(profile);
string profileName = baseNameOf(profile);
if (readLink(last.path) == outPath) {
/* We only create a new generation symlink if it differs
from the last one.
for (auto & i : readDirectory(profileDir)) {
int n;
if ((n = parseName(profileName, i.name)) != -1) {
Generation gen;
gen.path = profileDir + "/" + i.name;
gen.number = n;
struct stat st;
if (lstat(gen.path.c_str(), &st) != 0)
throw SysError(format("statting '%1%'") % gen.path);
gen.creationTime = st.st_mtime;
gens.push_back(gen);
}
This helps keeping gratuitous installs/rebuilds from piling
up uncontrolled numbers of generations, cluttering up the
UI like grub. */
return last.path;
}
gens.sort(cmpGensByNumber);
num = gens.back().number;
} else {
num = 0;
}
curGen = pathExists(profile)
? parseName(profileName, readLink(profile))
: -1;
/* Create the new generation. Note that addPermRoot() blocks if
the garbage collector is running to prevent the stuff we've
built from moving from the temporary roots (which the GC knows)
to the permanent roots (of which the GC would have a stale
view). If we didn't do it this way, the GC might remove the
user environment etc. we've just built. */
Path generation;
makeName(profile, num + 1, generation);
store->addPermRoot(outPath, generation, false, true);
return gens;
return generation;
}
static void makeName(const Path & profile, unsigned int num,
Path & outLink)
{
Path prefix = (format("%1%-%2%") % profile % num).str();
outLink = prefix + "-link";
/* Delete `path', throwing SysError on failure. */
static void removeFile(const Path& path) {
  if (remove(path.c_str()) == -1)
    throw SysError(format("cannot unlink '%1%'") % path);
}
/* Remove the symlink for generation `gen' of `profile' (its name is
   derived via makeName()); throws if the unlink fails. */
void deleteGeneration(const Path& profile, unsigned int gen) {
  Path generation;
  makeName(profile, gen, generation);
  removeFile(generation);
}
Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
{
/* The new generation number should be higher than old the
previous ones. */
int dummy;
Generations gens = findGenerations(profile, dummy);
/* Delete generation `gen' of `profile', logging what happens.  In
   dry-run mode only announce what would be removed and do nothing. */
static void deleteGeneration2(const Path& profile, unsigned int gen,
                              bool dryRun) {
  if (dryRun) {
    printInfo(format("would remove generation %1%") % gen);
    return;
  }
  printInfo(format("removing generation %1%") % gen);
  deleteGeneration(profile, gen);
}
unsigned int num;
if (gens.size() > 0) {
Generation last = gens.back();
void deleteGenerations(const Path& profile,
const std::set<unsigned int>& gensToDelete,
bool dryRun) {
PathLocks lock;
lockProfile(lock, profile);
if (readLink(last.path) == outPath) {
/* We only create a new generation symlink if it differs
from the last one.
int curGen;
Generations gens = findGenerations(profile, curGen);
This helps keeping gratuitous installs/rebuilds from piling
up uncontrolled numbers of generations, cluttering up the
UI like grub. */
return last.path;
}
if (gensToDelete.find(curGen) != gensToDelete.end())
throw Error(format("cannot delete current generation of profile %1%'") %
profile);
num = gens.back().number;
} else {
num = 0;
for (auto& i : gens) {
if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
deleteGeneration2(profile, i.number, dryRun);
}
}
void deleteGenerationsGreaterThan(const Path& profile, int max, bool dryRun) {
PathLocks lock;
lockProfile(lock, profile);
int curGen;
bool fromCurGen = false;
Generations gens = findGenerations(profile, curGen);
for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
if (i->number == curGen) {
fromCurGen = true;
max--;
continue;
}
/* Create the new generation. Note that addPermRoot() blocks if
the garbage collector is running to prevent the stuff we've
built from moving from the temporary roots (which the GC knows)
to the permanent roots (of which the GC would have a stale
view). If we didn't do it this way, the GC might remove the
user environment etc. we've just built. */
Path generation;
makeName(profile, num + 1, generation);
store->addPermRoot(outPath, generation, false, true);
return generation;
if (fromCurGen) {
if (max) {
max--;
continue;
}
deleteGeneration2(profile, i->number, dryRun);
}
}
}
void deleteOldGenerations(const Path& profile, bool dryRun) {
PathLocks lock;
lockProfile(lock, profile);
static void removeFile(const Path & path)
{
if (remove(path.c_str()) == -1)
throw SysError(format("cannot unlink '%1%'") % path);
int curGen;
Generations gens = findGenerations(profile, curGen);
for (auto& i : gens)
if (i.number != curGen) deleteGeneration2(profile, i.number, dryRun);
}
void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun) {
PathLocks lock;
lockProfile(lock, profile);
void deleteGeneration(const Path & profile, unsigned int gen)
{
Path generation;
makeName(profile, gen, generation);
removeFile(generation);
}
int curGen;
Generations gens = findGenerations(profile, curGen);
static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRun)
{
if (dryRun)
printInfo(format("would remove generation %1%") % gen);
else {
printInfo(format("removing generation %1%") % gen);
deleteGeneration(profile, gen);
bool canDelete = false;
for (auto i = gens.rbegin(); i != gens.rend(); ++i)
if (canDelete) {
assert(i->creationTime < t);
if (i->number != curGen) deleteGeneration2(profile, i->number, dryRun);
} else if (i->creationTime < t) {
/* We may now start deleting generations, but we don't
delete this generation yet, because this generation was
still the one that was active at the requested point in
time. */
canDelete = true;
}
}
void deleteGenerationsOlderThan(const Path& profile, const string& timeSpec,
bool dryRun) {
time_t curTime = time(0);
string strDays = string(timeSpec, 0, timeSpec.size() - 1);
int days;
void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun)
{
PathLocks lock;
lockProfile(lock, profile);
if (!string2Int(strDays, days) || days < 1)
throw Error(format("invalid number of days specifier '%1%'") % timeSpec);
int curGen;
Generations gens = findGenerations(profile, curGen);
time_t oldTime = curTime - days * 24 * 3600;
if (gensToDelete.find(curGen) != gensToDelete.end())
throw Error(format("cannot delete current generation of profile %1%'") % profile);
for (auto & i : gens) {
if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
deleteGeneration2(profile, i.number, dryRun);
}
deleteGenerationsOlderThan(profile, oldTime, dryRun);
}
void deleteGenerationsGreaterThan(const Path & profile, int max, bool dryRun)
{
PathLocks lock;
lockProfile(lock, profile);
void switchLink(Path link, Path target) {
/* Hacky. */
if (dirOf(target) == dirOf(link)) target = baseNameOf(target);
int curGen;
bool fromCurGen = false;
Generations gens = findGenerations(profile, curGen);
for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
if (i->number == curGen) {
fromCurGen = true;
max--;
continue;
}
if (fromCurGen) {
if (max) {
max--;
continue;
}
deleteGeneration2(profile, i->number, dryRun);
}
}
replaceSymlink(target, link);
}
void deleteOldGenerations(const Path & profile, bool dryRun)
{
PathLocks lock;
lockProfile(lock, profile);
int curGen;
Generations gens = findGenerations(profile, curGen);
for (auto & i : gens)
if (i.number != curGen)
deleteGeneration2(profile, i.number, dryRun);
void lockProfile(PathLocks& lock, const Path& profile) {
lock.lockPaths({profile},
(format("waiting for lock on profile '%1%'") % profile).str());
lock.setDeletion(true);
}
void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
{
PathLocks lock;
lockProfile(lock, profile);
int curGen;
Generations gens = findGenerations(profile, curGen);
bool canDelete = false;
for (auto i = gens.rbegin(); i != gens.rend(); ++i)
if (canDelete) {
assert(i->creationTime < t);
if (i->number != curGen)
deleteGeneration2(profile, i->number, dryRun);
} else if (i->creationTime < t) {
/* We may now start deleting generations, but we don't
delete this generation yet, because this generation was
still the one that was active at the requested point in
time. */
canDelete = true;
}
string optimisticLockProfile(const Path& profile) {
return pathExists(profile) ? readLink(profile) : "";
}
void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun)
{
time_t curTime = time(0);
string strDays = string(timeSpec, 0, timeSpec.size() - 1);
int days;
if (!string2Int(strDays, days) || days < 1)
throw Error(format("invalid number of days specifier '%1%'") % timeSpec);
time_t oldTime = curTime - days * 24 * 3600;
deleteGenerationsOlderThan(profile, oldTime, dryRun);
}
void switchLink(Path link, Path target)
{
/* Hacky. */
if (dirOf(target) == dirOf(link)) target = baseNameOf(target);
replaceSymlink(target, link);
}
void lockProfile(PathLocks & lock, const Path & profile)
{
lock.lockPaths({profile}, (format("waiting for lock on profile '%1%'") % profile).str());
lock.setDeletion(true);
}
string optimisticLockProfile(const Path & profile)
{
return pathExists(profile) ? readLink(profile) : "";
}
}
} // namespace nix

View file

@ -1,57 +1,49 @@
#pragma once
#include "types.hh"
#include "pathlocks.hh"
#include <time.h>
#include "pathlocks.hh"
#include "types.hh"
namespace nix {
struct Generation
{
int number;
Path path;
time_t creationTime;
Generation()
{
number = -1;
}
operator bool() const
{
return number != -1;
}
struct Generation {
int number;
Path path;
time_t creationTime;
Generation() { number = -1; }
operator bool() const { return number != -1; }
};
typedef list<Generation> Generations;
/* Returns the list of currently present generations for the specified
profile, sorted by generation number. */
Generations findGenerations(Path profile, int & curGen);
Generations findGenerations(Path profile, int& curGen);
class LocalFSStore;
Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);
void deleteGeneration(const Path & profile, unsigned int gen);
void deleteGeneration(const Path& profile, unsigned int gen);
void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);
void deleteGenerations(const Path& profile,
const std::set<unsigned int>& gensToDelete, bool dryRun);
void deleteGenerationsGreaterThan(const Path & profile, const int max, bool dryRun);
void deleteGenerationsGreaterThan(const Path& profile, const int max,
bool dryRun);
void deleteOldGenerations(const Path & profile, bool dryRun);
void deleteOldGenerations(const Path& profile, bool dryRun);
void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);
void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun);
void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun);
void deleteGenerationsOlderThan(const Path& profile, const string& timeSpec,
bool dryRun);
void switchLink(Path link, Path target);
/* Ensure exclusive access to a profile. Any command that modifies
the profile first acquires this lock. */
void lockProfile(PathLocks & lock, const Path & profile);
void lockProfile(PathLocks& lock, const Path& profile);
/* Optimistic locking is used by long-running operations like `nix-env
-i'. Instead of acquiring the exclusive lock for the entire
@ -62,6 +54,6 @@ void lockProfile(PathLocks & lock, const Path & profile);
generally cheap, since the build results are still in the Nix
store. Most of the time, only the user environment has to be
rebuilt. */
string optimisticLockProfile(const Path & profile);
string optimisticLockProfile(const Path& profile);
}
} // namespace nix

View file

@ -1,122 +1,110 @@
#include "references.hh"
#include <cstdlib>
#include <map>
#include "archive.hh"
#include "hash.hh"
#include "util.hh"
#include "archive.hh"
#include <map>
#include <cstdlib>
namespace nix {
static unsigned int refLength = 32; /* characters */
static void search(const unsigned char* s, size_t len, StringSet& hashes,
StringSet& seen) {
static bool initialised = false;
static bool isBase32[256];
if (!initialised) {
for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
for (unsigned int i = 0; i < base32Chars.size(); ++i)
isBase32[(unsigned char)base32Chars[i]] = true;
initialised = true;
}
static void search(const unsigned char * s, size_t len,
StringSet & hashes, StringSet & seen)
{
static bool initialised = false;
static bool isBase32[256];
if (!initialised) {
for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
for (unsigned int i = 0; i < base32Chars.size(); ++i)
isBase32[(unsigned char) base32Chars[i]] = true;
initialised = true;
}
for (size_t i = 0; i + refLength <= len; ) {
int j;
bool match = true;
for (j = refLength - 1; j >= 0; --j)
if (!isBase32[(unsigned char) s[i + j]]) {
i += j + 1;
match = false;
break;
}
if (!match) continue;
string ref((const char *) s + i, refLength);
if (hashes.find(ref) != hashes.end()) {
debug(format("found reference to '%1%' at offset '%2%'")
% ref % i);
seen.insert(ref);
hashes.erase(ref);
}
++i;
for (size_t i = 0; i + refLength <= len;) {
int j;
bool match = true;
for (j = refLength - 1; j >= 0; --j)
if (!isBase32[(unsigned char)s[i + j]]) {
i += j + 1;
match = false;
break;
}
if (!match) continue;
string ref((const char*)s + i, refLength);
if (hashes.find(ref) != hashes.end()) {
debug(format("found reference to '%1%' at offset '%2%'") % ref % i);
seen.insert(ref);
hashes.erase(ref);
}
++i;
}
}
struct RefScanSink : Sink {
HashSink hashSink;
StringSet hashes;
StringSet seen;
struct RefScanSink : Sink
{
HashSink hashSink;
StringSet hashes;
StringSet seen;
string tail;
string tail;
RefScanSink() : hashSink(htSHA256) {}
RefScanSink() : hashSink(htSHA256) { }
void operator () (const unsigned char * data, size_t len);
void operator()(const unsigned char* data, size_t len);
};
void RefScanSink::operator()(const unsigned char* data, size_t len) {
hashSink(data, len);
void RefScanSink::operator () (const unsigned char * data, size_t len)
{
hashSink(data, len);
/* It's possible that a reference spans the previous and current
fragment, so search in the concatenation of the tail of the
previous fragment and the start of the current fragment. */
string s =
tail + string((const char*)data, len > refLength ? refLength : len);
search((const unsigned char*)s.data(), s.size(), hashes, seen);
/* It's possible that a reference spans the previous and current
fragment, so search in the concatenation of the tail of the
previous fragment and the start of the current fragment. */
string s = tail + string((const char *) data, len > refLength ? refLength : len);
search((const unsigned char *) s.data(), s.size(), hashes, seen);
search(data, len, hashes, seen);
search(data, len, hashes, seen);
size_t tailLen = len <= refLength ? len : refLength;
tail =
string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
string((const char *) data + len - tailLen, tailLen);
size_t tailLen = len <= refLength ? len : refLength;
tail = string(tail, tail.size() < refLength - tailLen
? 0
: tail.size() - (refLength - tailLen)) +
string((const char*)data + len - tailLen, tailLen);
}
PathSet scanForReferences(const string& path, const PathSet& refs,
HashResult& hash) {
RefScanSink sink;
std::map<string, Path> backMap;
PathSet scanForReferences(const string & path,
const PathSet & refs, HashResult & hash)
{
RefScanSink sink;
std::map<string, Path> backMap;
/* For efficiency (and a higher hit rate), just search for the
hash part of the file name. (This assumes that all references
have the form `HASH-bla'). */
for (auto& i : refs) {
string baseName = baseNameOf(i);
string::size_type pos = baseName.find('-');
if (pos == string::npos) throw Error(format("bad reference '%1%'") % i);
string s = string(baseName, 0, pos);
assert(s.size() == refLength);
assert(backMap.find(s) == backMap.end());
// parseHash(htSHA256, s);
sink.hashes.insert(s);
backMap[s] = i;
}
/* For efficiency (and a higher hit rate), just search for the
hash part of the file name. (This assumes that all references
have the form `HASH-bla'). */
for (auto & i : refs) {
string baseName = baseNameOf(i);
string::size_type pos = baseName.find('-');
if (pos == string::npos)
throw Error(format("bad reference '%1%'") % i);
string s = string(baseName, 0, pos);
assert(s.size() == refLength);
assert(backMap.find(s) == backMap.end());
// parseHash(htSHA256, s);
sink.hashes.insert(s);
backMap[s] = i;
}
/* Look for the hashes in the NAR dump of the path. */
dumpPath(path, sink);
/* Look for the hashes in the NAR dump of the path. */
dumpPath(path, sink);
/* Map the hashes found back to their store paths. */
PathSet found;
for (auto& i : sink.seen) {
std::map<string, Path>::iterator j;
if ((j = backMap.find(i)) == backMap.end()) abort();
found.insert(j->second);
}
/* Map the hashes found back to their store paths. */
PathSet found;
for (auto & i : sink.seen) {
std::map<string, Path>::iterator j;
if ((j = backMap.find(i)) == backMap.end()) abort();
found.insert(j->second);
}
hash = sink.hashSink.finish();
hash = sink.hashSink.finish();
return found;
return found;
}
}
} // namespace nix

View file

@ -1,11 +1,11 @@
#pragma once
#include "types.hh"
#include "hash.hh"
#include "types.hh"
namespace nix {
PathSet scanForReferences(const Path & path, const PathSet & refs,
HashResult & hash);
PathSet scanForReferences(const Path& path, const PathSet& refs,
HashResult& hash);
}

View file

@ -1,129 +1,120 @@
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
#include "json.hh"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "json.hh"
#include "nar-accessor.hh"
namespace nix {
RemoteFSAccessor::RemoteFSAccessor(ref<Store> store, const Path & cacheDir)
: store(store)
, cacheDir(cacheDir)
{
if (cacheDir != "")
createDirs(cacheDir);
RemoteFSAccessor::RemoteFSAccessor(ref<Store> store, const Path& cacheDir)
: store(store), cacheDir(cacheDir) {
if (cacheDir != "") createDirs(cacheDir);
}
Path RemoteFSAccessor::makeCacheFile(const Path & storePath, const std::string & ext)
{
assert(cacheDir != "");
return fmt("%s/%s.%s", cacheDir, storePathToHash(storePath), ext);
Path RemoteFSAccessor::makeCacheFile(const Path& storePath,
const std::string& ext) {
assert(cacheDir != "");
return fmt("%s/%s.%s", cacheDir, storePathToHash(storePath), ext);
}
void RemoteFSAccessor::addToCache(const Path & storePath, const std::string & nar,
ref<FSAccessor> narAccessor)
{
nars.emplace(storePath, narAccessor);
void RemoteFSAccessor::addToCache(const Path& storePath, const std::string& nar,
ref<FSAccessor> narAccessor) {
nars.emplace(storePath, narAccessor);
if (cacheDir != "") {
try {
std::ostringstream str;
JSONPlaceholder jsonRoot(str);
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(storePath, "ls"), str.str());
if (cacheDir != "") {
try {
std::ostringstream str;
JSONPlaceholder jsonRoot(str);
listNar(jsonRoot, narAccessor, "", true);
writeFile(makeCacheFile(storePath, "ls"), str.str());
/* FIXME: do this asynchronously. */
writeFile(makeCacheFile(storePath, "nar"), nar);
/* FIXME: do this asynchronously. */
writeFile(makeCacheFile(storePath, "nar"), nar);
} catch (...) {
ignoreException();
}
} catch (...) {
ignoreException();
}
}
}
std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
{
auto path = canonPath(path_);
std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path& path_) {
auto path = canonPath(path_);
auto storePath = store->toStorePath(path);
std::string restPath = std::string(path, storePath.size());
auto storePath = store->toStorePath(path);
std::string restPath = std::string(path, storePath.size());
if (!store->isValidPath(storePath))
throw InvalidPath(format("path '%1%' is not a valid store path") % storePath);
if (!store->isValidPath(storePath))
throw InvalidPath(format("path '%1%' is not a valid store path") %
storePath);
auto i = nars.find(storePath);
if (i != nars.end()) return {i->second, restPath};
auto i = nars.find(storePath);
if (i != nars.end()) return {i->second, restPath};
StringSink sink;
std::string listing;
Path cacheFile;
StringSink sink;
std::string listing;
Path cacheFile;
if (cacheDir != "" && pathExists(cacheFile = makeCacheFile(storePath, "nar"))) {
if (cacheDir != "" &&
pathExists(cacheFile = makeCacheFile(storePath, "nar"))) {
try {
listing = nix::readFile(makeCacheFile(storePath, "ls"));
try {
listing = nix::readFile(makeCacheFile(storePath, "ls"));
auto narAccessor = makeLazyNarAccessor(
listing, [cacheFile](uint64_t offset, uint64_t length) {
AutoCloseFD fd = open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC);
if (!fd) throw SysError("opening NAR cache file '%s'", cacheFile);
auto narAccessor = makeLazyNarAccessor(listing,
[cacheFile](uint64_t offset, uint64_t length) {
if (lseek(fd.get(), offset, SEEK_SET) != (off_t)offset)
throw SysError("seeking in '%s'", cacheFile);
AutoCloseFD fd = open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC);
if (!fd)
throw SysError("opening NAR cache file '%s'", cacheFile);
std::string buf(length, 0);
readFull(fd.get(), (unsigned char*)buf.data(), length);
if (lseek(fd.get(), offset, SEEK_SET) != (off_t) offset)
throw SysError("seeking in '%s'", cacheFile);
return buf;
});
std::string buf(length, 0);
readFull(fd.get(), (unsigned char *) buf.data(), length);
nars.emplace(storePath, narAccessor);
return {narAccessor, restPath};
return buf;
});
nars.emplace(storePath, narAccessor);
return {narAccessor, restPath};
} catch (SysError &) { }
try {
*sink.s = nix::readFile(cacheFile);
auto narAccessor = makeNarAccessor(sink.s);
nars.emplace(storePath, narAccessor);
return {narAccessor, restPath};
} catch (SysError &) { }
} catch (SysError&) {
}
store->narFromPath(storePath, sink);
auto narAccessor = makeNarAccessor(sink.s);
addToCache(storePath, *sink.s, narAccessor);
return {narAccessor, restPath};
try {
*sink.s = nix::readFile(cacheFile);
auto narAccessor = makeNarAccessor(sink.s);
nars.emplace(storePath, narAccessor);
return {narAccessor, restPath};
} catch (SysError&) {
}
}
store->narFromPath(storePath, sink);
auto narAccessor = makeNarAccessor(sink.s);
addToCache(storePath, *sink.s, narAccessor);
return {narAccessor, restPath};
}
FSAccessor::Stat RemoteFSAccessor::stat(const Path & path)
{
auto res = fetch(path);
return res.first->stat(res.second);
FSAccessor::Stat RemoteFSAccessor::stat(const Path& path) {
auto res = fetch(path);
return res.first->stat(res.second);
}
StringSet RemoteFSAccessor::readDirectory(const Path & path)
{
auto res = fetch(path);
return res.first->readDirectory(res.second);
StringSet RemoteFSAccessor::readDirectory(const Path& path) {
auto res = fetch(path);
return res.first->readDirectory(res.second);
}
std::string RemoteFSAccessor::readFile(const Path & path)
{
auto res = fetch(path);
return res.first->readFile(res.second);
std::string RemoteFSAccessor::readFile(const Path& path) {
auto res = fetch(path);
return res.first->readFile(res.second);
}
std::string RemoteFSAccessor::readLink(const Path & path)
{
auto res = fetch(path);
return res.first->readLink(res.second);
std::string RemoteFSAccessor::readLink(const Path& path) {
auto res = fetch(path);
return res.first->readLink(res.second);
}
}
} // namespace nix

View file

@ -6,35 +6,33 @@
namespace nix {
class RemoteFSAccessor : public FSAccessor
{
ref<Store> store;
class RemoteFSAccessor : public FSAccessor {
ref<Store> store;
std::map<Path, ref<FSAccessor>> nars;
std::map<Path, ref<FSAccessor>> nars;
Path cacheDir;
Path cacheDir;
std::pair<ref<FSAccessor>, Path> fetch(const Path & path_);
std::pair<ref<FSAccessor>, Path> fetch(const Path& path_);
friend class BinaryCacheStore;
friend class BinaryCacheStore;
Path makeCacheFile(const Path & storePath, const std::string & ext);
Path makeCacheFile(const Path& storePath, const std::string& ext);
void addToCache(const Path & storePath, const std::string & nar,
ref<FSAccessor> narAccessor);
void addToCache(const Path& storePath, const std::string& nar,
ref<FSAccessor> narAccessor);
public:
public:
RemoteFSAccessor(ref<Store> store,
const /* FIXME: use std::optional */ Path& cacheDir = "");
RemoteFSAccessor(ref<Store> store,
const /* FIXME: use std::optional */ Path & cacheDir = "");
Stat stat(const Path& path) override;
Stat stat(const Path & path) override;
StringSet readDirectory(const Path& path) override;
StringSet readDirectory(const Path & path) override;
std::string readFile(const Path& path) override;
std::string readFile(const Path & path) override;
std::string readLink(const Path & path) override;
std::string readLink(const Path& path) override;
};
}
} // namespace nix

File diff suppressed because it is too large Load diff

View file

@ -2,160 +2,151 @@
#include <limits>
#include <string>
#include "store-api.hh"
namespace nix {
class Pipe;
class Pid;
struct FdSink;
struct FdSource;
template<typename T> class Pool;
template <typename T>
class Pool;
struct ConnectionHandle;
/* FIXME: RemoteStore is a misnomer - should be something like
DaemonStore. */
class RemoteStore : public virtual Store
{
public:
class RemoteStore : public virtual Store {
public:
const Setting<int> maxConnections{
(Store*)this, 1, "max-connections",
"maximum number of concurrent connections to the Nix daemon"};
const Setting<int> maxConnections{(Store*) this, 1,
"max-connections", "maximum number of concurrent connections to the Nix daemon"};
const Setting<unsigned int> maxConnectionAge{
(Store*)this, std::numeric_limits<unsigned int>::max(),
"max-connection-age", "number of seconds to reuse a connection"};
const Setting<unsigned int> maxConnectionAge{(Store*) this, std::numeric_limits<unsigned int>::max(),
"max-connection-age", "number of seconds to reuse a connection"};
virtual bool sameMachine() = 0;
virtual bool sameMachine() = 0;
RemoteStore(const Params& params);
RemoteStore(const Params & params);
/* Implementations of abstract store API methods. */
/* Implementations of abstract store API methods. */
bool isValidPathUncached(const Path& path) override;
bool isValidPathUncached(const Path & path) override;
PathSet queryValidPaths(const PathSet& paths, SubstituteFlag maybeSubstitute =
NoSubstitute) override;
PathSet queryValidPaths(const PathSet & paths,
SubstituteFlag maybeSubstitute = NoSubstitute) override;
PathSet queryAllValidPaths() override;
PathSet queryAllValidPaths() override;
void queryPathInfoUncached(
const Path& path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
void queryPathInfoUncached(const Path & path,
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;
void queryReferrers(const Path& path, PathSet& referrers) override;
void queryReferrers(const Path & path, PathSet & referrers) override;
PathSet queryValidDerivers(const Path& path) override;
PathSet queryValidDerivers(const Path & path) override;
PathSet queryDerivationOutputs(const Path& path) override;
PathSet queryDerivationOutputs(const Path & path) override;
StringSet queryDerivationOutputNames(const Path& path) override;
StringSet queryDerivationOutputNames(const Path & path) override;
Path queryPathFromHashPart(const string& hashPart) override;
Path queryPathFromHashPart(const string & hashPart) override;
PathSet querySubstitutablePaths(const PathSet& paths) override;
PathSet querySubstitutablePaths(const PathSet & paths) override;
void querySubstitutablePathInfos(const PathSet& paths,
SubstitutablePathInfos& infos) override;
void querySubstitutablePathInfos(const PathSet & paths,
SubstitutablePathInfos & infos) override;
void addToStore(const ValidPathInfo& info, Source& nar, RepairFlag repair,
CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override;
void addToStore(const ValidPathInfo & info, Source & nar,
RepairFlag repair, CheckSigsFlag checkSigs,
std::shared_ptr<FSAccessor> accessor) override;
Path addToStore(const string& name, const Path& srcPath,
bool recursive = true, HashType hashAlgo = htSHA256,
PathFilter& filter = defaultPathFilter,
RepairFlag repair = NoRepair) override;
Path addToStore(const string & name, const Path & srcPath,
bool recursive = true, HashType hashAlgo = htSHA256,
PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override;
Path addTextToStore(const string& name, const string& s,
const PathSet& references, RepairFlag repair) override;
Path addTextToStore(const string & name, const string & s,
const PathSet & references, RepairFlag repair) override;
void buildPaths(const PathSet& paths, BuildMode buildMode) override;
void buildPaths(const PathSet & paths, BuildMode buildMode) override;
BuildResult buildDerivation(const Path& drvPath, const BasicDerivation& drv,
BuildMode buildMode) override;
BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
BuildMode buildMode) override;
void ensurePath(const Path& path) override;
void ensurePath(const Path & path) override;
void addTempRoot(const Path& path) override;
void addTempRoot(const Path & path) override;
void addIndirectRoot(const Path& path) override;
void addIndirectRoot(const Path & path) override;
void syncWithGC() override;
void syncWithGC() override;
Roots findRoots(bool censor) override;
Roots findRoots(bool censor) override;
void collectGarbage(const GCOptions& options, GCResults& results) override;
void collectGarbage(const GCOptions & options, GCResults & results) override;
void optimiseStore() override;
void optimiseStore() override;
bool verifyStore(bool checkContents, RepairFlag repair) override;
bool verifyStore(bool checkContents, RepairFlag repair) override;
void addSignatures(const Path& storePath, const StringSet& sigs) override;
void addSignatures(const Path & storePath, const StringSet & sigs) override;
void queryMissing(const PathSet& targets, PathSet& willBuild,
PathSet& willSubstitute, PathSet& unknown,
unsigned long long& downloadSize,
unsigned long long& narSize) override;
void queryMissing(const PathSet & targets,
PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
unsigned long long & downloadSize, unsigned long long & narSize) override;
void connect() override;
void connect() override;
unsigned int getProtocol() override;
unsigned int getProtocol() override;
void flushBadConnections();
void flushBadConnections();
protected:
struct Connection {
AutoCloseFD fd;
FdSink to;
FdSource from;
unsigned int daemonVersion;
std::chrono::time_point<std::chrono::steady_clock> startTime;
protected:
virtual ~Connection();
struct Connection
{
AutoCloseFD fd;
FdSink to;
FdSource from;
unsigned int daemonVersion;
std::chrono::time_point<std::chrono::steady_clock> startTime;
std::exception_ptr processStderr(Sink* sink = 0, Source* source = 0);
};
virtual ~Connection();
ref<Connection> openConnectionWrapper();
std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0);
};
virtual ref<Connection> openConnection() = 0;
ref<Connection> openConnectionWrapper();
void initConnection(Connection& conn);
virtual ref<Connection> openConnection() = 0;
ref<Pool<Connection>> connections;
void initConnection(Connection & conn);
virtual void setOptions(Connection& conn);
ref<Pool<Connection>> connections;
ConnectionHandle getConnection();
virtual void setOptions(Connection & conn);
ConnectionHandle getConnection();
friend struct ConnectionHandle;
private:
std::atomic_bool failed{false};
friend struct ConnectionHandle;
private:
std::atomic_bool failed{false};
};
class UDSRemoteStore : public LocalFSStore, public RemoteStore
{
public:
class UDSRemoteStore : public LocalFSStore, public RemoteStore {
public:
UDSRemoteStore(const Params& params);
UDSRemoteStore(std::string path, const Params& params);
UDSRemoteStore(const Params & params);
UDSRemoteStore(std::string path, const Params & params);
std::string getUri() override;
std::string getUri() override;
bool sameMachine() { return true; }
bool sameMachine()
{ return true; }
private:
ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
private:
ref<RemoteStore::Connection> openConnection() override;
std::optional<std::string> path;
};
}
} // namespace nix

View file

@ -1,14 +1,6 @@
#if ENABLE_S3
#include "s3.hh"
#include "s3-binary-cache-store.hh"
#include "nar-info.hh"
#include "nar-info-disk-cache.hh"
#include "globals.hh"
#include "compression.hh"
#include "download.hh"
#include "istringstream_nocopy.hh"
#include <aws/core/Aws.h>
#include <aws/core/VersionConfig.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
@ -24,408 +16,403 @@
#include <aws/s3/model/ListObjectsRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/transfer/TransferManager.h>
#include "compression.hh"
#include "download.hh"
#include "globals.hh"
#include "istringstream_nocopy.hh"
#include "nar-info-disk-cache.hh"
#include "nar-info.hh"
#include "s3.hh"
using namespace Aws::Transfer;
namespace nix {
struct S3Error : public Error
{
Aws::S3::S3Errors err;
S3Error(Aws::S3::S3Errors err, const FormatOrString & fs)
: Error(fs), err(err) { };
struct S3Error : public Error {
Aws::S3::S3Errors err;
S3Error(Aws::S3::S3Errors err, const FormatOrString& fs)
: Error(fs), err(err){};
};
/* Helper: given an Outcome<R, E>, return R in case of success, or
throw an exception in case of an error. */
template<typename R, typename E>
R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome)
{
if (!outcome.IsSuccess())
throw S3Error(
outcome.GetError().GetErrorType(),
fs.s + ": " + outcome.GetError().GetMessage());
return outcome.GetResultWithOwnership();
template <typename R, typename E>
R&& checkAws(const FormatOrString& fs, Aws::Utils::Outcome<R, E>&& outcome) {
if (!outcome.IsSuccess())
throw S3Error(outcome.GetError().GetErrorType(),
fs.s + ": " + outcome.GetError().GetMessage());
return outcome.GetResultWithOwnership();
}
class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem
{
using Aws::Utils::Logging::FormattedLogSystem::FormattedLogSystem;
class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem {
using Aws::Utils::Logging::FormattedLogSystem::FormattedLogSystem;
void ProcessFormattedStatement(Aws::String && statement) override
{
debug("AWS: %s", chomp(statement));
}
void ProcessFormattedStatement(Aws::String&& statement) override {
debug("AWS: %s", chomp(statement));
}
};
static void initAWS()
{
static std::once_flag flag;
std::call_once(flag, []() {
Aws::SDKOptions options;
static void initAWS() {
static std::once_flag flag;
std::call_once(flag, []() {
Aws::SDKOptions options;
/* We install our own OpenSSL locking function (see
shared.cc), so don't let aws-sdk-cpp override it. */
options.cryptoOptions.initAndCleanupOpenSSL = false;
/* We install our own OpenSSL locking function (see
shared.cc), so don't let aws-sdk-cpp override it. */
options.cryptoOptions.initAndCleanupOpenSSL = false;
if (verbosity >= lvlDebug) {
options.loggingOptions.logLevel =
verbosity == lvlDebug
? Aws::Utils::Logging::LogLevel::Debug
: Aws::Utils::Logging::LogLevel::Trace;
options.loggingOptions.logger_create_fn = [options]() {
return std::make_shared<AwsLogger>(options.loggingOptions.logLevel);
};
}
if (verbosity >= lvlDebug) {
options.loggingOptions.logLevel =
verbosity == lvlDebug ? Aws::Utils::Logging::LogLevel::Debug
: Aws::Utils::Logging::LogLevel::Trace;
options.loggingOptions.logger_create_fn = [options]() {
return std::make_shared<AwsLogger>(options.loggingOptions.logLevel);
};
}
Aws::InitAPI(options);
});
Aws::InitAPI(options);
});
}
S3Helper::S3Helper(const string & profile, const string & region, const string & scheme, const string & endpoint)
: config(makeConfig(region, scheme, endpoint))
, client(make_ref<Aws::S3::S3Client>(
profile == ""
? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
std::make_shared<Aws::Auth::DefaultAWSCredentialsProviderChain>())
: std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
std::make_shared<Aws::Auth::ProfileConfigFileAWSCredentialsProvider>(profile.c_str())),
*config,
// FIXME: https://github.com/aws/aws-sdk-cpp/issues/759
S3Helper::S3Helper(const string& profile, const string& region,
const string& scheme, const string& endpoint)
: config(makeConfig(region, scheme, endpoint)),
client(make_ref<Aws::S3::S3Client>(
profile == ""
? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
std::make_shared<
Aws::Auth::DefaultAWSCredentialsProviderChain>())
: std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
std::make_shared<
Aws::Auth::ProfileConfigFileAWSCredentialsProvider>(
profile.c_str())),
*config,
// FIXME: https://github.com/aws/aws-sdk-cpp/issues/759
#if AWS_VERSION_MAJOR == 1 && AWS_VERSION_MINOR < 3
false,
false,
#else
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
#endif
endpoint.empty()))
{
endpoint.empty())) {
}
/* Log AWS retries. */
class RetryStrategy : public Aws::Client::DefaultRetryStrategy
{
bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
{
auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries);
if (retry)
printError("AWS error '%s' (%s), will retry in %d ms",
error.GetExceptionName(), error.GetMessage(), CalculateDelayBeforeNextRetry(error, attemptedRetries));
return retry;
}
/* Retry strategy that logs every AWS retry decision before delegating
   the actual backoff computation to the SDK's default strategy. */
class RetryStrategy : public Aws::Client::DefaultRetryStrategy {
  bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error,
                   long attemptedRetries) const override {
    bool willRetry = Aws::Client::DefaultRetryStrategy::ShouldRetry(
        error, attemptedRetries);
    if (willRetry) {
      printError("AWS error '%s' (%s), will retry in %d ms",
                 error.GetExceptionName(), error.GetMessage(),
                 CalculateDelayBeforeNextRetry(error, attemptedRetries));
    }
    return willRetry;
  }
};
ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region, const string & scheme, const string & endpoint)
{
initAWS();
auto res = make_ref<Aws::Client::ClientConfiguration>();
res->region = region;
if (!scheme.empty()) {
res->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str());
}
if (!endpoint.empty()) {
res->endpointOverride = endpoint;
}
res->requestTimeoutMs = 600 * 1000;
res->connectTimeoutMs = 5 * 1000;
res->retryStrategy = std::make_shared<RetryStrategy>();
res->caFile = settings.caFile;
return res;
/* Build an AWS client configuration for 'region', optionally
   overriding the URL scheme and the endpoint.  Request timeout is 600s,
   connect timeout 5s; retries are logged via RetryStrategy. */
ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(
    const string& region, const string& scheme, const string& endpoint) {
  initAWS();
  auto cfg = make_ref<Aws::Client::ClientConfiguration>();
  cfg->region = region;
  if (!scheme.empty()) {
    cfg->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str());
  }
  if (!endpoint.empty()) {
    cfg->endpointOverride = endpoint;
  }
  cfg->requestTimeoutMs = 600 * 1000;
  cfg->connectTimeoutMs = 5 * 1000;
  cfg->retryStrategy = std::make_shared<RetryStrategy>();
  cfg->caFile = settings.caFile;
  return cfg;
}
S3Helper::DownloadResult S3Helper::getObject(
const std::string & bucketName, const std::string & key)
{
debug("fetching 's3://%s/%s'...", bucketName, key);
S3Helper::DownloadResult S3Helper::getObject(const std::string& bucketName,
const std::string& key) {
debug("fetching 's3://%s/%s'...", bucketName, key);
auto request =
Aws::S3::Model::GetObjectRequest()
.WithBucket(bucketName)
.WithKey(key);
auto request =
Aws::S3::Model::GetObjectRequest().WithBucket(bucketName).WithKey(key);
request.SetResponseStreamFactory([&]() {
return Aws::New<std::stringstream>("STRINGSTREAM");
request.SetResponseStreamFactory(
[&]() { return Aws::New<std::stringstream>("STRINGSTREAM"); });
DownloadResult res;
auto now1 = std::chrono::steady_clock::now();
try {
auto result = checkAws(fmt("AWS error fetching '%s'", key),
client->GetObject(request));
res.data =
decompress(result.GetContentEncoding(),
dynamic_cast<std::stringstream&>(result.GetBody()).str());
} catch (S3Error& e) {
if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
}
auto now2 = std::chrono::steady_clock::now();
res.durationMs =
std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
.count();
return res;
}
struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore {
const Setting<std::string> profile{
this, "", "profile", "The name of the AWS configuration profile to use."};
const Setting<std::string> region{
this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
const Setting<std::string> scheme{
this, "", "scheme",
"The scheme to use for S3 requests, https by default."};
const Setting<std::string> endpoint{
this, "", "endpoint",
"An optional override of the endpoint to use when talking to S3."};
const Setting<std::string> narinfoCompression{
this, "", "narinfo-compression", "compression method for .narinfo files"};
const Setting<std::string> lsCompression{this, "", "ls-compression",
"compression method for .ls files"};
const Setting<std::string> logCompression{
this, "", "log-compression", "compression method for log/* files"};
const Setting<bool> multipartUpload{this, false, "multipart-upload",
"whether to use multi-part uploads"};
const Setting<uint64_t> bufferSize{
this, 5 * 1024 * 1024, "buffer-size",
"size (in bytes) of each part in multi-part uploads"};
std::string bucketName;
Stats stats;
S3Helper s3Helper;
S3BinaryCacheStoreImpl(const Params& params, const std::string& bucketName)
: S3BinaryCacheStore(params),
bucketName(bucketName),
s3Helper(profile, region, scheme, endpoint) {
diskCache = getNarInfoDiskCache();
}
std::string getUri() override { return "s3://" + bucketName; }
void init() override {
if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
BinaryCacheStore::init();
diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
}
}
const Stats& getS3Stats() override { return stats; }
/* This is a specialisation of isValidPath() that optimistically
fetches the .narinfo file, rather than first checking for its
existence via a HEAD request. Since .narinfos are small, doing
a GET is unlikely to be slower than HEAD. */
bool isValidPathUncached(const Path& storePath) override {
try {
queryPathInfo(storePath);
return true;
} catch (InvalidPath& e) {
return false;
}
}
bool fileExists(const std::string& path) override {
stats.head++;
auto res = s3Helper.client->HeadObject(Aws::S3::Model::HeadObjectRequest()
.WithBucket(bucketName)
.WithKey(path));
if (!res.IsSuccess()) {
auto& error = res.GetError();
if (error.GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND ||
error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY
// If bucket listing is disabled, 404s turn into 403s
|| error.GetErrorType() == Aws::S3::S3Errors::ACCESS_DENIED)
return false;
throw Error(format("AWS error fetching '%s': %s") % path %
error.GetMessage());
}
return true;
}
std::shared_ptr<TransferManager> transferManager;
std::once_flag transferManagerCreated;
void uploadFile(const std::string& path, const std::string& data,
const std::string& mimeType,
const std::string& contentEncoding) {
auto stream = std::make_shared<istringstream_nocopy>(data);
auto maxThreads = std::thread::hardware_concurrency();
static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
executor =
std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(
maxThreads);
std::call_once(transferManagerCreated, [&]() {
if (multipartUpload) {
TransferManagerConfiguration transferConfig(executor.get());
transferConfig.s3Client = s3Helper.client;
transferConfig.bufferSize = bufferSize;
transferConfig.uploadProgressCallback =
[](const TransferManager* transferManager,
const std::shared_ptr<const TransferHandle>& transferHandle) {
// FIXME: find a way to properly abort the multipart upload.
// checkInterrupt();
debug("upload progress ('%s'): '%d' of '%d' bytes",
transferHandle->GetKey(),
transferHandle->GetBytesTransferred(),
transferHandle->GetBytesTotalSize());
};
transferManager = TransferManager::Create(transferConfig);
}
});
DownloadResult res;
auto now1 = std::chrono::steady_clock::now();
try {
if (transferManager) {
if (contentEncoding != "")
throw Error(
"setting a content encoding is not supported with S3 multi-part "
"uploads");
auto result = checkAws(fmt("AWS error fetching '%s'", key),
client->GetObject(request));
std::shared_ptr<TransferHandle> transferHandle =
transferManager->UploadFile(stream, bucketName, path, mimeType,
Aws::Map<Aws::String, Aws::String>(),
nullptr /*, contentEncoding */);
res.data = decompress(result.GetContentEncoding(),
dynamic_cast<std::stringstream &>(result.GetBody()).str());
transferHandle->WaitUntilFinished();
} catch (S3Error & e) {
if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
if (transferHandle->GetStatus() == TransferStatus::FAILED)
throw Error("AWS error: failed to upload 's3://%s/%s': %s", bucketName,
path, transferHandle->GetLastError().GetMessage());
if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
throw Error(
"AWS error: transfer status of 's3://%s/%s' in unexpected state",
bucketName, path);
} else {
auto request = Aws::S3::Model::PutObjectRequest()
.WithBucket(bucketName)
.WithKey(path);
request.SetContentType(mimeType);
if (contentEncoding != "") request.SetContentEncoding(contentEncoding);
auto stream = std::make_shared<istringstream_nocopy>(data);
request.SetBody(stream);
auto result = checkAws(fmt("AWS error uploading '%s'", path),
s3Helper.client->PutObject(request));
}
auto now2 = std::chrono::steady_clock::now();
res.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
auto duration =
std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
.count();
return res;
}
printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") %
bucketName % path % data.size() % duration);
struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
{
const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."};
const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
const Setting<std::string> scheme{this, "", "scheme", "The scheme to use for S3 requests, https by default."};
const Setting<std::string> endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
const Setting<bool> multipartUpload{
this, false, "multipart-upload", "whether to use multi-part uploads"};
const Setting<uint64_t> bufferSize{
this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};
stats.putTimeMs += duration;
stats.putBytes += data.size();
stats.put++;
}
std::string bucketName;
void upsertFile(const std::string& path, const std::string& data,
const std::string& mimeType) override {
if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
uploadFile(path, *compress(narinfoCompression, data), mimeType,
narinfoCompression);
else if (lsCompression != "" && hasSuffix(path, ".ls"))
uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
else if (logCompression != "" && hasPrefix(path, "log/"))
uploadFile(path, *compress(logCompression, data), mimeType,
logCompression);
else
uploadFile(path, data, mimeType, "");
}
Stats stats;
void getFile(const std::string& path, Sink& sink) override {
stats.get++;
S3Helper s3Helper;
// FIXME: stream output to sink.
auto res = s3Helper.getObject(bucketName, path);
S3BinaryCacheStoreImpl(
const Params & params, const std::string & bucketName)
: S3BinaryCacheStore(params)
, bucketName(bucketName)
, s3Helper(profile, region, scheme, endpoint)
{
diskCache = getNarInfoDiskCache();
}
stats.getBytes += res.data ? res.data->size() : 0;
stats.getTimeMs += res.durationMs;
std::string getUri() override
{
return "s3://" + bucketName;
}
if (res.data) {
printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms", bucketName,
path, res.data->size(), res.durationMs);
void init() override
{
if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {
sink((unsigned char*)res.data->data(), res.data->size());
} else
throw NoSuchBinaryCacheFile(
"file '%s' does not exist in binary cache '%s'", path, getUri());
}
BinaryCacheStore::init();
PathSet queryAllValidPaths() override {
PathSet paths;
std::string marker;
diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
}
}
do {
debug(format("listing bucket 's3://%s' from key '%s'...") % bucketName %
marker);
const Stats & getS3Stats() override
{
return stats;
}
auto res = checkAws(
format("AWS error listing bucket '%s'") % bucketName,
s3Helper.client->ListObjects(Aws::S3::Model::ListObjectsRequest()
.WithBucket(bucketName)
.WithDelimiter("/")
.WithMarker(marker)));
/* This is a specialisation of isValidPath() that optimistically
fetches the .narinfo file, rather than first checking for its
existence via a HEAD request. Since .narinfos are small, doing
a GET is unlikely to be slower than HEAD. */
bool isValidPathUncached(const Path & storePath) override
{
try {
queryPathInfo(storePath);
return true;
} catch (InvalidPath & e) {
return false;
}
}
auto& contents = res.GetContents();
bool fileExists(const std::string & path) override
{
stats.head++;
debug(format("got %d keys, next marker '%s'") % contents.size() %
res.GetNextMarker());
auto res = s3Helper.client->HeadObject(
Aws::S3::Model::HeadObjectRequest()
.WithBucket(bucketName)
.WithKey(path));
for (auto object : contents) {
auto& key = object.GetKey();
if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
paths.insert(storeDir + "/" + key.substr(0, key.size() - 8));
}
if (!res.IsSuccess()) {
auto & error = res.GetError();
if (error.GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND
|| error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY
// If bucket listing is disabled, 404s turn into 403s
|| error.GetErrorType() == Aws::S3::S3Errors::ACCESS_DENIED)
return false;
throw Error(format("AWS error fetching '%s': %s") % path % error.GetMessage());
}
return true;
}
std::shared_ptr<TransferManager> transferManager;
std::once_flag transferManagerCreated;
void uploadFile(const std::string & path, const std::string & data,
const std::string & mimeType,
const std::string & contentEncoding)
{
auto stream = std::make_shared<istringstream_nocopy>(data);
auto maxThreads = std::thread::hardware_concurrency();
static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);
std::call_once(transferManagerCreated, [&]()
{
if (multipartUpload) {
TransferManagerConfiguration transferConfig(executor.get());
transferConfig.s3Client = s3Helper.client;
transferConfig.bufferSize = bufferSize;
transferConfig.uploadProgressCallback =
[](const TransferManager *transferManager,
const std::shared_ptr<const TransferHandle>
&transferHandle)
{
//FIXME: find a way to properly abort the multipart upload.
//checkInterrupt();
debug("upload progress ('%s'): '%d' of '%d' bytes",
transferHandle->GetKey(),
transferHandle->GetBytesTransferred(),
transferHandle->GetBytesTotalSize());
};
transferManager = TransferManager::Create(transferConfig);
}
});
auto now1 = std::chrono::steady_clock::now();
if (transferManager) {
if (contentEncoding != "")
throw Error("setting a content encoding is not supported with S3 multi-part uploads");
std::shared_ptr<TransferHandle> transferHandle =
transferManager->UploadFile(
stream, bucketName, path, mimeType,
Aws::Map<Aws::String, Aws::String>(),
nullptr /*, contentEncoding */);
transferHandle->WaitUntilFinished();
if (transferHandle->GetStatus() == TransferStatus::FAILED)
throw Error("AWS error: failed to upload 's3://%s/%s': %s",
bucketName, path, transferHandle->GetLastError().GetMessage());
if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
bucketName, path);
} else {
auto request =
Aws::S3::Model::PutObjectRequest()
.WithBucket(bucketName)
.WithKey(path);
request.SetContentType(mimeType);
if (contentEncoding != "")
request.SetContentEncoding(contentEncoding);
auto stream = std::make_shared<istringstream_nocopy>(data);
request.SetBody(stream);
auto result = checkAws(fmt("AWS error uploading '%s'", path),
s3Helper.client->PutObject(request));
}
auto now2 = std::chrono::steady_clock::now();
auto duration =
std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
.count();
printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") %
bucketName % path % data.size() % duration);
stats.putTimeMs += duration;
stats.putBytes += data.size();
stats.put++;
}
void upsertFile(const std::string & path, const std::string & data,
const std::string & mimeType) override
{
if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
else if (lsCompression != "" && hasSuffix(path, ".ls"))
uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
else if (logCompression != "" && hasPrefix(path, "log/"))
uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
else
uploadFile(path, data, mimeType, "");
}
void getFile(const std::string & path, Sink & sink) override
{
stats.get++;
// FIXME: stream output to sink.
auto res = s3Helper.getObject(bucketName, path);
stats.getBytes += res.data ? res.data->size() : 0;
stats.getTimeMs += res.durationMs;
if (res.data) {
printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms",
bucketName, path, res.data->size(), res.durationMs);
sink((unsigned char *) res.data->data(), res.data->size());
} else
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
}
PathSet queryAllValidPaths() override
{
PathSet paths;
std::string marker;
do {
debug(format("listing bucket 's3://%s' from key '%s'...") % bucketName % marker);
auto res = checkAws(format("AWS error listing bucket '%s'") % bucketName,
s3Helper.client->ListObjects(
Aws::S3::Model::ListObjectsRequest()
.WithBucket(bucketName)
.WithDelimiter("/")
.WithMarker(marker)));
auto & contents = res.GetContents();
debug(format("got %d keys, next marker '%s'")
% contents.size() % res.GetNextMarker());
for (auto object : contents) {
auto & key = object.GetKey();
if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
paths.insert(storeDir + "/" + key.substr(0, key.size() - 8));
}
marker = res.GetNextMarker();
} while (!marker.empty());
return paths;
}
marker = res.GetNextMarker();
} while (!marker.empty());
return paths;
}
};
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, 5) != "s3://") return 0;
auto store = std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
store->init();
return store;
});
static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, 5) != "s3://") return 0;
auto store =
std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
store->init();
return store;
});
}
} // namespace nix
#endif

View file

@ -1,33 +1,26 @@
#pragma once
#include "binary-cache-store.hh"
#include <atomic>
#include "binary-cache-store.hh"
namespace nix {
class S3BinaryCacheStore : public BinaryCacheStore
{
protected:
class S3BinaryCacheStore : public BinaryCacheStore {
protected:
S3BinaryCacheStore(const Params& params) : BinaryCacheStore(params) {}
S3BinaryCacheStore(const Params & params)
: BinaryCacheStore(params)
{ }
public:
struct Stats {
std::atomic<uint64_t> put{0};
std::atomic<uint64_t> putBytes{0};
std::atomic<uint64_t> putTimeMs{0};
std::atomic<uint64_t> get{0};
std::atomic<uint64_t> getBytes{0};
std::atomic<uint64_t> getTimeMs{0};
std::atomic<uint64_t> head{0};
};
public:
struct Stats
{
std::atomic<uint64_t> put{0};
std::atomic<uint64_t> putBytes{0};
std::atomic<uint64_t> putTimeMs{0};
std::atomic<uint64_t> get{0};
std::atomic<uint64_t> getBytes{0};
std::atomic<uint64_t> getTimeMs{0};
std::atomic<uint64_t> head{0};
};
virtual const Stats & getS3Stats() = 0;
virtual const Stats& getS3Stats() = 0;
};
}
} // namespace nix

View file

@ -4,30 +4,39 @@
#include "ref.hh"
namespace Aws { namespace Client { class ClientConfiguration; } }
namespace Aws { namespace S3 { class S3Client; } }
namespace Aws {
namespace Client {
class ClientConfiguration;
}
} // namespace Aws
namespace Aws {
namespace S3 {
class S3Client;
}
} // namespace Aws
namespace nix {
struct S3Helper
{
ref<Aws::Client::ClientConfiguration> config;
ref<Aws::S3::S3Client> client;
struct S3Helper {
ref<Aws::Client::ClientConfiguration> config;
ref<Aws::S3::S3Client> client;
S3Helper(const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint);
S3Helper(const std::string& profile, const std::string& region,
const std::string& scheme, const std::string& endpoint);
ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint);
ref<Aws::Client::ClientConfiguration> makeConfig(const std::string& region,
const std::string& scheme,
const std::string& endpoint);
struct DownloadResult
{
std::shared_ptr<std::string> data;
unsigned int durationMs;
};
struct DownloadResult {
std::shared_ptr<std::string> data;
unsigned int durationMs;
};
DownloadResult getObject(
const std::string & bucketName, const std::string & key);
DownloadResult getObject(const std::string& bucketName,
const std::string& key);
};
}
} // namespace nix
#endif

View file

@ -6,19 +6,19 @@ namespace nix {
#define SERVE_MAGIC_2 0x5452eecb
#define SERVE_PROTOCOL_VERSION 0x205
/* Extract the major / minor component of a serve-protocol version
   word.  (The span previously defined each macro twice with
   differently-spaced replacement lists, which is an ill-formed macro
   redefinition; keep one canonical pair.) */
#define GET_PROTOCOL_MAJOR(x) ((x)&0xff00)
#define GET_PROTOCOL_MINOR(x) ((x)&0x00ff)
/* Command opcodes of the serve protocol (see SERVE_PROTOCOL_VERSION
   above).  The explicit numeric values are part of the wire format, so
   existing entries must never be renumbered.  (The span previously
   contained the enumerator list twice — diff residue — which is
   ill-formed; keep one list.) */
typedef enum {
  cmdQueryValidPaths = 1,
  cmdQueryPathInfos = 2,
  cmdDumpStorePath = 3,
  cmdImportPaths = 4,
  cmdExportPaths = 5,
  cmdBuildPaths = 6,
  cmdQueryClosure = 7,
  cmdBuildDerivation = 8,
  cmdAddToStoreNar = 9,
} ServeCommand;
}
} // namespace nix

View file

@ -1,198 +1,172 @@
#include "sqlite.hh"
#include "util.hh"
#include <sqlite3.h>
#include <atomic>
#include "util.hh"
namespace nix {
/* Convert the current error state of 'db' into a C++ exception:
   SQLiteBusy for SQLITE_BUSY / SQLITE_PROTOCOL, SQLiteError (carrying
   the extended error string) otherwise.  (The span previously held two
   interleaved copies of this function — diff residue; keep one.) */
[[noreturn]] void throwSQLiteError(sqlite3* db, const FormatOrString& fs) {
  int err = sqlite3_errcode(db);
  int exterr = sqlite3_extended_errcode(db);

  auto path = sqlite3_db_filename(db, nullptr);
  if (!path) path = "(in-memory)";

  if (err == SQLITE_BUSY || err == SQLITE_PROTOCOL) {
    throw SQLiteBusy(
        err == SQLITE_PROTOCOL
            ? fmt("SQLite database '%s' is busy (SQLITE_PROTOCOL)", path)
            : fmt("SQLite database '%s' is busy", path));
  } else
    throw SQLiteError("%s: %s (in '%s')", fs.s, sqlite3_errstr(exterr), path);
}
/* Open (creating it if necessary) the SQLite database at 'path'.
   (The span previously held two interleaved copies of this
   constructor — diff residue; keep one.) */
SQLite::SQLite(const Path& path) {
  if (sqlite3_open_v2(path.c_str(), &db,
                      SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
                      0) != SQLITE_OK)
    throw Error(format("cannot open SQLite database '%s'") % path);
}
/* Close the database handle.  Exceptions must not escape a
   destructor, so failures are only logged via ignoreException().
   (Deduplicated from interleaved old/new copies.) */
SQLite::~SQLite() {
  try {
    if (db && sqlite3_close(db) != SQLITE_OK)
      throwSQLiteError(db, "closing database");
  } catch (...) {
    ignoreException();
  }
}
/* Execute 'stmt' (which must not return rows), retrying for as long
   as the database reports busy.  (Deduplicated from interleaved
   old/new copies.) */
void SQLite::exec(const std::string& stmt) {
  retrySQLite<void>([&]() {
    if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK)
      throwSQLiteError(db, format("executing SQLite statement '%s'") % stmt);
  });
}
/* Compile 'sql' into a prepared statement against 'db'.  May only be
   called once per SQLiteStmt (asserted below).  (Deduplicated from
   interleaved old/new copies.) */
void SQLiteStmt::create(sqlite3* db, const string& sql) {
  checkInterrupt();
  assert(!stmt);
  if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK)
    throwSQLiteError(db, fmt("creating statement '%s'", sql));
  this->db = db;
  this->sql = sql;
}
/* Finalize the prepared statement.  Exceptions must not escape a
   destructor, so failures are only logged via ignoreException().
   (Deduplicated from interleaved old/new copies.) */
SQLiteStmt::~SQLiteStmt() {
  try {
    if (stmt && sqlite3_finalize(stmt) != SQLITE_OK)
      throwSQLiteError(db, fmt("finalizing statement '%s'", sql));
  } catch (...) {
    ignoreException();
  }
}
/* Start a fresh use of the statement: reset it so parameter binding
   restarts from the first placeholder.  (Deduplicated from interleaved
   old/new copies.) */
SQLiteStmt::Use::Use(SQLiteStmt& stmt) : stmt(stmt) {
  assert(stmt.stmt);
  /* Note: sqlite3_reset() returns the error code for the most
     recent call to sqlite3_step(). So ignore it. */
  sqlite3_reset(stmt);
}
SQLiteStmt::Use::~Use()
{
sqlite3_reset(stmt);
SQLiteStmt::Use::~Use() { sqlite3_reset(stmt); }
/* Bind the next parameter to 'value', or to SQL NULL when 'notNull'
   is false. */
SQLiteStmt::Use& SQLiteStmt::Use::operator()(const std::string& value,
                                             bool notNull) {
  if (!notNull) return bind();
  int rc =
      sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT);
  if (rc != SQLITE_OK) throwSQLiteError(stmt.db, "binding argument");
  return *this;
}
SQLiteStmt::Use & SQLiteStmt::Use::operator () (const std::string & value, bool notNull)
{
if (notNull) {
if (sqlite3_bind_text(stmt, curArg++, value.c_str(), -1, SQLITE_TRANSIENT) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
} else
bind();
return *this;
/* Bind the next parameter to 'value', or to SQL NULL when 'notNull'
   is false. */
SQLiteStmt::Use& SQLiteStmt::Use::operator()(int64_t value, bool notNull) {
  if (!notNull) return bind();
  int rc = sqlite3_bind_int64(stmt, curArg++, value);
  if (rc != SQLITE_OK) throwSQLiteError(stmt.db, "binding argument");
  return *this;
}
SQLiteStmt::Use & SQLiteStmt::Use::operator () (int64_t value, bool notNull)
{
if (notNull) {
if (sqlite3_bind_int64(stmt, curArg++, value) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
} else
bind();
return *this;
/* Bind SQL NULL as the next parameter. */
SQLiteStmt::Use& SQLiteStmt::Use::bind() {
  int rc = sqlite3_bind_null(stmt, curArg++);
  if (rc != SQLITE_OK) throwSQLiteError(stmt.db, "binding argument");
  return *this;
}
SQLiteStmt::Use & SQLiteStmt::Use::bind()
{
if (sqlite3_bind_null(stmt, curArg++) != SQLITE_OK)
throwSQLiteError(stmt.db, "binding argument");
return *this;
int SQLiteStmt::Use::step() { return sqlite3_step(stmt); }
/* Run a statement that is expected to produce no rows. */
void SQLiteStmt::Use::exec() {
  int rc = step();
  assert(rc != SQLITE_ROW);
  if (rc != SQLITE_DONE) {
    throwSQLiteError(stmt.db, fmt("executing SQLite statement '%s'", stmt.sql));
  }
}
int SQLiteStmt::Use::step()
{
return sqlite3_step(stmt);
/* Step a query; returns true iff another result row is available. */
bool SQLiteStmt::Use::next() {
  int rc = step();
  if (rc != SQLITE_ROW && rc != SQLITE_DONE)
    throwSQLiteError(stmt.db, fmt("executing SQLite query '%s'", stmt.sql));
  return rc == SQLITE_ROW;
}
void SQLiteStmt::Use::exec()
{
int r = step();
assert(r != SQLITE_ROW);
if (r != SQLITE_DONE)
throwSQLiteError(stmt.db, fmt("executing SQLite statement '%s'", stmt.sql));
/* Return column 'col' of the current row as a string.  Asserts that
   the column is non-NULL. */
std::string SQLiteStmt::Use::getStr(int col) {
  auto text = reinterpret_cast<const char*>(sqlite3_column_text(stmt, col));
  assert(text);
  return text;
}
bool SQLiteStmt::Use::next()
{
int r = step();
if (r != SQLITE_DONE && r != SQLITE_ROW)
throwSQLiteError(stmt.db, fmt("executing SQLite query '%s'", stmt.sql));
return r == SQLITE_ROW;
/* Return column 'col' of the current row as a 64-bit integer.  Per
   the SQLite C API, a NULL column is silently converted to 0 here —
   hence the FIXME; use isNull() first if that matters. */
int64_t SQLiteStmt::Use::getInt(int col) {
  // FIXME: detect nulls?
  return sqlite3_column_int64(stmt, col);
}
std::string SQLiteStmt::Use::getStr(int col)
{
auto s = (const char *) sqlite3_column_text(stmt, col);
assert(s);
return s;
/* Whether column 'col' of the current row is SQL NULL. */
bool SQLiteStmt::Use::isNull(int col) {
  return SQLITE_NULL == sqlite3_column_type(stmt, col);
}
int64_t SQLiteStmt::Use::getInt(int col)
{
// FIXME: detect nulls?
return sqlite3_column_int64(stmt, col);
/* Begin a transaction on 'db'.  Unless commit() is called, the
   destructor rolls it back. */
SQLiteTxn::SQLiteTxn(sqlite3* db) {
  this->db = db;
  int rc = sqlite3_exec(db, "begin;", 0, 0, 0);
  if (rc != SQLITE_OK) throwSQLiteError(db, "starting transaction");
  active = true;
}
bool SQLiteStmt::Use::isNull(int col)
{
return sqlite3_column_type(stmt, col) == SQLITE_NULL;
/* Commit the transaction, disabling the rollback in the destructor. */
void SQLiteTxn::commit() {
  int rc = sqlite3_exec(db, "commit;", 0, 0, 0);
  if (rc != SQLITE_OK) throwSQLiteError(db, "committing transaction");
  active = false;
}
SQLiteTxn::SQLiteTxn(sqlite3 * db)
{
this->db = db;
if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "starting transaction");
active = true;
/* Roll back if the transaction was never committed.  Exceptions must
   not escape a destructor, so failures are only logged. */
SQLiteTxn::~SQLiteTxn() {
  try {
    if (active) {
      if (sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
        throwSQLiteError(db, "aborting transaction");
    }
  } catch (...) {
    ignoreException();
  }
}
void SQLiteTxn::commit()
{
if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "committing transaction");
active = false;
/* React to a SQLITE_BUSY condition: warn (rate-limited to one message
   per 10 seconds) and sleep a random interval of at most 0.1s before
   the caller retries.  Fixed inconsistent null-pointer style: this
   file already uses nullptr (e.g. sqlite3_db_filename above), so the
   bare 0s passed to time() and nanosleep() are replaced. */
void handleSQLiteBusy(const SQLiteBusy& e) {
  /* Rate-limit the warning to one per 10 seconds. */
  static std::atomic<time_t> lastWarned{0};

  time_t now = time(nullptr);

  if (now > lastWarned + 10) {
    lastWarned = now;
    printError("warning: %s", e.what());
  }

  /* Sleep for a while since retrying the transaction right away
     is likely to fail again. */
  checkInterrupt();
  struct timespec t;
  t.tv_sec = 0;
  t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
  nanosleep(&t, nullptr);
}
SQLiteTxn::~SQLiteTxn()
{
try {
if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK)
throwSQLiteError(db, "aborting transaction");
} catch (...) {
ignoreException();
}
}
void handleSQLiteBusy(const SQLiteBusy & e)
{
static std::atomic<time_t> lastWarned{0};
time_t now = time(0);
if (now > lastWarned + 10) {
lastWarned = now;
printError("warning: %s", e.what());
}
/* Sleep for a while since retrying the transaction right away
is likely to fail again. */
checkInterrupt();
struct timespec t;
t.tv_sec = 0;
t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
nanosleep(&t, 0);
}
}
} // namespace nix

View file

@ -2,7 +2,6 @@
#include <functional>
#include <string>
#include "types.hh"
class sqlite3;
@ -11,104 +10,99 @@ class sqlite3_stmt;
namespace nix {
/* RAII wrapper to close a SQLite database automatically. */
struct SQLite
{
sqlite3 * db = 0;
SQLite() { }
SQLite(const Path & path);
SQLite(const SQLite & from) = delete;
SQLite& operator = (const SQLite & from) = delete;
SQLite& operator = (SQLite && from) { db = from.db; from.db = 0; return *this; }
~SQLite();
operator sqlite3 * () { return db; }
struct SQLite {
sqlite3* db = 0;
SQLite() {}
SQLite(const Path& path);
SQLite(const SQLite& from) = delete;
SQLite& operator=(const SQLite& from) = delete;
SQLite& operator=(SQLite&& from) {
db = from.db;
from.db = 0;
return *this;
}
~SQLite();
operator sqlite3*() { return db; }
void exec(const std::string & stmt);
void exec(const std::string& stmt);
};
/* RAII wrapper to create and destroy SQLite prepared statements. */
struct SQLiteStmt
{
sqlite3 * db = 0;
sqlite3_stmt * stmt = 0;
std::string sql;
SQLiteStmt() { }
SQLiteStmt(sqlite3 * db, const std::string & sql) { create(db, sql); }
void create(sqlite3 * db, const std::string & s);
~SQLiteStmt();
operator sqlite3_stmt * () { return stmt; }
struct SQLiteStmt {
sqlite3* db = 0;
sqlite3_stmt* stmt = 0;
std::string sql;
SQLiteStmt() {}
SQLiteStmt(sqlite3* db, const std::string& sql) { create(db, sql); }
void create(sqlite3* db, const std::string& s);
~SQLiteStmt();
operator sqlite3_stmt*() { return stmt; }
/* Helper for binding / executing statements. */
class Use
{
friend struct SQLiteStmt;
private:
SQLiteStmt & stmt;
unsigned int curArg = 1;
Use(SQLiteStmt & stmt);
/* Helper for binding / executing statements. */
class Use {
friend struct SQLiteStmt;
public:
private:
SQLiteStmt& stmt;
unsigned int curArg = 1;
Use(SQLiteStmt& stmt);
~Use();
public:
~Use();
/* Bind the next parameter. */
Use & operator () (const std::string & value, bool notNull = true);
Use & operator () (int64_t value, bool notNull = true);
Use & bind(); // null
/* Bind the next parameter. */
Use& operator()(const std::string& value, bool notNull = true);
Use& operator()(int64_t value, bool notNull = true);
Use& bind(); // null
int step();
int step();
/* Execute a statement that does not return rows. */
void exec();
/* Execute a statement that does not return rows. */
void exec();
/* For statements that return 0 or more rows. Returns true iff
a row is available. */
bool next();
/* For statements that return 0 or more rows. Returns true iff
a row is available. */
bool next();
std::string getStr(int col);
int64_t getInt(int col);
bool isNull(int col);
};
std::string getStr(int col);
int64_t getInt(int col);
bool isNull(int col);
};
Use use()
{
return Use(*this);
}
Use use() { return Use(*this); }
};
/* RAII helper that ensures transactions are aborted unless explicitly
committed. */
struct SQLiteTxn
{
bool active = false;
sqlite3 * db;
struct SQLiteTxn {
bool active = false;
sqlite3* db;
SQLiteTxn(sqlite3 * db);
SQLiteTxn(sqlite3* db);
void commit();
void commit();
~SQLiteTxn();
~SQLiteTxn();
};
MakeError(SQLiteError, Error);
MakeError(SQLiteBusy, SQLiteError);
[[noreturn]] void throwSQLiteError(sqlite3 * db, const FormatOrString & fs);
[[noreturn]] void throwSQLiteError(sqlite3* db, const FormatOrString& fs);
void handleSQLiteBusy(const SQLiteBusy & e);
void handleSQLiteBusy(const SQLiteBusy& e);
/* Convenience function for retrying a SQLite transaction when the
database is busy. */
template<typename T>
T retrySQLite(std::function<T()> fun)
{
while (true) {
try {
return fun();
} catch (SQLiteBusy & e) {
handleSQLiteBusy(e);
}
template <typename T>
T retrySQLite(std::function<T()> fun) {
while (true) {
try {
return fun();
} catch (SQLiteBusy& e) {
handleSQLiteBusy(e);
}
}
}
}
} // namespace nix

View file

@ -1,100 +1,84 @@
#include "store-api.hh"
#include "remote-store.hh"
#include "remote-fs-accessor.hh"
#include "archive.hh"
#include "worker-protocol.hh"
#include "pool.hh"
#include "remote-fs-accessor.hh"
#include "remote-store.hh"
#include "ssh.hh"
#include "store-api.hh"
#include "worker-protocol.hh"
namespace nix {
static std::string uriScheme = "ssh-ng://";
class SSHStore : public RemoteStore
{
public:
class SSHStore : public RemoteStore {
public:
const Setting<Path> sshKey{(Store*)this, "", "ssh-key",
"path to an SSH private key"};
const Setting<bool> compress{(Store*)this, false, "compress",
"whether to compress the connection"};
const Setting<Path> sshKey{(Store*) this, "", "ssh-key", "path to an SSH private key"};
const Setting<bool> compress{(Store*) this, false, "compress", "whether to compress the connection"};
SSHStore(const std::string& host, const Params& params)
: Store(params),
RemoteStore(params),
host(host),
master(host, sshKey,
// Use SSH master only if using more than 1 connection.
connections->capacity() > 1, compress) {}
SSHStore(const std::string & host, const Params & params)
: Store(params)
, RemoteStore(params)
, host(host)
, master(
host,
sshKey,
// Use SSH master only if using more than 1 connection.
connections->capacity() > 1,
compress)
{
}
std::string getUri() override { return uriScheme + host; }
std::string getUri() override
{
return uriScheme + host;
}
bool sameMachine() { return false; }
bool sameMachine()
{ return false; }
void narFromPath(const Path& path, Sink& sink) override;
void narFromPath(const Path & path, Sink & sink) override;
ref<FSAccessor> getFSAccessor() override;
ref<FSAccessor> getFSAccessor() override;
private:
struct Connection : RemoteStore::Connection {
std::unique_ptr<SSHMaster::Connection> sshConn;
};
private:
ref<RemoteStore::Connection> openConnection() override;
struct Connection : RemoteStore::Connection
{
std::unique_ptr<SSHMaster::Connection> sshConn;
};
std::string host;
ref<RemoteStore::Connection> openConnection() override;
SSHMaster master;
std::string host;
SSHMaster master;
void setOptions(RemoteStore::Connection & conn) override
{
/* TODO Add a way to explicitly ask for some options to be
forwarded. One option: A way to query the daemon for its
settings, and then a series of params to SSHStore like
forward-cores or forward-overridden-cores that only
override the requested settings.
*/
};
void setOptions(RemoteStore::Connection& conn) override{
/* TODO Add a way to explicitly ask for some options to be
forwarded. One option: A way to query the daemon for its
settings, and then a series of params to SSHStore like
forward-cores or forward-overridden-cores that only
override the requested settings.
*/
};
};
void SSHStore::narFromPath(const Path & path, Sink & sink)
{
auto conn(connections->get());
conn->to << wopNarFromPath << path;
conn->processStderr();
copyNAR(conn->from, sink);
void SSHStore::narFromPath(const Path& path, Sink& sink) {
auto conn(connections->get());
conn->to << wopNarFromPath << path;
conn->processStderr();
copyNAR(conn->from, sink);
}
ref<FSAccessor> SSHStore::getFSAccessor()
{
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
ref<FSAccessor> SSHStore::getFSAccessor() {
return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()));
}
ref<RemoteStore::Connection> SSHStore::openConnection()
{
auto conn = make_ref<Connection>();
conn->sshConn = master.startCommand("nix-daemon --stdio");
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
initConnection(*conn);
return conn;
ref<RemoteStore::Connection> SSHStore::openConnection() {
auto conn = make_ref<Connection>();
conn->sshConn = master.startCommand("nix-daemon --stdio");
conn->to = FdSink(conn->sshConn->in.get());
conn->from = FdSource(conn->sshConn->out.get());
initConnection(*conn);
return conn;
}
static RegisterStoreImplementation regStore([](
const std::string & uri, const Store::Params & params)
-> std::shared_ptr<Store>
{
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
static RegisterStoreImplementation regStore([](const std::string& uri,
const Store::Params& params)
-> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
});
}
} // namespace nix

View file

@ -2,64 +2,60 @@
namespace nix {
SSHMaster::SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD)
: host(host)
, fakeSSH(host == "localhost")
, keyFile(keyFile)
, useMaster(useMaster && !fakeSSH)
, compress(compress)
, logFD(logFD)
{
if (host == "" || hasPrefix(host, "-"))
throw Error("invalid SSH host name '%s'", host);
SSHMaster::SSHMaster(const std::string& host, const std::string& keyFile,
bool useMaster, bool compress, int logFD)
: host(host),
fakeSSH(host == "localhost"),
keyFile(keyFile),
useMaster(useMaster && !fakeSSH),
compress(compress),
logFD(logFD) {
if (host == "" || hasPrefix(host, "-"))
throw Error("invalid SSH host name '%s'", host);
}
void SSHMaster::addCommonSSHOpts(Strings & args)
{
for (auto & i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
args.push_back(i);
if (!keyFile.empty())
args.insert(args.end(), {"-i", keyFile});
if (compress)
args.push_back("-C");
void SSHMaster::addCommonSSHOpts(Strings& args) {
for (auto& i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
args.push_back(i);
if (!keyFile.empty()) args.insert(args.end(), {"-i", keyFile});
if (compress) args.push_back("-C");
}
std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string & command)
{
Path socketPath = startMaster();
std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(
const std::string& command) {
Path socketPath = startMaster();
Pipe in, out;
in.create();
out.create();
Pipe in, out;
in.create();
out.create();
auto conn = std::make_unique<Connection>();
ProcessOptions options;
options.dieWithParent = false;
auto conn = std::make_unique<Connection>();
ProcessOptions options;
options.dieWithParent = false;
conn->sshPid = startProcess([&]() {
conn->sshPid = startProcess(
[&]() {
restoreSignals();
close(in.writeSide.get());
close(out.readSide.get());
if (dup2(in.readSide.get(), STDIN_FILENO) == -1)
throw SysError("duping over stdin");
throw SysError("duping over stdin");
if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
throw SysError("duping over stdout");
throw SysError("duping over stdout");
if (logFD != -1 && dup2(logFD, STDERR_FILENO) == -1)
throw SysError("duping over stderr");
throw SysError("duping over stderr");
Strings args;
if (fakeSSH) {
args = { "bash", "-c" };
args = {"bash", "-c"};
} else {
args = { "ssh", host.c_str(), "-x", "-a" };
addCommonSSHOpts(args);
if (socketPath != "")
args.insert(args.end(), {"-S", socketPath});
if (verbosity >= lvlChatty)
args.push_back("-v");
args = {"ssh", host.c_str(), "-x", "-a"};
addCommonSSHOpts(args);
if (socketPath != "") args.insert(args.end(), {"-S", socketPath});
if (verbosity >= lvlChatty) args.push_back("-v");
}
args.push_back(command);
@ -67,68 +63,70 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(const std::string
// could not exec ssh/bash
throw SysError("unable to execute '%s'", args.front());
}, options);
},
options);
in.readSide = -1;
out.writeSide = -1;
in.readSide = -1;
out.writeSide = -1;
conn->out = std::move(out.readSide);
conn->in = std::move(in.writeSide);
conn->out = std::move(out.readSide);
conn->in = std::move(in.writeSide);
return conn;
return conn;
}
Path SSHMaster::startMaster()
{
if (!useMaster) return "";
Path SSHMaster::startMaster() {
if (!useMaster) return "";
auto state(state_.lock());
auto state(state_.lock());
if (state->sshMaster != -1) return state->socketPath;
if (state->sshMaster != -1) return state->socketPath;
state->tmpDir = std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
state->tmpDir =
std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));
state->socketPath = (Path) *state->tmpDir + "/ssh.sock";
state->socketPath = (Path)*state->tmpDir + "/ssh.sock";
Pipe out;
out.create();
Pipe out;
out.create();
ProcessOptions options;
options.dieWithParent = false;
ProcessOptions options;
options.dieWithParent = false;
state->sshMaster = startProcess([&]() {
state->sshMaster = startProcess(
[&]() {
restoreSignals();
close(out.readSide.get());
if (dup2(out.writeSide.get(), STDOUT_FILENO) == -1)
throw SysError("duping over stdout");
throw SysError("duping over stdout");
Strings args =
{ "ssh", host.c_str(), "-M", "-N", "-S", state->socketPath
, "-o", "LocalCommand=echo started"
, "-o", "PermitLocalCommand=yes"
};
if (verbosity >= lvlChatty)
args.push_back("-v");
Strings args = {"ssh", host.c_str(),
"-M", "-N",
"-S", state->socketPath,
"-o", "LocalCommand=echo started",
"-o", "PermitLocalCommand=yes"};
if (verbosity >= lvlChatty) args.push_back("-v");
addCommonSSHOpts(args);
execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
throw SysError("unable to execute '%s'", args.front());
}, options);
},
options);
out.writeSide = -1;
out.writeSide = -1;
std::string reply;
try {
reply = readLine(out.readSide.get());
} catch (EndOfFile & e) { }
std::string reply;
try {
reply = readLine(out.readSide.get());
} catch (EndOfFile& e) {
}
if (reply != "started")
throw Error("failed to start SSH master connection to '%s'", host);
if (reply != "started")
throw Error("failed to start SSH master connection to '%s'", host);
return state->socketPath;
return state->socketPath;
}
}
} // namespace nix

View file

@ -1,45 +1,41 @@
#pragma once
#include "util.hh"
#include "sync.hh"
#include "util.hh"
namespace nix {
class SSHMaster
{
private:
class SSHMaster {
private:
const std::string host;
bool fakeSSH;
const std::string keyFile;
const bool useMaster;
const bool compress;
const int logFD;
const std::string host;
bool fakeSSH;
const std::string keyFile;
const bool useMaster;
const bool compress;
const int logFD;
struct State {
Pid sshMaster;
std::unique_ptr<AutoDelete> tmpDir;
Path socketPath;
};
struct State
{
Pid sshMaster;
std::unique_ptr<AutoDelete> tmpDir;
Path socketPath;
};
Sync<State> state_;
Sync<State> state_;
void addCommonSSHOpts(Strings& args);
void addCommonSSHOpts(Strings & args);
public:
SSHMaster(const std::string& host, const std::string& keyFile, bool useMaster,
bool compress, int logFD = -1);
public:
struct Connection {
Pid sshPid;
AutoCloseFD out, in;
};
SSHMaster(const std::string & host, const std::string & keyFile, bool useMaster, bool compress, int logFD = -1);
std::unique_ptr<Connection> startCommand(const std::string& command);
struct Connection
{
Pid sshPid;
AutoCloseFD out, in;
};
std::unique_ptr<Connection> startCommand(const std::string & command);
Path startMaster();
Path startMaster();
};
}
} // namespace nix

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -2,68 +2,64 @@
namespace nix {
#define WORKER_MAGIC_1 0x6e697863
#define WORKER_MAGIC_2 0x6478696f
#define PROTOCOL_VERSION 0x115
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
#define GET_PROTOCOL_MAJOR(x) ((x)&0xff00)
#define GET_PROTOCOL_MINOR(x) ((x)&0x00ff)
typedef enum {
wopIsValidPath = 1,
wopHasSubstitutes = 3,
wopQueryPathHash = 4, // obsolete
wopQueryReferences = 5, // obsolete
wopQueryReferrers = 6,
wopAddToStore = 7,
wopAddTextToStore = 8,
wopBuildPaths = 9,
wopEnsurePath = 10,
wopAddTempRoot = 11,
wopAddIndirectRoot = 12,
wopSyncWithGC = 13,
wopFindRoots = 14,
wopExportPath = 16, // obsolete
wopQueryDeriver = 18, // obsolete
wopSetOptions = 19,
wopCollectGarbage = 20,
wopQuerySubstitutablePathInfo = 21,
wopQueryDerivationOutputs = 22,
wopQueryAllValidPaths = 23,
wopQueryFailedPaths = 24,
wopClearFailedPaths = 25,
wopQueryPathInfo = 26,
wopImportPaths = 27, // obsolete
wopQueryDerivationOutputNames = 28,
wopQueryPathFromHashPart = 29,
wopQuerySubstitutablePathInfos = 30,
wopQueryValidPaths = 31,
wopQuerySubstitutablePaths = 32,
wopQueryValidDerivers = 33,
wopOptimiseStore = 34,
wopVerifyStore = 35,
wopBuildDerivation = 36,
wopAddSignatures = 37,
wopNarFromPath = 38,
wopAddToStoreNar = 39,
wopQueryMissing = 40,
wopIsValidPath = 1,
wopHasSubstitutes = 3,
wopQueryPathHash = 4, // obsolete
wopQueryReferences = 5, // obsolete
wopQueryReferrers = 6,
wopAddToStore = 7,
wopAddTextToStore = 8,
wopBuildPaths = 9,
wopEnsurePath = 10,
wopAddTempRoot = 11,
wopAddIndirectRoot = 12,
wopSyncWithGC = 13,
wopFindRoots = 14,
wopExportPath = 16, // obsolete
wopQueryDeriver = 18, // obsolete
wopSetOptions = 19,
wopCollectGarbage = 20,
wopQuerySubstitutablePathInfo = 21,
wopQueryDerivationOutputs = 22,
wopQueryAllValidPaths = 23,
wopQueryFailedPaths = 24,
wopClearFailedPaths = 25,
wopQueryPathInfo = 26,
wopImportPaths = 27, // obsolete
wopQueryDerivationOutputNames = 28,
wopQueryPathFromHashPart = 29,
wopQuerySubstitutablePathInfos = 30,
wopQueryValidPaths = 31,
wopQuerySubstitutablePaths = 32,
wopQueryValidDerivers = 33,
wopOptimiseStore = 34,
wopVerifyStore = 35,
wopBuildDerivation = 36,
wopAddSignatures = 37,
wopNarFromPath = 38,
wopAddToStoreNar = 39,
wopQueryMissing = 40,
} WorkerOp;
#define STDERR_NEXT 0x6f6c6d67
#define STDERR_READ 0x64617461 // data needed from source
#define STDERR_WRITE 0x64617416 // data for sink
#define STDERR_LAST 0x616c7473
#define STDERR_NEXT 0x6f6c6d67
#define STDERR_READ 0x64617461 // data needed from source
#define STDERR_WRITE 0x64617416 // data for sink
#define STDERR_LAST 0x616c7473
#define STDERR_ERROR 0x63787470
#define STDERR_START_ACTIVITY 0x53545254
#define STDERR_STOP_ACTIVITY 0x53544f50
#define STDERR_RESULT 0x52534c54
#define STDERR_STOP_ACTIVITY 0x53544f50
#define STDERR_RESULT 0x52534c54
Path readStorePath(Store& store, Source& from);
template <class T>
T readStorePaths(Store& store, Source& from);
Path readStorePath(Store & store, Source & from);
template<class T> T readStorePaths(Store & store, Source & from);
}
} // namespace nix