style(3p/nix): Add braces around single-line conditionals

These were not caught by the previous clang-tidy invocation, but were
instead sorted out using amber[0], as follows:

  ambr --regex 'if (\(.+\))\s([a-z].*;)' 'if $1 { $2 }'

[0]: https://github.com/dalance/amber

parent: c6a31838cd
commit: 867055133d

97 changed files with 2223 additions and 753 deletions
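To make the mechanical nature of the change concrete, here is a minimal sketch of what the rewrite does (the function below is invented for illustration and does not appear in the diff). The ambr replacement itself produces the braced form on one line; the multi-line layout seen in the hunks below is presumably the result of reformatting afterwards.

  #include <string>

  // Before the rewrite, a single-statement conditional had no braces:
  //
  //   size_t colon = line.find(':');
  //   if (colon == std::string::npos) return;
  //
  // After `ambr --regex 'if (\(.+\))\s([a-z].*;)' 'if $1 { $2 }'`, the same
  // statement carries braces, expanded here over three lines:
  void example(const std::string& line) {
    auto colon = line.find(':');
    if (colon == std::string::npos) {
      return;
    }
    // ... use `colon` ...
  }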
third_party/nix/src/libstore/binary-cache-store.cc

@@ -38,7 +38,9 @@ void BinaryCacheStore::init() {
-      if (colon == std::string::npos) continue;
+      if (colon == std::string::npos) {
+        continue;
+      }

@@ -115,12 +117,16 @@ void BinaryCacheStore::addToStore(const ValidPathInfo& info,
-  if (!repair && isValidPath(info.path)) return;
+  if (!repair && isValidPath(info.path)) {
+    return;
+  }
-      if (ref != info.path) queryPathInfo(ref);
+      if (ref != info.path) {
+        queryPathInfo(ref);
+      }

@@ -152,7 +158,9 @@ void BinaryCacheStore::addToStore(const ValidPathInfo& info,
-  if (accessor_) accessor_->addToCache(info.path, *nar, narAccessor);
+  if (accessor_) {
+    accessor_->addToCache(info.path, *nar, narAccessor);
+  }

@@ -165,7 +173,9 @@ void BinaryCacheStore::addToStore(const ValidPathInfo& info,
-    if (accessor_) accessor_->addToCache(info.path, *nar, makeNarAccessor(nar));
+    if (accessor_) {
+      accessor_->addToCache(info.path, *nar, makeNarAccessor(nar));
+    }

@@ -201,7 +211,9 @@ void BinaryCacheStore::addToStore(const ValidPathInfo& info,
-  if (secretKey) narInfo->sign(*secretKey);
+  if (secretKey) {
+    narInfo->sign(*secretKey);
+  }

@@ -254,7 +266,9 @@ void BinaryCacheStore::queryPathInfoUncached(
-    if (!data) return (*callbackPtr)(nullptr);
+    if (!data) {
+      return (*callbackPtr)(nullptr);
+    }

@@ -341,7 +355,9 @@ std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path& path) {
-    if (info->deriver == "") return nullptr;
+    if (info->deriver == "") {
+      return nullptr;
+    }
third_party/nix/src/libstore/build.cc (vendored, 324 changes)

@@ -334,7 +334,9 @@ void addToWeakGoals(WeakGoals& goals, GoalPtr p) {
-    if (i.lock() == p) return;
+    if (i.lock() == p) {
+      return;
+    }

@@ -368,7 +370,9 @@ void Goal::waiteeDone(GoalPtr waitee, ExitCode result) {
-      if (j.lock() != shared_from_this()) waiters2.push_back(j);
+      if (j.lock() != shared_from_this()) {
+        waiters2.push_back(j);
+      }

@@ -385,7 +389,9 @@ void Goal::amDone(ExitCode result) {
-    if (goal) goal->waiteeDone(shared_from_this(), result);
+    if (goal) {
+      goal->waiteeDone(shared_from_this(), result);
+    }

@@ -405,7 +411,9 @@ static void commonChildInit(Pipe& logPipe) {
-  if (setsid() == -1) throw SysError(format("creating a new session"));
+  if (setsid() == -1) {
+    throw SysError(format("creating a new session"));
+  }

@@ -539,7 +547,9 @@ UserLock::UserLock() {
-    if (!fd) throw SysError(format("opening user lock '%1%'") % fnUserLock);
+    if (!fd) {
+      throw SysError(format("opening user lock '%1%'") % fnUserLock);
+    }

@@ -671,7 +681,9 @@ HookInstance::HookInstance() {
-    if (pid != -1) pid.kill();
+    if (pid != -1) {
+      pid.kill();
+    }

@@ -1051,7 +1063,9 @@ void DerivationGoal::work() { (this->*state)(); }
-  if (wantedOutputs.empty()) return;
+  if (wantedOutputs.empty()) {
+    return;
+  }

@@ -1192,7 +1206,9 @@ void DerivationGoal::outputsSubstituted() {
-    if (worker.store.isValidPath(i)) continue;
+    if (worker.store.isValidPath(i)) {
+      continue;
+    }

@@ -1215,7 +1231,9 @@ void DerivationGoal::repairClosure() {
-    if (!wantOutput(i.first, wantedOutputs)) continue;
+    if (!wantOutput(i.first, wantedOutputs)) {
+      continue;
+    }

@@ -1226,7 +1244,9 @@ void DerivationGoal::repairClosure() {
-  if (useDerivation) worker.store.computeFSClosure(drvPath, inputClosure);
+  if (useDerivation) {
+    worker.store.computeFSClosure(drvPath, inputClosure);
+  }

@@ -1237,7 +1257,9 @@ void DerivationGoal::repairClosure() {
-    if (worker.pathContentsGood(i)) continue;
+    if (worker.pathContentsGood(i)) {
+      continue;
+    }

@@ -1373,7 +1395,9 @@ void DerivationGoal::tryToBuild() {
-    if (worker.store.isValidPath(path)) continue;
+    if (worker.store.isValidPath(path)) {
+      continue;
+    }

@@ -1459,7 +1483,9 @@ void replaceValidPath(const Path& storePath, const Path tmpPath) {
-  if (pathExists(storePath)) rename(storePath.c_str(), oldPath.c_str());
+  if (pathExists(storePath)) {
+    rename(storePath.c_str(), oldPath.c_str());
+  }

@@ -1504,7 +1530,9 @@ MakeError(NotDeterministic, BuildError)
-  if (buildUser) buildUser->kill();
+  if (buildUser) {
+    buildUser->kill();
+  }

@@ -1670,7 +1698,9 @@ HookReply DerivationGoal::tryBuildHook() {
-  if (!worker.hook) worker.hook = std::make_unique<HookInstance>();
+  if (!worker.hook) {
+    worker.hook = std::make_unique<HookInstance>();
+  }

@@ -1808,7 +1838,9 @@ static void preloadNSS() {
-      if (res) freeaddrinfo(res);
+      if (res) {
+        freeaddrinfo(res);
+      }

@@ -1823,7 +1855,9 @@ void DerivationGoal::startBuilder() {
-  if (drv->isBuiltin()) preloadNSS();
+  if (drv->isBuiltin()) {
+    preloadNSS();
+  }

@@ -1930,7 +1964,9 @@ void DerivationGoal::startBuilder() {
-    if (i.empty()) continue;
+    if (i.empty()) {
+      continue;
+    }

@@ -2161,7 +2197,9 @@ void DerivationGoal::startBuilder() {
-  if (!builderOut.readSide) throw SysError("opening pseudoterminal master");
+  if (!builderOut.readSide) {
+    throw SysError("opening pseudoterminal master");
+  }

@@ -2187,7 +2225,9 @@ void DerivationGoal::startBuilder() {
-  if (!builderOut.writeSide) throw SysError("opening pseudoterminal slave");
+  if (!builderOut.writeSide) {
+    throw SysError("opening pseudoterminal slave");
+  }

@@ -2265,11 +2305,15 @@ void DerivationGoal::startBuilder() {
-    if (stack == MAP_FAILED) throw SysError("allocating stack");
+    if (stack == MAP_FAILED) {
+      throw SysError("allocating stack");
+    }
-    if (privateNetwork) flags |= CLONE_NEWNET;
+    if (privateNetwork) {
+      flags |= CLONE_NEWNET;
+    }

@@ -2292,7 +2336,9 @@ void DerivationGoal::startBuilder() {
-      if (child == -1) throw SysError("cloning builder process");
+      if (child == -1) {
+        throw SysError("cloning builder process");
+      }

@@ -2310,7 +2356,9 @@ void DerivationGoal::startBuilder() {
-    if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) abort();
+    if (!string2Int<pid_t>(readLine(builderOut.readSide.get()), tmp)) {
+      abort();
+    }

@@ -2350,7 +2398,9 @@ void DerivationGoal::startBuilder() {
-      if (msg.size() == 1) break;
+      if (msg.size() == 1) {
+        break;
+      }

@@ -2435,7 +2485,9 @@ void DerivationGoal::initEnv() {
-  if (fixedOutput) env["NIX_OUTPUT_CHECKED"] = "1";
+  if (fixedOutput) {
+    env["NIX_OUTPUT_CHECKED"] = "1";
+  }

@@ -2465,7 +2517,9 @@ static std::regex shVarName("[A-Za-z_][A-Za-z0-9_]*");
-  if (!structuredAttrs) return;
+  if (!structuredAttrs) {
+    return;
+  }

@@ -2503,14 +2557,20 @@ void DerivationGoal::writeStructuredAttrs() {
-    if (value.is_string()) return shellEscape(value);
+    if (value.is_string()) {
+      return shellEscape(value);
+    }
-      if (std::ceil(f) == f) return std::to_string(value.get<int>());
+      if (std::ceil(f) == f) {
+        return std::to_string(value.get<int>());
+      }
-    if (value.is_null()) return std::string("''");
+    if (value.is_null()) {
+      return std::string("''");
+    }

@@ -2521,7 +2581,9 @@ void DerivationGoal::writeStructuredAttrs() {
-    if (!std::regex_match(i.key(), shVarName)) continue;
+    if (!std::regex_match(i.key(), shVarName)) {
+      continue;
+    }

@@ -2543,7 +2605,9 @@ void DerivationGoal::writeStructuredAttrs() {
-      if (good) jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
+      if (good) {
+        jsonSh += fmt("declare -a %s=(%s)\n", i.key(), s2);
+      }

@@ -2559,7 +2623,9 @@ void DerivationGoal::writeStructuredAttrs() {
-      if (good) jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
+      if (good) {
+        jsonSh += fmt("declare -A %s=(%s)\n", i.key(), s2);
+      }

@@ -2568,14 +2634,18 @@ void DerivationGoal::writeStructuredAttrs() {
-  if (!buildUser) return;
+  if (!buildUser) {
+    return;
+  }
-  if (!settings.filterSyscalls) return;
+  if (!settings.filterSyscalls) {
+    return;
+  }

@@ -2653,7 +2723,9 @@ void DerivationGoal::runChild() {
-      if (buildUser) throw;
+      if (buildUser) {
+        throw;
+      }

@@ -2679,7 +2751,9 @@ void DerivationGoal::runChild() {
-      if (!fd) throw SysError("cannot open IP socket");
+      if (!fd) {
+        throw SysError("cannot open IP socket");
+      }

@@ -2782,7 +2856,9 @@ void DerivationGoal::runChild() {
-      if (i.second.source == "/proc") continue; // backwards compatibility
+      if (i.second.source == "/proc") {
+        continue;
+      } // backwards compatibility

@@ -2813,7 +2889,9 @@ void DerivationGoal::runChild() {
-      if (errno != EINVAL) throw SysError("mounting /dev/pts");
+      if (errno != EINVAL) {
+        throw SysError("mounting /dev/pts");
+      }

@@ -2844,8 +2922,12 @@ void DerivationGoal::runChild() {
-      if (setgid(sandboxGid) == -1) throw SysError("setgid failed");
-      if (setuid(sandboxUid) == -1) throw SysError("setuid failed");
+      if (setgid(sandboxGid) == -1) {
+        throw SysError("setgid failed");
+      }
+      if (setuid(sandboxUid) == -1) {
+        throw SysError("setuid failed");
+      }

@@ -2875,13 +2957,17 @@ void DerivationGoal::runChild() {
-      if (cur != -1) personality(cur | 0x0020000 /* == UNAME26 */);
+      if (cur != -1) {
+        personality(cur | 0x0020000 /* == UNAME26 */);
+      }
-    if (cur != -1) personality(cur | ADDR_NO_RANDOMIZE);
+    if (cur != -1) {
+      personality(cur | ADDR_NO_RANDOMIZE);
+    }

@@ -2972,7 +3058,9 @@ void DerivationGoal::runChild() {
-    if (fixedOutput) sandboxProfile += "(import \"sandbox-network.sb\")\n";
+    if (fixedOutput) {
+      sandboxProfile += "(import \"sandbox-network.sb\")\n";
+    }

@@ -3001,7 +3089,9 @@ void DerivationGoal::runChild() {
-        if (i.second.optional && errno == ENOENT) continue;
+        if (i.second.optional && errno == ENOENT) {
+          continue;
+        }

@@ -3040,7 +3130,9 @@ void DerivationGoal::runChild() {
-    if (globalTmpDir.back() == '/') globalTmpDir.pop_back();
+    if (globalTmpDir.back() == '/') {
+      globalTmpDir.pop_back();
+    }

@@ -3127,7 +3219,9 @@ void DerivationGoal::registerOutputs() {
-      if (!worker.store.isValidPath(i.second.path)) allValid = false;
+      if (!worker.store.isValidPath(i.second.path)) {
+        allValid = false;
+      }

@@ -3150,7 +3244,9 @@ void DerivationGoal::registerOutputs() {
-    if (missingPaths.find(path) == missingPaths.end()) continue;
+    if (missingPaths.find(path) == missingPaths.end()) {
+      continue;
+    }

@@ -3168,7 +3264,9 @@ void DerivationGoal::registerOutputs() {
-      if (buildMode != bmCheck) actualPath = worker.store.toRealPath(path);
+      if (buildMode != bmCheck) {
+        actualPath = worker.store.toRealPath(path);
+      }

@@ -3177,7 +3275,9 @@ void DerivationGoal::registerOutputs() {
-      if (buildMode == bmCheck && redirected != "") actualPath = redirected;
+      if (buildMode == bmCheck && redirected != "") {
+        actualPath = redirected;
+      }

@@ -3293,7 +3393,9 @@ void DerivationGoal::registerOutputs() {
-      if (!worker.store.isValidPath(path)) continue;
+      if (!worker.store.isValidPath(path)) {
+        continue;
+      }

@@ -3353,7 +3455,9 @@ void DerivationGoal::registerOutputs() {
-    if (!info.references.empty()) info.ca.clear();
+    if (!info.references.empty()) {
+      info.ca.clear();
+    }

@@ -3434,7 +3538,9 @@ void DerivationGoal::registerOutputs() {
-  if (delayedException) std::rethrow_exception(delayedException);
+  if (delayedException) {
+    std::rethrow_exception(delayedException);
+  }

@@ -3466,7 +3572,9 @@ void DerivationGoal::checkOutputs(
-    if (!pathsDone.insert(path).second) continue;
+    if (!pathsDone.insert(path).second) {
+      continue;
+    }

@@ -3499,22 +3607,30 @@ void DerivationGoal::checkOutputs(
-      if (!value) return;
+      if (!value) {
+        return;
+      }
-      if (recursive && checks.ignoreSelfRefs) used.erase(info.path);
+      if (recursive && checks.ignoreSelfRefs) {
+        used.erase(info.path);
+      }
-          if (!spec.count(i)) badPaths.insert(i);
+          if (!spec.count(i)) {
+            badPaths.insert(i);
+          }
-          if (spec.count(i)) badPaths.insert(i);
+          if (spec.count(i)) {
+            badPaths.insert(i);
+          }

@@ -3595,7 +3711,9 @@ void DerivationGoal::checkOutputs(
-  if (!settings.keepLog) return "";
+  if (!settings.keepLog) {
+    return "";
+  }

@@ -3625,8 +3743,12 @@ Path DerivationGoal::openLogFile() {
-  if (logSink2) logSink2->finish();
-  if (logFileSink) logFileSink->flush();
+  if (logSink2) {
+    logSink2->finish();
+  }
+  if (logFileSink) {
+    logFileSink->flush();
+  }

@@ -3682,7 +3804,9 @@ void DerivationGoal::handleChildOutput(int fd, const string& data) {
-  if (!currentLogLine.empty()) flushLine();
+  if (!currentLogLine.empty()) {
+    flushLine();
+  }

@@ -3691,7 +3815,9 @@ void DerivationGoal::flushLine() {
-    if (logTail.size() > settings.logLines) logTail.pop_front();
+    if (logTail.size() > settings.logLines) {
+      logTail.pop_front();
+    }

@@ -3732,7 +3858,9 @@ void DerivationGoal::done(BuildResult::Status status, const string& msg) {
-  if (result.status == BuildResult::TimedOut) worker.timedOut = true;
+  if (result.status == BuildResult::TimedOut) {
+    worker.timedOut = true;
+  }

@@ -3740,9 +3868,13 @@ void DerivationGoal::done(BuildResult::Status status, const string& msg) {
-    if (status == BuildResult::Built) worker.doneBuilds++;
+    if (status == BuildResult::Built) {
+      worker.doneBuilds++;
+    }
-    if (status != BuildResult::DependencyFailed) worker.failedBuilds++;
+    if (status != BuildResult::DependencyFailed) {
+      worker.failedBuilds++;
+    }

@@ -4068,7 +4200,9 @@ void SubstitutionGoal::finished() {
-  if (fd == outPipe.readSide.get()) worker.wakeUp(shared_from_this());
+  if (fd == outPipe.readSide.get()) {
+    worker.wakeUp(shared_from_this());
+  }

@@ -4077,7 +4211,9 @@ static bool working = false;
-  if (working) abort();
+  if (working) {
+    abort();
+  }

@@ -4160,7 +4296,9 @@ void Worker::removeGoal(GoalPtr goal) {
-    if (goal) wakeUp(goal);
+    if (goal) {
+      wakeUp(goal);
+    }

@@ -4192,7 +4330,9 @@ void Worker::childTerminated(Goal* goal, bool wakeSleepers) {
-  if (i == children.end()) return;
+  if (i == children.end()) {
+    return;
+  }

@@ -4205,7 +4345,9 @@ void Worker::childTerminated(Goal* goal, bool wakeSleepers) {
-    if (goal) wakeUp(goal);
+    if (goal) {
+      wakeUp(goal);
+    }

@@ -4246,17 +4388,23 @@ void Worker::run(const Goals& _topGoals) {
-      if (goal) awake2.insert(goal);
+      if (goal) {
+        awake2.insert(goal);
+      }
-      if (topGoals.empty()) break; // stuff may have been cancelled
+      if (topGoals.empty()) {
+        break;
+      } // stuff may have been cancelled
-    if (topGoals.empty()) break;
+    if (topGoals.empty()) {
+      break;
+    }

@@ -4300,7 +4448,9 @@ void Worker::waitForInput() {
-    if (!i.respectTimeouts) continue;
+    if (!i.respectTimeouts) {
+      continue;
+    }

@@ -4344,14 +4494,20 @@ void Worker::waitForInput() {
-      if (j >= FD_SETSIZE) throw Error("reached FD_SETSIZE limit");
+      if (j >= FD_SETSIZE) {
+        throw Error("reached FD_SETSIZE limit");
+      }
-      if (j >= fdMax) fdMax = j + 1;
+      if (j >= fdMax) {
+        fdMax = j + 1;
+      }
-    if (errno == EINTR) return;
+    if (errno == EINTR) {
+      return;
+    }

@@ -4414,7 +4570,9 @@ void Worker::waitForInput() {
-      if (goal) wakeUp(goal);
+      if (goal) {
+        wakeUp(goal);
+      }

@@ -4452,7 +4610,9 @@ unsigned int Worker::exitStatus() {
-  if (i != pathContentsGoodCache.end()) return i->second;
+  if (i != pathContentsGoodCache.end()) {
+    return i->second;
+  }

@@ -4543,7 +4703,9 @@ BuildResult LocalStore::buildDerivation(const Path& drvPath,
-  if (isValidPath(path)) return;
+  if (isValidPath(path)) {
+    return;
+  }
third_party/nix/src/libstore/builtins/buildenv.cc

@@ -99,7 +99,9 @@ static void createLinks(const Path& srcDir, const Path& dstDir, int priority) {
-      if (prevPriority < priority) continue;
+      if (prevPriority < priority) {
+        continue;
+      }

@@ -124,7 +126,9 @@ static FileProp postponed = FileProp{};
-  if (done.count(pkgDir)) return;
+  if (done.count(pkgDir)) {
+    return;
+  }

@@ -132,9 +136,13 @@ static void addPkg(const Path& pkgDir, int priority) {
-      if (!done.count(p)) postponed.insert(p);
+      if (!done.count(p)) {
+        postponed.insert(p);
+      }
-    if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
+    if (e.errNo != ENOENT && e.errNo != ENOTDIR) {
+      throw;
+    }

@@ -151,7 +159,9 @@ typedef std::vector<Package> Packages;
-    if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
+    if (i == drv.env.end()) {
+      throw Error("attribute '%s' missing", name);
+    }

@@ -187,7 +197,9 @@ void builtinBuildenv(const BasicDerivation& drv) {
-    if (pkg.active) addPkg(pkg.path, pkg.priority);
+    if (pkg.active) {
+      addPkg(pkg.path, pkg.priority);
+    }
third_party/nix/src/libstore/builtins/fetchurl.cc

@@ -61,7 +61,9 @@ void builtinFetchurl(const BasicDerivation& drv, const std::string& netrcData) {
-      if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
+      if (!hasSuffix(hashedMirror, "/")) {
+        hashedMirror += '/';
+      }
third_party/nix/src/libstore/crypto.cc (vendored, 16 changes)

@@ -11,7 +11,9 @@ namespace nix {
-  if (colon == std::string::npos || colon == 0) return {"", ""};
+  if (colon == std::string::npos || colon == 0) {
+    return {"", ""};
+  }

@@ -21,7 +23,9 @@ Key::Key(const string& s) {
-  if (name == "" || key == "") throw Error("secret key is corrupt");
+  if (name == "" || key == "") {
+    throw Error("secret key is corrupt");
+  }

@@ -76,10 +80,14 @@ bool verifyDetached(const std::string& data, const std::string& sig,
-  if (key == publicKeys.end()) return false;
+  if (key == publicKeys.end()) {
+    return false;
+  }
-  if (sig2.size() != crypto_sign_BYTES) throw Error("signature is not valid");
+  if (sig2.size() != crypto_sign_BYTES) {
+    throw Error("signature is not valid");
+  }
third_party/nix/src/libstore/download.cc (vendored, 44 changes)

@@ -232,7 +232,9 @@ struct CurlDownloader : public Downloader {
-    if (readOffset == request.data->length()) return 0;
+    if (readOffset == request.data->length()) {
+      return 0;
+    }

@@ -291,7 +293,9 @@ struct CurlDownloader : public Downloader {
-    if (request.head) curl_easy_setopt(req, CURLOPT_NOBODY, 1);
+    if (request.head) {
+      curl_easy_setopt(req, CURLOPT_NOBODY, 1);
+    }

@@ -336,7 +340,9 @@ struct CurlDownloader : public Downloader {
-    if (effectiveUriCStr) result.effectiveUri = effectiveUriCStr;
+    if (effectiveUriCStr) {
+      result.effectiveUri = effectiveUriCStr;
+    }

@@ -664,7 +670,9 @@ struct CurlDownloader : public Downloader {
-    if (slash == std::string::npos) throw nix::Error("bad S3 URI '%s'", path);
+    if (slash == std::string::npos) {
+      throw nix::Error("bad S3 URI '%s'", path);
+    }

@@ -766,7 +774,9 @@ void Downloader::download(DownloadRequest&& request, Sink& sink) {
-    if (state->quit) return;
+    if (state->quit) {
+      return;
+    }

@@ -808,7 +818,9 @@ void Downloader::download(DownloadRequest&& request, Sink& sink) {
-      if (state->exc) std::rethrow_exception(state->exc);
+      if (state->exc) {
+        std::rethrow_exception(state->exc);
+      }

@@ -835,7 +847,9 @@ CachedDownloadResult Downloader::downloadCached(
-    if (p != string::npos) name = string(url, p + 1);
+    if (p != string::npos) {
+      name = string(url, p + 1);
+    }

@@ -919,7 +933,9 @@ CachedDownloadResult Downloader::downloadCached(
-      if (storePath.empty()) throw;
+      if (storePath.empty()) {
+        throw;
+      }

@@ -933,7 +949,9 @@ CachedDownloadResult Downloader::downloadCached(
-      if (!store->isValidPath(unpackedStorePath)) unpackedStorePath = "";
+      if (!store->isValidPath(unpackedStorePath)) {
+        unpackedStorePath = "";
+      }

@@ -970,9 +988,13 @@ CachedDownloadResult Downloader::downloadCached(
-  if (s.compare(0, 8, "channel:") == 0) return true;
+  if (s.compare(0, 8, "channel:") == 0) {
+    return true;
+  }
-  if (pos == string::npos) return false;
+  if (pos == string::npos) {
+    return false;
+  }
third_party/nix/src/libstore/export-import.cc (vendored, 12 changes)

@@ -59,7 +59,9 @@ Paths Store::importPaths(Source& source, std::shared_ptr<FSAccessor> accessor,
-    if (n == 0) break;
+    if (n == 0) {
+      break;
+    }

@@ -82,13 +84,17 @@ Paths Store::importPaths(Source& source, std::shared_ptr<FSAccessor> accessor,
-    if (info.deriver != "") assertStorePath(info.deriver);
+    if (info.deriver != "") {
+      assertStorePath(info.deriver);
+    }
-    if (readInt(source) == 1) readString(source);
+    if (readInt(source) == 1) {
+      readString(source);
+    }
third_party/nix/src/libstore/gc.cc (vendored, 104 changes)

@@ -161,7 +161,9 @@ void LocalStore::addTempRoot(const Path& path) {
-    if (st.st_size == 0) break;
+    if (st.st_size == 0) {
+      break;
+    }

@@ -196,13 +198,15 @@ void LocalStore::findTempRoots(FDs& fds, Roots& tempRoots, bool censor) {
-      if (errno == ENOENT) continue;
+      if (errno == ENOENT) {
+        continue;
+      }
-    // if (*fd == -1) continue;
+    // if (*fd == -1) { continue; }

@@ -249,7 +253,9 @@ void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) {
-    if (type == DT_UNKNOWN) type = getFileType(path);
+    if (type == DT_UNKNOWN) {
+      type = getFileType(path);
+    }

@@ -258,7 +264,9 @@ void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) {
-      if (isInStore(target)) foundRoot(path, target);
+      if (isInStore(target)) {
+        foundRoot(path, target);
+      }

@@ -271,9 +279,13 @@ void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) {
-      if (!S_ISLNK(st2.st_mode)) return;
+      if (!S_ISLNK(st2.st_mode)) {
+        return;
+      }
-      if (isInStore(target2)) foundRoot(target, target2);
+      if (isInStore(target2)) {
+        foundRoot(target, target2);
+      }

@@ -376,7 +388,9 @@ void LocalStore::findRuntimeRoots(Roots& roots, bool censor) {
-        if (errno == ENOENT || errno == EACCES) continue;
+        if (errno == ENOENT || errno == EACCES) {
+          continue;
+        }

@@ -385,7 +399,9 @@ void LocalStore::findRuntimeRoots(Roots& roots, bool censor) {
-        if (errno == ESRCH) continue;
+        if (errno == ESRCH) {
+          continue;
+        }

@@ -408,12 +424,16 @@ void LocalStore::findRuntimeRoots(Roots& roots, bool censor) {
-        if (errno == ENOENT || errno == EACCES || errno == ESRCH) continue;
+        if (errno == ENOENT || errno == EACCES || errno == ESRCH) {
+          continue;
+        }
-    if (errno) throw SysError("iterating /proc");
+    if (errno) {
+      throw SysError("iterating /proc");
+    }

@@ -495,7 +515,9 @@ void LocalStore::deletePathRecursive(GCState& state, const Path& path) {
-      if (i != path) deletePathRecursive(state, i);
+      if (i != path) {
+        deletePathRecursive(state, i);
+      }

@@ -504,7 +526,9 @@ void LocalStore::deletePathRecursive(GCState& state, const Path& path) {
-    if (errno == ENOENT) return;
+    if (errno == ENOENT) {
+      return;
+    }

@@ -549,11 +573,17 @@ void LocalStore::deletePathRecursive(GCState& state, const Path& path) {
-  if (visited.count(path)) return false;
+  if (visited.count(path)) {
+    return false;
+  }
-  if (state.alive.count(path)) return true;
+  if (state.alive.count(path)) {
+    return true;
+  }
-  if (state.dead.count(path)) return false;
+  if (state.dead.count(path)) {
+    return false;
+  }

@@ -563,7 +593,9 @@ bool LocalStore::canReachRoot(GCState& state, PathSet& visited,
-  if (!isStorePath(path) || !isValidPath(path)) return false;
+  if (!isStorePath(path) || !isValidPath(path)) {
+    return false;
+  }

@@ -600,7 +632,9 @@ void LocalStore::tryToDelete(GCState& state, const Path& path) {
-  if (realPath == linksDir || realPath == trashDir) return;
+  if (realPath == linksDir || realPath == trashDir) {
+    return;
+  }

@@ -608,16 +642,22 @@ void LocalStore::tryToDelete(GCState& state, const Path& path) {
-    if (isActiveTempFile(state, path, ".lock")) return;
+    if (isActiveTempFile(state, path, ".lock")) {
+      return;
+    }
-    if (isActiveTempFile(state, path, ".chroot")) return;
+    if (isActiveTempFile(state, path, ".chroot")) {
+      return;
+    }
-    if (isActiveTempFile(state, path, ".check")) return;
+    if (isActiveTempFile(state, path, ".check")) {
+      return;
+    }

@@ -653,7 +693,9 @@ void LocalStore::removeUnusedLinks(const GCState& state) {
-    if (name == "." || name == "..") continue;
+    if (name == "." || name == "..") {
+      continue;
+    }

@@ -789,7 +831,9 @@ void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) {
-      if (name == "." || name == "..") continue;
+      if (name == "." || name == "..") {
+        continue;
+      }

@@ -840,7 +884,7 @@ void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) {
-  // if (options.action == GCOptions::gcDeleteDead) vacuumDB();
+  // if (options.action == GCOptions::gcDeleteDead) { vacuumDB(); }

@@ -879,9 +923,13 @@ void LocalStore::autoGC(bool sync) {
-  if (avail >= settings.minFree || avail >= settings.maxFree) return;
+  if (avail >= settings.minFree || avail >= settings.maxFree) {
+    return;
+  }
-  if (avail > state->availAfterGC * 0.97) return;
+  if (avail > state->availAfterGC * 0.97) {
+    return;
+  }

@@ -919,7 +967,9 @@ void LocalStore::autoGC(bool sync) {
-  if (sync) future.get();
+  if (sync) {
+    future.get();
+  }
third_party/nix/src/libstore/globals.cc (vendored, 8 changes)

@@ -98,7 +98,9 @@ StringSet Settings::getDefaultSystemFeatures() {
-  if (access("/dev/kvm", R_OK | W_OK) == 0) features.insert("kvm");
+  if (access("/dev/kvm", R_OK | W_OK) == 0) {
+    features.insert("kvm");
+  }

@@ -171,7 +173,9 @@ void initPlugins() {
-      if (e.errNo != ENOTDIR) throw;
+      if (e.errNo != ENOTDIR) {
+        throw;
+      }
third_party/nix/src/libstore/http-binary-cache-store.cc

@@ -23,7 +23,9 @@ class HttpBinaryCacheStore : public BinaryCacheStore {
-    if (cacheUri.back() == '/') cacheUri.pop_back();
+    if (cacheUri.back() == '/') {
+      cacheUri.pop_back();
+    }

@@ -57,7 +59,9 @@ class HttpBinaryCacheStore : public BinaryCacheStore {
-    if (state->enabled) return;
+    if (state->enabled) {
+      return;
+    }
third_party/nix/src/libstore/legacy-ssh-store.cc

@@ -99,7 +99,9 @@ struct LegacySSHStore : public Store {
-    if (info->path.empty()) return callback(nullptr);
+    if (info->path.empty()) {
+      return callback(nullptr);
+    }

@@ -253,7 +255,9 @@ struct LegacySSHStore : public Store {
-      if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
+      if (std::string(uri, 0, uriScheme.size()) != uriScheme) {
+        return 0;
+      }
third_party/nix/src/libstore/local-fs-store.cc

@@ -68,7 +68,9 @@ ref<FSAccessor> LocalFSStore::getFSAccessor() {
-  if (!isValidPath(path)) throw Error(format("path '%s' is not valid") % path);
+  if (!isValidPath(path)) {
+    throw Error(format("path '%s' is not valid") % path);
+  }

@@ -85,7 +87,9 @@ std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path& path_) {
-    if (path == "") return nullptr;
+    if (path == "") {
+      return nullptr;
+    }
|
108 third_party/nix/src/libstore/local-store.cc vendored

@@ -268,7 +268,9 @@ LocalStore::~LocalStore() {

{
auto state(_state.lock());
if (state->gcRunning) future = state->gcFuture;
if (state->gcRunning) {
future = state->gcFuture;
}
}

if (future.valid()) {

@@ -375,7 +377,9 @@ void LocalStore::openDB(State& state, bool create) {
bind mount. So make the Nix store writable for this process. */
void LocalStore::makeStoreWritable() {
#if __linux__
if (getuid() != 0) return;
if (getuid() != 0) {
return;
}
/* Check if /nix/store is on a read-only mount. */
struct statvfs stat;
if (statvfs(realStoreDir.c_str(), &stat) != 0)

@@ -469,7 +473,9 @@ static void canonicalisePathMetaData_(const Path& path, uid_t fromUid,
std::string(eaBuf.data(), eaSize), std::string("\000", 1))) {
/* Ignore SELinux security labels since these cannot be
removed even by root. */
if (eaName == "security.selinux") continue;
if (eaName == "security.selinux") {
continue;
}
if (lremovexattr(path.c_str(), eaName.c_str()) == -1)
throw SysError("removing extended attribute '%s' from '%s'", eaName,
path);

@@ -654,7 +660,9 @@ void LocalStore::queryPathInfoUncached(
/* Get the path info. */
auto useQueryPathInfo(state->stmtQueryPathInfo.use()(path));

if (!useQueryPathInfo.next()) return std::shared_ptr<ValidPathInfo>();
if (!useQueryPathInfo.next()) {
return std::shared_ptr<ValidPathInfo>();
}

info->id = useQueryPathInfo.getInt(0);

@@ -667,7 +675,9 @@ void LocalStore::queryPathInfoUncached(
info->registrationTime = useQueryPathInfo.getInt(2);

auto s = (const char*)sqlite3_column_text(state->stmtQueryPathInfo, 3);
if (s) info->deriver = s;
if (s) {
info->deriver = s;
}

/* Note that narSize = NULL yields 0. */
info->narSize = useQueryPathInfo.getInt(4);

@@ -675,10 +685,14 @@ void LocalStore::queryPathInfoUncached(
info->ultimate = useQueryPathInfo.getInt(5) == 1;

s = (const char*)sqlite3_column_text(state->stmtQueryPathInfo, 6);
if (s) info->sigs = tokenizeString<StringSet>(s, " ");
if (s) {
info->sigs = tokenizeString<StringSet>(s, " ");
}

s = (const char*)sqlite3_column_text(state->stmtQueryPathInfo, 7);
if (s) info->ca = s;
if (s) {
info->ca = s;
}

/* Get the references. */
auto useQueryReferences(state->stmtQueryReferences.use()(info->id));

@@ -706,7 +720,9 @@ void LocalStore::updatePathInfo(State& state, const ValidPathInfo& info) {

uint64_t LocalStore::queryValidPathId(State& state, const Path& path) {
auto use(state.stmtQueryPathInfo.use()(path));
if (!use.next()) throw Error(format("path '%1%' is not valid") % path);
if (!use.next()) {
throw Error(format("path '%1%' is not valid") % path);
}
return use.getInt(0);
}

@@ -725,7 +741,9 @@ PathSet LocalStore::queryValidPaths(const PathSet& paths,
SubstituteFlag maybeSubstitute) {
PathSet res;
for (auto& i : paths)
if (isValidPath(i)) res.insert(i);
if (isValidPath(i)) {
res.insert(i);
}
return res;
}

@@ -802,7 +820,9 @@ StringSet LocalStore::queryDerivationOutputNames(const Path& path) {
}

Path LocalStore::queryPathFromHashPart(const string& hashPart) {
if (hashPart.size() != storePathHashLen) throw Error("invalid hash part");
if (hashPart.size() != storePathHashLen) {
throw Error("invalid hash part");
}

Path prefix = storeDir + "/" + hashPart;

@@ -812,7 +832,9 @@ Path LocalStore::queryPathFromHashPart(const string& hashPart) {
auto useQueryPathFromHashPart(
state->stmtQueryPathFromHashPart.use()(prefix));

if (!useQueryPathFromHashPart.next()) return "";
if (!useQueryPathFromHashPart.next()) {
return "";
}

const char* s =
(const char*)sqlite3_column_text(state->stmtQueryPathFromHashPart, 0);

@@ -822,15 +844,23 @@ Path LocalStore::queryPathFromHashPart(const string& hashPart) {
}

PathSet LocalStore::querySubstitutablePaths(const PathSet& paths) {
if (!settings.useSubstitutes) return PathSet();
if (!settings.useSubstitutes) {
return PathSet();
}

auto remaining = paths;
PathSet res;

for (auto& sub : getDefaultSubstituters()) {
if (remaining.empty()) break;
if (sub->storeDir != storeDir) continue;
if (!sub->wantMassQuery()) continue;
if (remaining.empty()) {
break;
}
if (sub->storeDir != storeDir) {
continue;
}
if (!sub->wantMassQuery()) {
continue;
}

auto valid = sub->queryValidPaths(remaining);

@@ -849,11 +879,17 @@ PathSet LocalStore::querySubstitutablePaths(const PathSet& paths) {

void LocalStore::querySubstitutablePathInfos(const PathSet& paths,
SubstitutablePathInfos& infos) {
if (!settings.useSubstitutes) return;
if (!settings.useSubstitutes) {
return;
}
for (auto& sub : getDefaultSubstituters()) {
if (sub->storeDir != storeDir) continue;
if (sub->storeDir != storeDir) {
continue;
}
for (auto& path : paths) {
if (infos.count(path)) continue;
if (infos.count(path)) {
continue;
}
DLOG(INFO) << "checking substituter '" << sub->getUri() << "' for path '"
<< path << "'";
try {

@@ -887,7 +923,9 @@ void LocalStore::registerValidPaths(const ValidPathInfos& infos) {
be fsync-ed. So some may want to fsync them before registering
the validity, at the expense of some speed of the path
registering operation. */
if (settings.syncBeforeRegistering) sync();
if (settings.syncBeforeRegistering) {
sync();
}

return retrySQLite<void>([&]() {
auto state(_state.lock());

@@ -979,7 +1017,9 @@ void LocalStore::addToStore(const ValidPathInfo& info, Source& source,
/* Lock the output path. But don't lock if we're being called
from a build hook (whose parent process already acquired a
lock on this path). */
if (!locksHeld.count(info.path)) outputLock.lockPaths({realPath});
if (!locksHeld.count(info.path)) {
outputLock.lockPaths({realPath});
}

if (repair || !isValidPath(info.path)) {
deletePath(realPath);

@@ -1289,7 +1329,9 @@ void LocalStore::verifyPath(const Path& path, const PathSet& store,
for (auto& i : referrers)
if (i != path) {
verifyPath(i, store, done, validPaths, repair, errors);
if (validPaths.find(i) != validPaths.end()) canInvalidate = false;
if (validPaths.find(i) != validPaths.end()) {
canInvalidate = false;
}
}

if (canInvalidate) {

@@ -1328,7 +1370,9 @@ static void makeMutable(const Path& path) {

struct stat st = lstat(path);

if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) return;
if (!S_ISDIR(st.st_mode) && !S_ISREG(st.st_mode)) {
return;
}

if (S_ISDIR(st.st_mode)) {
for (auto& i : readDirectory(path)) makeMutable(path + "/" + i.name);

@@ -1339,7 +1383,9 @@ static void makeMutable(const Path& path) {
security hole). */
AutoCloseFD fd = open(path.c_str(), O_RDONLY | O_NOFOLLOW | O_CLOEXEC);
if (fd == -1) {
if (errno == ELOOP) return; // it's a symlink
if (errno == ELOOP) {
return;
} // it's a symlink
throw SysError(format("opening file '%1%'") % path);
}

@@ -1347,16 +1393,24 @@ static void makeMutable(const Path& path) {

/* Silently ignore errors getting/setting the immutable flag so
that we work correctly on filesystems that don't support it. */
if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) return;
if (ioctl(fd, FS_IOC_GETFLAGS, &flags)) {
return;
}
old = flags;
flags &= ~FS_IMMUTABLE_FL;
if (old == flags) return;
if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) return;
if (old == flags) {
return;
}
if (ioctl(fd, FS_IOC_SETFLAGS, &flags)) {
return;
}
}

/* Upgrade from schema 6 (Nix 0.15) to schema 7 (Nix >= 1.3). */
void LocalStore::upgradeStore7() {
if (getuid() != 0) return;
if (getuid() != 0) {
return;
}
printError(
"removing immutable bits from the Nix store (this may take a while)...");
makeMutable(realStoreDir);
4 third_party/nix/src/libstore/machines.cc vendored

@@ -50,7 +50,9 @@ void parseMachines(const std::string& s, Machines& machines) {
for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {
trim(line);
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
if (line.empty()) continue;
if (line.empty()) {
continue;
}

if (line[0] == '@') {
auto file = trim(std::string(line, 1));
64 third_party/nix/src/libstore/misc.cc vendored

@@ -27,8 +27,12 @@ void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,
enqueue = [&](const Path& path) -> void {
{
auto state(state_.lock());
if (state->exc) return;
if (state->paths.count(path)) return;
if (state->exc) {
return;
}
if (state->paths.count(path)) {
return;
}
state->paths.insert(path);
state->pending++;
}

@@ -44,7 +48,9 @@ void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,
PathSet referrers;
queryReferrers(path, referrers);
for (auto& ref : referrers)
if (ref != path) enqueue(ref);
if (ref != path) {
enqueue(ref);
}

if (includeOutputs)
for (auto& i : queryValidDerivers(path)) enqueue(i);

@@ -56,11 +62,15 @@ void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,

} else {
for (auto& ref : info->references)
if (ref != path) enqueue(ref);
if (ref != path) {
enqueue(ref);
}

if (includeOutputs && isDerivation(path))
for (auto& i : queryDerivationOutputs(path))
if (isValidPath(i)) enqueue(i);
if (isValidPath(i)) {
enqueue(i);
}

if (includeDerivers && isValidPath(info->deriver))
enqueue(info->deriver);

@@ -69,14 +79,20 @@ void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,
{
auto state(state_.lock());
assert(state->pending);
if (!--state->pending) done.notify_one();
if (!--state->pending) {
done.notify_one();
}
}

} catch (...) {
auto state(state_.lock());
if (!state->exc) state->exc = std::current_exception();
if (!state->exc) {
state->exc = std::current_exception();
}
assert(state->pending);
if (!--state->pending) done.notify_one();
if (!--state->pending) {
done.notify_one();
}
};
}});
};

@@ -86,7 +102,9 @@ void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,
{
auto state(state_.lock());
while (state->pending) state.wait(done);
if (state->exc) std::rethrow_exception(state->exc);
if (state->exc) {
std::rethrow_exception(state->exc);
}
}
}

@@ -139,7 +157,9 @@ void Store::queryMissing(const PathSet& targets, PathSet& willBuild_,

auto checkOutput = [&](const Path& drvPath, ref<Derivation> drv,
const Path& outPath, ref<Sync<DrvState>> drvState_) {
if (drvState_->lock()->done) return;
if (drvState_->lock()->done) {
return;
}

SubstitutablePathInfos infos;
querySubstitutablePathInfos({outPath}, infos);

@@ -150,7 +170,9 @@ void Store::queryMissing(const PathSet& targets, PathSet& willBuild_,
} else {
{
auto drvState(drvState_->lock());
if (drvState->done) return;
if (drvState->done) {
return;
}
assert(drvState->left);
drvState->left--;
drvState->outPaths.insert(outPath);

@@ -165,7 +187,9 @@ void Store::queryMissing(const PathSet& targets, PathSet& willBuild_,
doPath = [&](const Path& path) {
{
auto state(state_.lock());
if (state->done.count(path)) return;
if (state->done.count(path)) {
return;
}
state->done.insert(path);
}

@@ -186,7 +210,9 @@ void Store::queryMissing(const PathSet& targets, PathSet& willBuild_,
for (auto& j : drv.outputs)
if (wantOutput(j.first, i2.second) && !isValidPath(j.second.path))
invalid.insert(j.second.path);
if (invalid.empty()) return;
if (invalid.empty()) {
return;
}

if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));

@@ -197,7 +223,9 @@ void Store::queryMissing(const PathSet& targets, PathSet& willBuild_,
mustBuildDrv(i2.first, drv);

} else {
if (isValidPath(path)) return;
if (isValidPath(path)) {
return;
}

SubstitutablePathInfos infos;
querySubstitutablePathInfos({path}, infos);

@@ -240,7 +268,9 @@ Paths Store::topoSortPaths(const PathSet& paths) {
format("cycle detected in the references of '%1%' from '%2%'") %
path % *parent);

if (visited.find(path) != visited.end()) return;
if (visited.find(path) != visited.end()) {
return;
}
visited.insert(path);
parents.insert(path);

@@ -253,7 +283,9 @@ Paths Store::topoSortPaths(const PathSet& paths) {
for (auto& i : references)
/* Don't traverse into paths that don't exist. That can
happen due to substitutes for non-existent paths. */
if (i != path && paths.find(i) != paths.end()) dfsVisit(i, &path);
if (i != path && paths.find(i) != paths.end()) {
dfsVisit(i, &path);
}

sorted.push_front(path);
parents.erase(path);
24 third_party/nix/src/libstore/nar-accessor.cc vendored

@@ -133,7 +133,9 @@ struct NarAccessor : public FSAccessor {
for (auto it = path.begin(); it != end;) {
// because it != end, the remaining component is non-empty so we need
// a directory
if (current->type != FSAccessor::Type::tDirectory) return nullptr;
if (current->type != FSAccessor::Type::tDirectory) {
return nullptr;
}

// skip slash (canonPath above ensures that this is always a slash)
assert(*it == '/');

@@ -142,7 +144,9 @@ struct NarAccessor : public FSAccessor {
// lookup current component
auto next = std::find(it, end, '/');
auto child = current->children.find(std::string(it, next));
if (child == current->children.end()) return nullptr;
if (child == current->children.end()) {
return nullptr;
}
current = &child->second;

it = next;

@@ -160,7 +164,9 @@ struct NarAccessor : public FSAccessor {

Stat stat(const Path& path) override {
auto i = find(path);
if (i == nullptr) return {FSAccessor::Type::tMissing, 0, false};
if (i == nullptr) {
return {FSAccessor::Type::tMissing, 0, false};
}
return {i->type, i->size, i->isExecutable, i->start};
}

@@ -183,7 +189,9 @@ struct NarAccessor : public FSAccessor {
throw Error(format("path '%1%' inside NAR file is not a regular file") %
path);

if (getNarBytes) return getNarBytes(i.start, i.size);
if (getNarBytes) {
return getNarBytes(i.start, i.size);
}

assert(nar);
return std::string(*nar, i.start, i.size);

@@ -216,8 +224,12 @@ void listNar(JSONPlaceholder& res, ref<FSAccessor> accessor, const Path& path,
case FSAccessor::Type::tRegular:
obj.attr("type", "regular");
obj.attr("size", st.fileSize);
if (st.isExecutable) obj.attr("executable", true);
if (st.narOffset) obj.attr("narOffset", st.narOffset);
if (st.isExecutable) {
obj.attr("executable", true);
}
if (st.narOffset) {
obj.attr("narOffset", st.narOffset);
}
break;
case FSAccessor::Type::tDirectory:
obj.attr("type", "directory");
@@ -143,7 +143,9 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {

Cache& getCache(State& state, const std::string& uri) {
auto i = state.caches.find(uri);
if (i == state.caches.end()) abort();
if (i == state.caches.end()) {
abort();
}
return i->second;
}

@@ -170,7 +172,9 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
auto i = state->caches.find(uri);
if (i == state->caches.end()) {
auto queryCache(state->queryCache.use()(uri));
if (!queryCache.next()) return false;
if (!queryCache.next()) {
return false;
}
state->caches.emplace(
uri, Cache{(int)queryCache.getInt(0), queryCache.getStr(1),
queryCache.getInt(2) != 0, (int)queryCache.getInt(3)});

@@ -199,9 +203,13 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
now - settings.ttlNegativeNarInfoCache)(
now - settings.ttlPositiveNarInfoCache));

if (!queryNAR.next()) return {oUnknown, 0};
if (!queryNAR.next()) {
return {oUnknown, 0};
}

if (!queryNAR.getInt(0)) return {oInvalid, 0};
if (!queryNAR.getInt(0)) {
return {oInvalid, 0};
}

auto narInfo = make_ref<NarInfo>();

@@ -210,7 +218,9 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
(namePart.empty() ? "" : "-" + namePart);
narInfo->url = queryNAR.getStr(2);
narInfo->compression = queryNAR.getStr(3);
if (!queryNAR.isNull(4)) narInfo->fileHash = Hash(queryNAR.getStr(4));
if (!queryNAR.isNull(4)) {
narInfo->fileHash = Hash(queryNAR.getStr(4));
}
narInfo->fileSize = queryNAR.getInt(5);
narInfo->narHash = Hash(queryNAR.getStr(6));
narInfo->narSize = queryNAR.getInt(7);
56 third_party/nix/src/libstore/nar-info.cc vendored

@@ -22,17 +22,23 @@ NarInfo::NarInfo(const Store& store, const std::string& s,
size_t pos = 0;
while (pos < s.size()) {
size_t colon = s.find(':', pos);
if (colon == std::string::npos) corrupt();
if (colon == std::string::npos) {
corrupt();
}

std::string name(s, pos, colon - pos);

size_t eol = s.find('\n', colon + 2);
if (eol == std::string::npos) corrupt();
if (eol == std::string::npos) {
corrupt();
}

std::string value(s, colon + 2, eol - colon - 2);

if (name == "StorePath") {
if (!store.isStorePath(value)) corrupt();
if (!store.isStorePath(value)) {
corrupt();
}
path = value;
} else if (name == "URL")
url = value;

@@ -41,23 +47,33 @@ NarInfo::NarInfo(const Store& store, const std::string& s,
else if (name == "FileHash")
fileHash = parseHashField(value);
else if (name == "FileSize") {
if (!string2Int(value, fileSize)) corrupt();
if (!string2Int(value, fileSize)) {
corrupt();
}
} else if (name == "NarHash")
narHash = parseHashField(value);
else if (name == "NarSize") {
if (!string2Int(value, narSize)) corrupt();
if (!string2Int(value, narSize)) {
corrupt();
}
} else if (name == "References") {
auto refs = tokenizeString<Strings>(value, " ");
if (!references.empty()) corrupt();
if (!references.empty()) {
corrupt();
}
for (auto& r : refs) {
auto r2 = store.storeDir + "/" + r;
if (!store.isStorePath(r2)) corrupt();
if (!store.isStorePath(r2)) {
corrupt();
}
references.insert(r2);
}
} else if (name == "Deriver") {
if (value != "unknown-deriver") {
auto p = store.storeDir + "/" + value;
if (!store.isStorePath(p)) corrupt();
if (!store.isStorePath(p)) {
corrupt();
}
deriver = p;
}
} else if (name == "System")

@@ -65,16 +81,22 @@ NarInfo::NarInfo(const Store& store, const std::string& s,
else if (name == "Sig")
sigs.insert(value);
else if (name == "CA") {
if (!ca.empty()) corrupt();
if (!ca.empty()) {
corrupt();
}
ca = value;
}

pos = eol + 1;
}

if (compression == "") compression = "bzip2";
if (compression == "") {
compression = "bzip2";
}

if (path.empty() || url.empty() || narSize == 0 || !narHash) corrupt();
if (path.empty() || url.empty() || narSize == 0 || !narHash) {
corrupt();
}
}

std::string NarInfo::to_string() const {

@@ -92,13 +114,19 @@ std::string NarInfo::to_string() const {

res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";

if (!deriver.empty()) res += "Deriver: " + baseNameOf(deriver) + "\n";
if (!deriver.empty()) {
res += "Deriver: " + baseNameOf(deriver) + "\n";
}

if (!system.empty()) res += "System: " + system + "\n";
if (!system.empty()) {
res += "System: " + system + "\n";
}

for (auto sig : sigs) res += "Sig: " + sig + "\n";

if (!ca.empty()) res += "CA: " + ca + "\n";
if (!ca.empty()) {
res += "CA: " + ca + "\n";
}

return res;
}
12 third_party/nix/src/libstore/optimise-store.cc vendored

@@ -29,7 +29,9 @@ struct MakeReadOnly {
~MakeReadOnly() {
try {
/* This will make the path read-only. */
if (path != "") canonicaliseTimestampAndPermissions(path);
if (path != "") {
canonicaliseTimestampAndPermissions(path);
}
} catch (...) {
ignoreException();
}

@@ -79,7 +81,9 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path& path,
}

string name = dirent->d_name;
if (name == "." || name == "..") continue;
if (name == "." || name == "..") {
continue;
}
names.push_back(name);
}
if (errno) {

@@ -261,7 +265,9 @@ void LocalStore::optimiseStore(OptimiseStats& stats) {

for (auto& i : paths) {
addTempRoot(i);
if (!isValidPath(i)) continue; /* path was GC'ed, probably */
if (!isValidPath(i)) {
continue;
} /* path was GC'ed, probably */
{
LOG(INFO) << "optimising path '" << i << "'";
optimisePath_(stats, realStoreDir + "/" + baseNameOf(i), inodeHash);
@@ -100,7 +100,9 @@ bool ParsedDerivation::canBuildLocally() const {
return false;

for (auto& feature : getRequiredSystemFeatures())
if (!settings.systemFeatures.get().count(feature)) return false;
if (!settings.systemFeatures.get().count(feature)) {
return false;
}

return true;
}
12 third_party/nix/src/libstore/pathlocks.cc vendored

@@ -57,8 +57,12 @@ bool lockFile(int fd, LockType lockType, bool wait) {
} else {
while (flock(fd, type | LOCK_NB) != 0) {
checkInterrupt();
if (errno == EWOULDBLOCK) return false;
if (errno != EINTR) throw SysError(format("acquiring/releasing lock"));
if (errno == EWOULDBLOCK) {
return false;
}
if (errno != EINTR) {
throw SysError(format("acquiring/releasing lock"));
}
}
}

@@ -143,7 +147,9 @@ PathLocks::~PathLocks() {

void PathLocks::unlock() {
for (auto& i : fds) {
if (deletePaths) deleteLockFile(i.second, i.first);
if (deletePaths) {
deleteLockFile(i.second, i.first);
}

if (close(i.first) == -1) {
LOG(WARNING) << "cannot close lock file on '" << i.second << "'";
24 third_party/nix/src/libstore/profiles.cc vendored

@@ -19,10 +19,14 @@ static bool cmpGensByNumber(const Generation& a, const Generation& b) {
/* Parse a generation name of the format
`<profilename>-<number>-link'. */
static int parseName(const string& profileName, const string& name) {
if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
if (string(name, 0, profileName.size() + 1) != profileName + "-") {
return -1;
}
string s = string(name, profileName.size() + 1);
string::size_type p = s.find("-link");
if (p == string::npos) return -1;
if (p == string::npos) {
return -1;
}
int n;
if (string2Int(string(s, 0, p), n) && n >= 0)
return n;

@@ -135,7 +139,9 @@ void deleteGenerations(const Path& profile,
profile);

for (auto& i : gens) {
if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
if (gensToDelete.find(i.number) == gensToDelete.end()) {
continue;
}
deleteGeneration2(profile, i.number, dryRun);
}
}

@@ -171,7 +177,9 @@ void deleteOldGenerations(const Path& profile, bool dryRun) {
Generations gens = findGenerations(profile, curGen);

for (auto& i : gens)
if (i.number != curGen) deleteGeneration2(profile, i.number, dryRun);
if (i.number != curGen) {
deleteGeneration2(profile, i.number, dryRun);
}
}

void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun) {

@@ -185,7 +193,9 @@ void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun) {
for (auto i = gens.rbegin(); i != gens.rend(); ++i)
if (canDelete) {
assert(i->creationTime < t);
if (i->number != curGen) deleteGeneration2(profile, i->number, dryRun);
if (i->number != curGen) {
deleteGeneration2(profile, i->number, dryRun);
}
} else if (i->creationTime < t) {
/* We may now start deleting generations, but we don't
delete this generation yet, because this generation was

@@ -211,7 +221,9 @@ void deleteGenerationsOlderThan(const Path& profile, const string& timeSpec,

void switchLink(Path link, Path target) {
/* Hacky. */
if (dirOf(target) == dirOf(link)) target = baseNameOf(target);
if (dirOf(target) == dirOf(link)) {
target = baseNameOf(target);
}

replaceSymlink(target, link);
}
12 third_party/nix/src/libstore/references.cc vendored

@@ -35,7 +35,9 @@ static void search(const unsigned char* s, size_t len, StringSet& hashes,
match = false;
break;
}
if (!match) continue;
if (!match) {
continue;
}
string ref((const char*)s + i, refLength);
if (hashes.find(ref) != hashes.end()) {
DLOG(INFO) << "found reference to '" << ref << "' at offset " << i;

@@ -88,7 +90,9 @@ PathSet scanForReferences(const string& path, const PathSet& refs,
for (auto& i : refs) {
string baseName = baseNameOf(i);
string::size_type pos = baseName.find('-');
if (pos == string::npos) throw Error(format("bad reference '%1%'") % i);
if (pos == string::npos) {
throw Error(format("bad reference '%1%'") % i);
}
string s = string(baseName, 0, pos);
assert(s.size() == refLength);
assert(backMap.find(s) == backMap.end());

@@ -104,7 +108,9 @@ PathSet scanForReferences(const string& path, const PathSet& refs,
PathSet found;
for (auto& i : sink.seen) {
std::map<string, Path>::iterator j;
if ((j = backMap.find(i)) == backMap.end()) abort();
if ((j = backMap.find(i)) == backMap.end()) {
abort();
}
found.insert(j->second);
}
@@ -11,7 +11,9 @@ namespace nix {

RemoteFSAccessor::RemoteFSAccessor(ref<Store> store, const Path& cacheDir)
: store(store), cacheDir(cacheDir) {
if (cacheDir != "") createDirs(cacheDir);
if (cacheDir != "") {
createDirs(cacheDir);
}
}

Path RemoteFSAccessor::makeCacheFile(const Path& storePath,

@@ -51,7 +53,9 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path& path_) {
storePath);

auto i = nars.find(storePath);
if (i != nars.end()) return {i->second, restPath};
if (i != nars.end()) {
return {i->second, restPath};
}

StringSink sink;
std::string listing;

@@ -65,7 +69,9 @@ std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path& path_) {
auto narAccessor = makeLazyNarAccessor(
listing, [cacheFile](uint64_t offset, uint64_t length) {
AutoCloseFD fd = open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC);
if (!fd) throw SysError("opening NAR cache file '%s'", cacheFile);
if (!fd) {
throw SysError("opening NAR cache file '%s'", cacheFile);
}

if (lseek(fd.get(), offset, SEEK_SET) != (off_t)offset)
throw SysError("seeking in '%s'", cacheFile);
72 third_party/nix/src/libstore/remote-store.cc vendored

@@ -96,7 +96,9 @@ ref<RemoteStore::Connection> UDSRemoteStore::openConnection() {
#endif
,
0);
if (!conn->fd) throw SysError("cannot create Unix domain socket");
if (!conn->fd) {
throw SysError("cannot create Unix domain socket");
}
closeOnExec(conn->fd.get());

string socketPath = path ? *path : settings.nixDaemonSocketFile;

@@ -126,7 +128,9 @@ void RemoteStore::initConnection(Connection& conn) {
conn.to << WORKER_MAGIC_1;
conn.to.flush();
unsigned int magic = readInt(conn.from);
if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
if (magic != WORKER_MAGIC_2) {
throw Error("protocol mismatch");
}

conn.from >> conn.daemonVersion;
if (GET_PROTOCOL_MAJOR(conn.daemonVersion) !=

@@ -144,10 +148,14 @@ void RemoteStore::initConnection(Connection& conn) {
conn.to << 0;
}

if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11) conn.to << false;
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11) {
conn.to << false;
}

auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
if (ex) {
std::rethrow_exception(ex);
}
} catch (Error& e) {
throw Error("cannot open connection to remote store '%s': %s", getUri(),
e.what());

@@ -185,7 +193,9 @@ void RemoteStore::setOptions(Connection& conn) {
}

auto ex = conn.processStderr();
if (ex) std::rethrow_exception(ex);
if (ex) {
std::rethrow_exception(ex);
}
}

/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks

@@ -238,7 +248,9 @@ PathSet RemoteStore::queryValidPaths(const PathSet& paths,
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
PathSet res;
for (auto& i : paths)
if (isValidPath(i)) res.insert(i);
if (isValidPath(i)) {
res.insert(i);
}
return res;
} else {
conn->to << wopQueryValidPaths << paths;

@@ -261,7 +273,9 @@ PathSet RemoteStore::querySubstitutablePaths(const PathSet& paths) {
for (auto& i : paths) {
conn->to << wopHasSubstitutes << i;
conn.processStderr();
if (readInt(conn->from)) res.insert(i);
if (readInt(conn->from)) {
res.insert(i);
}
}
return res;
} else {

@@ -273,7 +287,9 @@ PathSet RemoteStore::querySubstitutablePaths(const PathSet& paths) {

void RemoteStore::querySubstitutablePathInfos(const PathSet& paths,
SubstitutablePathInfos& infos) {
if (paths.empty()) return;
if (paths.empty()) {
return;
}

auto conn(getConnection());

@@ -283,9 +299,13 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet& paths,
conn->to << wopQuerySubstitutablePathInfo << i;
conn.processStderr();
unsigned int reply = readInt(conn->from);
if (reply == 0) continue;
if (reply == 0) {
continue;
}
info.deriver = readString(conn->from);
if (info.deriver != "") assertStorePath(info.deriver);
if (info.deriver != "") {
assertStorePath(info.deriver);
}
info.references = readStorePaths<PathSet>(*this, conn->from);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);

@@ -300,7 +320,9 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet& paths,
Path path = readStorePath(*this, conn->from);
SubstitutablePathInfo& info(infos[path]);
info.deriver = readString(conn->from);
if (info.deriver != "") assertStorePath(info.deriver);
if (info.deriver != "") {
assertStorePath(info.deriver);
}
info.references = readStorePaths<PathSet>(*this, conn->from);
info.downloadSize = readLongLong(conn->from);
info.narSize = readLongLong(conn->from);

@@ -327,12 +349,16 @@ void RemoteStore::queryPathInfoUncached(
if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
bool valid;
conn->from >> valid;
if (!valid) throw InvalidPath(format("path '%s' is not valid") % path);
if (!valid) {
throw InvalidPath(format("path '%s' is not valid") % path);
}
}
info = std::make_shared<ValidPathInfo>();
info->path = path;
info->deriver = readString(conn->from);
if (info->deriver != "") assertStorePath(info->deriver);
if (info->deriver != "") {
assertStorePath(info->deriver);
}
info->narHash = Hash(readString(conn->from), htSHA256);
info->references = readStorePaths<PathSet>(*this, conn->from);
conn->from >> info->registrationTime >> info->narSize;

@@ -382,7 +408,9 @@ Path RemoteStore::queryPathFromHashPart(const string& hashPart) {
conn->to << wopQueryPathFromHashPart << hashPart;
conn.processStderr();
Path path = readString(conn->from);
if (!path.empty()) assertStorePath(path);
if (!path.empty()) {
assertStorePath(path);
}
return path;
}

@@ -416,7 +444,9 @@ void RemoteStore::addToStore(const ValidPathInfo& info, Source& source,
<< info.registrationTime << info.narSize << info.ultimate
<< info.sigs << info.ca << repair << !checkSigs;
bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21;
if (!tunnel) copyNAR(source, conn->to);
if (!tunnel) {
copyNAR(source, conn->to);
}
conn.processStderr(0, tunnel ? &source : nullptr);
}
}

@@ -647,12 +677,16 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink* sink,

if (msg == STDERR_WRITE) {
string s = readString(from);
if (!sink) throw Error("no sink");
if (!sink) {
throw Error("no sink");
}
(*sink)(s);
}

else if (msg == STDERR_READ) {
if (!source) throw Error("no source");
if (!source) {
throw Error("no source");
}
size_t len = readNum<size_t>(from);
auto buf = std::make_unique<unsigned char[]>(len);
writeString(buf.get(), source->read(buf.get(), len), to);

@@ -690,7 +724,9 @@ static std::string uriScheme = "unix://";
static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
if (std::string(uri, 0, uriScheme.size()) != uriScheme) {
return 0;
}
return std::make_shared<UDSRemoteStore>(
std::string(uri, uriScheme.size()), params);
});
@@ -153,7 +153,9 @@ S3Helper::DownloadResult S3Helper::getObject(const std::string& bucketName,
dynamic_cast<std::stringstream&>(result.GetBody()).str());

} catch (S3Error& e) {
if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) {
throw;
}
}

auto now2 = std::chrono::steady_clock::now();

@@ -315,7 +317,9 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore {

request.SetContentType(mimeType);

if (contentEncoding != "") request.SetContentEncoding(contentEncoding);
if (contentEncoding != "") {
request.SetContentEncoding(contentEncoding);
}

auto stream = std::make_shared<istringstream_nocopy>(data);

@@ -394,7 +398,9 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore {

for (auto object : contents) {
auto& key = object.GetKey();
if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
if (key.size() != 40 || !hasSuffix(key, ".narinfo")) {
continue;
}
paths.insert(storeDir + "/" + key.substr(0, key.size() - 8));
}

@@ -408,7 +414,9 @@ struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore {
static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, 5) != "s3://") return 0;
if (std::string(uri, 0, 5) != "s3://") {
return 0;
}
auto store =
std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
store->init();
4 third_party/nix/src/libstore/ssh-store.cc vendored

@@ -77,7 +77,9 @@ ref<RemoteStore::Connection> SSHStore::openConnection() {
static RegisterStoreImplementation regStore([](const std::string& uri,
const Store::Params& params)
-> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
if (std::string(uri, 0, uriScheme.size()) != uriScheme) {
return 0;
}
return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
});
18 third_party/nix/src/libstore/ssh.cc vendored

@@ -17,8 +17,12 @@ SSHMaster::SSHMaster(const std::string& host, const std::string& keyFile,
void SSHMaster::addCommonSSHOpts(Strings& args) {
for (auto& i : tokenizeString<Strings>(getEnv("NIX_SSHOPTS")))
args.push_back(i);
if (!keyFile.empty()) args.insert(args.end(), {"-i", keyFile});
if (compress) args.push_back("-C");
if (!keyFile.empty()) {
args.insert(args.end(), {"-i", keyFile});
}
if (compress) {
args.push_back("-C");
}
}

std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(

@@ -81,11 +85,15 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand(
}

Path SSHMaster::startMaster() {
if (!useMaster) return "";
if (!useMaster) {
return "";
}

auto state(state_.lock());

if (state->sshMaster != -1) return state->socketPath;
if (state->sshMaster != -1) {
return state->socketPath;
}

state->tmpDir =
std::make_unique<AutoDelete>(createTempDir("", "nix", true, true, 0700));

@@ -112,7 +120,7 @@ Path SSHMaster::startMaster() {
"-S", state->socketPath,
"-o", "LocalCommand=echo started",
"-o", "PermitLocalCommand=yes"};
// if (verbosity >= lvlChatty) args.push_back("-v");
// if (verbosity >= lvlChatty) { args.push_back("-v"); }
addCommonSSHOpts(args);
execvp(args.begin()->c_str(), stringsToCharPtrs(args).data());
92 third_party/nix/src/libstore/store-api.cc vendored

@@ -342,7 +342,9 @@ void Store::queryPathInfo(const Path& storePath,
try {
auto info = fut.get();

if (diskCache) diskCache->upsertNarInfo(getUri(), hashPart, info);
if (diskCache) {
diskCache->upsertNarInfo(getUri(), hashPart, info);
}

{
auto state_(state.lock());

@@ -388,7 +390,9 @@ PathSet Store::queryValidPaths(const PathSet& paths,
state->exc = std::current_exception();
}
assert(state->left);
if (!--state->left) wakeup.notify_one();
if (!--state->left) {
wakeup.notify_one();
}
}});
};

@@ -399,7 +403,9 @@ PathSet Store::queryValidPaths(const PathSet& paths,
while (true) {
auto state(state_.lock());
if (!state->left) {
if (state->exc) std::rethrow_exception(state->exc);
if (state->exc) {
std::rethrow_exception(state->exc);
}
return state->valid;
}
state.wait(wakeup);

@@ -455,7 +461,9 @@ void Store::pathInfoToJSON(JSONPlaceholder& jsonOut, const PathSet& storePaths,
for (auto& ref : info->references) jsonRefs.elem(ref);
}

if (info->ca != "") jsonPath.attr("ca", info->ca);
if (info->ca != "") {
jsonPath.attr("ca", info->ca);
}

std::pair<uint64_t, uint64_t> closureSizes;

@@ -465,12 +473,16 @@ void Store::pathInfoToJSON(JSONPlaceholder& jsonOut, const PathSet& storePaths,
}

if (includeImpureInfo) {
if (info->deriver != "") jsonPath.attr("deriver", info->deriver);
if (info->deriver != "") {
jsonPath.attr("deriver", info->deriver);
}

if (info->registrationTime)
jsonPath.attr("registrationTime", info->registrationTime);

if (info->ultimate) jsonPath.attr("ultimate", info->ultimate);
if (info->ultimate) {
jsonPath.attr("ultimate", info->ultimate);
}

if (!info->sigs.empty()) {
auto jsonSigs = jsonPath.list("signatures");

@@ -481,7 +493,9 @@ void Store::pathInfoToJSON(JSONPlaceholder& jsonOut, const PathSet& storePaths,
std::shared_ptr<const ValidPathInfo>(info));

if (narInfo) {
if (!narInfo->url.empty()) jsonPath.attr("url", narInfo->url);
if (!narInfo->url.empty()) {
jsonPath.attr("url", narInfo->url);
}
if (narInfo->fileHash)
jsonPath.attr("downloadHash", narInfo->fileHash.to_string());
if (narInfo->fileSize)

@@ -506,7 +520,9 @@ std::pair<uint64_t, uint64_t> Store::getClosureSize(const Path& storePath) {
totalNarSize += info->narSize;
auto narInfo = std::dynamic_pointer_cast<const NarInfo>(
std::shared_ptr<const ValidPathInfo>(info));
if (narInfo) totalDownloadSize += narInfo->fileSize;
if (narInfo) {
totalDownloadSize += narInfo->fileSize;
}
}
return {totalNarSize, totalDownloadSize};
}

@@ -521,9 +537,13 @@ const Store::Stats& Store::getStats() {

void Store::buildPaths(const PathSet& paths, BuildMode buildMode) {
for (auto& path : paths)
if (isDerivation(path)) unsupported("buildPaths");
if (isDerivation(path)) {
unsupported("buildPaths");
}

if (queryValidPaths(paths).size() != paths.size()) unsupported("buildPaths");
if (queryValidPaths(paths).size() != paths.size()) {
unsupported("buildPaths");
}
}

void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,

@@ -552,8 +572,12 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore,
srcStore->narFromPath({storePath}, sink);
auto info2 = make_ref<ValidPathInfo>(*info);
info2->narHash = hashString(htSHA256, *sink.s);
if (!info->narSize) info2->narSize = sink.s->size();
if (info->ultimate) info2->ultimate = false;
if (!info->narSize) {
info2->narSize = sink.s->size();
}
if (info->ultimate) {
info2->ultimate = false;
}
info = info2;

StringSource source(*sink.s);

@@ -590,9 +614,13 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore,

PathSet missing;
for (auto& path : storePaths)
if (!valid.count(path)) missing.insert(path);
if (!valid.count(path)) {
missing.insert(path);
}

if (missing.empty()) return;
if (missing.empty()) {
return;
}

LOG(INFO) << "copying " << missing.size() << " paths";

@@ -628,7 +656,9 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore,
copyStorePath(srcStore, dstStore, storePath, repair, checkSigs);
} catch (Error& e) {
nrFailed++;
if (!settings.keepGoing) throw e;
if (!settings.keepGoing) {
throw e;
}
LOG(ERROR) << "could not copy " << storePath << ": " << e.what();
return;
}

@@ -658,25 +688,33 @@ ValidPathInfo decodeValidPathInfo(std::istream& str, bool hashGiven) {
getline(str, s);
info.narHash = Hash(s, htSHA256);
getline(str, s);
if (!string2Int(s, info.narSize)) throw Error("number expected");
if (!string2Int(s, info.narSize)) {
throw Error("number expected");
}
}
getline(str, info.deriver);
string s;
int n;
getline(str, s);
if (!string2Int(s, n)) throw Error("number expected");
if (!string2Int(s, n)) {
throw Error("number expected");
}
while (n--) {
getline(str, s);
info.references.insert(s);
}
if (!str || str.eof()) throw Error("missing input");
if (!str || str.eof()) {
throw Error("missing input");
}
return info;
}

string showPaths(const PathSet& paths) {
string s;
for (auto& i : paths) {
if (s.size() != 0) s += ", ";
if (s.size() != 0) {
s += ", ";
}
s += "'" + i + "'";
}
return s;

@@ -725,11 +763,15 @@ bool ValidPathInfo::isContentAddressed(const Store& store) const {

size_t ValidPathInfo::checkSignatures(const Store& store,
const PublicKeys& publicKeys) const {
if (isContentAddressed(store)) return maxSigs;
if (isContentAddressed(store)) {
return maxSigs;
}

size_t good = 0;
for (auto& sig : sigs)
if (checkSignature(publicKeys, sig)) good++;
if (checkSignature(publicKeys, sig)) {
good++;
}
return good;
}

@@ -848,7 +890,9 @@ static RegisterStoreImplementation regStore([](const std::string& uri,
return std::shared_ptr<Store>(std::make_shared<UDSRemoteStore>(params));
case tLocal: {
Store::Params params2 = params;
if (hasPrefix(uri, "/")) params2["root"] = uri;
if (hasPrefix(uri, "/")) {
params2["root"] = uri;
}
return std::shared_ptr<Store>(std::make_shared<LocalStore>(params2));
}
default:

@@ -863,7 +907,9 @@ std::list<ref<Store>> getDefaultSubstituters() {
StringSet done;

auto addStore = [&](const std::string& uri) {
if (done.count(uri)) return;
if (done.count(uri)) {
return;
}
done.insert(uri);
try {
stores.push_back(openStore(uri));
4 third_party/nix/src/libstore/store-api.hh vendored

@@ -747,7 +747,9 @@ struct RegisterStoreImplementation {
static Implementations* implementations;

RegisterStoreImplementation(OpenStore fun) {
if (!implementations) implementations = new Implementations;
if (!implementations) {
implementations = new Implementations;
}
implementations->push_back(fun);
}
};