refactor(3p/nix): Apply clang-tidy's modernize-* fixes
This applies the modernization fixes listed at https://clang.llvm.org/extra/clang-tidy/checks/list.html. The 'modernize-use-trailing-return-type' fix was excluded due to my personal preference (more specifically, I think the 'auto' keyword is misleading in that position).
Parent: fed31b2c9b
Commit: d331d3a0b5
59 changed files with 349 additions and 321 deletions
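For context, the modernize-* checks applied here perform mechanical rewrites of the kind visible throughout the diff below: typedefs become using-aliases, 0/NULL pointer arguments become nullptr, bare new inside a unique_ptr constructor becomes std::make_unique, overriding members and destructors gain override, and index/iterator loops become range-based for loops. The snippet below is only an illustrative sketch of those rewrites, not code from this commit; the Widget, LoudWidget, and Registry names are invented for the example.

// Illustrative sketch of the modernize-* rewrites applied in this commit;
// the names (Widget, LoudWidget, Registry) are made up and do not come
// from the Nix sources.
#include <ctime>
#include <map>
#include <memory>
#include <string>

struct Widget {
  virtual ~Widget() = default;
  virtual void run() {}
};

struct LoudWidget : Widget {
  // modernize-use-override: overriding members and destructors are marked 'override'.
  ~LoudWidget() override = default;
  void run() override {}
};

// modernize-use-using: 'typedef std::map<std::string, int> Registry;' becomes:
using Registry = std::map<std::string, int>;

int main() {
  // modernize-use-nullptr: time(0) / NULL arguments become time(nullptr).
  std::time_t now = std::time(nullptr);

  // modernize-make-unique: replaces std::unique_ptr<T>(new T(...)).
  std::unique_ptr<Widget> w = std::make_unique<LoudWidget>();
  w->run();

  Registry counts{{"a", 1}, {"b", 2}};
  // modernize-loop-convert: index/iterator loops become range-based for loops.
  for (const auto& entry : counts) {
    (void)entry;
  }
  return static_cast<int>(now % 2);
}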
(file name not shown)

@@ -2,6 +2,7 @@
#include <chrono>
#include <future>
#include <memory>

#include "archive.hh"
#include "compression.hh"

@@ -20,8 +21,7 @@ namespace nix {
BinaryCacheStore::BinaryCacheStore(const Params& params) : Store(params) {
if (secretKeyFile != "") {
secretKey =
std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
secretKey = std::make_unique<SecretKey>(readFile(secretKeyFile));
}

StringSink sink;
third_party/nix/src/libstore/build.cc (vendored): 101 changes

@@ -1,18 +1,19 @@
#include <algorithm>
#include <cerrno>
#include <chrono>
#include <climits>
#include <cstring>
#include <future>
#include <iostream>
#include <map>
#include <memory>
#include <queue>
#include <regex>
#include <sstream>
#include <thread>

#include <errno.h>
#include <fcntl.h>
#include <grp.h>
#include <limits.h>
#include <netdb.h>
#include <pwd.h>
#include <sys/resource.h>

@@ -67,6 +68,7 @@
#endif

#include <nlohmann/json.hpp>
#include <utility>

namespace nix {

@@ -81,8 +83,8 @@ struct HookInstance;
/* A pointer to a goal. */
class Goal;
class DerivationGoal;
typedef std::shared_ptr<Goal> GoalPtr;
typedef std::weak_ptr<Goal> WeakGoalPtr;
using GoalPtr = std::shared_ptr<Goal>;
using WeakGoalPtr = std::weak_ptr<Goal>;

struct CompareGoalPtrs {
bool operator()(const GoalPtr& a, const GoalPtr& b) const;

@@ -90,7 +92,7 @@ struct CompareGoalPtrs {
/* Set of goals. */
typedef set<GoalPtr, CompareGoalPtrs> Goals;
typedef list<WeakGoalPtr> WeakGoals;
using WeakGoals = list<WeakGoalPtr>;

/* A map of paths to goals (and the other way around). */
typedef map<Path, WeakGoalPtr> WeakGoalMap;

@@ -174,7 +176,7 @@ bool CompareGoalPtrs::operator()(const GoalPtr& a, const GoalPtr& b) const {
return s1 < s2;
}

typedef std::chrono::time_point<std::chrono::steady_clock> steady_time_point;
using steady_time_point = std::chrono::time_point<std::chrono::steady_clock>;

/* A mapping used to remember for each child process to what goal it
belongs, and file descriptors for receiving log data and output

@@ -816,7 +818,7 @@ class DerivationGoal : public Goal {
/* Whether to run the build in a private network namespace. */
bool privateNetwork = false;

typedef void (DerivationGoal::*GoalState)();
using GoalState = void (DerivationGoal::*)();
GoalState state;

/* Stuff we need to pass to initChild(). */

@@ -824,7 +826,7 @@ class DerivationGoal : public Goal {
Path source;
bool optional;
explicit ChrootPath(Path source = "", bool optional = false)
: source(source), optional(optional) {}
: source(std::move(source)), optional(optional) {}
};
typedef map<Path, ChrootPath>
DirsInChroot; // maps target path to source path

@@ -874,11 +876,11 @@ class DerivationGoal : public Goal {
std::string machineName;

public:
DerivationGoal(const Path& drvPath, const StringSet& wantedOutputs,
Worker& worker, BuildMode buildMode = bmNormal);
DerivationGoal(const Path& drvPath, StringSet wantedOutputs, Worker& worker,
BuildMode buildMode = bmNormal);
DerivationGoal(const Path& drvPath, const BasicDerivation& drv,
Worker& worker, BuildMode buildMode = bmNormal);
~DerivationGoal();
~DerivationGoal() override;

/* Whether we need to perform hash rewriting if there are valid output paths.
*/

@@ -982,13 +984,12 @@ class DerivationGoal : public Goal {
const Path DerivationGoal::homeDir = "/homeless-shelter";

DerivationGoal::DerivationGoal(const Path& drvPath,
const StringSet& wantedOutputs, Worker& worker,
BuildMode buildMode)
DerivationGoal::DerivationGoal(const Path& drvPath, StringSet wantedOutputs,
Worker& worker, BuildMode buildMode)
: Goal(worker),
useDerivation(true),
drvPath(drvPath),
wantedOutputs(wantedOutputs),
wantedOutputs(std::move(wantedOutputs)),
buildMode(buildMode) {
state = &DerivationGoal::getDerivation;
name = (format("building of '%1%'") % drvPath).str();

@@ -1004,7 +1005,7 @@ DerivationGoal::DerivationGoal(const Path& drvPath, const BasicDerivation& drv,
useDerivation(false),
drvPath(drvPath),
buildMode(buildMode) {
this->drv = std::unique_ptr<BasicDerivation>(new BasicDerivation(drv));
this->drv = std::make_unique<BasicDerivation>(drv);
state = &DerivationGoal::haveDerivation;
name = (format("building of %1%") % showPaths(drv.outputPaths())).str();
trace("created");

@@ -1473,7 +1474,7 @@ void DerivationGoal::tryToBuild() {
case rpAccept:
/* Yes, it has started doing so. Wait until we get
EOF from the hook. */
result.startTime = time(0); // inexact
result.startTime = time(nullptr); // inexact
state = &DerivationGoal::buildDone;
started();
return;

@@ -1554,7 +1555,7 @@ MakeError(NotDeterministic, BuildError)
DLOG(INFO) << "builder process for '" << drvPath << "' finished";

result.timesBuilt++;
result.stopTime = time(0);
result.stopTime = time(nullptr);

/* So the child is gone now. */
worker.childTerminated(this);

@@ -1674,7 +1675,7 @@ MakeError(NotDeterministic, BuildError)
currentLine.clear();
}

~LogSink() {
~LogSink() override {
if (currentLine != "") {
currentLine += '\n';
flushLine();

@@ -1785,7 +1786,7 @@ HookReply DerivationGoal::tryBuildHook() {
return rpDecline;
} else if (reply == "decline-permanently") {
worker.tryBuildHook = false;
worker.hook = 0;
worker.hook = nullptr;
return rpDecline;
} else if (reply == "postpone") {
return rpPostpone;

@@ -1796,7 +1797,7 @@ HookReply DerivationGoal::tryBuildHook() {
if (e.errNo == EPIPE) {
LOG(ERROR) << "build hook died unexpectedly: "
<< chomp(drainFD(worker.hook->fromHook.readSide.get()));
worker.hook = 0;
worker.hook = nullptr;
return rpDecline;
} else {
throw;

@@ -1892,10 +1893,10 @@ static void preloadNSS() {
load its lookup libraries in the parent before any child gets a chance to.
*/
std::call_once(dns_resolve_flag, []() {
struct addrinfo* res = NULL;
struct addrinfo* res = nullptr;

if (getaddrinfo("this.pre-initializes.the.dns.resolvers.invalid.", "http",
NULL, &res) != 0) {
nullptr, &res) != 0) {
if (res) {
freeaddrinfo(res);
}

@@ -2000,12 +2001,12 @@ void DerivationGoal::startBuilder() {
by `nix-store --register-validity'. However, the deriver
fields are left empty. */
string s = get(drv->env, "exportReferencesGraph");
Strings ss = tokenizeString<Strings>(s);
auto ss = tokenizeString<Strings>(s);
if (ss.size() % 2 != 0) {
throw BuildError(
format("odd number of tokens in 'exportReferencesGraph': '%1%'") % s);
}
for (Strings::iterator i = ss.begin(); i != ss.end();) {
for (auto i = ss.begin(); i != ss.end();) {
string fileName = *i++;
checkStoreName(fileName); /* !!! abuse of this function */
Path storePath = *i++;

@@ -2331,7 +2332,7 @@ void DerivationGoal::startBuilder() {
throw SysError("putting pseudoterminal into raw mode");
}

result.startTime = time(0);
result.startTime = time(nullptr);

/* Fork a child to build the package. */
ProcessOptions options;

@@ -2390,13 +2391,13 @@ void DerivationGoal::startBuilder() {
not seem to be a workaround for this. (But who can tell
from reading user_namespaces(7)?)
See also https://lwn.net/Articles/621612/. */
if (getuid() == 0 && setgroups(0, 0) == -1) {
if (getuid() == 0 && setgroups(0, nullptr) == -1) {
throw SysError("setgroups failed");
}

size_t stackSize = 1 * 1024 * 1024;
char* stack =
(char*)mmap(0, stackSize, PROT_WRITE | PROT_READ,
(char*)mmap(nullptr, stackSize, PROT_WRITE | PROT_READ,
MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
if (stack == MAP_FAILED) {
throw SysError("allocating stack");

@@ -2519,8 +2520,7 @@ void DerivationGoal::initTmpDir() {
needed (attributes are not passed through the environment, so
there is no size constraint). */
if (!parsedDrv->getStructuredAttrs()) {
StringSet passAsFile =
tokenizeString<StringSet>(get(drv->env, "passAsFile"));
auto passAsFile = tokenizeString<StringSet>(get(drv->env, "passAsFile"));
int fileNr = 0;
for (auto& i : drv->env) {
if (passAsFile.find(i.first) == passAsFile.end()) {

@@ -2893,14 +2893,14 @@ void DerivationGoal::runChild() {
outside of the namespace. Making a subtree private is
local to the namespace, though, so setting MS_PRIVATE
does not affect the outside world. */
if (mount(0, "/", 0, MS_REC | MS_PRIVATE, 0) == -1) {
if (mount(nullptr, "/", nullptr, MS_REC | MS_PRIVATE, nullptr) == -1) {
throw SysError("unable to make '/' private mount");
}

/* Bind-mount chroot directory to itself, to treat it as a
different filesystem from /, as needed for pivot_root. */
if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), 0, MS_BIND, 0) ==
-1) {
if (mount(chrootRootDir.c_str(), chrootRootDir.c_str(), nullptr, MS_BIND,
nullptr) == -1) {
throw SysError(format("unable to bind mount '%1%'") % chrootRootDir);
}

@@ -2970,8 +2970,8 @@ void DerivationGoal::runChild() {
createDirs(dirOf(target));
writeFile(target, "");
}
if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC, 0) ==
-1) {
if (mount(source.c_str(), target.c_str(), "", MS_BIND | MS_REC,
nullptr) == -1) {
throw SysError("bind mount from '%1%' to '%2%' failed", source,
target);
}

@@ -2986,8 +2986,8 @@ void DerivationGoal::runChild() {
/* Bind a new instance of procfs on /proc. */
createDirs(chrootRootDir + "/proc");
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0, 0) ==
-1) {
if (mount("none", (chrootRootDir + "/proc").c_str(), "proc", 0,
nullptr) == -1) {
throw SysError("mounting /proc");
}

@@ -3589,7 +3589,7 @@ void DerivationGoal::registerOutputs() {
/* For debugging, print out the referenced and unreferenced
paths. */
for (auto& i : inputPaths) {
PathSet::iterator j = references.find(i);
auto j = references.find(i);
if (j == references.end()) {
DLOG(INFO) << "unreferenced input: '" << i << "'";
} else {

@@ -3841,14 +3841,14 @@ void DerivationGoal::checkOutputs(
auto i = output->find(name);
if (i != output->end()) {
Strings res;
for (auto j = i->begin(); j != i->end(); ++j) {
if (!j->is_string()) {
for (auto& j : *i) {
if (!j.is_string()) {
throw Error(
"attribute '%s' of derivation '%s' must be a list of "
"strings",
name, drvPath);
}
res.push_back(j->get<std::string>());
res.push_back(j.get<std::string>());
}
checks.disallowedRequisites = res;
return res;

@@ -3922,7 +3922,7 @@ void DerivationGoal::closeLogFile() {
if (logFileSink) {
logFileSink->flush();
}
logSink = logFileSink = 0;
logSink = logFileSink = nullptr;
fdLogFile = -1;
}

@@ -4099,13 +4099,13 @@ class SubstitutionGoal : public Goal {
maintainRunningSubstitutions, maintainExpectedNar,
maintainExpectedDownload;

typedef void (SubstitutionGoal::*GoalState)();
using GoalState = void (SubstitutionGoal::*)();
GoalState state;

public:
SubstitutionGoal(const Path& storePath, Worker& worker,
RepairFlag repair = NoRepair);
~SubstitutionGoal();
~SubstitutionGoal() override;

void timedOut() override { abort(); };

@@ -4459,9 +4459,9 @@ GoalPtr Worker::makeSubstitutionGoal(const Path& path, RepairFlag repair) {
static void removeGoal(GoalPtr goal, WeakGoalMap& goalMap) {
/* !!! inefficient */
for (WeakGoalMap::iterator i = goalMap.begin(); i != goalMap.end();) {
for (auto i = goalMap.begin(); i != goalMap.end();) {
if (i->second.lock() == goal) {
WeakGoalMap::iterator j = i;
auto j = i;
++j;
goalMap.erase(i);
i = j;

@@ -4570,7 +4570,7 @@ void Worker::run(const Goals& _topGoals) {
DLOG(INFO) << "entered goal loop";

while (1) {
while (true) {
checkInterrupt();

store.autoGC(false);

@@ -4704,7 +4704,8 @@ void Worker::waitForInput() {
}
}

if (select(fdMax, &fds, 0, 0, useTimeout ? &timeout : 0) == -1) {
if (select(fdMax, &fds, nullptr, nullptr, useTimeout ? &timeout : nullptr) ==
-1) {
if (errno == EINTR) {
return;
}

@@ -4810,7 +4811,7 @@ unsigned int Worker::exitStatus() {
}

bool Worker::pathContentsGood(const Path& path) {
std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path);
auto i = pathContentsGoodCache.find(path);
if (i != pathContentsGoodCache.end()) {
return i->second;
}

@@ -4874,7 +4875,7 @@ void LocalStore::buildPaths(const PathSet& drvPaths, BuildMode buildMode) {
PathSet failed;
for (auto& i : goals) {
if (i->getExitCode() != Goal::ecSuccess) {
DerivationGoal* i2 = dynamic_cast<DerivationGoal*>(i.get());
auto* i2 = dynamic_cast<DerivationGoal*>(i.get());
if (i2) {
failed.insert(i2->getDrvPath());
} else {
third_party/nix/src/libstore/derivations.cc (vendored): 2 changes

@@ -331,7 +331,7 @@ DrvHashes drvHashes;
Hash hashDerivationModulo(Store& store, Derivation drv) {
/* Return a fixed hash for fixed-output derivations. */
if (drv.isFixedOutput()) {
DerivationOutputs::const_iterator i = drv.outputs.begin();
auto i = drv.outputs.begin();
return hashString(htSHA256, "fixed:out:" + i->second.hashAlgo + ":" +
i->second.hash + ":" + i->second.path);
}
third_party/nix/src/libstore/download.cc (vendored): 14 changes

@@ -45,7 +45,7 @@ std::string resolveUri(const std::string& uri) {
}

struct CurlDownloader : public Downloader {
CURLM* curlm = 0;
CURLM* curlm = nullptr;

std::random_device rd;
std::mt19937 mt19937;

@@ -57,7 +57,7 @@ struct CurlDownloader : public Downloader {
bool done = false; // whether either the success or failure function has
// been called
Callback<DownloadResult> callback;
CURL* req = 0;
CURL* req = nullptr;
bool active =
false; // whether the handle has been added to the multi object
std::string status;

@@ -68,7 +68,7 @@ struct CurlDownloader : public Downloader {
has been reached. */
std::chrono::steady_clock::time_point embargo;

struct curl_slist* requestHeaders = 0;
struct curl_slist* requestHeaders = nullptr;

std::string encoding;

@@ -523,7 +523,7 @@ struct CurlDownloader : public Downloader {
workerThread = std::thread([&]() { workerThreadEntry(); });
}

~CurlDownloader() {
~CurlDownloader() override {
stopWorkerThread();

workerThread.join();

@@ -909,7 +909,7 @@ CachedDownloadResult Downloader::downloadCached(
if (ss.size() >= 3 && ss[0] == url) {
time_t lastChecked;
if (string2Int(ss[2], lastChecked) &&
(uint64_t)lastChecked + request.ttl >= (uint64_t)time(0)) {
(uint64_t)lastChecked + request.ttl >= (uint64_t)time(nullptr)) {
skip = true;
result.effectiveUri = request.uri;
result.etag = ss[1];

@@ -949,8 +949,8 @@ CachedDownloadResult Downloader::downloadCached(
assert(!storePath.empty());
replaceSymlink(storePath, fileLink);

writeFile(dataFile,
url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
writeFile(dataFile, url + "\n" + res.etag + "\n" +
std::to_string(time(nullptr)) + "\n");
} catch (DownloadError& e) {
if (storePath.empty()) {
throw;
(file name not shown)

@@ -11,7 +11,7 @@ struct HashAndWriteSink : Sink {
HashSink hashSink;
explicit HashAndWriteSink(Sink& writeSink)
: writeSink(writeSink), hashSink(htSHA256) {}
virtual void operator()(const unsigned char* data, size_t len) {
void operator()(const unsigned char* data, size_t len) override {
writeSink(data, len);
hashSink(data, len);
}
third_party/nix/src/libstore/gc.cc (vendored): 7 changes

@@ -1,11 +1,11 @@
#include <algorithm>
#include <cerrno>
#include <climits>
#include <functional>
#include <queue>
#include <random>
#include <regex>

#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/statvfs.h>

@@ -147,7 +147,7 @@ void LocalStore::addTempRoot(const Path& path) {
/* Create the temporary roots file for this process. */
if (!state->fdTempRoots) {
while (1) {
while (true) {
AutoCloseFD fdGCLock = openGCLock(ltRead);

if (pathExists(fnTempRoots)) {

@@ -505,7 +505,8 @@ struct LocalStore::GCState {
unsigned long long bytesInvalidated;
bool moveToTrash = true;
bool shouldDelete;
explicit GCState(GCResults& results_) : results(results_), bytesInvalidated(0) {}
explicit GCState(GCResults& results_)
: results(results_), bytesInvalidated(0) {}
};

bool LocalStore::isActiveTempFile(const GCState& state, const Path& path,
(file name not shown)

@@ -1,3 +1,5 @@
#include <utility>

#include <glog/logging.h>

#include "binary-cache-store.hh"

@@ -21,8 +23,8 @@ class HttpBinaryCacheStore : public BinaryCacheStore {
Sync<State> _state;

public:
HttpBinaryCacheStore(const Params& params, const Path& _cacheUri)
: BinaryCacheStore(params), cacheUri(_cacheUri) {
HttpBinaryCacheStore(const Params& params, Path _cacheUri)
: BinaryCacheStore(params), cacheUri(std::move(_cacheUri)) {
if (cacheUri.back() == '/') {
cacheUri.pop_back();
}

@@ -157,7 +159,7 @@ static RegisterStoreImplementation regStore(
std::string(uri, 0, 8) != "https://" &&
(getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" ||
std::string(uri, 0, 7) != "file://")) {
return 0;
return nullptr;
}
auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
store->init();
(file name not shown)

@@ -262,7 +262,7 @@ static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) {
return 0;
return nullptr;
}
return std::make_shared<LegacySSHStore>(
std::string(uri, uriScheme.size()), params);
(file name not shown)

@@ -1,3 +1,5 @@
#include <utility>

#include "binary-cache-store.hh"
#include "globals.hh"
#include "nar-info-disk-cache.hh"

@@ -9,8 +11,8 @@ class LocalBinaryCacheStore : public BinaryCacheStore {
Path binaryCacheDir;

public:
LocalBinaryCacheStore(const Params& params, const Path& binaryCacheDir)
: BinaryCacheStore(params), binaryCacheDir(binaryCacheDir) {}
LocalBinaryCacheStore(const Params& params, Path binaryCacheDir)
: BinaryCacheStore(params), binaryCacheDir(std::move(binaryCacheDir)) {}

void init() override;

@@ -78,7 +80,7 @@ static RegisterStoreImplementation regStore(
const Store::Params& params) -> std::shared_ptr<Store> {
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
std::string(uri, 0, 7) != "file://") {
return 0;
return nullptr;
}
auto store =
std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
third_party/nix/src/libstore/local-store.cc (vendored): 27 changes

@@ -1,19 +1,19 @@
#include "local-store.hh"

#include <algorithm>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iostream>

#include <errno.h>
#include <fcntl.h>
#include <glog/logging.h>
#include <grp.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <utime.h>

@@ -321,7 +321,7 @@ void LocalStore::openDB(State& state, bool create) {
auto& db(state.db);
if (sqlite3_open_v2(dbPath.c_str(), &db.db,
SQLITE_OPEN_READWRITE | (create ? SQLITE_OPEN_CREATE : 0),
0) != SQLITE_OK) {
nullptr) != SQLITE_OK) {
throw Error(format("cannot open Nix database '%1%'") % dbPath);
}

@@ -364,16 +364,16 @@ void LocalStore::openDB(State& state, bool create) {
prevMode = string((const char*)sqlite3_column_text(stmt, 0));
}
if (prevMode != mode &&
sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(), 0,
0, 0) != SQLITE_OK) {
sqlite3_exec(db, ("pragma main.journal_mode = " + mode + ";").c_str(),
nullptr, nullptr, nullptr) != SQLITE_OK) {
throwSQLiteError(db, "setting journal mode");
}

/* Increase the auto-checkpoint interval to 40000 pages. This
seems enough to ensure that instantiating the NixOS system
derivation is done in a single fsync(). */
if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;", 0,
0, 0) != SQLITE_OK) {
if (mode == "wal" && sqlite3_exec(db, "pragma wal_autocheckpoint = 40000;",
nullptr, nullptr, nullptr) != SQLITE_OK) {
throwSQLiteError(db, "setting autocheckpoint interval");
}

@@ -404,7 +404,8 @@ void LocalStore::makeStoreWritable() {
throw SysError("setting up a private mount namespace");
}

if (mount(0, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND, 0) == -1) {
if (mount(nullptr, realStoreDir.c_str(), "none", MS_REMOUNT | MS_BIND,
nullptr) == -1) {
throw SysError(format("remounting %1% writable") % realStoreDir);
}
}

@@ -585,7 +586,7 @@ void LocalStore::checkDerivationOutputs(const Path& drvPath,
drvName = string(drvName, 0, drvName.size() - drvExtension.size());

if (drv.isFixedOutput()) {
DerivationOutputs::const_iterator out = drv.outputs.find("out");
auto out = drv.outputs.find("out");
if (out == drv.outputs.end()) {
throw Error(
format("derivation '%1%' does not have an output named 'out'") %

@@ -597,7 +598,7 @@ void LocalStore::checkDerivationOutputs(const Path& drvPath,
out->second.parseHashInfo(recursive, h);
Path outPath = makeFixedOutputPath(recursive, h, drvName);

StringPairs::const_iterator j = drv.env.find("out");
auto j = drv.env.find("out");
if (out->second.path != outPath || j == drv.env.end() ||
j->second != outPath) {
throw Error(

@@ -618,7 +619,7 @@ void LocalStore::checkDerivationOutputs(const Path& drvPath,
for (auto& i : drv.outputs) {
Path outPath = makeOutputPath(i.first, h, drvName);
StringPairs::const_iterator j = drv.env.find(i.first);
auto j = drv.env.find(i.first);
if (i.second.path != outPath || j == drv.env.end() ||
j->second != outPath) {
throw Error(format("derivation '%1%' has incorrect output '%2%', "

@@ -640,7 +641,7 @@ uint64_t LocalStore::addValidPath(State& state, const ValidPathInfo& info,
state.stmtRegisterValidPath
.use()(info.path)(info.narHash.to_string(Base16))(
info.registrationTime == 0 ? time(0) : info.registrationTime)(
info.registrationTime == 0 ? time(nullptr) : info.registrationTime)(
info.deriver, info.deriver != "")(info.narSize, info.narSize != 0)(
info.ultimate ? 1 : 0, info.ultimate)(
concatStringsSep(" ", info.sigs), !info.sigs.empty())(
third_party/nix/src/libstore/misc.cc (vendored): 2 changes

@@ -18,7 +18,7 @@ void Store::computeFSClosure(const PathSet& startPaths, PathSet& paths_,
std::exception_ptr exc;
};

Sync<State> state_(State{0, paths_, 0});
Sync<State> state_(State{0, paths_, nullptr});

std::function<void(const Path&)> enqueue;
third_party/nix/src/libstore/nar-accessor.cc (vendored): 3 changes

@@ -4,6 +4,7 @@
#include <map>
#include <nlohmann/json.hpp>
#include <stack>
#include <utility>

#include "archive.hh"
#include "json.hh"

@@ -99,7 +100,7 @@ struct NarAccessor : public FSAccessor {
}

NarAccessor(const std::string& listing, GetNarBytes getNarBytes)
: getNarBytes(getNarBytes) {
: getNarBytes(std::move(getNarBytes)) {
using json = nlohmann::json;

std::function<void(NarMember&, json&)> recurse;
(file name not shown)

@@ -116,7 +116,7 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
/* Periodically purge expired entries from the database. */
retrySQLite<void>([&]() {
auto now = time(0);
auto now = time(nullptr);

SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
auto queryLastPurge_(queryLastPurge.use());

@@ -157,7 +157,8 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
// FIXME: race

state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority)
state->insertCache
.use()(uri)(time(nullptr))(storeDir)(wantMassQuery)(priority)
.exec();
assert(sqlite3_changes(state->db) == 1);
state->caches[uri] = Cache{(int)sqlite3_last_insert_rowid(state->db),

@@ -198,18 +199,18 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
auto& cache(getCache(*state, uri));

auto now = time(0);
auto now = time(nullptr);

auto queryNAR(state->queryNAR.use()(cache.id)(hashPart)(
now - settings.ttlNegativeNarInfoCache)(
now - settings.ttlPositiveNarInfoCache));

if (!queryNAR.next()) {
return {oUnknown, 0};
return {oUnknown, nullptr};
}

if (!queryNAR.getInt(0)) {
return {oInvalid, 0};
return {oInvalid, nullptr};
}

auto narInfo = make_ref<NarInfo>();

@@ -254,21 +255,22 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache {
state->insertNAR
.use()(cache.id)(hashPart)(storePathToName(info->path))(
narInfo ? narInfo->url : "", narInfo != 0)(
narInfo ? narInfo->compression : "", narInfo != 0)(
narInfo ? narInfo->url : "", narInfo != nullptr)(
narInfo ? narInfo->compression : "", narInfo != nullptr)(
narInfo && narInfo->fileHash ? narInfo->fileHash.to_string()
: "",
narInfo && narInfo->fileHash)(
narInfo ? narInfo->fileSize : 0,
narInfo != 0 && narInfo->fileSize)(info->narHash.to_string())(
narInfo != nullptr &&
narInfo->fileSize)(info->narHash.to_string())(
info->narSize)(concatStringsSep(" ", info->shortRefs()))(
info->deriver != "" ? baseNameOf(info->deriver) : "",
info->deriver !=
"")(concatStringsSep(" ", info->sigs))(info->ca)(time(0))
info->deriver != "")(concatStringsSep(" ", info->sigs))(
info->ca)(time(nullptr))
.exec();

} else {
state->insertMissingNAR.use()(cache.id)(hashPart)(time(0)).exec();
state->insertMissingNAR.use()(cache.id)(hashPart)(time(nullptr)).exec();
}
});
}
(file name not shown)

@@ -1,9 +1,10 @@
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <regex>
#include <utility>

#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

@@ -27,7 +28,7 @@ static void makeWritable(const Path& path) {
struct MakeReadOnly {
Path path;
explicit MakeReadOnly(const Path& path) : path(path) {}
explicit MakeReadOnly(Path path) : path(std::move(path)) {}
~MakeReadOnly() {
try {
/* This will make the path read-only. */
(file name not shown)

@@ -74,13 +74,13 @@ std::optional<Strings> ParsedDerivation::getStringsAttr(
drvPath);
}
Strings res;
for (auto j = i->begin(); j != i->end(); ++j) {
if (!j->is_string()) {
for (const auto& j : *i) {
if (!j.is_string()) {
throw Error(
"attribute '%s' of derivation '%s' must be a list of strings",
name, drvPath);
}
res.push_back(j->get<std::string>());
res.push_back(j.get<std::string>());
}
return res;
}
third_party/nix/src/libstore/pathlocks.cc (vendored): 4 changes

@@ -97,7 +97,7 @@ bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg,
AutoCloseFD fd;

while (1) {
while (true) {
/* Open/create the lock file. */
fd = openLockFile(lockPath, true);

@@ -136,7 +136,7 @@ bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg,
}

/* Use borrow so that the descriptor isn't closed. */
fds.push_back(FDPair(fd.release(), lockPath));
fds.emplace_back(fd.release(), lockPath);
}

return true;
third_party/nix/src/libstore/profiles.cc (vendored): 7 changes

@@ -1,8 +1,9 @@
#include "profiles.hh"

#include <errno.h>
#include <cerrno>
#include <cstdio>

#include <glog/logging.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

@@ -213,7 +214,7 @@ void deleteGenerationsOlderThan(const Path& profile, time_t t, bool dryRun) {
void deleteGenerationsOlderThan(const Path& profile, const string& timeSpec,
bool dryRun) {
time_t curTime = time(0);
time_t curTime = time(nullptr);
string strDays = string(timeSpec, 0, timeSpec.size() - 1);
int days;
third_party/nix/src/libstore/references.cc (vendored): 10 changes

@@ -18,11 +18,11 @@ static void search(const unsigned char* s, size_t len, StringSet& hashes,
static bool initialised = false;
static bool isBase32[256];
if (!initialised) {
for (unsigned int i = 0; i < 256; ++i) {
isBase32[i] = false;
for (bool& i : isBase32) {
i = false;
}
for (unsigned int i = 0; i < base32Chars.size(); ++i) {
isBase32[(unsigned char)base32Chars[i]] = true;
for (char base32Char : base32Chars) {
isBase32[(unsigned char)base32Char] = true;
}
initialised = true;
}

@@ -59,7 +59,7 @@ struct RefScanSink : Sink {
RefScanSink() : hashSink(htSHA256) {}

void operator()(const unsigned char* data, size_t len);
void operator()(const unsigned char* data, size_t len) override;
};

void RefScanSink::operator()(const unsigned char* data, size_t len) {
third_party/nix/src/libstore/remote-store.cc (vendored): 18 changes

@@ -1,8 +1,8 @@
#include "remote-store.hh"

#include <cerrno>
#include <cstring>

#include <errno.h>
#include <fcntl.h>
#include <glog/logging.h>
#include <sys/socket.h>

@@ -230,7 +230,7 @@ struct ConnectionHandle {
RemoteStore::Connection* operator->() { return &*handle; }

void processStderr(Sink* sink = 0, Source* source = 0) {
void processStderr(Sink* sink = nullptr, Source* source = nullptr) {
auto ex = handle->processStderr(sink, source);
if (ex) {
daemonException = true;

@@ -324,7 +324,7 @@ void RemoteStore::querySubstitutablePathInfos(const PathSet& paths,
} else {
conn->to << wopQuerySubstitutablePathInfos << paths;
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
auto count = readNum<size_t>(conn->from);
for (size_t n = 0; n < count; n++) {
Path path = readStorePath(*this, conn->from);
SubstitutablePathInfo& info(infos[path]);

@@ -388,7 +388,7 @@ void RemoteStore::queryReferrers(const Path& path, PathSet& referrers) {
auto conn(getConnection());
conn->to << wopQueryReferrers << path;
conn.processStderr();
PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from);
auto referrers2 = readStorePaths<PathSet>(*this, conn->from);
referrers.insert(referrers2.begin(), referrers2.end());
}

@@ -442,7 +442,7 @@ void RemoteStore::addToStore(const ValidPathInfo& info, Source& source,
;
});

conn.processStderr(0, source2.get());
conn.processStderr(nullptr, source2.get());

auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
assert(importedPaths.size() <= 1);

@@ -457,7 +457,7 @@ void RemoteStore::addToStore(const ValidPathInfo& info, Source& source,
if (!tunnel) {
copyNAR(source, conn->to);
}
conn.processStderr(0, tunnel ? &source : nullptr);
conn.processStderr(nullptr, tunnel ? &source : nullptr);
}
}

@@ -591,7 +591,7 @@ Roots RemoteStore::findRoots(bool censor) {
auto conn(getConnection());
conn->to << wopFindRoots;
conn.processStderr();
size_t count = readNum<size_t>(conn->from);
auto count = readNum<size_t>(conn->from);
Roots result;
while (count--) {
Path link = readString(conn->from);

@@ -704,7 +704,7 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink* sink,
if (!source) {
throw Error("no source");
}
size_t len = readNum<size_t>(from);
auto len = readNum<size_t>(from);
auto buf = std::make_unique<unsigned char[]>(len);
writeString(buf.get(), source->read(buf.get(), len), to);
to.flush();

@@ -742,7 +742,7 @@ static RegisterStoreImplementation regStore(
[](const std::string& uri,
const Store::Params& params) -> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) {
return 0;
return nullptr;
}
return std::make_shared<UDSRemoteStore>(
std::string(uri, uriScheme.size()), params);
third_party/nix/src/libstore/sqlite.cc (vendored): 18 changes

@@ -31,7 +31,7 @@ namespace nix {
SQLite::SQLite(const Path& path) {
if (sqlite3_open_v2(path.c_str(), &db,
SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE,
0) != SQLITE_OK) {
nullptr) != SQLITE_OK) {
throw Error(format("cannot open SQLite database '%s'") % path);
}
}

@@ -48,7 +48,8 @@ SQLite::~SQLite() {
void SQLite::exec(const std::string& stmt) {
retrySQLite<void>([&]() {
if (sqlite3_exec(db, stmt.c_str(), 0, 0, 0) != SQLITE_OK) {
if (sqlite3_exec(db, stmt.c_str(), nullptr, nullptr, nullptr) !=
SQLITE_OK) {
throwSQLiteError(db, format("executing SQLite statement '%s'") % stmt);
}
});

@@ -57,7 +58,7 @@ void SQLite::exec(const std::string& stmt) {
void SQLiteStmt::create(sqlite3* db, const string& sql) {
checkInterrupt();
assert(!stmt);
if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, 0) != SQLITE_OK) {
if (sqlite3_prepare_v2(db, sql.c_str(), -1, &stmt, nullptr) != SQLITE_OK) {
throwSQLiteError(db, fmt("creating statement '%s'", sql));
}
this->db = db;

@@ -149,14 +150,14 @@ bool SQLiteStmt::Use::isNull(int col) {
SQLiteTxn::SQLiteTxn(sqlite3* db) {
this->db = db;
if (sqlite3_exec(db, "begin;", 0, 0, 0) != SQLITE_OK) {
if (sqlite3_exec(db, "begin;", nullptr, nullptr, nullptr) != SQLITE_OK) {
throwSQLiteError(db, "starting transaction");
}
active = true;
}

void SQLiteTxn::commit() {
if (sqlite3_exec(db, "commit;", 0, 0, 0) != SQLITE_OK) {
if (sqlite3_exec(db, "commit;", nullptr, nullptr, nullptr) != SQLITE_OK) {
throwSQLiteError(db, "committing transaction");
}
active = false;

@@ -164,7 +165,8 @@ void SQLiteTxn::commit() {
SQLiteTxn::~SQLiteTxn() {
try {
if (active && sqlite3_exec(db, "rollback;", 0, 0, 0) != SQLITE_OK) {
if (active &&
sqlite3_exec(db, "rollback;", nullptr, nullptr, nullptr) != SQLITE_OK) {
throwSQLiteError(db, "aborting transaction");
}
} catch (...) {

@@ -175,7 +177,7 @@ SQLiteTxn::~SQLiteTxn() {
void handleSQLiteBusy(const SQLiteBusy& e) {
static std::atomic<time_t> lastWarned{0};

time_t now = time(0);
time_t now = time(nullptr);

if (now > lastWarned + 10) {
lastWarned = now;

@@ -188,7 +190,7 @@ void handleSQLiteBusy(const SQLiteBusy& e) {
struct timespec t;
t.tv_sec = 0;
t.tv_nsec = (random() % 100) * 1000 * 1000; /* <= 0.1s */
nanosleep(&t, 0);
nanosleep(&t, nullptr);
}

} // namespace nix
third_party/nix/src/libstore/ssh-store.cc (vendored): 4 changes

@@ -27,7 +27,7 @@ class SSHStore : public RemoteStore {
std::string getUri() override { return uriScheme + host; }

bool sameMachine() { return false; }
bool sameMachine() override { return false; }

void narFromPath(const Path& path, Sink& sink) override;

@@ -78,7 +78,7 @@ static RegisterStoreImplementation regStore([](const std::string& uri,
const Store::Params& params)
-> std::shared_ptr<Store> {
if (std::string(uri, 0, uriScheme.size()) != uriScheme) {
return 0;
return nullptr;
}
return std::make_shared<SSHStore>(std::string(uri, uriScheme.size()), params);
});
third_party/nix/src/libstore/ssh.cc (vendored): 6 changes

@@ -1,12 +1,14 @@
#include "ssh.hh"

#include <utility>

namespace nix {

SSHMaster::SSHMaster(const std::string& host, const std::string& keyFile,
SSHMaster::SSHMaster(const std::string& host, std::string keyFile,
bool useMaster, bool compress, int logFD)
: host(host),
fakeSSH(host == "localhost"),
keyFile(keyFile),
keyFile(std::move(keyFile)),
useMaster(useMaster && !fakeSSH),
compress(compress),
logFD(logFD) {
third_party/nix/src/libstore/ssh.hh (vendored): 2 changes

@@ -25,7 +25,7 @@ class SSHMaster {
void addCommonSSHOpts(Strings& args);

public:
SSHMaster(const std::string& host, const std::string& keyFile, bool useMaster,
SSHMaster(const std::string& host, std::string keyFile, bool useMaster,
bool compress, int logFD = -1);

struct Connection {
third_party/nix/src/libstore/store-api.cc (vendored): 13 changes

@@ -251,7 +251,7 @@ bool Store::isValidPath(const Path& storePath) {
auto res = state_->pathInfoCache.get(hashPart);
if (res) {
stats.narInfoReadAverted++;
return *res != 0;
return *res != nullptr;
}
}

@@ -261,7 +261,8 @@ bool Store::isValidPath(const Path& storePath) {
stats.narInfoReadAverted++;
auto state_(state.lock());
state_->pathInfoCache.upsert(
hashPart, res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
hashPart,
res.first == NarInfoDiskCache::oInvalid ? nullptr : res.second);
return res.first == NarInfoDiskCache::oValid;
}
}

@@ -270,7 +271,7 @@ bool Store::isValidPath(const Path& storePath) {
if (diskCache && !valid) {
// FIXME: handle valid = true case.
diskCache->upsertNarInfo(getUri(), hashPart, 0);
diskCache->upsertNarInfo(getUri(), hashPart, nullptr);
}

return valid;

@@ -329,7 +330,7 @@ void Store::queryPathInfo(const Path& storePath,
auto state_(state.lock());
state_->pathInfoCache.upsert(
hashPart,
res.first == NarInfoDiskCache::oInvalid ? 0 : res.second);
res.first == NarInfoDiskCache::oInvalid ? nullptr : res.second);
if (res.first == NarInfoDiskCache::oInvalid ||
(res.second->path != storePath &&
storePathToName(storePath) != "")) {

@@ -842,7 +843,7 @@ void Store::addToStore(const ValidPathInfo& info, const ref<std::string>& nar,
namespace nix {

RegisterStoreImplementation::Implementations*
RegisterStoreImplementation::implementations = 0;
RegisterStoreImplementation::implementations = nullptr;

/* Split URI into protocol+hierarchy part and its parameter set. */
std::pair<std::string, Store::Params> splitUriAndParams(

@@ -862,7 +863,7 @@ std::pair<std::string, Store::Params> splitUriAndParams(
throw Error("invalid URI parameter '%s'", value);
}
try {
decoded += std::stoul(std::string(value, i + 1, 2), 0, 16);
decoded += std::stoul(std::string(value, i + 1, 2), nullptr, 16);
i += 3;
} catch (...) {
throw Error("invalid URI parameter '%s'", value);