Add 'third_party/nix/' from commit 'be66c7a6b24e3c3c6157fd37b86c7203d14acf10'
git-subtree-dir: third_party/nix
git-subtree-mainline: cf8cd640c1
git-subtree-split: be66c7a6b24e3c3c6157fd37b86c7203d14acf10
commit 7994fd1d54
737 changed files with 105390 additions and 0 deletions
third_party/nix/src/build-remote/build-remote.cc (vendored, new file, 266 lines)
@@ -0,0 +1,266 @@
#include <cstdlib>
#include <cstring>
#include <algorithm>
#include <set>
#include <memory>
#include <tuple>
#include <iomanip>
#if __APPLE__
#include <sys/time.h>
#endif

#include "machines.hh"
#include "shared.hh"
#include "pathlocks.hh"
#include "globals.hh"
#include "serialise.hh"
#include "store-api.hh"
#include "derivations.hh"
#include "local-store.hh"
#include "legacy.hh"

using namespace nix;
using std::cin;

static void handleAlarm(int sig) {
}

std::string escapeUri(std::string uri)
{
    std::replace(uri.begin(), uri.end(), '/', '_');
    return uri;
}

static string currentLoad;

static AutoCloseFD openSlotLock(const Machine & m, unsigned long long slot)
{
    return openLockFile(fmt("%s/%s-%d", currentLoad, escapeUri(m.storeUri), slot), true);
}

static bool allSupportedLocally(const std::set<std::string>& requiredFeatures) {
    for (auto & feature : requiredFeatures)
        if (!settings.systemFeatures.get().count(feature)) return false;
    return true;
}

static int _main(int argc, char * * argv)
{
    {
        logger = makeJSONLogger(*logger);

        /* Ensure we don't get any SSH passphrase or host key popups. */
        unsetenv("DISPLAY");
        unsetenv("SSH_ASKPASS");

        if (argc != 2)
            throw UsageError("called without required arguments");

        verbosity = (Verbosity) std::stoll(argv[1]);

        FdSource source(STDIN_FILENO);

        /* Read the parent's settings. */
        while (readInt(source)) {
            auto name = readString(source);
            auto value = readString(source);
            settings.set(name, value);
        }

        settings.maxBuildJobs.set("1"); // hack to make tests with local?root= work

        initPlugins();

        auto store = openStore().cast<LocalStore>();

        /* It would be more appropriate to use $XDG_RUNTIME_DIR, since
           that gets cleared on reboot, but it wouldn't work on macOS. */
        currentLoad = store->stateDir + "/current-load";

        std::shared_ptr<Store> sshStore;
        AutoCloseFD bestSlotLock;

        auto machines = getMachines();
        debug("got %d remote builders", machines.size());

        if (machines.empty()) {
            std::cerr << "# decline-permanently\n";
            return 0;
        }

        string drvPath;
        string storeUri;

        while (true) {

            try {
                auto s = readString(source);
                if (s != "try") return 0;
            } catch (EndOfFile &) { return 0; }

            auto amWilling = readInt(source);
            auto neededSystem = readString(source);
            source >> drvPath;
            auto requiredFeatures = readStrings<std::set<std::string>>(source);

            auto canBuildLocally = amWilling
                && (neededSystem == settings.thisSystem
                    || settings.extraPlatforms.get().count(neededSystem) > 0)
                && allSupportedLocally(requiredFeatures);

            /* Error ignored here, will be caught later */
            mkdir(currentLoad.c_str(), 0777);

            while (true) {
                bestSlotLock = -1;
                AutoCloseFD lock = openLockFile(currentLoad + "/main-lock", true);
                lockFile(lock.get(), ltWrite, true);

                bool rightType = false;

                Machine * bestMachine = nullptr;
                unsigned long long bestLoad = 0;
                for (auto & m : machines) {
                    debug("considering building on remote machine '%s'", m.storeUri);

                    if (m.enabled && std::find(m.systemTypes.begin(),
                            m.systemTypes.end(),
                            neededSystem) != m.systemTypes.end() &&
                        m.allSupported(requiredFeatures) &&
                        m.mandatoryMet(requiredFeatures)) {
                        rightType = true;
                        AutoCloseFD free;
                        unsigned long long load = 0;
                        for (unsigned long long slot = 0; slot < m.maxJobs; ++slot) {
                            auto slotLock = openSlotLock(m, slot);
                            if (lockFile(slotLock.get(), ltWrite, false)) {
                                if (!free) {
                                    free = std::move(slotLock);
                                }
                            } else {
                                ++load;
                            }
                        }
                        if (!free) {
                            continue;
                        }
                        bool best = false;
                        if (!bestSlotLock) {
                            best = true;
                        } else if (load / m.speedFactor < bestLoad / bestMachine->speedFactor) {
                            best = true;
                        } else if (load / m.speedFactor == bestLoad / bestMachine->speedFactor) {
                            if (m.speedFactor > bestMachine->speedFactor) {
                                best = true;
                            } else if (m.speedFactor == bestMachine->speedFactor) {
                                if (load < bestLoad) {
                                    best = true;
                                }
                            }
                        }
                        if (best) {
                            bestLoad = load;
                            bestSlotLock = std::move(free);
                            bestMachine = &m;
                        }
                    }
                }

                if (!bestSlotLock) {
                    if (rightType && !canBuildLocally)
                        std::cerr << "# postpone\n";
                    else
                        std::cerr << "# decline\n";
                    break;
                }

#if __APPLE__
                futimes(bestSlotLock.get(), NULL);
#else
                futimens(bestSlotLock.get(), NULL);
#endif

                lock = -1;

                try {

                    Activity act(*logger, lvlTalkative, actUnknown, fmt("connecting to '%s'", bestMachine->storeUri));

                    Store::Params storeParams;
                    if (hasPrefix(bestMachine->storeUri, "ssh://")) {
                        storeParams["max-connections"] = "1";
                        storeParams["log-fd"] = "4";
                        if (bestMachine->sshKey != "")
                            storeParams["ssh-key"] = bestMachine->sshKey;
                    }

                    sshStore = openStore(bestMachine->storeUri, storeParams);
                    sshStore->connect();
                    storeUri = bestMachine->storeUri;

                } catch (std::exception & e) {
                    auto msg = chomp(drainFD(5, false));
                    printError("cannot build on '%s': %s%s",
                        bestMachine->storeUri, e.what(),
                        (msg.empty() ? "" : ": " + msg));
                    bestMachine->enabled = false;
                    continue;
                }

                goto connected;
            }
        }

    connected:
        close(5);

        std::cerr << "# accept\n" << storeUri << "\n";

        auto inputs = readStrings<PathSet>(source);
        auto outputs = readStrings<PathSet>(source);

        AutoCloseFD uploadLock = openLockFile(currentLoad + "/" + escapeUri(storeUri) + ".upload-lock", true);

        {
            Activity act(*logger, lvlTalkative, actUnknown, fmt("waiting for the upload lock to '%s'", storeUri));

            auto old = signal(SIGALRM, handleAlarm);
            alarm(15 * 60);
            if (!lockFile(uploadLock.get(), ltWrite, true))
                printError("somebody is hogging the upload lock for '%s', continuing...", storeUri);
            alarm(0);
            signal(SIGALRM, old);
        }

        auto substitute = settings.buildersUseSubstitutes ? Substitute : NoSubstitute;

        {
            Activity act(*logger, lvlTalkative, actUnknown, fmt("copying dependencies to '%s'", storeUri));
            copyPaths(store, ref<Store>(sshStore), inputs, NoRepair, NoCheckSigs, substitute);
        }

        uploadLock = -1;

        BasicDerivation drv(readDerivation(store->realStoreDir + "/" + baseNameOf(drvPath)));
        drv.inputSrcs = inputs;

        auto result = sshStore->buildDerivation(drvPath, drv);

        if (!result.success())
            throw Error("build of '%s' on '%s' failed: %s", drvPath, storeUri, result.errorMsg);

        PathSet missing;
        for (auto & path : outputs)
            if (!store->isValidPath(path)) missing.insert(path);

        if (!missing.empty()) {
            Activity act(*logger, lvlTalkative, actUnknown, fmt("copying outputs from '%s'", storeUri));
            store->locksHeld.insert(missing.begin(), missing.end()); /* FIXME: ugly */
            copyPaths(ref<Store>(sshStore), store, missing, NoRepair, NoCheckSigs, NoSubstitute);
        }

        return 0;
    }
}

static RegisterLegacyCommand s1("build-remote", _main);
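The builder-selection loop above ranks candidates by current load divided by speedFactor; both operands are unsigned long long, so the division is integral, and ties fall through to the higher speedFactor and then the lower absolute load. A minimal standalone sketch of just that comparison chain, with a hypothetical Candidate struct standing in for Machine and all of the slot-lock bookkeeping removed:

#include <iostream>
#include <string>
#include <vector>

struct Candidate {
    std::string uri;
    unsigned long long load;        // busy build slots
    unsigned long long speedFactor; // relative speed, >= 1
};

// Mirrors the comparison chain in build-remote.cc: lower load/speedFactor
// wins; ties go to the higher speedFactor, then to the lower absolute load.
size_t pickBuilder(const std::vector<Candidate>& ms) {
    size_t best = 0;
    for (size_t i = 1; i < ms.size(); ++i) {
        auto l = ms[i].load / ms[i].speedFactor;     // integer division,
        auto b = ms[best].load / ms[best].speedFactor; // as in the original
        if (l < b
            || (l == b && ms[i].speedFactor > ms[best].speedFactor)
            || (l == b && ms[i].speedFactor == ms[best].speedFactor
                && ms[i].load < ms[best].load))
            best = i;
    }
    return best;
}

int main() {
    std::vector<Candidate> ms = {
        {"ssh://fast", 3, 4},  // ratio truncates to 0
        {"ssh://slow", 1, 1},  // ratio 1
    };
    std::cout << ms[pickBuilder(ms)].uri << "\n"; // prints ssh://fast
}

Because the division truncates, two machines whose ratios differ only below 1 compare equal, and the tie-breakers decide.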
third_party/nix/src/cpptoml/LICENSE (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
Copyright (c) 2014 Chase Geigle

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
third_party/nix/src/cpptoml/cpptoml.h (vendored, new file, 3668 lines)
(diff suppressed: file too large)
third_party/nix/src/libexpr/attr-path.cc (vendored, new file, 96 lines)
@@ -0,0 +1,96 @@
#include "attr-path.hh"
#include "eval-inline.hh"
#include "util.hh"


namespace nix {


static Strings parseAttrPath(const string & s)
{
    Strings res;
    string cur;
    string::const_iterator i = s.begin();
    while (i != s.end()) {
        if (*i == '.') {
            res.push_back(cur);
            cur.clear();
        } else if (*i == '"') {
            ++i;
            while (1) {
                if (i == s.end())
                    throw Error(format("missing closing quote in selection path '%1%'") % s);
                if (*i == '"') break;
                cur.push_back(*i++);
            }
        } else
            cur.push_back(*i);
        ++i;
    }
    if (!cur.empty()) res.push_back(cur);
    return res;
}


Value * findAlongAttrPath(EvalState & state, const string & attrPath,
    Bindings & autoArgs, Value & vIn)
{
    Strings tokens = parseAttrPath(attrPath);

    Error attrError =
        Error(format("attribute selection path '%1%' does not match expression") % attrPath);

    Value * v = &vIn;

    for (auto & attr : tokens) {

        /* Is i an index (integer) or a normal attribute name? */
        enum { apAttr, apIndex } apType = apAttr;
        unsigned int attrIndex;
        if (string2Int(attr, attrIndex)) apType = apIndex;

        /* Evaluate the expression. */
        Value * vNew = state.allocValue();
        state.autoCallFunction(autoArgs, *v, *vNew);
        v = vNew;
        state.forceValue(*v);

        /* It should evaluate to either a set or an expression,
           according to what is specified in the attrPath. */

        if (apType == apAttr) {

            if (v->type != tAttrs)
                throw TypeError(
                    format("the expression selected by the selection path '%1%' should be a set but is %2%")
                    % attrPath % showType(*v));

            if (attr.empty())
                throw Error(format("empty attribute name in selection path '%1%'") % attrPath);

            Bindings::iterator a = v->attrs->find(state.symbols.create(attr));
            if (a == v->attrs->end())
                throw Error(format("attribute '%1%' in selection path '%2%' not found") % attr % attrPath);
            v = &*a->value;
        }

        else if (apType == apIndex) {

            if (!v->isList())
                throw TypeError(
                    format("the expression selected by the selection path '%1%' should be a list but is %2%")
                    % attrPath % showType(*v));

            if (attrIndex >= v->listSize())
                throw Error(format("list index %1% in selection path '%2%' is out of range") % attrIndex % attrPath);

            v = v->listElems()[attrIndex];
        }

    }

    return v;
}


}
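parseAttrPath splits only on unquoted dots; double quotes protect embedded dots, and a bare integer token selects a list index instead of an attribute. A standalone re-implementation for illustration (the real function is file-local to attr-path.cc, so this copy exists only to show the tokenization):

#include <iostream>
#include <list>
#include <stdexcept>
#include <string>

std::list<std::string> parseAttrPath(const std::string& s) {
    std::list<std::string> res;
    std::string cur;
    for (auto i = s.begin(); i != s.end(); ++i) {
        if (*i == '.') {           // unquoted dot: end the current token
            res.push_back(cur);
            cur.clear();
        } else if (*i == '"') {    // quoted segment: dots are literal
            ++i;
            while (true) {
                if (i == s.end())
                    throw std::runtime_error("missing closing quote in '" + s + "'");
                if (*i == '"') break;
                cur.push_back(*i++);
            }
        } else
            cur.push_back(*i);
    }
    if (!cur.empty()) res.push_back(cur);
    return res;
}

int main() {
    // Quoted segments keep their dots: three tokens, not four.
    for (auto& t : parseAttrPath("foo.\"bar.baz\".2"))
        std::cout << t << "\n";   // foo / bar.baz / 2
}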
third_party/nix/src/libexpr/attr-path.hh (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
#pragma once

#include "eval.hh"

#include <string>
#include <map>

namespace nix {

Value * findAlongAttrPath(EvalState & state, const string & attrPath,
    Bindings & autoArgs, Value & vIn);

}
third_party/nix/src/libexpr/attr-set.cc (vendored, new file, 52 lines)
@@ -0,0 +1,52 @@
#include "attr-set.hh"
#include "eval-inline.hh"

#include <algorithm>


namespace nix {


/* Allocate a new array of attributes for an attribute set with a specific
   capacity. The space is implicitly reserved after the Bindings
   structure. */
Bindings * EvalState::allocBindings(size_t capacity)
{
    if (capacity > std::numeric_limits<Bindings::size_t>::max())
        throw Error("attribute set of size %d is too big", capacity);
    return new (allocBytes(sizeof(Bindings) + sizeof(Attr) * capacity)) Bindings((Bindings::size_t) capacity);
}


void EvalState::mkAttrs(Value & v, size_t capacity)
{
    if (capacity == 0) {
        v = vEmptySet;
        return;
    }
    clearValue(v);
    v.type = tAttrs;
    v.attrs = allocBindings(capacity);
    nrAttrsets++;
    nrAttrsInAttrsets += capacity;
}


/* Create a new attribute named 'name' on an existing attribute set stored
   in 'vAttrs' and return the newly allocated Value which is associated with
   this attribute. */
Value * EvalState::allocAttr(Value & vAttrs, const Symbol & name)
{
    Value * v = allocValue();
    vAttrs.attrs->push_back(Attr(name, v));
    return v;
}


void Bindings::sort()
{
    std::sort(begin(), end());
}


}
third_party/nix/src/libexpr/attr-set.hh (vendored, new file, 95 lines)
@@ -0,0 +1,95 @@
#pragma once

#include "nixexpr.hh"
#include "symbol-table.hh"

#include <algorithm>

namespace nix {


class EvalState;
struct Value;

/* Map one attribute name to its value. */
struct Attr
{
    Symbol name;
    Value * value;
    Pos * pos;
    Attr(Symbol name, Value * value, Pos * pos = &noPos)
        : name(name), value(value), pos(pos) { };
    Attr() : pos(&noPos) { };
    bool operator < (const Attr & a) const
    {
        return name < a.name;
    }
};

/* Bindings contains all the attributes of an attribute set. It is defined
   by its size and its capacity, the capacity being the number of Attr
   elements allocated after this structure, while the size corresponds to
   the number of elements already inserted in this structure. */
class Bindings
{
public:
    typedef uint32_t size_t;

private:
    size_t size_, capacity_;
    Attr attrs[0];

    Bindings(size_t capacity) : size_(0), capacity_(capacity) { }
    Bindings(const Bindings & bindings) = delete;

public:
    size_t size() const { return size_; }

    bool empty() const { return !size_; }

    typedef Attr * iterator;

    void push_back(const Attr & attr)
    {
        assert(size_ < capacity_);
        attrs[size_++] = attr;
    }

    iterator find(const Symbol & name)
    {
        Attr key(name, 0);
        iterator i = std::lower_bound(begin(), end(), key);
        if (i != end() && i->name == name) return i;
        return end();
    }

    iterator begin() { return &attrs[0]; }
    iterator end() { return &attrs[size_]; }

    Attr & operator[](size_t pos)
    {
        return attrs[pos];
    }

    void sort();

    size_t capacity() { return capacity_; }

    /* Returns the attributes in lexicographically sorted order. */
    std::vector<const Attr *> lexicographicOrder() const
    {
        std::vector<const Attr *> res;
        res.reserve(size_);
        for (size_t n = 0; n < size_; n++)
            res.emplace_back(&attrs[n]);
        std::sort(res.begin(), res.end(), [](const Attr * a, const Attr * b) {
            return (const string &) a->name < (const string &) b->name;
        });
        return res;
    }

    friend class EvalState;
};


}
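Bindings stores its Attr array directly after the object header (Attr attrs[0] is the GNU flexible-array idiom), so a single allocation holds both, and find() runs a binary search that assumes sort() has been called. A toy model of the same layout with int keys instead of Symbols (hypothetical names; zero-length arrays are a GNU extension, as in the original):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <new>

struct Table {
    uint32_t size_ = 0, capacity_;
    int keys[0];                      // flexible array member, like Bindings::attrs
    explicit Table(uint32_t cap) : capacity_(cap) {}
    void push_back(int k) { assert(size_ < capacity_); keys[size_++] = k; }
    void sort() { std::sort(keys, keys + size_); }
    bool contains(int k) {            // binary search, like Bindings::find
        auto* i = std::lower_bound(keys, keys + size_, k);
        return i != keys + size_ && *i == k;
    }
};

// One allocation for header + elements, then placement new, like allocBindings.
Table* allocTable(uint32_t cap) {
    void* p = calloc(1, sizeof(Table) + sizeof(int) * cap);
    if (!p) throw std::bad_alloc();
    return new (p) Table(cap);
}

int main() {
    Table* t = allocTable(3);
    t->push_back(7); t->push_back(2); t->push_back(5);
    t->sort();                        // find()/contains() assume sorted storage
    assert(t->contains(5) && !t->contains(4));
    free(t);
}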
third_party/nix/src/libexpr/common-eval-args.cc (vendored, new file, 59 lines)
@@ -0,0 +1,59 @@
#include "common-eval-args.hh"
#include "shared.hh"
#include "download.hh"
#include "util.hh"
#include "eval.hh"

namespace nix {

MixEvalArgs::MixEvalArgs()
{
    mkFlag()
        .longName("arg")
        .description("argument to be passed to Nix functions")
        .labels({"name", "expr"})
        .handler([&](std::vector<std::string> ss) { autoArgs[ss[0]] = 'E' + ss[1]; });

    mkFlag()
        .longName("argstr")
        .description("string-valued argument to be passed to Nix functions")
        .labels({"name", "string"})
        .handler([&](std::vector<std::string> ss) { autoArgs[ss[0]] = 'S' + ss[1]; });

    mkFlag()
        .shortName('I')
        .longName("include")
        .description("add a path to the list of locations used to look up <...> file names")
        .label("path")
        .handler([&](std::string s) { searchPath.push_back(s); });
}

Bindings * MixEvalArgs::getAutoArgs(EvalState & state)
{
    Bindings * res = state.allocBindings(autoArgs.size());
    for (auto & i : autoArgs) {
        Value * v = state.allocValue();
        if (i.second[0] == 'E')
            state.mkThunk_(*v, state.parseExprFromString(string(i.second, 1), absPath(".")));
        else
            mkString(*v, string(i.second, 1));
        res->push_back(Attr(state.symbols.create(i.first), v));
    }
    res->sort();
    return res;
}

Path lookupFileArg(EvalState & state, string s)
{
    if (isUri(s)) {
        CachedDownloadRequest request(s);
        request.unpack = true;
        return getDownloader()->downloadCached(state.store, request).path;
    } else if (s.size() > 2 && s.at(0) == '<' && s.at(s.size() - 1) == '>') {
        Path p = s.substr(1, s.size() - 2);
        return state.findFile(p);
    } else
        return absPath(s);
}

}
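The autoArgs map above encodes each --arg/--argstr value as a string whose first byte is a tag: 'E' means "parse the rest as a Nix expression", 'S' means "use the rest verbatim as a string". A minimal sketch of just that encoding and its decoding, independent of the Nix types:

#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> autoArgs;
    autoArgs["level"] = 'E' + std::string("1 + 2"); // --arg level '1 + 2'
    autoArgs["name"]  = 'S' + std::string("foo");   // --argstr name foo

    for (auto& [k, v] : autoArgs) {
        bool isExpr = v[0] == 'E';       // tag byte, as in getAutoArgs
        std::cout << k << " -> " << (isExpr ? "expr: " : "string: ")
                  << v.substr(1) << "\n"; // payload follows the tag
    }
}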
third_party/nix/src/libexpr/common-eval-args.hh (vendored, new file, 26 lines)
@@ -0,0 +1,26 @@
#pragma once

#include "args.hh"

namespace nix {

class Store;
class EvalState;
class Bindings;

struct MixEvalArgs : virtual Args
{
    MixEvalArgs();

    Bindings * getAutoArgs(EvalState & state);

    Strings searchPath;

private:

    std::map<std::string, std::string> autoArgs;
};

Path lookupFileArg(EvalState & state, string s);

}
third_party/nix/src/libexpr/eval-inline.hh (vendored, new file, 95 lines)
@@ -0,0 +1,95 @@
#pragma once

#include "eval.hh"

#define LocalNoInline(f) static f __attribute__((noinline)); f
#define LocalNoInlineNoReturn(f) static f __attribute__((noinline, noreturn)); f

namespace nix {

LocalNoInlineNoReturn(void throwEvalError(const char * s, const Pos & pos))
{
    throw EvalError(format(s) % pos);
}

LocalNoInlineNoReturn(void throwTypeError(const char * s, const Value & v))
{
    throw TypeError(format(s) % showType(v));
}


LocalNoInlineNoReturn(void throwTypeError(const char * s, const Value & v, const Pos & pos))
{
    throw TypeError(format(s) % showType(v) % pos);
}


void EvalState::forceValue(Value & v, const Pos & pos)
{
    if (v.type == tThunk) {
        Env * env = v.thunk.env;
        Expr * expr = v.thunk.expr;
        try {
            v.type = tBlackhole;
            //checkInterrupt();
            expr->eval(*this, *env, v);
        } catch (...) {
            v.type = tThunk;
            v.thunk.env = env;
            v.thunk.expr = expr;
            throw;
        }
    }
    else if (v.type == tApp)
        callFunction(*v.app.left, *v.app.right, v, noPos);
    else if (v.type == tBlackhole)
        throwEvalError("infinite recursion encountered, at %1%", pos);
}


inline void EvalState::forceAttrs(Value & v)
{
    forceValue(v);
    if (v.type != tAttrs)
        throwTypeError("value is %1% while a set was expected", v);
}


inline void EvalState::forceAttrs(Value & v, const Pos & pos)
{
    forceValue(v);
    if (v.type != tAttrs)
        throwTypeError("value is %1% while a set was expected, at %2%", v, pos);
}


inline void EvalState::forceList(Value & v)
{
    forceValue(v);
    if (!v.isList())
        throwTypeError("value is %1% while a list was expected", v);
}


inline void EvalState::forceList(Value & v, const Pos & pos)
{
    forceValue(v);
    if (!v.isList())
        throwTypeError("value is %1% while a list was expected, at %2%", v, pos);
}

/* Note: Various places expect the allocated memory to be zeroed. */
inline void * allocBytes(size_t n)
{
    void * p;
#if HAVE_BOEHMGC
    p = GC_MALLOC(n);
#else
    p = calloc(n, 1);
#endif
    if (!p) throw std::bad_alloc();
    return p;
}


}
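The tBlackhole marking in forceValue is what turns a self-referential thunk into an "infinite recursion" error rather than a hang: the thunk is flagged as in-evaluation before being entered, and restored if evaluation fails. A self-contained toy model of the idea (the Thunk type here is invented for the sketch; the real code mutates Value in place the same way):

#include <functional>
#include <iostream>
#include <stdexcept>

struct Thunk {
    enum { Unforced, Blackhole, Forced } state = Unforced;
    std::function<int(Thunk&)> compute; // may re-enter force() on itself
    int result = 0;
};

int force(Thunk& t) {
    if (t.state == Thunk::Forced) return t.result;
    if (t.state == Thunk::Blackhole)       // corresponds to v.type == tBlackhole
        throw std::runtime_error("infinite recursion encountered");
    t.state = Thunk::Blackhole;            // mark before entering the thunk
    try {
        t.result = t.compute(t);
        t.state = Thunk::Forced;
    } catch (...) {
        t.state = Thunk::Unforced;         // restore, as the catch block above does
        throw;
    }
    return t.result;
}

int main() {
    Thunk ok{Thunk::Unforced, [](Thunk&) { return 42; }};
    std::cout << force(ok) << "\n";        // 42

    Thunk loop;
    loop.compute = [&](Thunk& self) { return force(self); }; // like x = x
    try { force(loop); }
    catch (std::exception& e) { std::cout << e.what() << "\n"; }
}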
third_party/nix/src/libexpr/eval.cc (vendored, new file, 1991 lines)
(diff suppressed: file too large)
third_party/nix/src/libexpr/eval.hh (vendored, new file, 363 lines)
@@ -0,0 +1,363 @@
#pragma once

#include "attr-set.hh"
#include "value.hh"
#include "nixexpr.hh"
#include "symbol-table.hh"
#include "hash.hh"
#include "config.hh"

#include <map>
#include <optional>
#include <unordered_map>


namespace nix {


class Store;
class EvalState;
enum RepairFlag : bool;


typedef void (* PrimOpFun) (EvalState & state, const Pos & pos, Value * * args, Value & v);


struct PrimOp
{
    PrimOpFun fun;
    size_t arity;
    Symbol name;
    PrimOp(PrimOpFun fun, size_t arity, Symbol name)
        : fun(fun), arity(arity), name(name) { }
};


struct Env
{
    Env * up;
    unsigned short size; // used by ‘valueSize’
    unsigned short prevWith:14; // nr of levels up to next `with' environment
    enum { Plain = 0, HasWithExpr, HasWithAttrs } type:2;
    Value * values[0];
};


Value & mkString(Value & v, const string & s, const PathSet & context = PathSet());

void copyContext(const Value & v, PathSet & context);


/* Cache for calls to addToStore(); maps source paths to the store
   paths. */
typedef std::map<Path, Path> SrcToStore;


std::ostream & operator << (std::ostream & str, const Value & v);


typedef std::pair<std::string, std::string> SearchPathElem;
typedef std::list<SearchPathElem> SearchPath;


/* Initialise the Boehm GC, if applicable. */
void initGC();


class EvalState
{
public:
    SymbolTable symbols;

    const Symbol sWith, sOutPath, sDrvPath, sType, sMeta, sName, sValue,
        sSystem, sOverrides, sOutputs, sOutputName, sIgnoreNulls,
        sFile, sLine, sColumn, sFunctor, sToString,
        sRight, sWrong, sStructuredAttrs, sBuilder, sArgs,
        sOutputHash, sOutputHashAlgo, sOutputHashMode;
    Symbol sDerivationNix;

    /* If set, force copying files to the Nix store even if they
       already exist there. */
    RepairFlag repair;

    /* The allowed filesystem paths in restricted or pure evaluation
       mode. */
    std::optional<PathSet> allowedPaths;

    Value vEmptySet;

    const ref<Store> store;

private:
    SrcToStore srcToStore;

    /* A cache from path names to parse trees. */
#if HAVE_BOEHMGC
    typedef std::map<Path, Expr *, std::less<Path>, traceable_allocator<std::pair<const Path, Expr *> > > FileParseCache;
#else
    typedef std::map<Path, Expr *> FileParseCache;
#endif
    FileParseCache fileParseCache;

    /* A cache from path names to values. */
#if HAVE_BOEHMGC
    typedef std::map<Path, Value, std::less<Path>, traceable_allocator<std::pair<const Path, Value> > > FileEvalCache;
#else
    typedef std::map<Path, Value> FileEvalCache;
#endif
    FileEvalCache fileEvalCache;

    SearchPath searchPath;

    std::map<std::string, std::pair<bool, std::string>> searchPathResolved;

    /* Cache used by checkSourcePath(). */
    std::unordered_map<Path, Path> resolvedPaths;

public:

    EvalState(const Strings & _searchPath, ref<Store> store);
    ~EvalState();

    void addToSearchPath(const string & s);

    SearchPath getSearchPath() { return searchPath; }

    Path checkSourcePath(const Path & path);

    void checkURI(const std::string & uri);

    /* When using a diverted store and 'path' is in the Nix store, map
       'path' to the diverted location (e.g. /nix/store/foo is mapped
       to /home/alice/my-nix/nix/store/foo). However, this is only
       done if the context is not empty, since otherwise we're
       probably trying to read from the actual /nix/store. This is
       intended to distinguish between import-from-derivation and
       sources stored in the actual /nix/store. */
    Path toRealPath(const Path & path, const PathSet & context);

    /* Parse a Nix expression from the specified file. */
    Expr * parseExprFromFile(const Path & path);
    Expr * parseExprFromFile(const Path & path, StaticEnv & staticEnv);

    /* Parse a Nix expression from the specified string. */
    Expr * parseExprFromString(const string & s, const Path & basePath, StaticEnv & staticEnv);
    Expr * parseExprFromString(const string & s, const Path & basePath);

    Expr * parseStdin();

    /* Evaluate an expression read from the given file to normal
       form. */
    void evalFile(const Path & path, Value & v);

    void resetFileCache();

    /* Look up a file in the search path. */
    Path findFile(const string & path);
    Path findFile(SearchPath & searchPath, const string & path, const Pos & pos = noPos);

    /* If the specified search path element is a URI, download it. */
    std::pair<bool, std::string> resolveSearchPathElem(const SearchPathElem & elem);

    /* Evaluate an expression to normal form, storing the result in
       value `v'. */
    void eval(Expr * e, Value & v);

    /* Evaluate the expression, then verify that it has the expected
       type. */
    inline bool evalBool(Env & env, Expr * e);
    inline bool evalBool(Env & env, Expr * e, const Pos & pos);
    inline void evalAttrs(Env & env, Expr * e, Value & v);

    /* If `v' is a thunk, enter it and overwrite `v' with the result
       of the evaluation of the thunk. If `v' is a delayed function
       application, call the function and overwrite `v' with the
       result. Otherwise, this is a no-op. */
    inline void forceValue(Value & v, const Pos & pos = noPos);

    /* Force a value, then recursively force list elements and
       attributes. */
    void forceValueDeep(Value & v);

    /* Force `v', and then verify that it has the expected type. */
    NixInt forceInt(Value & v, const Pos & pos);
    NixFloat forceFloat(Value & v, const Pos & pos);
    bool forceBool(Value & v, const Pos & pos);
    inline void forceAttrs(Value & v);
    inline void forceAttrs(Value & v, const Pos & pos);
    inline void forceList(Value & v);
    inline void forceList(Value & v, const Pos & pos);
    void forceFunction(Value & v, const Pos & pos); // either lambda or primop
    string forceString(Value & v, const Pos & pos = noPos);
    string forceString(Value & v, PathSet & context, const Pos & pos = noPos);
    string forceStringNoCtx(Value & v, const Pos & pos = noPos);

    /* Return true iff the value `v' denotes a derivation (i.e. a
       set with attribute `type = "derivation"'). */
    bool isDerivation(Value & v);

    std::optional<string> tryAttrsToString(const Pos & pos, Value & v,
        PathSet & context, bool coerceMore = false, bool copyToStore = true);

    /* String coercion. Converts strings, paths and derivations to a
       string. If `coerceMore' is set, also converts nulls, integers,
       booleans and lists to a string. If `copyToStore' is set,
       referenced paths are copied to the Nix store as a side effect. */
    string coerceToString(const Pos & pos, Value & v, PathSet & context,
        bool coerceMore = false, bool copyToStore = true);

    string copyPathToStore(PathSet & context, const Path & path);

    /* Path coercion. Converts strings, paths and derivations to a
       path. The result is guaranteed to be a canonicalised, absolute
       path. Nothing is copied to the store. */
    Path coerceToPath(const Pos & pos, Value & v, PathSet & context);

public:

    /* The base environment, containing the builtin functions and
       values. */
    Env & baseEnv;

    /* The same, but used during parsing to resolve variables. */
    StaticEnv staticBaseEnv; // !!! should be private

private:

    unsigned int baseEnvDispl = 0;

    void createBaseEnv();

    Value * addConstant(const string & name, Value & v);

    Value * addPrimOp(const string & name,
        size_t arity, PrimOpFun primOp);

public:

    Value & getBuiltin(const string & name);

private:

    inline Value * lookupVar(Env * env, const ExprVar & var, bool noEval);

    friend struct ExprVar;
    friend struct ExprAttrs;
    friend struct ExprLet;

    Expr * parse(const char * text, const Path & path,
        const Path & basePath, StaticEnv & staticEnv);

public:

    /* Do a deep equality test between two values. That is, list
       elements and attributes are compared recursively. */
    bool eqValues(Value & v1, Value & v2);

    bool isFunctor(Value & fun);

    void callFunction(Value & fun, Value & arg, Value & v, const Pos & pos);
    void callPrimOp(Value & fun, Value & arg, Value & v, const Pos & pos);

    /* Automatically call a function for which each argument has a
       default value or has a binding in the `args' map. */
    void autoCallFunction(Bindings & args, Value & fun, Value & res);

    /* Allocation primitives. */
    Value * allocValue();
    Env & allocEnv(size_t size);

    Value * allocAttr(Value & vAttrs, const Symbol & name);

    Bindings * allocBindings(size_t capacity);

    void mkList(Value & v, size_t length);
    void mkAttrs(Value & v, size_t capacity);
    void mkThunk_(Value & v, Expr * expr);
    void mkPos(Value & v, Pos * pos);

    void concatLists(Value & v, size_t nrLists, Value * * lists, const Pos & pos);

    /* Print statistics. */
    void printStats();

    void realiseContext(const PathSet & context);

private:

    unsigned long nrEnvs = 0;
    unsigned long nrValuesInEnvs = 0;
    unsigned long nrValues = 0;
    unsigned long nrListElems = 0;
    unsigned long nrAttrsets = 0;
    unsigned long nrAttrsInAttrsets = 0;
    unsigned long nrOpUpdates = 0;
    unsigned long nrOpUpdateValuesCopied = 0;
    unsigned long nrListConcats = 0;
    unsigned long nrPrimOpCalls = 0;
    unsigned long nrFunctionCalls = 0;

    bool countCalls;

    typedef std::map<Symbol, size_t> PrimOpCalls;
    PrimOpCalls primOpCalls;

    typedef std::map<ExprLambda *, size_t> FunctionCalls;
    FunctionCalls functionCalls;

    void incrFunctionCall(ExprLambda * fun);

    typedef std::map<Pos, size_t> AttrSelects;
    AttrSelects attrSelects;

    friend struct ExprOpUpdate;
    friend struct ExprOpConcatLists;
    friend struct ExprSelect;
    friend void prim_getAttr(EvalState & state, const Pos & pos, Value * * args, Value & v);
};


/* Return a string representing the type of the value `v'. */
string showType(const Value & v);

/* Decode a context string ‘!<name>!<path>’ into a pair <path,
   name>. */
std::pair<string, string> decodeContext(const string & s);

/* If `path' refers to a directory, then append "/default.nix". */
Path resolveExprPath(Path path);

struct InvalidPathError : EvalError
{
    Path path;
    InvalidPathError(const Path & path);
#ifdef EXCEPTION_NEEDS_THROW_SPEC
    ~InvalidPathError() throw () { };
#endif
};

struct EvalSettings : Config
{
    Setting<bool> enableNativeCode{this, false, "allow-unsafe-native-code-during-evaluation",
        "Whether builtin functions that allow executing native code should be enabled."};

    Setting<bool> restrictEval{this, false, "restrict-eval",
        "Whether to restrict file system access to paths in $NIX_PATH, "
        "and network access to the URI prefixes listed in 'allowed-uris'."};

    Setting<bool> pureEval{this, false, "pure-eval",
        "Whether to restrict file system and network access to files specified by cryptographic hash."};

    Setting<bool> enableImportFromDerivation{this, true, "allow-import-from-derivation",
        "Whether the evaluator allows importing the result of a derivation."};

    Setting<Strings> allowedUris{this, {}, "allowed-uris",
        "Prefixes of URIs that builtin functions such as fetchurl and fetchGit are allowed to fetch."};

    Setting<bool> traceFunctionCalls{this, false, "trace-function-calls",
        "Emit log messages for each function entry and exit at the 'vomit' log level (-vvvv)"};
};

extern EvalSettings evalSettings;

}
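EvalSettings follows libutil's Config/Setting pattern: each Setting carries a default, a name, and a description, and registers itself with its owning Config so it can later be set by name (e.g. from nix.conf or the command line). A hand-rolled toy model of just the registration mechanics, with invented names; the real machinery lives in config.hh and is richer than this:

#include <iostream>
#include <map>
#include <string>

struct AbstractSetting;
struct Config {
    std::map<std::string, AbstractSetting*> settings; // name -> setting
};

struct AbstractSetting {
    virtual void set(const std::string& value) = 0;
    virtual ~AbstractSetting() = default;
};

struct BoolSetting : AbstractSetting {
    bool value;
    BoolSetting(Config* owner, bool def, const std::string& name)
        : value(def) { owner->settings[name] = this; } // self-registration
    void set(const std::string& v) override { value = (v == "true"); }
    operator bool() const { return value; }
};

struct ToyEvalSettings : Config {
    BoolSetting pureEval{this, false, "pure-eval"};
};

int main() {
    ToyEvalSettings s;
    s.settings.at("pure-eval")->set("true");  // e.g. driven by a config file
    std::cout << (s.pureEval ? "pure" : "impure") << "\n"; // pure
}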
third_party/nix/src/libexpr/function-trace.cc (vendored, new file, 17 lines)
@@ -0,0 +1,17 @@
#include "function-trace.hh"

namespace nix {

FunctionCallTrace::FunctionCallTrace(const Pos & pos) : pos(pos) {
    auto duration = std::chrono::high_resolution_clock::now().time_since_epoch();
    auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
    printMsg(lvlInfo, "function-trace entered %1% at %2%", pos, ns.count());
}

FunctionCallTrace::~FunctionCallTrace() {
    auto duration = std::chrono::high_resolution_clock::now().time_since_epoch();
    auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(duration);
    printMsg(lvlInfo, "function-trace exited %1% at %2%", pos, ns.count());
}

}
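FunctionCallTrace is pure RAII: constructing one logs entry, destroying it logs exit, so the exit line is printed even when evaluation unwinds through an exception. A self-contained, runnable model of the pattern (names invented for the sketch):

#include <chrono>
#include <iostream>

struct ScopeTrace {
    const char* name;
    explicit ScopeTrace(const char* n) : name(n) {
        std::cout << "entered " << name << " at " << now() << "\n";
    }
    ~ScopeTrace() {   // runs on normal return and on stack unwinding alike
        std::cout << "exited " << name << " at " << now() << "\n";
    }
    static long long now() {
        auto d = std::chrono::high_resolution_clock::now().time_since_epoch();
        return std::chrono::duration_cast<std::chrono::nanoseconds>(d).count();
    }
};

int main() {
    ScopeTrace t("main");        // prints "entered main at ..."
    { ScopeTrace u("inner"); }   // inner exit line appears before main's
}                                // prints "exited main at ..."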
third_party/nix/src/libexpr/function-trace.hh (vendored, new file, 15 lines)
@@ -0,0 +1,15 @@
#pragma once

#include "eval.hh"

#include <chrono>

namespace nix {

struct FunctionCallTrace
{
    const Pos & pos;
    FunctionCallTrace(const Pos & pos);
    ~FunctionCallTrace();
};
}
third_party/nix/src/libexpr/get-drvs.cc (vendored, new file, 380 lines)
@@ -0,0 +1,380 @@
#include "get-drvs.hh"
#include "util.hh"
#include "eval-inline.hh"
#include "derivations.hh"

#include <cstring>
#include <regex>


namespace nix {


DrvInfo::DrvInfo(EvalState & state, const string & attrPath, Bindings * attrs)
    : state(&state), attrs(attrs), attrPath(attrPath)
{
}


DrvInfo::DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs)
    : state(&state), attrs(nullptr), attrPath("")
{
    auto spec = parseDrvPathWithOutputs(drvPathWithOutputs);

    drvPath = spec.first;

    auto drv = store->derivationFromPath(drvPath);

    name = storePathToName(drvPath);

    if (spec.second.size() > 1)
        throw Error("building more than one derivation output is not supported, in '%s'", drvPathWithOutputs);

    outputName =
        spec.second.empty()
        ? get(drv.env, "outputName", "out")
        : *spec.second.begin();

    auto i = drv.outputs.find(outputName);
    if (i == drv.outputs.end())
        throw Error("derivation '%s' does not have output '%s'", drvPath, outputName);

    outPath = i->second.path;
}


string DrvInfo::queryName() const
{
    if (name == "" && attrs) {
        auto i = attrs->find(state->sName);
        if (i == attrs->end()) throw TypeError("derivation name missing");
        name = state->forceStringNoCtx(*i->value);
    }
    return name;
}


string DrvInfo::querySystem() const
{
    if (system == "" && attrs) {
        auto i = attrs->find(state->sSystem);
        system = i == attrs->end() ? "unknown" : state->forceStringNoCtx(*i->value, *i->pos);
    }
    return system;
}


string DrvInfo::queryDrvPath() const
{
    if (drvPath == "" && attrs) {
        Bindings::iterator i = attrs->find(state->sDrvPath);
        PathSet context;
        drvPath = i != attrs->end() ? state->coerceToPath(*i->pos, *i->value, context) : "";
    }
    return drvPath;
}


string DrvInfo::queryOutPath() const
{
    if (outPath == "" && attrs) {
        Bindings::iterator i = attrs->find(state->sOutPath);
        PathSet context;
        outPath = i != attrs->end() ? state->coerceToPath(*i->pos, *i->value, context) : "";
    }
    return outPath;
}


DrvInfo::Outputs DrvInfo::queryOutputs(bool onlyOutputsToInstall)
{
    if (outputs.empty()) {
        /* Get the ‘outputs’ list. */
        Bindings::iterator i;
        if (attrs && (i = attrs->find(state->sOutputs)) != attrs->end()) {
            state->forceList(*i->value, *i->pos);

            /* For each output... */
            for (unsigned int j = 0; j < i->value->listSize(); ++j) {
                /* Evaluate the corresponding set. */
                string name = state->forceStringNoCtx(*i->value->listElems()[j], *i->pos);
                Bindings::iterator out = attrs->find(state->symbols.create(name));
                if (out == attrs->end()) continue; // FIXME: throw error?
                state->forceAttrs(*out->value);

                /* And evaluate its ‘outPath’ attribute. */
                Bindings::iterator outPath = out->value->attrs->find(state->sOutPath);
                if (outPath == out->value->attrs->end()) continue; // FIXME: throw error?
                PathSet context;
                outputs[name] = state->coerceToPath(*outPath->pos, *outPath->value, context);
            }
        } else
            outputs["out"] = queryOutPath();
    }
    if (!onlyOutputsToInstall || !attrs)
        return outputs;

    /* Check for `meta.outputsToInstall` and return `outputs` reduced to that. */
    const Value * outTI = queryMeta("outputsToInstall");
    if (!outTI) return outputs;
    const auto errMsg = Error("this derivation has bad 'meta.outputsToInstall'");
    /* ^ this shows during `nix-env -i` right under the bad derivation */
    if (!outTI->isList()) throw errMsg;
    Outputs result;
    for (auto i = outTI->listElems(); i != outTI->listElems() + outTI->listSize(); ++i) {
        if ((*i)->type != tString) throw errMsg;
        auto out = outputs.find((*i)->string.s);
        if (out == outputs.end()) throw errMsg;
        result.insert(*out);
    }
    return result;
}


string DrvInfo::queryOutputName() const
{
    if (outputName == "" && attrs) {
        Bindings::iterator i = attrs->find(state->sOutputName);
        outputName = i != attrs->end() ? state->forceStringNoCtx(*i->value) : "";
    }
    return outputName;
}


Bindings * DrvInfo::getMeta()
{
    if (meta) return meta;
    if (!attrs) return 0;
    Bindings::iterator a = attrs->find(state->sMeta);
    if (a == attrs->end()) return 0;
    state->forceAttrs(*a->value, *a->pos);
    meta = a->value->attrs;
    return meta;
}


StringSet DrvInfo::queryMetaNames()
{
    StringSet res;
    if (!getMeta()) return res;
    for (auto & i : *meta)
        res.insert(i.name);
    return res;
}


bool DrvInfo::checkMeta(Value & v)
{
    state->forceValue(v);
    if (v.isList()) {
        for (unsigned int n = 0; n < v.listSize(); ++n)
            if (!checkMeta(*v.listElems()[n])) return false;
        return true;
    }
    else if (v.type == tAttrs) {
        Bindings::iterator i = v.attrs->find(state->sOutPath);
        if (i != v.attrs->end()) return false;
        for (auto & i : *v.attrs)
            if (!checkMeta(*i.value)) return false;
        return true;
    }
    else return v.type == tInt || v.type == tBool || v.type == tString ||
                v.type == tFloat;
}


Value * DrvInfo::queryMeta(const string & name)
{
    if (!getMeta()) return 0;
    Bindings::iterator a = meta->find(state->symbols.create(name));
    if (a == meta->end() || !checkMeta(*a->value)) return 0;
    return a->value;
}


string DrvInfo::queryMetaString(const string & name)
{
    Value * v = queryMeta(name);
    if (!v || v->type != tString) return "";
    return v->string.s;
}


NixInt DrvInfo::queryMetaInt(const string & name, NixInt def)
{
    Value * v = queryMeta(name);
    if (!v) return def;
    if (v->type == tInt) return v->integer;
    if (v->type == tString) {
        /* Backwards compatibility with before we had support for
           integer meta fields. */
        NixInt n;
        if (string2Int(v->string.s, n)) return n;
    }
    return def;
}

NixFloat DrvInfo::queryMetaFloat(const string & name, NixFloat def)
{
    Value * v = queryMeta(name);
    if (!v) return def;
    if (v->type == tFloat) return v->fpoint;
    if (v->type == tString) {
        /* Backwards compatibility with before we had support for
           float meta fields. */
        NixFloat n;
        if (string2Float(v->string.s, n)) return n;
    }
    return def;
}


bool DrvInfo::queryMetaBool(const string & name, bool def)
{
    Value * v = queryMeta(name);
    if (!v) return def;
    if (v->type == tBool) return v->boolean;
    if (v->type == tString) {
        /* Backwards compatibility with before we had support for
           Boolean meta fields. */
        if (strcmp(v->string.s, "true") == 0) return true;
        if (strcmp(v->string.s, "false") == 0) return false;
    }
    return def;
}


void DrvInfo::setMeta(const string & name, Value * v)
{
    getMeta();
    Bindings * old = meta;
    meta = state->allocBindings(1 + (old ? old->size() : 0));
    Symbol sym = state->symbols.create(name);
    if (old)
        for (auto i : *old)
            if (i.name != sym)
                meta->push_back(i);
    if (v) meta->push_back(Attr(sym, v));
    meta->sort();
}


/* Cache for already considered attrsets. */
typedef set<Bindings *> Done;


/* Evaluate value `v'. If it evaluates to a set of type `derivation',
   then put information about it in `drvs' (unless it's already in `done').
   The result boolean indicates whether it makes sense
   for the caller to recursively search for derivations in `v'. */
static bool getDerivation(EvalState & state, Value & v,
    const string & attrPath, DrvInfos & drvs, Done & done,
    bool ignoreAssertionFailures)
{
    try {
        state.forceValue(v);
        if (!state.isDerivation(v)) return true;

        /* Remove spurious duplicates (e.g., a set like `rec { x =
           derivation {...}; y = x;}'. */
        if (done.find(v.attrs) != done.end()) return false;
        done.insert(v.attrs);

        DrvInfo drv(state, attrPath, v.attrs);

        drv.queryName();

        drvs.push_back(drv);

        return false;

    } catch (AssertionError & e) {
        if (ignoreAssertionFailures) return false;
        throw;
    }
}


std::optional<DrvInfo> getDerivation(EvalState & state, Value & v,
    bool ignoreAssertionFailures)
{
    Done done;
    DrvInfos drvs;
    getDerivation(state, v, "", drvs, done, ignoreAssertionFailures);
    if (drvs.size() != 1) return {};
    return std::move(drvs.front());
}


static string addToPath(const string & s1, const string & s2)
{
    return s1.empty() ? s2 : s1 + "." + s2;
}


static std::regex attrRegex("[A-Za-z_][A-Za-z0-9-_+]*");


static void getDerivations(EvalState & state, Value & vIn,
    const string & pathPrefix, Bindings & autoArgs,
    DrvInfos & drvs, Done & done,
    bool ignoreAssertionFailures)
{
    Value v;
    state.autoCallFunction(autoArgs, vIn, v);

    /* Process the expression. */
    if (!getDerivation(state, v, pathPrefix, drvs, done, ignoreAssertionFailures)) ;

    else if (v.type == tAttrs) {

        /* !!! undocumented hackery to support combining channels in
           nix-env.cc. */
        bool combineChannels = v.attrs->find(state.symbols.create("_combineChannels")) != v.attrs->end();

        /* Consider the attributes in sorted order to get more
           deterministic behaviour in nix-env operations (e.g. when
           there are name clashes between derivations, the derivation
           bound to the attribute with the "lower" name should take
           precedence). */
        for (auto & i : v.attrs->lexicographicOrder()) {
            debug("evaluating attribute '%1%'", i->name);
            if (!std::regex_match(std::string(i->name), attrRegex))
                continue;
            string pathPrefix2 = addToPath(pathPrefix, i->name);
            if (combineChannels)
                getDerivations(state, *i->value, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
            else if (getDerivation(state, *i->value, pathPrefix2, drvs, done, ignoreAssertionFailures)) {
                /* If the value of this attribute is itself a set,
                   should we recurse into it? => Only if it has a
                   `recurseForDerivations = true' attribute. */
                if (i->value->type == tAttrs) {
                    Bindings::iterator j = i->value->attrs->find(state.symbols.create("recurseForDerivations"));
                    if (j != i->value->attrs->end() && state.forceBool(*j->value, *j->pos))
                        getDerivations(state, *i->value, pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
                }
            }
        }
    }

    else if (v.isList()) {
        for (unsigned int n = 0; n < v.listSize(); ++n) {
            string pathPrefix2 = addToPath(pathPrefix, (format("%1%") % n).str());
            if (getDerivation(state, *v.listElems()[n], pathPrefix2, drvs, done, ignoreAssertionFailures))
                getDerivations(state, *v.listElems()[n], pathPrefix2, autoArgs, drvs, done, ignoreAssertionFailures);
        }
    }

    else throw TypeError("expression does not evaluate to a derivation (or a set or list of those)");
}


void getDerivations(EvalState & state, Value & v, const string & pathPrefix,
    Bindings & autoArgs, DrvInfos & drvs, bool ignoreAssertionFailures)
{
    Done done;
    getDerivations(state, v, pathPrefix, autoArgs, drvs, done, ignoreAssertionFailures);
}


}
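getDerivations deduplicates by attrset identity: the Done set stores Bindings pointers, so aliased values (as in rec { x = derivation ...; y = x; }) yield one DrvInfo instead of two. A toy model of that visit-by-pointer idiom, with an invented Node type standing in for attrsets:

#include <iostream>
#include <set>
#include <string>
#include <vector>

struct Node {
    std::string name;
    std::vector<Node*> children;
};

void collect(Node* n, std::set<Node*>& done, std::vector<std::string>& out) {
    if (!done.insert(n).second) return; // already considered this node
    out.push_back(n->name);
    for (auto* c : n->children) collect(c, done, out);
}

int main() {
    Node d{"drv", {}};
    Node root{"root", {&d, &d}};        // the same node reachable twice
    std::set<Node*> done;
    std::vector<std::string> out;
    collect(&root, done, out);
    for (auto& s : out) std::cout << s << "\n"; // root, drv -- drv only once
}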
third_party/nix/src/libexpr/get-drvs.hh (vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
#pragma once

#include "eval.hh"

#include <string>
#include <map>


namespace nix {


struct DrvInfo
{
public:
    typedef std::map<string, Path> Outputs;

private:
    EvalState * state;

    mutable string name;
    mutable string system;
    mutable string drvPath;
    mutable string outPath;
    mutable string outputName;
    Outputs outputs;

    bool failed = false; // set if we get an AssertionError

    Bindings * attrs = nullptr, * meta = nullptr;

    Bindings * getMeta();

    bool checkMeta(Value & v);

public:
    string attrPath; /* path towards the derivation */

    DrvInfo(EvalState & state) : state(&state) { };
    DrvInfo(EvalState & state, const string & attrPath, Bindings * attrs);
    DrvInfo(EvalState & state, ref<Store> store, const std::string & drvPathWithOutputs);

    string queryName() const;
    string querySystem() const;
    string queryDrvPath() const;
    string queryOutPath() const;
    string queryOutputName() const;
    /** Return the list of outputs. The "outputs to install" are determined by `meta.outputsToInstall`. */
    Outputs queryOutputs(bool onlyOutputsToInstall = false);

    StringSet queryMetaNames();
    Value * queryMeta(const string & name);
    string queryMetaString(const string & name);
    NixInt queryMetaInt(const string & name, NixInt def);
    NixFloat queryMetaFloat(const string & name, NixFloat def);
    bool queryMetaBool(const string & name, bool def);
    void setMeta(const string & name, Value * v);

    /*
    MetaInfo queryMetaInfo(EvalState & state) const;
    MetaValue queryMetaInfo(EvalState & state, const string & name) const;
    */

    void setName(const string & s) { name = s; }
    void setDrvPath(const string & s) { drvPath = s; }
    void setOutPath(const string & s) { outPath = s; }

    void setFailed() { failed = true; };
    bool hasFailed() { return failed; };
};


#if HAVE_BOEHMGC
typedef list<DrvInfo, traceable_allocator<DrvInfo> > DrvInfos;
#else
typedef list<DrvInfo> DrvInfos;
#endif


/* If value `v' denotes a derivation, return a DrvInfo object
   describing it. Otherwise return nothing. */
std::optional<DrvInfo> getDerivation(EvalState & state,
    Value & v, bool ignoreAssertionFailures);

void getDerivations(EvalState & state, Value & v, const string & pathPrefix,
    Bindings & autoArgs, DrvInfos & drvs,
    bool ignoreAssertionFailures);


}
149
third_party/nix/src/libexpr/json-to-value.cc
vendored
Normal file
149
third_party/nix/src/libexpr/json-to-value.cc
vendored
Normal file
|
|
@ -0,0 +1,149 @@
|
|||
#include "json-to-value.hh"
|
||||
|
||||
#include <cstring>
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static void skipWhitespace(const char * & s)
|
||||
{
|
||||
while (*s == ' ' || *s == '\t' || *s == '\n' || *s == '\r') s++;
|
||||
}
|
||||
|
||||
|
||||
static string parseJSONString(const char * & s)
|
||||
{
|
||||
string res;
|
||||
if (*s++ != '"') throw JSONParseError("expected JSON string");
|
||||
while (*s != '"') {
|
||||
if (!*s) throw JSONParseError("got end-of-string in JSON string");
|
||||
if (*s == '\\') {
|
||||
s++;
|
||||
if (*s == '"') res += '"';
|
||||
else if (*s == '\\') res += '\\';
|
||||
else if (*s == '/') res += '/';
|
||||
else if (*s == '/') res += '/';
|
||||
else if (*s == 'b') res += '\b';
|
||||
else if (*s == 'f') res += '\f';
|
||||
else if (*s == 'n') res += '\n';
|
||||
else if (*s == 'r') res += '\r';
|
||||
else if (*s == 't') res += '\t';
|
||||
else if (*s == 'u') throw JSONParseError("\\u characters in JSON strings are currently not supported");
|
||||
else throw JSONParseError("invalid escaped character in JSON string");
|
||||
s++;
|
||||
} else
|
||||
res += *s++;
|
||||
}
|
||||
s++;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
static void parseJSON(EvalState & state, const char * & s, Value & v)
|
||||
{
|
||||
skipWhitespace(s);
|
||||
|
||||
if (!*s) throw JSONParseError("expected JSON value");
|
||||
|
||||
if (*s == '[') {
|
||||
s++;
|
||||
ValueVector values;
|
||||
values.reserve(128);
|
||||
skipWhitespace(s);
|
||||
while (1) {
|
||||
if (values.empty() && *s == ']') break;
|
||||
Value * v2 = state.allocValue();
|
||||
parseJSON(state, s, *v2);
|
||||
values.push_back(v2);
|
||||
skipWhitespace(s);
|
||||
if (*s == ']') break;
|
||||
if (*s != ',') throw JSONParseError("expected ',' or ']' after JSON array element");
|
||||
s++;
|
||||
}
|
||||
s++;
|
||||
state.mkList(v, values.size());
|
||||
for (size_t n = 0; n < values.size(); ++n)
|
||||
v.listElems()[n] = values[n];
|
||||
}
|
||||
|
||||
else if (*s == '{') {
|
||||
s++;
|
||||
ValueMap attrs;
|
||||
while (1) {
|
||||
skipWhitespace(s);
|
||||
if (attrs.empty() && *s == '}') break;
|
||||
string name = parseJSONString(s);
|
||||
skipWhitespace(s);
|
||||
if (*s != ':') throw JSONParseError("expected ':' in JSON object");
|
||||
s++;
|
||||
Value * v2 = state.allocValue();
|
||||
parseJSON(state, s, *v2);
|
||||
attrs[state.symbols.create(name)] = v2;
|
||||
skipWhitespace(s);
|
||||
if (*s == '}') break;
|
||||
if (*s != ',') throw JSONParseError("expected ',' or '}' after JSON member");
|
||||
s++;
|
||||
}
|
||||
state.mkAttrs(v, attrs.size());
|
||||
for (auto & i : attrs)
|
||||
v.attrs->push_back(Attr(i.first, i.second));
|
||||
v.attrs->sort();
|
||||
s++;
|
||||
}
|
||||
|
||||
else if (*s == '"') {
|
||||
mkString(v, parseJSONString(s));
|
||||
}
|
||||
|
||||
else if (isdigit(*s) || *s == '-' || *s == '.' ) {
|
||||
// Buffer into a string first, then use built-in C++ conversions
|
||||
std::string tmp_number;
|
||||
ValueType number_type = tInt;
|
||||
|
||||
while (isdigit(*s) || *s == '-' || *s == '.' || *s == 'e' || *s == 'E') {
|
||||
if (*s == '.' || *s == 'e' || *s == 'E')
|
||||
number_type = tFloat;
|
||||
tmp_number += *s++;
|
||||
}
|
||||
|
||||
try {
|
||||
if (number_type == tFloat)
|
||||
mkFloat(v, stod(tmp_number));
|
||||
else
|
||||
mkInt(v, stol(tmp_number));
|
||||
} catch (std::invalid_argument & e) {
|
||||
throw JSONParseError("invalid JSON number");
|
||||
} catch (std::out_of_range & e) {
|
||||
throw JSONParseError("out-of-range JSON number");
|
||||
}
|
||||
}
|
||||
|
||||
else if (strncmp(s, "true", 4) == 0) {
|
||||
s += 4;
|
||||
mkBool(v, true);
|
||||
}
|
||||
|
||||
else if (strncmp(s, "false", 5) == 0) {
|
||||
s += 5;
|
||||
mkBool(v, false);
|
||||
}
|
||||
|
||||
else if (strncmp(s, "null", 4) == 0) {
|
||||
s += 4;
|
||||
mkNull(v);
|
||||
}
|
||||
|
||||
else throw JSONParseError("unrecognised JSON value");
|
||||
}
|
||||
|
||||
|
||||
void parseJSON(EvalState & state, const string & s_, Value & v)
|
||||
{
|
||||
const char * s = s_.c_str();
|
||||
parseJSON(state, s, v);
|
||||
skipWhitespace(s);
|
||||
if (*s) throw JSONParseError(format("expected end-of-string while parsing JSON value: %1%") % s);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
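An aside on the technique: the parser above is a plain recursive descent over a `const char * &` cursor, where each helper consumes exactly the characters it recognises and leaves the cursor on the first unconsumed byte, so the caller can keep scanning. A minimal self-contained sketch of that cursor-advancing string scanner, with hypothetical names chosen for illustration (not this file's API):

#include <cassert>
#include <stdexcept>
#include <string>

// Sketch of the cursor-based scanner used above: the cursor is passed
// by reference so the caller observes how far the scan consumed.
static std::string scanQuoted(const char *& s) {
    std::string res;
    if (*s++ != '"') throw std::runtime_error("expected string");
    while (*s != '"') {
        if (!*s) throw std::runtime_error("unterminated string");
        if (*s == '\\') {
            s++;
            switch (*s) {
                case 'n': res += '\n'; break;
                case 't': res += '\t'; break;
                case '"': case '\\': case '/': res += *s; break;
                default: throw std::runtime_error("bad escape");
            }
            s++;
        } else
            res += *s++;
    }
    s++; // consume the closing quote
    return res;
}

int main() {
    const char * s = "\"a\\nb\" rest";
    assert(scanQuoted(s) == "a\nb");
    assert(std::string(s) == " rest"); // cursor left just past the literal
}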
13
third_party/nix/src/libexpr/json-to-value.hh
vendored
Normal file
@@ -0,0 +1,13 @@
#pragma once

#include "eval.hh"

#include <string>

namespace nix {

MakeError(JSONParseError, EvalError)

void parseJSON(EvalState & state, const string & s, Value & v);

}
222
third_party/nix/src/libexpr/lexer.l
vendored
Normal file
@@ -0,0 +1,222 @@
%option reentrant bison-bridge bison-locations
%option noyywrap
%option never-interactive
%option stack
%option nodefault
%option nounput noyy_top_state


%s DEFAULT
%x STRING
%x IND_STRING


%{
#include <boost/lexical_cast.hpp>

#include "nixexpr.hh"
#include "parser-tab.hh"

using namespace nix;

namespace nix {


static void initLoc(YYLTYPE * loc)
{
    loc->first_line = loc->last_line = 1;
    loc->first_column = loc->last_column = 1;
}


static void adjustLoc(YYLTYPE * loc, const char * s, size_t len)
{
    loc->first_line = loc->last_line;
    loc->first_column = loc->last_column;

    while (len--) {
       switch (*s++) {
       case '\r':
           if (*s == '\n') /* cr/lf */
               s++;
           /* fall through */
       case '\n':
           ++loc->last_line;
           loc->last_column = 1;
           break;
       default:
           ++loc->last_column;
       }
    }
}


static Expr * unescapeStr(SymbolTable & symbols, const char * s, size_t length)
{
    string t;
    t.reserve(length);
    char c;
    while ((c = *s++)) {
        if (c == '\\') {
            assert(*s);
            c = *s++;
            if (c == 'n') t += '\n';
            else if (c == 'r') t += '\r';
            else if (c == 't') t += '\t';
            else t += c;
        }
        else if (c == '\r') {
            /* Normalise CR and CR/LF into LF. */
            t += '\n';
            if (*s == '\n') s++; /* cr/lf */
        }
        else t += c;
    }
    return new ExprString(symbols.create(t));
}


}

#define YY_USER_INIT initLoc(yylloc)
#define YY_USER_ACTION adjustLoc(yylloc, yytext, yyleng);

#define PUSH_STATE(state) yy_push_state(state, yyscanner)
#define POP_STATE() yy_pop_state(yyscanner)

%}


ANY         .|\n
ID          [a-zA-Z\_][a-zA-Z0-9\_\'\-]*
INT         [0-9]+
FLOAT       (([1-9][0-9]*\.[0-9]*)|(0?\.[0-9]+))([Ee][+-]?[0-9]+)?
PATH        [a-zA-Z0-9\.\_\-\+]*(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
HPATH       \~(\/[a-zA-Z0-9\.\_\-\+]+)+\/?
SPATH       \<[a-zA-Z0-9\.\_\-\+]+(\/[a-zA-Z0-9\.\_\-\+]+)*\>
URI         [a-zA-Z][a-zA-Z0-9\+\-\.]*\:[a-zA-Z0-9\%\/\?\:\@\&\=\+\$\,\-\_\.\!\~\*\']+


%%


if          { return IF; }
then        { return THEN; }
else        { return ELSE; }
assert      { return ASSERT; }
with        { return WITH; }
let         { return LET; }
in          { return IN; }
rec         { return REC; }
inherit     { return INHERIT; }
or          { return OR_KW; }
\.\.\.      { return ELLIPSIS; }

\=\=        { return EQ; }
\!\=        { return NEQ; }
\<\=        { return LEQ; }
\>\=        { return GEQ; }
\&\&        { return AND; }
\|\|        { return OR; }
\-\>        { return IMPL; }
\/\/        { return UPDATE; }
\+\+        { return CONCAT; }

{ID}        { yylval->id = strdup(yytext); return ID; }
{INT}       { errno = 0;
              try {
                  yylval->n = boost::lexical_cast<int64_t>(yytext);
              } catch (const boost::bad_lexical_cast &) {
                  throw ParseError(format("invalid integer '%1%'") % yytext);
              }
              return INT;
            }
{FLOAT}     { errno = 0;
              yylval->nf = strtod(yytext, 0);
              if (errno != 0)
                  throw ParseError(format("invalid float '%1%'") % yytext);
              return FLOAT;
            }

\$\{        { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; }

\}          { /* State INITIAL only exists at the bottom of the stack and is
                 used as a marker. DEFAULT replaces it everywhere else.
                 Popping when in INITIAL state causes an empty stack exception,
                 so don't */
              if (YYSTATE != INITIAL)
                  POP_STATE();
              return '}';
            }
\{          { PUSH_STATE(DEFAULT); return '{'; }

\"          { PUSH_STATE(STRING); return '"'; }
<STRING>([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})*\$/\" |
<STRING>([^\$\"\\]|\$[^\{\"\\]|\\{ANY}|\$\\{ANY})+ {
                /* It is impossible to match strings ending with '$' with one
                   regex because trailing contexts are only valid at the end
                   of a rule. (A sane but undocumented limitation.) */
                yylval->e = unescapeStr(data->symbols, yytext, yyleng);
                return STR;
              }
<STRING>\$\{  { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; }
<STRING>\"    { POP_STATE(); return '"'; }
<STRING>\$|\\|\$\\ {
                /* This can only occur when we reach EOF, otherwise the above
                   (...|\$[^\{\"\\]|\\.|\$\\.)+ would have triggered.
                   This is technically invalid, but we leave the problem to the
                   parser who fails with exact location. */
                return STR;
              }

\'\'(\ *\n)?  { PUSH_STATE(IND_STRING); return IND_STRING_OPEN; }
<IND_STRING>([^\$\']|\$[^\{\']|\'[^\'\$])+ {
                yylval->e = new ExprIndStr(yytext);
                return IND_STR;
              }
<IND_STRING>\'\'\$ |
<IND_STRING>\$ {
                yylval->e = new ExprIndStr("$");
                return IND_STR;
              }
<IND_STRING>\'\'\' {
                yylval->e = new ExprIndStr("''");
                return IND_STR;
              }
<IND_STRING>\'\'\\{ANY} {
                yylval->e = unescapeStr(data->symbols, yytext + 2, yyleng - 2);
                return IND_STR;
              }
<IND_STRING>\$\{ { PUSH_STATE(DEFAULT); return DOLLAR_CURLY; }
<IND_STRING>\'\' { POP_STATE(); return IND_STRING_CLOSE; }
<IND_STRING>\' {
                yylval->e = new ExprIndStr("'");
                return IND_STR;
              }


{PATH}      { if (yytext[yyleng-1] == '/')
                  throw ParseError("path '%s' has a trailing slash", yytext);
              yylval->path = strdup(yytext);
              return PATH;
            }
{HPATH}     { if (yytext[yyleng-1] == '/')
                  throw ParseError("path '%s' has a trailing slash", yytext);
              yylval->path = strdup(yytext);
              return HPATH;
            }
{SPATH}     { yylval->path = strdup(yytext); return SPATH; }
{URI}       { yylval->uri = strdup(yytext); return URI; }

[ \t\r\n]+    /* eat up whitespace */
\#[^\r\n]*    /* single-line comments */
\/\*([^*]|\*+[^*/])*\*+\/  /* long comments */

{ANY}       {
              /* Don't return a negative number, as this will cause
                 Bison to stop parsing without an error. */
              return (unsigned char) yytext[0];
            }

%%
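Note that `unescapeStr` above does two jobs in one pass: it resolves backslash escapes and normalises CR and CR/LF line endings into LF. A standalone C++ sketch of that same single-pass normalisation, with a hypothetical name for illustration only:

#include <cassert>
#include <string>

// One pass over the input: '\\' escapes are resolved, and "\r" or
// "\r\n" both collapse to '\n', mirroring the lexer helper above.
static std::string unescapeSketch(const char * s) {
    std::string t;
    char c;
    while ((c = *s++)) {
        if (c == '\\') {
            c = *s++;
            if (c == 'n') t += '\n';
            else if (c == 'r') t += '\r';
            else if (c == 't') t += '\t';
            else t += c;
        } else if (c == '\r') {
            t += '\n';
            if (*s == '\n') s++;   // CR/LF counts as a single newline
        } else
            t += c;
    }
    return t;
}

int main() {
    assert(unescapeSketch("a\r\nb\rc") == "a\nb\nc");
    assert(unescapeSketch("x\\ty") == "x\ty");
}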
33
third_party/nix/src/libexpr/local.mk
vendored
Normal file
@@ -0,0 +1,33 @@
libraries += libexpr

libexpr_NAME = libnixexpr

libexpr_DIR := $(d)

libexpr_SOURCES := $(wildcard $(d)/*.cc) $(wildcard $(d)/primops/*.cc) $(d)/lexer-tab.cc $(d)/parser-tab.cc

libexpr_LIBS = libutil libstore

libexpr_LDFLAGS =
ifneq ($(OS), FreeBSD)
 libexpr_LDFLAGS += -ldl
endif

# The dependency on libgc must be propagated (i.e. meaning that
# programs/libraries that use libexpr must explicitly pass -lgc),
# because inline functions in libexpr's header files call libgc.
libexpr_LDFLAGS_PROPAGATED = $(BDW_GC_LIBS)

libexpr_ORDER_AFTER := $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh

$(d)/parser-tab.cc $(d)/parser-tab.hh: $(d)/parser.y
	$(trace-gen) bison -v -o $(libexpr_DIR)/parser-tab.cc $< -d

$(d)/lexer-tab.cc $(d)/lexer-tab.hh: $(d)/lexer.l
	$(trace-gen) flex --outfile $(libexpr_DIR)/lexer-tab.cc --header-file=$(libexpr_DIR)/lexer-tab.hh $<

clean-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh

dist-files += $(d)/parser-tab.cc $(d)/parser-tab.hh $(d)/lexer-tab.cc $(d)/lexer-tab.hh

$(eval $(call install-file-in, $(d)/nix-expr.pc, $(prefix)/lib/pkgconfig, 0644))
107
third_party/nix/src/libexpr/names.cc
vendored
Normal file
@@ -0,0 +1,107 @@
#include "names.hh"
|
||||
#include "util.hh"
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
DrvName::DrvName()
|
||||
{
|
||||
name = "";
|
||||
}
|
||||
|
||||
|
||||
/* Parse a derivation name. The `name' part of a derivation name is
|
||||
everything up to but not including the first dash *not* followed by
|
||||
a letter. The `version' part is the rest (excluding the separating
|
||||
dash). E.g., `apache-httpd-2.0.48' is parsed to (`apache-httpd',
|
||||
'2.0.48'). */
|
||||
DrvName::DrvName(const string & s) : hits(0)
|
||||
{
|
||||
name = fullName = s;
|
||||
for (unsigned int i = 0; i < s.size(); ++i) {
|
||||
/* !!! isalpha/isdigit are affected by the locale. */
|
||||
if (s[i] == '-' && i + 1 < s.size() && !isalpha(s[i + 1])) {
|
||||
name = string(s, 0, i);
|
||||
version = string(s, i + 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool DrvName::matches(DrvName & n)
|
||||
{
|
||||
if (name != "*") {
|
||||
if (!regex) regex = std::unique_ptr<std::regex>(new std::regex(name, std::regex::extended));
|
||||
if (!std::regex_match(n.name, *regex)) return false;
|
||||
}
|
||||
if (version != "" && version != n.version) return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
string nextComponent(string::const_iterator & p,
|
||||
const string::const_iterator end)
|
||||
{
|
||||
/* Skip any dots and dashes (component separators). */
|
||||
while (p != end && (*p == '.' || *p == '-')) ++p;
|
||||
|
||||
if (p == end) return "";
|
||||
|
||||
/* If the first character is a digit, consume the longest sequence
|
||||
of digits. Otherwise, consume the longest sequence of
|
||||
non-digit, non-separator characters. */
|
||||
string s;
|
||||
if (isdigit(*p))
|
||||
while (p != end && isdigit(*p)) s += *p++;
|
||||
else
|
||||
while (p != end && (!isdigit(*p) && *p != '.' && *p != '-'))
|
||||
s += *p++;
|
||||
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
static bool componentsLT(const string & c1, const string & c2)
|
||||
{
|
||||
int n1, n2;
|
||||
bool c1Num = string2Int(c1, n1), c2Num = string2Int(c2, n2);
|
||||
|
||||
if (c1Num && c2Num) return n1 < n2;
|
||||
else if (c1 == "" && c2Num) return true;
|
||||
else if (c1 == "pre" && c2 != "pre") return true;
|
||||
else if (c2 == "pre") return false;
|
||||
/* Assume that `2.3a' < `2.3.1'. */
|
||||
else if (c2Num) return true;
|
||||
else if (c1Num) return false;
|
||||
else return c1 < c2;
|
||||
}
|
||||
|
||||
|
||||
int compareVersions(const string & v1, const string & v2)
|
||||
{
|
||||
string::const_iterator p1 = v1.begin();
|
||||
string::const_iterator p2 = v2.begin();
|
||||
|
||||
while (p1 != v1.end() || p2 != v2.end()) {
|
||||
string c1 = nextComponent(p1, v1.end());
|
||||
string c2 = nextComponent(p2, v2.end());
|
||||
if (componentsLT(c1, c2)) return -1;
|
||||
else if (componentsLT(c2, c1)) return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
DrvNames drvNamesFromArgs(const Strings & opArgs)
|
||||
{
|
||||
DrvNames result;
|
||||
for (auto & i : opArgs)
|
||||
result.push_back(DrvName(i));
|
||||
return result;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
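The component-wise ordering above is what gives Nix its familiar version semantics: numeric components compare numerically, a missing component sorts below a numeric one (so 2.3 < 2.3.1), the literal component "pre" sorts below everything else (so 1.0pre5 < 1.0), and letter components sort below numbers (so 2.3a < 2.3.1). A self-contained sketch replicating that comparison on plain strings, with hypothetical names and std::stoi standing in for this file's string2Int:

#include <cassert>
#include <cctype>
#include <string>

// Split off the next version component: a run of digits, or a run of
// non-digit, non-separator characters ('.' and '-' separate components).
static std::string next(std::string::const_iterator & p,
    std::string::const_iterator end)
{
    while (p != end && (*p == '.' || *p == '-')) ++p;
    std::string s;
    if (p == end) return s;
    if (isdigit(*p)) while (p != end && isdigit(*p)) s += *p++;
    else while (p != end && !isdigit(*p) && *p != '.' && *p != '-') s += *p++;
    return s;
}

static bool lt(const std::string & a, const std::string & b)
{
    bool aNum = !a.empty() && isdigit(a[0]), bNum = !b.empty() && isdigit(b[0]);
    if (aNum && bNum) return std::stoi(a) < std::stoi(b);
    if (a.empty() && bNum) return true;        // "2.3" < "2.3.1"
    if (a == "pre" && b != "pre") return true; // "1.0pre5" < "1.0"
    if (b == "pre") return false;
    if (bNum) return true;                     // "2.3a" < "2.3.1"
    if (aNum) return false;
    return a < b;
}

static int cmp(const std::string & v1, const std::string & v2)
{
    auto p1 = v1.begin(), p2 = v2.begin();
    while (p1 != v1.end() || p2 != v2.end()) {
        std::string c1 = next(p1, v1.end()), c2 = next(p2, v2.end());
        if (lt(c1, c2)) return -1;
        if (lt(c2, c1)) return 1;
    }
    return 0;
}

int main() {
    assert(cmp("2.3", "2.3.1") < 0);
    assert(cmp("2.3a", "2.3.1") < 0);
    assert(cmp("1.0pre5", "1.0") < 0);
    assert(cmp("2.0.48", "2.0.48") == 0);
}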
32
third_party/nix/src/libexpr/names.hh
vendored
Normal file
@@ -0,0 +1,32 @@
#pragma once

#include <memory>

#include "types.hh"
#include <regex>

namespace nix {

struct DrvName
{
    string fullName;
    string name;
    string version;
    unsigned int hits;

    DrvName();
    DrvName(const string & s);
    bool matches(DrvName & n);

private:
    std::unique_ptr<std::regex> regex;
};

typedef list<DrvName> DrvNames;

string nextComponent(string::const_iterator & p,
    const string::const_iterator end);
int compareVersions(const string & v1, const string & v2);
DrvNames drvNamesFromArgs(const Strings & opArgs);

}
10
third_party/nix/src/libexpr/nix-expr.pc.in
vendored
Normal file
@@ -0,0 +1,10 @@
prefix=@prefix@
libdir=@libdir@
includedir=@includedir@

Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Requires: nix-store bdw-gc
Libs: -L${libdir} -lnixexpr
Cflags: -I${includedir}/nix -std=c++17
438
third_party/nix/src/libexpr/nixexpr.cc
vendored
Normal file
@@ -0,0 +1,438 @@
#include "nixexpr.hh"
|
||||
#include "derivations.hh"
|
||||
#include "util.hh"
|
||||
|
||||
#include <cstdlib>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/* Displaying abstract syntax trees. */
|
||||
|
||||
std::ostream & operator << (std::ostream & str, const Expr & e)
|
||||
{
|
||||
e.show(str);
|
||||
return str;
|
||||
}
|
||||
|
||||
static void showString(std::ostream & str, const string & s)
|
||||
{
|
||||
str << '"';
|
||||
for (auto c : (string) s)
|
||||
if (c == '"' || c == '\\' || c == '$') str << "\\" << c;
|
||||
else if (c == '\n') str << "\\n";
|
||||
else if (c == '\r') str << "\\r";
|
||||
else if (c == '\t') str << "\\t";
|
||||
else str << c;
|
||||
str << '"';
|
||||
}
|
||||
|
||||
static void showId(std::ostream & str, const string & s)
|
||||
{
|
||||
if (s.empty())
|
||||
str << "\"\"";
|
||||
else if (s == "if") // FIXME: handle other keywords
|
||||
str << '"' << s << '"';
|
||||
else {
|
||||
char c = s[0];
|
||||
if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')) {
|
||||
showString(str, s);
|
||||
return;
|
||||
}
|
||||
for (auto c : s)
|
||||
if (!((c >= 'a' && c <= 'z') ||
|
||||
(c >= 'A' && c <= 'Z') ||
|
||||
(c >= '0' && c <= '9') ||
|
||||
c == '_' || c == '\'' || c == '-')) {
|
||||
showString(str, s);
|
||||
return;
|
||||
}
|
||||
str << s;
|
||||
}
|
||||
}
|
||||
|
||||
std::ostream & operator << (std::ostream & str, const Symbol & sym)
|
||||
{
|
||||
showId(str, *sym.s);
|
||||
return str;
|
||||
}
|
||||
|
||||
void Expr::show(std::ostream & str) const
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
void ExprInt::show(std::ostream & str) const
|
||||
{
|
||||
str << n;
|
||||
}
|
||||
|
||||
void ExprFloat::show(std::ostream & str) const
|
||||
{
|
||||
str << nf;
|
||||
}
|
||||
|
||||
void ExprString::show(std::ostream & str) const
|
||||
{
|
||||
showString(str, s);
|
||||
}
|
||||
|
||||
void ExprPath::show(std::ostream & str) const
|
||||
{
|
||||
str << s;
|
||||
}
|
||||
|
||||
void ExprVar::show(std::ostream & str) const
|
||||
{
|
||||
str << name;
|
||||
}
|
||||
|
||||
void ExprSelect::show(std::ostream & str) const
|
||||
{
|
||||
str << "(" << *e << ")." << showAttrPath(attrPath);
|
||||
if (def) str << " or (" << *def << ")";
|
||||
}
|
||||
|
||||
void ExprOpHasAttr::show(std::ostream & str) const
|
||||
{
|
||||
str << "((" << *e << ") ? " << showAttrPath(attrPath) << ")";
|
||||
}
|
||||
|
||||
void ExprAttrs::show(std::ostream & str) const
|
||||
{
|
||||
if (recursive) str << "rec ";
|
||||
str << "{ ";
|
||||
for (auto & i : attrs)
|
||||
if (i.second.inherited)
|
||||
str << "inherit " << i.first << " " << "; ";
|
||||
else
|
||||
str << i.first << " = " << *i.second.e << "; ";
|
||||
for (auto & i : dynamicAttrs)
|
||||
str << "\"${" << *i.nameExpr << "}\" = " << *i.valueExpr << "; ";
|
||||
str << "}";
|
||||
}
|
||||
|
||||
void ExprList::show(std::ostream & str) const
|
||||
{
|
||||
str << "[ ";
|
||||
for (auto & i : elems)
|
||||
str << "(" << *i << ") ";
|
||||
str << "]";
|
||||
}
|
||||
|
||||
void ExprLambda::show(std::ostream & str) const
|
||||
{
|
||||
str << "(";
|
||||
if (matchAttrs) {
|
||||
str << "{ ";
|
||||
bool first = true;
|
||||
for (auto & i : formals->formals) {
|
||||
if (first) first = false; else str << ", ";
|
||||
str << i.name;
|
||||
if (i.def) str << " ? " << *i.def;
|
||||
}
|
||||
if (formals->ellipsis) {
|
||||
if (!first) str << ", ";
|
||||
str << "...";
|
||||
}
|
||||
str << " }";
|
||||
if (!arg.empty()) str << " @ ";
|
||||
}
|
||||
if (!arg.empty()) str << arg;
|
||||
str << ": " << *body << ")";
|
||||
}
|
||||
|
||||
void ExprLet::show(std::ostream & str) const
|
||||
{
|
||||
str << "(let ";
|
||||
for (auto & i : attrs->attrs)
|
||||
if (i.second.inherited) {
|
||||
str << "inherit " << i.first << "; ";
|
||||
}
|
||||
else
|
||||
str << i.first << " = " << *i.second.e << "; ";
|
||||
str << "in " << *body << ")";
|
||||
}
|
||||
|
||||
void ExprWith::show(std::ostream & str) const
|
||||
{
|
||||
str << "(with " << *attrs << "; " << *body << ")";
|
||||
}
|
||||
|
||||
void ExprIf::show(std::ostream & str) const
|
||||
{
|
||||
str << "(if " << *cond << " then " << *then << " else " << *else_ << ")";
|
||||
}
|
||||
|
||||
void ExprAssert::show(std::ostream & str) const
|
||||
{
|
||||
str << "assert " << *cond << "; " << *body;
|
||||
}
|
||||
|
||||
void ExprOpNot::show(std::ostream & str) const
|
||||
{
|
||||
str << "(! " << *e << ")";
|
||||
}
|
||||
|
||||
void ExprConcatStrings::show(std::ostream & str) const
|
||||
{
|
||||
bool first = true;
|
||||
str << "(";
|
||||
for (auto & i : *es) {
|
||||
if (first) first = false; else str << " + ";
|
||||
str << *i;
|
||||
}
|
||||
str << ")";
|
||||
}
|
||||
|
||||
void ExprPos::show(std::ostream & str) const
|
||||
{
|
||||
str << "__curPos";
|
||||
}
|
||||
|
||||
|
||||
std::ostream & operator << (std::ostream & str, const Pos & pos)
|
||||
{
|
||||
if (!pos)
|
||||
str << "undefined position";
|
||||
else
|
||||
str << (format(ANSI_BOLD "%1%" ANSI_NORMAL ":%2%:%3%") % (string) pos.file % pos.line % pos.column).str();
|
||||
return str;
|
||||
}
|
||||
|
||||
|
||||
string showAttrPath(const AttrPath & attrPath)
|
||||
{
|
||||
std::ostringstream out;
|
||||
bool first = true;
|
||||
for (auto & i : attrPath) {
|
||||
if (!first) out << '.'; else first = false;
|
||||
if (i.symbol.set())
|
||||
out << i.symbol;
|
||||
else
|
||||
out << "\"${" << *i.expr << "}\"";
|
||||
}
|
||||
return out.str();
|
||||
}
|
||||
|
||||
|
||||
Pos noPos;
|
||||
|
||||
|
||||
/* Computing levels/displacements for variables. */
|
||||
|
||||
void Expr::bindVars(const StaticEnv & env)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
void ExprInt::bindVars(const StaticEnv & env)
|
||||
{
|
||||
}
|
||||
|
||||
void ExprFloat::bindVars(const StaticEnv & env)
|
||||
{
|
||||
}
|
||||
|
||||
void ExprString::bindVars(const StaticEnv & env)
|
||||
{
|
||||
}
|
||||
|
||||
void ExprPath::bindVars(const StaticEnv & env)
|
||||
{
|
||||
}
|
||||
|
||||
void ExprVar::bindVars(const StaticEnv & env)
|
||||
{
|
||||
/* Check whether the variable appears in the environment. If so,
|
||||
set its level and displacement. */
|
||||
const StaticEnv * curEnv;
|
||||
unsigned int level;
|
||||
int withLevel = -1;
|
||||
for (curEnv = &env, level = 0; curEnv; curEnv = curEnv->up, level++) {
|
||||
if (curEnv->isWith) {
|
||||
if (withLevel == -1) withLevel = level;
|
||||
} else {
|
||||
StaticEnv::Vars::const_iterator i = curEnv->vars.find(name);
|
||||
if (i != curEnv->vars.end()) {
|
||||
fromWith = false;
|
||||
this->level = level;
|
||||
displ = i->second;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Otherwise, the variable must be obtained from the nearest
|
||||
enclosing `with'. If there is no `with', then we can issue an
|
||||
"undefined variable" error now. */
|
||||
if (withLevel == -1) throw UndefinedVarError(format("undefined variable '%1%' at %2%") % name % pos);
|
||||
|
||||
fromWith = true;
|
||||
this->level = withLevel;
|
||||
}
|
||||
|
||||
void ExprSelect::bindVars(const StaticEnv & env)
|
||||
{
|
||||
e->bindVars(env);
|
||||
if (def) def->bindVars(env);
|
||||
for (auto & i : attrPath)
|
||||
if (!i.symbol.set())
|
||||
i.expr->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprOpHasAttr::bindVars(const StaticEnv & env)
|
||||
{
|
||||
e->bindVars(env);
|
||||
for (auto & i : attrPath)
|
||||
if (!i.symbol.set())
|
||||
i.expr->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprAttrs::bindVars(const StaticEnv & env)
|
||||
{
|
||||
const StaticEnv * dynamicEnv = &env;
|
||||
StaticEnv newEnv(false, &env);
|
||||
|
||||
if (recursive) {
|
||||
dynamicEnv = &newEnv;
|
||||
|
||||
unsigned int displ = 0;
|
||||
for (auto & i : attrs)
|
||||
newEnv.vars[i.first] = i.second.displ = displ++;
|
||||
|
||||
for (auto & i : attrs)
|
||||
i.second.e->bindVars(i.second.inherited ? env : newEnv);
|
||||
}
|
||||
|
||||
else
|
||||
for (auto & i : attrs)
|
||||
i.second.e->bindVars(env);
|
||||
|
||||
for (auto & i : dynamicAttrs) {
|
||||
i.nameExpr->bindVars(*dynamicEnv);
|
||||
i.valueExpr->bindVars(*dynamicEnv);
|
||||
}
|
||||
}
|
||||
|
||||
void ExprList::bindVars(const StaticEnv & env)
|
||||
{
|
||||
for (auto & i : elems)
|
||||
i->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprLambda::bindVars(const StaticEnv & env)
|
||||
{
|
||||
StaticEnv newEnv(false, &env);
|
||||
|
||||
unsigned int displ = 0;
|
||||
|
||||
if (!arg.empty()) newEnv.vars[arg] = displ++;
|
||||
|
||||
if (matchAttrs) {
|
||||
for (auto & i : formals->formals)
|
||||
newEnv.vars[i.name] = displ++;
|
||||
|
||||
for (auto & i : formals->formals)
|
||||
if (i.def) i.def->bindVars(newEnv);
|
||||
}
|
||||
|
||||
body->bindVars(newEnv);
|
||||
}
|
||||
|
||||
void ExprLet::bindVars(const StaticEnv & env)
|
||||
{
|
||||
StaticEnv newEnv(false, &env);
|
||||
|
||||
unsigned int displ = 0;
|
||||
for (auto & i : attrs->attrs)
|
||||
newEnv.vars[i.first] = i.second.displ = displ++;
|
||||
|
||||
for (auto & i : attrs->attrs)
|
||||
i.second.e->bindVars(i.second.inherited ? env : newEnv);
|
||||
|
||||
body->bindVars(newEnv);
|
||||
}
|
||||
|
||||
void ExprWith::bindVars(const StaticEnv & env)
|
||||
{
|
||||
/* Does this `with' have an enclosing `with'? If so, record its
|
||||
level so that `lookupVar' can look up variables in the previous
|
||||
`with' if this one doesn't contain the desired attribute. */
|
||||
const StaticEnv * curEnv;
|
||||
unsigned int level;
|
||||
prevWith = 0;
|
||||
for (curEnv = &env, level = 1; curEnv; curEnv = curEnv->up, level++)
|
||||
if (curEnv->isWith) {
|
||||
prevWith = level;
|
||||
break;
|
||||
}
|
||||
|
||||
attrs->bindVars(env);
|
||||
StaticEnv newEnv(true, &env);
|
||||
body->bindVars(newEnv);
|
||||
}
|
||||
|
||||
void ExprIf::bindVars(const StaticEnv & env)
|
||||
{
|
||||
cond->bindVars(env);
|
||||
then->bindVars(env);
|
||||
else_->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprAssert::bindVars(const StaticEnv & env)
|
||||
{
|
||||
cond->bindVars(env);
|
||||
body->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprOpNot::bindVars(const StaticEnv & env)
|
||||
{
|
||||
e->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprConcatStrings::bindVars(const StaticEnv & env)
|
||||
{
|
||||
for (auto & i : *es)
|
||||
i->bindVars(env);
|
||||
}
|
||||
|
||||
void ExprPos::bindVars(const StaticEnv & env)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
/* Storing function names. */
|
||||
|
||||
void Expr::setName(Symbol & name)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
void ExprLambda::setName(Symbol & name)
|
||||
{
|
||||
this->name = name;
|
||||
body->setName(name);
|
||||
}
|
||||
|
||||
|
||||
string ExprLambda::showNamePos() const
|
||||
{
|
||||
return (format("%1% at %2%") % (name.set() ? "'" + (string) name + "'" : "anonymous function") % pos).str();
|
||||
}
|
||||
|
||||
|
||||
|
||||
/* Symbol table. */
|
||||
|
||||
size_t SymbolTable::totalSize() const
|
||||
{
|
||||
size_t n = 0;
|
||||
for (auto & i : symbols)
|
||||
n += i.size();
|
||||
return n;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
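A note on the printing side: showString above is the inverse direction of the lexer's unescapeStr. When pretty-printing an AST back to Nix syntax, the quote, backslash and dollar characters must be escaped, the dollar so that "${" is not re-read as an interpolation. A tiny standalone sketch of that quoting rule, with a hypothetical name for illustration:

#include <cassert>
#include <string>

// Escape a literal for re-printing inside a Nix double-quoted string.
// '$' is escaped so "${" cannot be re-parsed as an interpolation.
static std::string quoteNixString(const std::string & s) {
    std::string out = "\"";
    for (char c : s) {
        if (c == '"' || c == '\\' || c == '$') { out += '\\'; out += c; }
        else if (c == '\n') out += "\\n";
        else if (c == '\r') out += "\\r";
        else if (c == '\t') out += "\\t";
        else out += c;
    }
    return out + "\"";
}

int main() {
    assert(quoteNixString("a${b}\n") == "\"a\\${b}\\n\"");
}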
342
third_party/nix/src/libexpr/nixexpr.hh
vendored
Normal file
@@ -0,0 +1,342 @@
#pragma once

#include "value.hh"
#include "symbol-table.hh"

#include <map>


namespace nix {


MakeError(EvalError, Error)
MakeError(ParseError, Error)
MakeError(AssertionError, EvalError)
MakeError(ThrownError, AssertionError)
MakeError(Abort, EvalError)
MakeError(TypeError, EvalError)
MakeError(UndefinedVarError, Error)
MakeError(RestrictedPathError, Error)


/* Position objects. */

struct Pos
{
    Symbol file;
    unsigned int line, column;
    Pos() : line(0), column(0) { };
    Pos(const Symbol & file, unsigned int line, unsigned int column)
        : file(file), line(line), column(column) { };
    operator bool() const
    {
        return line != 0;
    }
    bool operator < (const Pos & p2) const
    {
        if (!line) return p2.line;
        if (!p2.line) return false;
        int d = ((string) file).compare((string) p2.file);
        if (d < 0) return true;
        if (d > 0) return false;
        if (line < p2.line) return true;
        if (line > p2.line) return false;
        return column < p2.column;
    }
};

extern Pos noPos;

std::ostream & operator << (std::ostream & str, const Pos & pos);


struct Env;
struct Value;
class EvalState;
struct StaticEnv;


/* An attribute path is a sequence of attribute names. */
struct AttrName
{
    Symbol symbol;
    Expr * expr;
    AttrName(const Symbol & s) : symbol(s) {};
    AttrName(Expr * e) : expr(e) {};
};

typedef std::vector<AttrName> AttrPath;

string showAttrPath(const AttrPath & attrPath);


/* Abstract syntax of Nix expressions. */

struct Expr
{
    virtual ~Expr() { };
    virtual void show(std::ostream & str) const;
    virtual void bindVars(const StaticEnv & env);
    virtual void eval(EvalState & state, Env & env, Value & v);
    virtual Value * maybeThunk(EvalState & state, Env & env);
    virtual void setName(Symbol & name);
};

std::ostream & operator << (std::ostream & str, const Expr & e);

#define COMMON_METHODS \
    void show(std::ostream & str) const; \
    void eval(EvalState & state, Env & env, Value & v); \
    void bindVars(const StaticEnv & env);

struct ExprInt : Expr
{
    NixInt n;
    Value v;
    ExprInt(NixInt n) : n(n) { mkInt(v, n); };
    COMMON_METHODS
    Value * maybeThunk(EvalState & state, Env & env);
};

struct ExprFloat : Expr
{
    NixFloat nf;
    Value v;
    ExprFloat(NixFloat nf) : nf(nf) { mkFloat(v, nf); };
    COMMON_METHODS
    Value * maybeThunk(EvalState & state, Env & env);
};

struct ExprString : Expr
{
    Symbol s;
    Value v;
    ExprString(const Symbol & s) : s(s) { mkString(v, s); };
    COMMON_METHODS
    Value * maybeThunk(EvalState & state, Env & env);
};

/* Temporary class used during parsing of indented strings. */
struct ExprIndStr : Expr
{
    string s;
    ExprIndStr(const string & s) : s(s) { };
};

struct ExprPath : Expr
{
    string s;
    Value v;
    ExprPath(const string & s) : s(s) { mkPathNoCopy(v, this->s.c_str()); };
    COMMON_METHODS
    Value * maybeThunk(EvalState & state, Env & env);
};

struct ExprVar : Expr
{
    Pos pos;
    Symbol name;

    /* Whether the variable comes from an environment (e.g. a rec, let
       or function argument) or from a "with". */
    bool fromWith;

    /* In the former case, the value is obtained by going `level'
       levels up from the current environment and getting the
       `displ'th value in that environment.  In the latter case, the
       value is obtained by getting the attribute named `name' from
       the set stored in the environment that is `level' levels up
       from the current one.*/
    unsigned int level;
    unsigned int displ;

    ExprVar(const Symbol & name) : name(name) { };
    ExprVar(const Pos & pos, const Symbol & name) : pos(pos), name(name) { };
    COMMON_METHODS
    Value * maybeThunk(EvalState & state, Env & env);
};

struct ExprSelect : Expr
{
    Pos pos;
    Expr * e, * def;
    AttrPath attrPath;
    ExprSelect(const Pos & pos, Expr * e, const AttrPath & attrPath, Expr * def) : pos(pos), e(e), def(def), attrPath(attrPath) { };
    ExprSelect(const Pos & pos, Expr * e, const Symbol & name) : pos(pos), e(e), def(0) { attrPath.push_back(AttrName(name)); };
    COMMON_METHODS
};

struct ExprOpHasAttr : Expr
{
    Expr * e;
    AttrPath attrPath;
    ExprOpHasAttr(Expr * e, const AttrPath & attrPath) : e(e), attrPath(attrPath) { };
    COMMON_METHODS
};

struct ExprAttrs : Expr
{
    bool recursive;
    struct AttrDef {
        bool inherited;
        Expr * e;
        Pos pos;
        unsigned int displ; // displacement
        AttrDef(Expr * e, const Pos & pos, bool inherited=false)
            : inherited(inherited), e(e), pos(pos) { };
        AttrDef() { };
    };
    typedef std::map<Symbol, AttrDef> AttrDefs;
    AttrDefs attrs;
    struct DynamicAttrDef {
        Expr * nameExpr, * valueExpr;
        Pos pos;
        DynamicAttrDef(Expr * nameExpr, Expr * valueExpr, const Pos & pos)
            : nameExpr(nameExpr), valueExpr(valueExpr), pos(pos) { };
    };
    typedef std::vector<DynamicAttrDef> DynamicAttrDefs;
    DynamicAttrDefs dynamicAttrs;
    ExprAttrs() : recursive(false) { };
    COMMON_METHODS
};

struct ExprList : Expr
{
    std::vector<Expr *> elems;
    ExprList() { };
    COMMON_METHODS
};

struct Formal
{
    Symbol name;
    Expr * def;
    Formal(const Symbol & name, Expr * def) : name(name), def(def) { };
};

struct Formals
{
    typedef std::list<Formal> Formals_;
    Formals_ formals;
    std::set<Symbol> argNames; // used during parsing
    bool ellipsis;
};

struct ExprLambda : Expr
{
    Pos pos;
    Symbol name;
    Symbol arg;
    bool matchAttrs;
    Formals * formals;
    Expr * body;
    ExprLambda(const Pos & pos, const Symbol & arg, bool matchAttrs, Formals * formals, Expr * body)
        : pos(pos), arg(arg), matchAttrs(matchAttrs), formals(formals), body(body)
    {
        if (!arg.empty() && formals && formals->argNames.find(arg) != formals->argNames.end())
            throw ParseError(format("duplicate formal function argument '%1%' at %2%")
                % arg % pos);
    };
    void setName(Symbol & name);
    string showNamePos() const;
    COMMON_METHODS
};

struct ExprLet : Expr
{
    ExprAttrs * attrs;
    Expr * body;
    ExprLet(ExprAttrs * attrs, Expr * body) : attrs(attrs), body(body) { };
    COMMON_METHODS
};

struct ExprWith : Expr
{
    Pos pos;
    Expr * attrs, * body;
    size_t prevWith;
    ExprWith(const Pos & pos, Expr * attrs, Expr * body) : pos(pos), attrs(attrs), body(body) { };
    COMMON_METHODS
};

struct ExprIf : Expr
{
    Expr * cond, * then, * else_;
    ExprIf(Expr * cond, Expr * then, Expr * else_) : cond(cond), then(then), else_(else_) { };
    COMMON_METHODS
};

struct ExprAssert : Expr
{
    Pos pos;
    Expr * cond, * body;
    ExprAssert(const Pos & pos, Expr * cond, Expr * body) : pos(pos), cond(cond), body(body) { };
    COMMON_METHODS
};

struct ExprOpNot : Expr
{
    Expr * e;
    ExprOpNot(Expr * e) : e(e) { };
    COMMON_METHODS
};

#define MakeBinOp(name, s) \
    struct name : Expr \
    { \
        Pos pos; \
        Expr * e1, * e2; \
        name(Expr * e1, Expr * e2) : e1(e1), e2(e2) { }; \
        name(const Pos & pos, Expr * e1, Expr * e2) : pos(pos), e1(e1), e2(e2) { }; \
        void show(std::ostream & str) const \
        { \
            str << "(" << *e1 << " " s " " << *e2 << ")"; \
        } \
        void bindVars(const StaticEnv & env) \
        { \
            e1->bindVars(env); e2->bindVars(env); \
        } \
        void eval(EvalState & state, Env & env, Value & v); \
    };

MakeBinOp(ExprApp, "")
MakeBinOp(ExprOpEq, "==")
MakeBinOp(ExprOpNEq, "!=")
MakeBinOp(ExprOpAnd, "&&")
MakeBinOp(ExprOpOr, "||")
MakeBinOp(ExprOpImpl, "->")
MakeBinOp(ExprOpUpdate, "//")
MakeBinOp(ExprOpConcatLists, "++")

struct ExprConcatStrings : Expr
{
    Pos pos;
    bool forceString;
    vector<Expr *> * es;
    ExprConcatStrings(const Pos & pos, bool forceString, vector<Expr *> * es)
        : pos(pos), forceString(forceString), es(es) { };
    COMMON_METHODS
};

struct ExprPos : Expr
{
    Pos pos;
    ExprPos(const Pos & pos) : pos(pos) { };
    COMMON_METHODS
};


/* Static environments are used to map variable names onto (level,
   displacement) pairs used to obtain the value of the variable at
   runtime. */
struct StaticEnv
{
    bool isWith;
    const StaticEnv * up;
    typedef std::map<Symbol, unsigned int> Vars;
    Vars vars;
    StaticEnv(bool isWith, const StaticEnv * up) : isWith(isWith), up(up) { };
};


}
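The StaticEnv chain above, together with ExprVar's level/displ fields, implements classic two-coordinate variable addressing: bindVars resolves each name at parse time to a (level, displacement) pair, and evaluation then just walks `level` parent links and indexes slot `displ`, with no name lookup at runtime. A self-contained sketch of that addressing scheme, assuming simplified scope types and hypothetical names (illustration only):

#include <cassert>
#include <map>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

// Compile-time environment: names to slot indices, one map per scope.
struct StaticScope {
    const StaticScope * up;
    std::map<std::string, unsigned int> vars;
};

// Runtime environment: just slots, one frame per scope.
struct RuntimeScope {
    const RuntimeScope * up;
    std::vector<int> slots;
};

// Resolve a name once, at "parse time": how many frames up, which slot.
static std::pair<unsigned int, unsigned int>
resolve(const StaticScope * env, const std::string & name) {
    for (unsigned int level = 0; env; env = env->up, level++) {
        auto i = env->vars.find(name);
        if (i != env->vars.end()) return {level, i->second};
    }
    throw std::runtime_error("undefined variable");
}

// Fetch at "run time": no string comparison, just pointer chasing.
static int fetch(const RuntimeScope * env, unsigned int level, unsigned int displ) {
    while (level--) env = env->up;
    return env->slots[displ];
}

int main() {
    StaticScope outer{nullptr, {{"x", 0}}};
    StaticScope inner{&outer, {{"y", 0}, {"z", 1}}};
    RuntimeScope outerR{nullptr, {10}};
    RuntimeScope innerR{&outerR, {20, 30}};

    auto xc = resolve(&inner, "x");   // found one level up
    assert(xc.first == 1 && xc.second == 0);
    assert(fetch(&innerR, xc.first, xc.second) == 10);
    auto zc = resolve(&inner, "z");
    assert(fetch(&innerR, zc.first, zc.second) == 30);
}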
704
third_party/nix/src/libexpr/parser.y
vendored
Normal file
@@ -0,0 +1,704 @@
%glr-parser
|
||||
%pure-parser
|
||||
%locations
|
||||
%define parse.error verbose
|
||||
%defines
|
||||
/* %no-lines */
|
||||
%parse-param { void * scanner }
|
||||
%parse-param { nix::ParseData * data }
|
||||
%lex-param { void * scanner }
|
||||
%lex-param { nix::ParseData * data }
|
||||
%expect 1
|
||||
%expect-rr 1
|
||||
|
||||
%code requires {
|
||||
|
||||
#ifndef BISON_HEADER
|
||||
#define BISON_HEADER
|
||||
|
||||
#include "util.hh"
|
||||
|
||||
#include "nixexpr.hh"
|
||||
#include "eval.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct ParseData
|
||||
{
|
||||
EvalState & state;
|
||||
SymbolTable & symbols;
|
||||
Expr * result;
|
||||
Path basePath;
|
||||
Symbol path;
|
||||
string error;
|
||||
Symbol sLetBody;
|
||||
ParseData(EvalState & state)
|
||||
: state(state)
|
||||
, symbols(state.symbols)
|
||||
, sLetBody(symbols.create("<let-body>"))
|
||||
{ };
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#define YY_DECL int yylex \
|
||||
(YYSTYPE * yylval_param, YYLTYPE * yylloc_param, yyscan_t yyscanner, nix::ParseData * data)
|
||||
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
%{
|
||||
|
||||
#include "parser-tab.hh"
|
||||
#include "lexer-tab.hh"
|
||||
|
||||
YY_DECL;
|
||||
|
||||
using namespace nix;
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static void dupAttr(const AttrPath & attrPath, const Pos & pos, const Pos & prevPos)
|
||||
{
|
||||
throw ParseError(format("attribute '%1%' at %2% already defined at %3%")
|
||||
% showAttrPath(attrPath) % pos % prevPos);
|
||||
}
|
||||
|
||||
|
||||
static void dupAttr(Symbol attr, const Pos & pos, const Pos & prevPos)
|
||||
{
|
||||
throw ParseError(format("attribute '%1%' at %2% already defined at %3%")
|
||||
% attr % pos % prevPos);
|
||||
}
|
||||
|
||||
|
||||
static void addAttr(ExprAttrs * attrs, AttrPath & attrPath,
|
||||
Expr * e, const Pos & pos)
|
||||
{
|
||||
AttrPath::iterator i;
|
||||
// All attrpaths have at least one attr
|
||||
assert(!attrPath.empty());
|
||||
// Checking attrPath validity.
|
||||
// ===========================
|
||||
for (i = attrPath.begin(); i + 1 < attrPath.end(); i++) {
|
||||
if (i->symbol.set()) {
|
||||
ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(i->symbol);
|
||||
if (j != attrs->attrs.end()) {
|
||||
if (!j->second.inherited) {
|
||||
ExprAttrs * attrs2 = dynamic_cast<ExprAttrs *>(j->second.e);
|
||||
if (!attrs2) dupAttr(attrPath, pos, j->second.pos);
|
||||
attrs = attrs2;
|
||||
} else
|
||||
dupAttr(attrPath, pos, j->second.pos);
|
||||
} else {
|
||||
ExprAttrs * nested = new ExprAttrs;
|
||||
attrs->attrs[i->symbol] = ExprAttrs::AttrDef(nested, pos);
|
||||
attrs = nested;
|
||||
}
|
||||
} else {
|
||||
ExprAttrs *nested = new ExprAttrs;
|
||||
attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, nested, pos));
|
||||
attrs = nested;
|
||||
}
|
||||
}
|
||||
// Expr insertion.
|
||||
// ==========================
|
||||
if (i->symbol.set()) {
|
||||
ExprAttrs::AttrDefs::iterator j = attrs->attrs.find(i->symbol);
|
||||
if (j != attrs->attrs.end()) {
|
||||
// This attr path is already defined. However, if both
|
||||
// e and the expr pointed by the attr path are two attribute sets,
|
||||
// we want to merge them.
|
||||
// Otherwise, throw an error.
|
||||
auto ae = dynamic_cast<ExprAttrs *>(e);
|
||||
auto jAttrs = dynamic_cast<ExprAttrs *>(j->second.e);
|
||||
if (jAttrs && ae) {
|
||||
for (auto & ad : ae->attrs) {
|
||||
auto j2 = jAttrs->attrs.find(ad.first);
|
||||
if (j2 != jAttrs->attrs.end()) // Attr already defined in iAttrs, error.
|
||||
dupAttr(ad.first, j2->second.pos, ad.second.pos);
|
||||
jAttrs->attrs[ad.first] = ad.second;
|
||||
}
|
||||
} else {
|
||||
dupAttr(attrPath, pos, j->second.pos);
|
||||
}
|
||||
} else {
|
||||
// This attr path is not defined. Let's create it.
|
||||
attrs->attrs[i->symbol] = ExprAttrs::AttrDef(e, pos);
|
||||
e->setName(i->symbol);
|
||||
}
|
||||
} else {
|
||||
attrs->dynamicAttrs.push_back(ExprAttrs::DynamicAttrDef(i->expr, e, pos));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void addFormal(const Pos & pos, Formals * formals, const Formal & formal)
|
||||
{
|
||||
if (formals->argNames.find(formal.name) != formals->argNames.end())
|
||||
throw ParseError(format("duplicate formal function argument '%1%' at %2%")
|
||||
% formal.name % pos);
|
||||
formals->formals.push_front(formal);
|
||||
formals->argNames.insert(formal.name);
|
||||
}
|
||||
|
||||
|
||||
static Expr * stripIndentation(const Pos & pos, SymbolTable & symbols, vector<Expr *> & es)
|
||||
{
|
||||
if (es.empty()) return new ExprString(symbols.create(""));
|
||||
|
||||
/* Figure out the minimum indentation. Note that by design
|
||||
whitespace-only final lines are not taken into account. (So
|
||||
the " " in "\n ''" is ignored, but the " " in "\n foo''" is.) */
|
||||
bool atStartOfLine = true; /* = seen only whitespace in the current line */
|
||||
size_t minIndent = 1000000;
|
||||
size_t curIndent = 0;
|
||||
for (auto & i : es) {
|
||||
ExprIndStr * e = dynamic_cast<ExprIndStr *>(i);
|
||||
if (!e) {
|
||||
/* Anti-quotations end the current start-of-line whitespace. */
|
||||
if (atStartOfLine) {
|
||||
atStartOfLine = false;
|
||||
if (curIndent < minIndent) minIndent = curIndent;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
for (size_t j = 0; j < e->s.size(); ++j) {
|
||||
if (atStartOfLine) {
|
||||
if (e->s[j] == ' ')
|
||||
curIndent++;
|
||||
else if (e->s[j] == '\n') {
|
||||
/* Empty line, doesn't influence minimum
|
||||
indentation. */
|
||||
curIndent = 0;
|
||||
} else {
|
||||
atStartOfLine = false;
|
||||
if (curIndent < minIndent) minIndent = curIndent;
|
||||
}
|
||||
} else if (e->s[j] == '\n') {
|
||||
atStartOfLine = true;
|
||||
curIndent = 0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Strip spaces from each line. */
|
||||
vector<Expr *> * es2 = new vector<Expr *>;
|
||||
atStartOfLine = true;
|
||||
size_t curDropped = 0;
|
||||
size_t n = es.size();
|
||||
for (vector<Expr *>::iterator i = es.begin(); i != es.end(); ++i, --n) {
|
||||
ExprIndStr * e = dynamic_cast<ExprIndStr *>(*i);
|
||||
if (!e) {
|
||||
atStartOfLine = false;
|
||||
curDropped = 0;
|
||||
es2->push_back(*i);
|
||||
continue;
|
||||
}
|
||||
|
||||
string s2;
|
||||
for (size_t j = 0; j < e->s.size(); ++j) {
|
||||
if (atStartOfLine) {
|
||||
if (e->s[j] == ' ') {
|
||||
if (curDropped++ >= minIndent)
|
||||
s2 += e->s[j];
|
||||
}
|
||||
else if (e->s[j] == '\n') {
|
||||
curDropped = 0;
|
||||
s2 += e->s[j];
|
||||
} else {
|
||||
atStartOfLine = false;
|
||||
curDropped = 0;
|
||||
s2 += e->s[j];
|
||||
}
|
||||
} else {
|
||||
s2 += e->s[j];
|
||||
if (e->s[j] == '\n') atStartOfLine = true;
|
||||
}
|
||||
}
|
||||
|
||||
/* Remove the last line if it is empty and consists only of
|
||||
spaces. */
|
||||
if (n == 1) {
|
||||
string::size_type p = s2.find_last_of('\n');
|
||||
if (p != string::npos && s2.find_first_not_of(' ', p + 1) == string::npos)
|
||||
s2 = string(s2, 0, p + 1);
|
||||
}
|
||||
|
||||
es2->push_back(new ExprString(symbols.create(s2)));
|
||||
}
|
||||
|
||||
/* If this is a single string, then don't do a concatenation. */
|
||||
return es2->size() == 1 && dynamic_cast<ExprString *>((*es2)[0]) ? (*es2)[0] : new ExprConcatStrings(pos, true, es2);
|
||||
}
|
||||
|
||||
|
||||
static inline Pos makeCurPos(const YYLTYPE & loc, ParseData * data)
|
||||
{
|
||||
return Pos(data->path, loc.first_line, loc.first_column);
|
||||
}
|
||||
|
||||
#define CUR_POS makeCurPos(*yylocp, data)
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
void yyerror(YYLTYPE * loc, yyscan_t scanner, ParseData * data, const char * error)
|
||||
{
|
||||
data->error = (format("%1%, at %2%")
|
||||
% error % makeCurPos(*loc, data)).str();
|
||||
}
|
||||
|
||||
|
||||
%}
|
||||
|
||||
%union {
|
||||
// !!! We're probably leaking stuff here.
|
||||
nix::Expr * e;
|
||||
nix::ExprList * list;
|
||||
nix::ExprAttrs * attrs;
|
||||
nix::Formals * formals;
|
||||
nix::Formal * formal;
|
||||
nix::NixInt n;
|
||||
nix::NixFloat nf;
|
||||
const char * id; // !!! -> Symbol
|
||||
char * path;
|
||||
char * uri;
|
||||
std::vector<nix::AttrName> * attrNames;
|
||||
std::vector<nix::Expr *> * string_parts;
|
||||
}
|
||||
|
||||
%type <e> start expr expr_function expr_if expr_op
|
||||
%type <e> expr_app expr_select expr_simple
|
||||
%type <list> expr_list
|
||||
%type <attrs> binds
|
||||
%type <formals> formals
|
||||
%type <formal> formal
|
||||
%type <attrNames> attrs attrpath
|
||||
%type <string_parts> string_parts_interpolated ind_string_parts
|
||||
%type <e> string_parts string_attr
|
||||
%type <id> attr
|
||||
%token <id> ID ATTRPATH
|
||||
%token <e> STR IND_STR
|
||||
%token <n> INT
|
||||
%token <nf> FLOAT
|
||||
%token <path> PATH HPATH SPATH
|
||||
%token <uri> URI
|
||||
%token IF THEN ELSE ASSERT WITH LET IN REC INHERIT EQ NEQ AND OR IMPL OR_KW
|
||||
%token DOLLAR_CURLY /* == ${ */
|
||||
%token IND_STRING_OPEN IND_STRING_CLOSE
|
||||
%token ELLIPSIS
|
||||
|
||||
%right IMPL
|
||||
%left OR
|
||||
%left AND
|
||||
%nonassoc EQ NEQ
|
||||
%nonassoc '<' '>' LEQ GEQ
|
||||
%right UPDATE
|
||||
%left NOT
|
||||
%left '+' '-'
|
||||
%left '*' '/'
|
||||
%right CONCAT
|
||||
%nonassoc '?'
|
||||
%nonassoc NEGATE
|
||||
|
||||
%%
|
||||
|
||||
start: expr { data->result = $1; };
|
||||
|
||||
expr: expr_function;
|
||||
|
||||
expr_function
|
||||
: ID ':' expr_function
|
||||
{ $$ = new ExprLambda(CUR_POS, data->symbols.create($1), false, 0, $3); }
|
||||
| '{' formals '}' ':' expr_function
|
||||
{ $$ = new ExprLambda(CUR_POS, data->symbols.create(""), true, $2, $5); }
|
||||
| '{' formals '}' '@' ID ':' expr_function
|
||||
{ $$ = new ExprLambda(CUR_POS, data->symbols.create($5), true, $2, $7); }
|
||||
| ID '@' '{' formals '}' ':' expr_function
|
||||
{ $$ = new ExprLambda(CUR_POS, data->symbols.create($1), true, $4, $7); }
|
||||
| ASSERT expr ';' expr_function
|
||||
{ $$ = new ExprAssert(CUR_POS, $2, $4); }
|
||||
| WITH expr ';' expr_function
|
||||
{ $$ = new ExprWith(CUR_POS, $2, $4); }
|
||||
| LET binds IN expr_function
|
||||
{ if (!$2->dynamicAttrs.empty())
|
||||
throw ParseError(format("dynamic attributes not allowed in let at %1%")
|
||||
% CUR_POS);
|
||||
$$ = new ExprLet($2, $4);
|
||||
}
|
||||
| expr_if
|
||||
;
|
||||
|
||||
expr_if
|
||||
: IF expr THEN expr ELSE expr { $$ = new ExprIf($2, $4, $6); }
|
||||
| expr_op
|
||||
;
|
||||
|
||||
expr_op
|
||||
: '!' expr_op %prec NOT { $$ = new ExprOpNot($2); }
|
||||
| '-' expr_op %prec NEGATE { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), new ExprInt(0)), $2); }
|
||||
| expr_op EQ expr_op { $$ = new ExprOpEq($1, $3); }
|
||||
| expr_op NEQ expr_op { $$ = new ExprOpNEq($1, $3); }
|
||||
| expr_op '<' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3); }
|
||||
| expr_op LEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1)); }
|
||||
| expr_op '>' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $3), $1); }
|
||||
| expr_op GEQ expr_op { $$ = new ExprOpNot(new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__lessThan")), $1), $3)); }
|
||||
| expr_op AND expr_op { $$ = new ExprOpAnd(CUR_POS, $1, $3); }
|
||||
| expr_op OR expr_op { $$ = new ExprOpOr(CUR_POS, $1, $3); }
|
||||
| expr_op IMPL expr_op { $$ = new ExprOpImpl(CUR_POS, $1, $3); }
|
||||
| expr_op UPDATE expr_op { $$ = new ExprOpUpdate(CUR_POS, $1, $3); }
|
||||
| expr_op '?' attrpath { $$ = new ExprOpHasAttr($1, *$3); }
|
||||
| expr_op '+' expr_op
|
||||
{ $$ = new ExprConcatStrings(CUR_POS, false, new vector<Expr *>({$1, $3})); }
|
||||
| expr_op '-' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__sub")), $1), $3); }
|
||||
| expr_op '*' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__mul")), $1), $3); }
|
||||
| expr_op '/' expr_op { $$ = new ExprApp(CUR_POS, new ExprApp(new ExprVar(data->symbols.create("__div")), $1), $3); }
|
||||
| expr_op CONCAT expr_op { $$ = new ExprOpConcatLists(CUR_POS, $1, $3); }
|
||||
| expr_app
|
||||
;
|
||||
|
||||
expr_app
|
||||
: expr_app expr_select
|
||||
{ $$ = new ExprApp(CUR_POS, $1, $2); }
|
||||
| expr_select { $$ = $1; }
|
||||
;
|
||||
|
||||
expr_select
|
||||
: expr_simple '.' attrpath
|
||||
{ $$ = new ExprSelect(CUR_POS, $1, *$3, 0); }
|
||||
| expr_simple '.' attrpath OR_KW expr_select
|
||||
{ $$ = new ExprSelect(CUR_POS, $1, *$3, $5); }
|
||||
| /* Backwards compatibility: because Nixpkgs has a rarely used
|
||||
function named ‘or’, allow stuff like ‘map or [...]’. */
|
||||
expr_simple OR_KW
|
||||
{ $$ = new ExprApp(CUR_POS, $1, new ExprVar(CUR_POS, data->symbols.create("or"))); }
|
||||
| expr_simple { $$ = $1; }
|
||||
;
|
||||
|
||||
expr_simple
|
||||
: ID {
|
||||
if (strcmp($1, "__curPos") == 0)
|
||||
$$ = new ExprPos(CUR_POS);
|
||||
else
|
||||
$$ = new ExprVar(CUR_POS, data->symbols.create($1));
|
||||
}
|
||||
| INT { $$ = new ExprInt($1); }
|
||||
| FLOAT { $$ = new ExprFloat($1); }
|
||||
| '"' string_parts '"' { $$ = $2; }
|
||||
| IND_STRING_OPEN ind_string_parts IND_STRING_CLOSE {
|
||||
$$ = stripIndentation(CUR_POS, data->symbols, *$2);
|
||||
}
|
||||
| PATH { $$ = new ExprPath(absPath($1, data->basePath)); }
|
||||
| HPATH { $$ = new ExprPath(getHome() + string{$1 + 1}); }
|
||||
| SPATH {
|
||||
string path($1 + 1, strlen($1) - 2);
|
||||
$$ = new ExprApp(CUR_POS,
|
||||
new ExprApp(new ExprVar(data->symbols.create("__findFile")),
|
||||
new ExprVar(data->symbols.create("__nixPath"))),
|
||||
new ExprString(data->symbols.create(path)));
|
||||
}
|
||||
| URI { $$ = new ExprString(data->symbols.create($1)); }
|
||||
| '(' expr ')' { $$ = $2; }
|
||||
/* Let expressions `let {..., body = ...}' are just desugared
|
||||
into `(rec {..., body = ...}).body'. */
|
||||
| LET '{' binds '}'
|
||||
{ $3->recursive = true; $$ = new ExprSelect(noPos, $3, data->symbols.create("body")); }
|
||||
| REC '{' binds '}'
|
||||
{ $3->recursive = true; $$ = $3; }
|
||||
| '{' binds '}'
|
||||
{ $$ = $2; }
|
||||
| '[' expr_list ']' { $$ = $2; }
|
||||
;
|
||||
|
||||
string_parts
|
||||
: STR
|
||||
| string_parts_interpolated { $$ = new ExprConcatStrings(CUR_POS, true, $1); }
|
||||
| { $$ = new ExprString(data->symbols.create("")); }
|
||||
;
|
||||
|
||||
string_parts_interpolated
|
||||
: string_parts_interpolated STR { $$ = $1; $1->push_back($2); }
|
||||
| string_parts_interpolated DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); }
|
||||
| DOLLAR_CURLY expr '}' { $$ = new vector<Expr *>; $$->push_back($2); }
|
||||
| STR DOLLAR_CURLY expr '}' {
|
||||
$$ = new vector<Expr *>;
|
||||
$$->push_back($1);
|
||||
$$->push_back($3);
|
||||
}
|
||||
;
|
||||
|
||||
ind_string_parts
|
||||
: ind_string_parts IND_STR { $$ = $1; $1->push_back($2); }
|
||||
| ind_string_parts DOLLAR_CURLY expr '}' { $$ = $1; $1->push_back($3); }
|
||||
| { $$ = new vector<Expr *>; }
|
||||
;
|
||||
|
||||
binds
|
||||
: binds attrpath '=' expr ';' { $$ = $1; addAttr($$, *$2, $4, makeCurPos(@2, data)); }
|
||||
| binds INHERIT attrs ';'
|
||||
{ $$ = $1;
|
||||
for (auto & i : *$3) {
|
||||
if ($$->attrs.find(i.symbol) != $$->attrs.end())
|
||||
dupAttr(i.symbol, makeCurPos(@3, data), $$->attrs[i.symbol].pos);
|
||||
Pos pos = makeCurPos(@3, data);
|
||||
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprVar(CUR_POS, i.symbol), pos, true);
|
||||
}
|
||||
}
|
||||
| binds INHERIT '(' expr ')' attrs ';'
|
||||
{ $$ = $1;
|
||||
/* !!! Should ensure sharing of the expression in $4. */
|
||||
for (auto & i : *$6) {
|
||||
if ($$->attrs.find(i.symbol) != $$->attrs.end())
|
||||
dupAttr(i.symbol, makeCurPos(@6, data), $$->attrs[i.symbol].pos);
|
||||
$$->attrs[i.symbol] = ExprAttrs::AttrDef(new ExprSelect(CUR_POS, $4, i.symbol), makeCurPos(@6, data));
|
||||
}
|
||||
}
|
||||
| { $$ = new ExprAttrs; }
|
||||
;
|
||||
|
||||
attrs
|
||||
: attrs attr { $$ = $1; $1->push_back(AttrName(data->symbols.create($2))); }
|
||||
| attrs string_attr
|
||||
{ $$ = $1;
|
||||
ExprString * str = dynamic_cast<ExprString *>($2);
|
||||
if (str) {
|
||||
$$->push_back(AttrName(str->s));
|
||||
delete str;
|
||||
} else
|
||||
throw ParseError(format("dynamic attributes not allowed in inherit at %1%")
|
||||
% makeCurPos(@2, data));
|
||||
}
|
||||
| { $$ = new AttrPath; }
|
||||
;
|
||||
|
||||
attrpath
|
||||
: attrpath '.' attr { $$ = $1; $1->push_back(AttrName(data->symbols.create($3))); }
|
||||
| attrpath '.' string_attr
|
||||
{ $$ = $1;
|
||||
ExprString * str = dynamic_cast<ExprString *>($3);
|
||||
if (str) {
|
||||
$$->push_back(AttrName(str->s));
|
||||
delete str;
|
||||
} else
|
||||
$$->push_back(AttrName($3));
|
||||
}
|
||||
| attr { $$ = new vector<AttrName>; $$->push_back(AttrName(data->symbols.create($1))); }
|
||||
| string_attr
|
||||
{ $$ = new vector<AttrName>;
|
||||
ExprString *str = dynamic_cast<ExprString *>($1);
|
||||
if (str) {
|
||||
$$->push_back(AttrName(str->s));
|
||||
delete str;
|
||||
} else
|
||||
$$->push_back(AttrName($1));
|
||||
}
|
||||
;
|
||||
|
||||
attr
|
||||
: ID { $$ = $1; }
|
||||
| OR_KW { $$ = "or"; }
|
||||
;
|
||||
|
||||
string_attr
|
||||
: '"' string_parts '"' { $$ = $2; }
|
||||
| DOLLAR_CURLY expr '}' { $$ = $2; }
|
||||
;
|
||||
|
||||
expr_list
|
||||
: expr_list expr_select { $$ = $1; $1->elems.push_back($2); /* !!! dangerous */ }
|
||||
| { $$ = new ExprList; }
|
||||
;
|
||||
|
||||
formals
|
||||
: formal ',' formals
|
||||
{ $$ = $3; addFormal(CUR_POS, $$, *$1); }
|
||||
| formal
|
||||
{ $$ = new Formals; addFormal(CUR_POS, $$, *$1); $$->ellipsis = false; }
|
||||
|
|
||||
{ $$ = new Formals; $$->ellipsis = false; }
|
||||
| ELLIPSIS
|
||||
{ $$ = new Formals; $$->ellipsis = true; }
|
||||
;
|
||||
|
||||
formal
|
||||
: ID { $$ = new Formal(data->symbols.create($1), 0); }
|
||||
| ID '?' expr { $$ = new Formal(data->symbols.create($1), $3); }
|
||||
;
|
||||
|
||||
%%
|
||||
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include "eval.hh"
|
||||
#include "download.hh"
|
||||
#include "store-api.hh"
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
Expr * EvalState::parse(const char * text,
|
||||
const Path & path, const Path & basePath, StaticEnv & staticEnv)
|
||||
{
|
||||
yyscan_t scanner;
|
||||
ParseData data(*this);
|
||||
data.basePath = basePath;
|
||||
data.path = data.symbols.create(path);
|
||||
|
||||
yylex_init(&scanner);
|
||||
yy_scan_string(text, scanner);
|
||||
int res = yyparse(scanner, &data);
|
||||
yylex_destroy(scanner);
|
||||
|
||||
if (res) throw ParseError(data.error);
|
||||
|
||||
data.result->bindVars(staticEnv);
|
||||
|
||||
return data.result;
|
||||
}
|
||||
|
||||
|
||||
Path resolveExprPath(Path path)
|
||||
{
|
||||
assert(path[0] == '/');
|
||||
|
||||
/* If `path' is a symlink, follow it. This is so that relative
|
||||
path references work. */
|
||||
struct stat st;
|
||||
while (true) {
|
||||
if (lstat(path.c_str(), &st))
|
||||
throw SysError(format("getting status of '%1%'") % path);
|
||||
if (!S_ISLNK(st.st_mode)) break;
|
||||
path = absPath(readLink(path), dirOf(path));
|
||||
}
|
||||
|
||||
/* If `path' refers to a directory, append `/default.nix'. */
|
||||
if (S_ISDIR(st.st_mode))
|
||||
path = canonPath(path + "/default.nix");
|
||||
|
||||
return path;
|
||||
}
|
||||
|
||||
|
||||
Expr * EvalState::parseExprFromFile(const Path & path)
|
||||
{
|
||||
return parseExprFromFile(path, staticBaseEnv);
|
||||
}
|
||||
|
||||
|
||||
Expr * EvalState::parseExprFromFile(const Path & path, StaticEnv & staticEnv)
|
||||
{
|
||||
return parse(readFile(path).c_str(), path, dirOf(path), staticEnv);
|
||||
}
|
||||
|
||||
|
||||
Expr * EvalState::parseExprFromString(const string & s, const Path & basePath, StaticEnv & staticEnv)
|
||||
{
|
||||
return parse(s.c_str(), "(string)", basePath, staticEnv);
|
||||
}
|
||||
|
||||
|
||||
Expr * EvalState::parseExprFromString(const string & s, const Path & basePath)
|
||||
{
|
||||
return parseExprFromString(s, basePath, staticBaseEnv);
|
||||
}
|
||||
|
||||
|
||||
Expr * EvalState::parseStdin()
|
||||
{
|
||||
//Activity act(*logger, lvlTalkative, format("parsing standard input"));
|
||||
return parseExprFromString(drainFD(0), absPath("."));
|
||||
}


void EvalState::addToSearchPath(const string & s)
{
    size_t pos = s.find('=');
    string prefix;
    Path path;
    if (pos == string::npos) {
        path = s;
    } else {
        prefix = string(s, 0, pos);
        path = string(s, pos + 1);
    }

    searchPath.emplace_back(prefix, path);
}


Path EvalState::findFile(const string & path)
{
    return findFile(searchPath, path);
}


Path EvalState::findFile(SearchPath & searchPath, const string & path, const Pos & pos)
{
    for (auto & i : searchPath) {
        std::string suffix;
        if (i.first.empty())
            suffix = "/" + path;
        else {
            auto s = i.first.size();
            if (path.compare(0, s, i.first) != 0 ||
                (path.size() > s && path[s] != '/'))
                continue;
            suffix = path.size() == s ? "" : "/" + string(path, s);
        }
        auto r = resolveSearchPathElem(i);
        if (!r.first) continue;
        Path res = r.second + suffix;
        if (pathExists(res)) return canonPath(res);
    }
    format f = format(
        "file '%1%' was not found in the Nix search path (add it using $NIX_PATH or -I)"
        + string(pos ? ", at %2%" : ""));
    f.exceptions(boost::io::all_error_bits ^ boost::io::too_many_args_bit);
    throw ThrownError(f % path % pos);
}
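
A short sketch of how the two entry kinds interact with findFile; the entries and lookups are made-up examples (this is how $NIX_PATH and -I entries are consumed, with prefix entries written as prefix=path):

// Illustrative only; findFile throws if no entry matches.
void searchPathDemo(nix::EvalState & state)
{
    state.addToSearchPath("nixpkgs=/some/checkout");  // prefix entry
    state.addToSearchPath("/extra/includes");         // bare path entry

    // Matches the "nixpkgs" prefix: tries /some/checkout/lib.
    nix::Path p1 = state.findFile("nixpkgs/lib");

    // No prefix match needed: each bare entry is tried with "/" + path.
    nix::Path p2 = state.findFile("default.nix");
}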


std::pair<bool, std::string> EvalState::resolveSearchPathElem(const SearchPathElem & elem)
{
    auto i = searchPathResolved.find(elem.second);
    if (i != searchPathResolved.end()) return i->second;

    std::pair<bool, std::string> res;

    if (isUri(elem.second)) {
        try {
            CachedDownloadRequest request(elem.second);
            request.unpack = true;
            res = { true, getDownloader()->downloadCached(store, request).path };
        } catch (DownloadError & e) {
            printError(format("warning: Nix search path entry '%1%' cannot be downloaded, ignoring") % elem.second);
            res = { false, "" };
        }
    } else {
        auto path = absPath(elem.second);
        if (pathExists(path))
            res = { true, path };
        else {
            printError(format("warning: Nix search path entry '%1%' does not exist, ignoring") % elem.second);
            res = { false, "" };
        }
    }

    debug(format("resolved search path element '%s' to '%s'") % elem.second % res.second);

    searchPathResolved[elem.second] = res;
    return res;
}


}
2329
third_party/nix/src/libexpr/primops.cc
vendored
Normal file
File diff suppressed because it is too large

26
third_party/nix/src/libexpr/primops.hh
vendored
Normal file
@@ -0,0 +1,26 @@
#include "eval.hh"

#include <tuple>
#include <vector>

namespace nix {

struct RegisterPrimOp
{
    typedef std::vector<std::tuple<std::string, size_t, PrimOpFun>> PrimOps;
    static PrimOps * primOps;
    /* You can register a constant by passing an arity of 0. fun
       will get called during EvalState initialization, so there
       may be primops not yet added and builtins is not yet sorted. */
    RegisterPrimOp(std::string name, size_t arity, PrimOpFun fun);
};

/* These primops are disabled without enableNativeCode, but plugins
   may wish to use them in limited contexts without globally enabling
   them. */
/* Load a ValueInitializer from a DSO and return whatever it initializes */
void prim_importNative(EvalState & state, const Pos & pos, Value * * args, Value & v);
/* Execute a program and parse its output */
void prim_exec(EvalState & state, const Pos & pos, Value * * args, Value & v);

}
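
The primop files that follow (context.cc, fetchGit.cc, and so on) all use this hook in the same way. As a minimal sketch, with the primop itself invented for illustration:

// Hypothetical example: a one-argument primop returning a list's length.
static void prim_myLength(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    state.forceList(*args[0], pos);
    mkInt(v, args[0]->listSize());
}

// The static initialiser appends to RegisterPrimOp::primOps, which
// EvalState initialisation later walks to install the builtin.
static RegisterPrimOp r("__myLength", 1, prim_myLength);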
187
third_party/nix/src/libexpr/primops/context.cc
vendored
Normal file
@@ -0,0 +1,187 @@
#include "primops.hh"
#include "eval-inline.hh"
#include "derivations.hh"

namespace nix {

static void prim_unsafeDiscardStringContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    string s = state.coerceToString(pos, *args[0], context);
    mkString(v, s, PathSet());
}

static RegisterPrimOp r1("__unsafeDiscardStringContext", 1, prim_unsafeDiscardStringContext);


static void prim_hasContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    state.forceString(*args[0], context, pos);
    mkBool(v, !context.empty());
}

static RegisterPrimOp r2("__hasContext", 1, prim_hasContext);


/* Sometimes we want to pass a derivation path (i.e. pkg.drvPath) to a
   builder without causing the derivation to be built (for instance,
   in the derivation that builds NARs in nix-push, when doing
   source-only deployment). This primop marks the string context so
   that builtins.derivation adds the path to drv.inputSrcs rather than
   drv.inputDrvs. */
static void prim_unsafeDiscardOutputDependency(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    string s = state.coerceToString(pos, *args[0], context);

    PathSet context2;
    for (auto & p : context)
        context2.insert(p.at(0) == '=' ? string(p, 1) : p);

    mkString(v, s, context2);
}

static RegisterPrimOp r3("__unsafeDiscardOutputDependency", 1, prim_unsafeDiscardOutputDependency);


/* Extract the context of a string as a structured Nix value.

   The context is represented as an attribute set whose keys are the
   paths in the context set and whose values are attribute sets with
   the following keys:
     path: True if the relevant path is in the context as a plain store
           path (i.e. the kind of context you get when interpolating
           a Nix path (e.g. ./.) into a string). False if missing.
     allOutputs: True if the relevant path is a derivation and it is
                 in the context as a drv file with all of its outputs
                 (i.e. the kind of context you get when referencing
                 .drvPath of some derivation). False if missing.
     outputs: If a non-empty list, the relevant path is a derivation
              and the provided outputs are referenced in the context
              (i.e. the kind of context you get when referencing
              .outPath of some derivation). Empty list if missing.

   Note that for a given path any combination of the above attributes
   may be present.
*/
static void prim_getContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    struct ContextInfo {
        bool path = false;
        bool allOutputs = false;
        Strings outputs;
    };
    PathSet context;
    state.forceString(*args[0], context, pos);
    auto contextInfos = std::map<Path, ContextInfo>();
    for (const auto & p : context) {
        Path drv;
        string output;
        const Path * path = &p;
        if (p.at(0) == '=') {
            drv = string(p, 1);
            path = &drv;
        } else if (p.at(0) == '!') {
            std::pair<string, string> ctx = decodeContext(p);
            drv = ctx.first;
            output = ctx.second;
            path = &drv;
        }
        auto isPath = drv.empty();
        auto isAllOutputs = (!drv.empty()) && output.empty();

        auto iter = contextInfos.find(*path);
        if (iter == contextInfos.end()) {
            contextInfos.emplace(*path, ContextInfo{isPath, isAllOutputs, output.empty() ? Strings{} : Strings{std::move(output)}});
        } else {
            if (isPath)
                iter->second.path = true;
            else if (isAllOutputs)
                iter->second.allOutputs = true;
            else
                iter->second.outputs.emplace_back(std::move(output));
        }
    }

    state.mkAttrs(v, contextInfos.size());

    auto sPath = state.symbols.create("path");
    auto sAllOutputs = state.symbols.create("allOutputs");
    for (const auto & info : contextInfos) {
        auto & infoVal = *state.allocAttr(v, state.symbols.create(info.first));
        state.mkAttrs(infoVal, 3);
        if (info.second.path)
            mkBool(*state.allocAttr(infoVal, sPath), true);
        if (info.second.allOutputs)
            mkBool(*state.allocAttr(infoVal, sAllOutputs), true);
        if (!info.second.outputs.empty()) {
            auto & outputsVal = *state.allocAttr(infoVal, state.sOutputs);
            state.mkList(outputsVal, info.second.outputs.size());
            size_t i = 0;
            for (const auto & output : info.second.outputs) {
                mkString(*(outputsVal.listElems()[i++] = state.allocValue()), output);
            }
        }
        infoVal.attrs->sort();
    }
    v.attrs->sort();
}

static RegisterPrimOp r4("__getContext", 1, prim_getContext);
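
For reference, the three raw encodings dispatched on above (and produced by prim_appendContext below) can be summarised in a small hypothetical helper; the classification mirrors the branches of prim_getContext:

// Sketch only; not part of this file.
//   "/nix/store/...-src"           plain store path
//   "=/nix/store/...-foo.drv"      all outputs of a derivation
//   "!out!/nix/store/...-foo.drv"  one named output (split by decodeContext)
static const char * contextKind(const Path & p)
{
    if (p.at(0) == '=') return "allOutputs";
    if (p.at(0) == '!') return "outputs";
    return "path";
}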


/* Append the given context to a given string.

   See the commentary above prim_getContext for details of the
   context representation.
*/
static void prim_appendContext(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    PathSet context;
    auto orig = state.forceString(*args[0], context, pos);

    state.forceAttrs(*args[1], pos);

    auto sPath = state.symbols.create("path");
    auto sAllOutputs = state.symbols.create("allOutputs");
    for (auto & i : *args[1]->attrs) {
        if (!state.store->isStorePath(i.name))
            throw EvalError("Context key '%s' is not a store path, at %s", i.name, i.pos);
        if (!settings.readOnlyMode)
            state.store->ensurePath(i.name);
        state.forceAttrs(*i.value, *i.pos);
        auto iter = i.value->attrs->find(sPath);
        if (iter != i.value->attrs->end()) {
            if (state.forceBool(*iter->value, *iter->pos))
                context.insert(i.name);
        }

        iter = i.value->attrs->find(sAllOutputs);
        if (iter != i.value->attrs->end()) {
            if (state.forceBool(*iter->value, *iter->pos)) {
                if (!isDerivation(i.name)) {
                    throw EvalError("Tried to add all-outputs context of %s, which is not a derivation, to a string, at %s", i.name, i.pos);
                }
                context.insert("=" + string(i.name));
            }
        }

        iter = i.value->attrs->find(state.sOutputs);
        if (iter != i.value->attrs->end()) {
            state.forceList(*iter->value, *iter->pos);
            if (iter->value->listSize() && !isDerivation(i.name)) {
                throw EvalError("Tried to add derivation output context of %s, which is not a derivation, to a string, at %s", i.name, i.pos);
            }
            for (unsigned int n = 0; n < iter->value->listSize(); ++n) {
                auto name = state.forceStringNoCtx(*iter->value->listElems()[n], *iter->pos);
                context.insert("!" + name + "!" + string(i.name));
            }
        }
    }

    mkString(v, orig, context);
}

static RegisterPrimOp r5("__appendContext", 2, prim_appendContext);

}
247
third_party/nix/src/libexpr/primops/fetchGit.cc
vendored
Normal file
@@ -0,0 +1,247 @@
#include "primops.hh"
#include "eval-inline.hh"
#include "download.hh"
#include "store-api.hh"
#include "pathlocks.hh"
#include "hash.hh"

#include <sys/time.h>

#include <regex>

#include <nlohmann/json.hpp>

using namespace std::string_literals;

namespace nix {

struct GitInfo
{
    Path storePath;
    std::string rev;
    std::string shortRev;
    uint64_t revCount = 0;
};

std::regex revRegex("^[0-9a-fA-F]{40}$");

GitInfo exportGit(ref<Store> store, const std::string & uri,
    std::optional<std::string> ref, std::string rev,
    const std::string & name)
{
    if (evalSettings.pureEval && rev == "")
        throw Error("in pure evaluation mode, 'fetchGit' requires a Git revision");

    if (!ref && rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.git")) {

        bool clean = true;

        try {
            runProgram("git", true, { "-C", uri, "diff-index", "--quiet", "HEAD", "--" });
        } catch (ExecError & e) {
            if (!WIFEXITED(e.status) || WEXITSTATUS(e.status) != 1) throw;
            clean = false;
        }

        if (!clean) {

            /* This is an unclean working tree. So copy all tracked
               files. */

            GitInfo gitInfo;
            gitInfo.rev = "0000000000000000000000000000000000000000";
            gitInfo.shortRev = std::string(gitInfo.rev, 0, 7);

            auto files = tokenizeString<std::set<std::string>>(
                runProgram("git", true, { "-C", uri, "ls-files", "-z" }), "\0"s);

            PathFilter filter = [&](const Path & p) -> bool {
                assert(hasPrefix(p, uri));
                std::string file(p, uri.size() + 1);

                auto st = lstat(p);

                if (S_ISDIR(st.st_mode)) {
                    auto prefix = file + "/";
                    auto i = files.lower_bound(prefix);
                    return i != files.end() && hasPrefix(*i, prefix);
                }

                return files.count(file);
            };

            gitInfo.storePath = store->addToStore("source", uri, true, htSHA256, filter);

            return gitInfo;
        }

        // clean working tree, but no ref or rev specified. Use 'HEAD'.
        rev = chomp(runProgram("git", true, { "-C", uri, "rev-parse", "HEAD" }));
        ref = "HEAD"s;
    }

    if (!ref) ref = "HEAD"s;

    if (rev != "" && !std::regex_match(rev, revRegex))
        throw Error("invalid Git revision '%s'", rev);

    deletePath(getCacheDir() + "/nix/git");

    Path cacheDir = getCacheDir() + "/nix/gitv2/" + hashString(htSHA256, uri).to_string(Base32, false);

    if (!pathExists(cacheDir)) {
        createDirs(dirOf(cacheDir));
        runProgram("git", true, { "init", "--bare", cacheDir });
    }

    Path localRefFile;
    if (ref->compare(0, 5, "refs/") == 0)
        localRefFile = cacheDir + "/" + *ref;
    else
        localRefFile = cacheDir + "/refs/heads/" + *ref;

    bool doFetch;
    time_t now = time(0);
    /* If a rev was specified, we need to fetch if it's not in the
       repo. */
    if (rev != "") {
        try {
            runProgram("git", true, { "-C", cacheDir, "cat-file", "-e", rev });
            doFetch = false;
        } catch (ExecError & e) {
            if (WIFEXITED(e.status)) {
                doFetch = true;
            } else {
                throw;
            }
        }
    } else {
        /* If the local ref is older than ‘tarball-ttl’ seconds, do a
           git fetch to update the local ref to the remote ref. */
        struct stat st;
        doFetch = stat(localRefFile.c_str(), &st) != 0 ||
            (uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now;
    }
    if (doFetch)
    {
        Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Git repository '%s'", uri));

        // FIXME: git stderr messes up our progress indicator, so
        // we're using --quiet for now. Should process its stderr.
        runProgram("git", true, { "-C", cacheDir, "fetch", "--quiet", "--force", "--", uri, fmt("%s:%s", *ref, *ref) });

        struct timeval times[2];
        times[0].tv_sec = now;
        times[0].tv_usec = 0;
        times[1].tv_sec = now;
        times[1].tv_usec = 0;

        utimes(localRefFile.c_str(), times);
    }

    // FIXME: check whether rev is an ancestor of ref.
    GitInfo gitInfo;
    gitInfo.rev = rev != "" ? rev : chomp(readFile(localRefFile));
    gitInfo.shortRev = std::string(gitInfo.rev, 0, 7);

    printTalkative("using revision %s of repo '%s'", gitInfo.rev, uri);

    std::string storeLinkName = hashString(htSHA512, name + std::string("\0"s) + gitInfo.rev).to_string(Base32, false);
    Path storeLink = cacheDir + "/" + storeLinkName + ".link";
    PathLocks storeLinkLock({storeLink}, fmt("waiting for lock on '%1%'...", storeLink)); // FIXME: broken

    try {
        auto json = nlohmann::json::parse(readFile(storeLink));

        assert(json["name"] == name && json["rev"] == gitInfo.rev);

        gitInfo.storePath = json["storePath"];

        if (store->isValidPath(gitInfo.storePath)) {
            gitInfo.revCount = json["revCount"];
            return gitInfo;
        }

    } catch (SysError & e) {
        if (e.errNo != ENOENT) throw;
    }

    // FIXME: should pipe this, or find some better way to extract a
    // revision.
    auto tar = runProgram("git", true, { "-C", cacheDir, "archive", gitInfo.rev });

    Path tmpDir = createTempDir();
    AutoDelete delTmpDir(tmpDir, true);

    runProgram("tar", true, { "x", "-C", tmpDir }, tar);

    gitInfo.storePath = store->addToStore(name, tmpDir);

    gitInfo.revCount = std::stoull(runProgram("git", true, { "-C", cacheDir, "rev-list", "--count", gitInfo.rev }));

    nlohmann::json json;
    json["storePath"] = gitInfo.storePath;
    json["uri"] = uri;
    json["name"] = name;
    json["rev"] = gitInfo.rev;
    json["revCount"] = gitInfo.revCount;

    writeFile(storeLink, json.dump());

    return gitInfo;
}

static void prim_fetchGit(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    std::string url;
    std::optional<std::string> ref;
    std::string rev;
    std::string name = "source";
    PathSet context;

    state.forceValue(*args[0]);

    if (args[0]->type == tAttrs) {

        state.forceAttrs(*args[0], pos);

        for (auto & attr : *args[0]->attrs) {
            string n(attr.name);
            if (n == "url")
                url = state.coerceToString(*attr.pos, *attr.value, context, false, false);
            else if (n == "ref")
                ref = state.forceStringNoCtx(*attr.value, *attr.pos);
            else if (n == "rev")
                rev = state.forceStringNoCtx(*attr.value, *attr.pos);
            else if (n == "name")
                name = state.forceStringNoCtx(*attr.value, *attr.pos);
            else
                throw EvalError("unsupported argument '%s' to 'fetchGit', at %s", attr.name, *attr.pos);
        }

        if (url.empty())
            throw EvalError(format("'url' argument required, at %1%") % pos);

    } else
        url = state.coerceToString(pos, *args[0], context, false, false);

    // FIXME: git externals probably can be used to bypass the URI
    // whitelist. Ah well.
    state.checkURI(url);

    auto gitInfo = exportGit(state.store, url, ref, rev, name);

    state.mkAttrs(v, 8);
    mkString(*state.allocAttr(v, state.sOutPath), gitInfo.storePath, PathSet({gitInfo.storePath}));
    mkString(*state.allocAttr(v, state.symbols.create("rev")), gitInfo.rev);
    mkString(*state.allocAttr(v, state.symbols.create("shortRev")), gitInfo.shortRev);
    mkInt(*state.allocAttr(v, state.symbols.create("revCount")), gitInfo.revCount);
    v.attrs->sort();

    if (state.allowedPaths)
        state.allowedPaths->insert(state.store->toRealPath(gitInfo.storePath));
}

static RegisterPrimOp r("fetchGit", 1, prim_fetchGit);

}
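
Both this fetcher and fetchMercurial below gate network access on the same tarball-ttl freshness test against a locally stamped file; as a standalone sketch of that check (the helper name is invented):

// Refetch if the stamp is missing or older than ttl seconds.
static bool needsRefetch(const Path & stampFile, uint64_t ttl)
{
    struct stat st;
    time_t now = time(0);
    return stat(stampFile.c_str(), &st) != 0 ||
        (uint64_t) st.st_mtime + ttl <= (uint64_t) now;
}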
219
third_party/nix/src/libexpr/primops/fetchMercurial.cc
vendored
Normal file
@@ -0,0 +1,219 @@
#include "primops.hh"
#include "eval-inline.hh"
#include "download.hh"
#include "store-api.hh"
#include "pathlocks.hh"

#include <sys/time.h>

#include <regex>

#include <nlohmann/json.hpp>

using namespace std::string_literals;

namespace nix {

struct HgInfo
{
    Path storePath;
    std::string branch;
    std::string rev;
    uint64_t revCount = 0;
};

std::regex commitHashRegex("^[0-9a-fA-F]{40}$");

HgInfo exportMercurial(ref<Store> store, const std::string & uri,
    std::string rev, const std::string & name)
{
    if (evalSettings.pureEval && rev == "")
        throw Error("in pure evaluation mode, 'fetchMercurial' requires a Mercurial revision");

    if (rev == "" && hasPrefix(uri, "/") && pathExists(uri + "/.hg")) {

        bool clean = runProgram("hg", true, { "status", "-R", uri, "--modified", "--added", "--removed" }) == "";

        if (!clean) {

            /* This is an unclean working tree. So copy all tracked
               files. */

            printTalkative("copying unclean Mercurial working tree '%s'", uri);

            HgInfo hgInfo;
            hgInfo.rev = "0000000000000000000000000000000000000000";
            hgInfo.branch = chomp(runProgram("hg", true, { "branch", "-R", uri }));

            auto files = tokenizeString<std::set<std::string>>(
                runProgram("hg", true, { "status", "-R", uri, "--clean", "--modified", "--added", "--no-status", "--print0" }), "\0"s);

            PathFilter filter = [&](const Path & p) -> bool {
                assert(hasPrefix(p, uri));
                std::string file(p, uri.size() + 1);

                auto st = lstat(p);

                if (S_ISDIR(st.st_mode)) {
                    auto prefix = file + "/";
                    auto i = files.lower_bound(prefix);
                    return i != files.end() && hasPrefix(*i, prefix);
                }

                return files.count(file);
            };

            hgInfo.storePath = store->addToStore("source", uri, true, htSHA256, filter);

            return hgInfo;
        }
    }

    if (rev == "") rev = "default";

    Path cacheDir = fmt("%s/nix/hg/%s", getCacheDir(), hashString(htSHA256, uri).to_string(Base32, false));

    Path stampFile = fmt("%s/.hg/%s.stamp", cacheDir, hashString(htSHA512, rev).to_string(Base32, false));

    /* If we haven't pulled this repo less than ‘tarball-ttl’ seconds,
       do so now. */
    time_t now = time(0);
    struct stat st;
    if (stat(stampFile.c_str(), &st) != 0 ||
        (uint64_t) st.st_mtime + settings.tarballTtl <= (uint64_t) now)
    {
        /* Except that if this is a commit hash that we already have,
           we don't have to pull again. */
        if (!(std::regex_match(rev, commitHashRegex)
                && pathExists(cacheDir)
                && runProgram(
                    RunOptions("hg", { "log", "-R", cacheDir, "-r", rev, "--template", "1" })
                    .killStderr(true)).second == "1"))
        {
            Activity act(*logger, lvlTalkative, actUnknown, fmt("fetching Mercurial repository '%s'", uri));

            if (pathExists(cacheDir)) {
                try {
                    runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri });
                }
                catch (ExecError & e) {
                    string transJournal = cacheDir + "/.hg/store/journal";
                    /* hg throws "abandoned transaction" error only if this file exists */
                    if (pathExists(transJournal)) {
                        runProgram("hg", true, { "recover", "-R", cacheDir });
                        runProgram("hg", true, { "pull", "-R", cacheDir, "--", uri });
                    } else {
                        throw ExecError(e.status, fmt("'hg pull' %s", statusToString(e.status)));
                    }
                }
            } else {
                createDirs(dirOf(cacheDir));
                runProgram("hg", true, { "clone", "--noupdate", "--", uri, cacheDir });
            }
        }

        writeFile(stampFile, "");
    }

    auto tokens = tokenizeString<std::vector<std::string>>(
        runProgram("hg", true, { "log", "-R", cacheDir, "-r", rev, "--template", "{node} {rev} {branch}" }));
    assert(tokens.size() == 3);

    HgInfo hgInfo;
    hgInfo.rev = tokens[0];
    hgInfo.revCount = std::stoull(tokens[1]);
    hgInfo.branch = tokens[2];

    std::string storeLinkName = hashString(htSHA512, name + std::string("\0"s) + hgInfo.rev).to_string(Base32, false);
    Path storeLink = fmt("%s/.hg/%s.link", cacheDir, storeLinkName);

    try {
        auto json = nlohmann::json::parse(readFile(storeLink));

        assert(json["name"] == name && json["rev"] == hgInfo.rev);

        hgInfo.storePath = json["storePath"];

        if (store->isValidPath(hgInfo.storePath)) {
            printTalkative("using cached Mercurial store path '%s'", hgInfo.storePath);
            return hgInfo;
        }

    } catch (SysError & e) {
        if (e.errNo != ENOENT) throw;
    }

    Path tmpDir = createTempDir();
    AutoDelete delTmpDir(tmpDir, true);

    runProgram("hg", true, { "archive", "-R", cacheDir, "-r", rev, tmpDir });

    deletePath(tmpDir + "/.hg_archival.txt");

    hgInfo.storePath = store->addToStore(name, tmpDir);

    nlohmann::json json;
    json["storePath"] = hgInfo.storePath;
    json["uri"] = uri;
    json["name"] = name;
    json["branch"] = hgInfo.branch;
    json["rev"] = hgInfo.rev;
    json["revCount"] = hgInfo.revCount;

    writeFile(storeLink, json.dump());

    return hgInfo;
}

static void prim_fetchMercurial(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    std::string url;
    std::string rev;
    std::string name = "source";
    PathSet context;

    state.forceValue(*args[0]);

    if (args[0]->type == tAttrs) {

        state.forceAttrs(*args[0], pos);

        for (auto & attr : *args[0]->attrs) {
            string n(attr.name);
            if (n == "url")
                url = state.coerceToString(*attr.pos, *attr.value, context, false, false);
            else if (n == "rev")
                rev = state.forceStringNoCtx(*attr.value, *attr.pos);
            else if (n == "name")
                name = state.forceStringNoCtx(*attr.value, *attr.pos);
            else
                throw EvalError("unsupported argument '%s' to 'fetchMercurial', at %s", attr.name, *attr.pos);
        }

        if (url.empty())
            throw EvalError(format("'url' argument required, at %1%") % pos);

    } else
        url = state.coerceToString(pos, *args[0], context, false, false);

    // FIXME: git externals probably can be used to bypass the URI
    // whitelist. Ah well.
    state.checkURI(url);

    auto hgInfo = exportMercurial(state.store, url, rev, name);

    state.mkAttrs(v, 8);
    mkString(*state.allocAttr(v, state.sOutPath), hgInfo.storePath, PathSet({hgInfo.storePath}));
    mkString(*state.allocAttr(v, state.symbols.create("branch")), hgInfo.branch);
    mkString(*state.allocAttr(v, state.symbols.create("rev")), hgInfo.rev);
    mkString(*state.allocAttr(v, state.symbols.create("shortRev")), std::string(hgInfo.rev, 0, 12));
    mkInt(*state.allocAttr(v, state.symbols.create("revCount")), hgInfo.revCount);
    v.attrs->sort();

    if (state.allowedPaths)
        state.allowedPaths->insert(state.store->toRealPath(hgInfo.storePath));
}

static RegisterPrimOp r("fetchMercurial", 1, prim_fetchMercurial);

}
90
third_party/nix/src/libexpr/primops/fromTOML.cc
vendored
Normal file
@@ -0,0 +1,90 @@
#include "primops.hh"
#include "eval-inline.hh"

#include "cpptoml/cpptoml.h"

namespace nix {

static void prim_fromTOML(EvalState & state, const Pos & pos, Value * * args, Value & v)
{
    using namespace cpptoml;

    auto toml = state.forceStringNoCtx(*args[0], pos);

    std::istringstream tomlStream(toml);

    std::function<void(Value &, std::shared_ptr<base>)> visit;

    visit = [&](Value & v, std::shared_ptr<base> t) {

        if (auto t2 = t->as_table()) {

            size_t size = 0;
            for (auto & i : *t2) { (void) i; size++; }

            state.mkAttrs(v, size);

            for (auto & i : *t2) {
                auto & v2 = *state.allocAttr(v, state.symbols.create(i.first));

                if (auto i2 = i.second->as_table_array()) {
                    size_t size2 = i2->get().size();
                    state.mkList(v2, size2);
                    for (size_t j = 0; j < size2; ++j)
                        visit(*(v2.listElems()[j] = state.allocValue()), i2->get()[j]);
                }
                else
                    visit(v2, i.second);
            }

            v.attrs->sort();
        }

        else if (auto t2 = t->as_array()) {
            size_t size = t2->get().size();

            state.mkList(v, size);

            for (size_t i = 0; i < size; ++i)
                visit(*(v.listElems()[i] = state.allocValue()), t2->get()[i]);
        }

        // Handle cases like 'a = [[{ a = true }]]', which IMHO should be
        // parsed as an array containing an array containing a table,
        // but instead are parsed as an array containing a table array
        // containing a table.
        else if (auto t2 = t->as_table_array()) {
            size_t size = t2->get().size();

            state.mkList(v, size);

            for (size_t j = 0; j < size; ++j)
                visit(*(v.listElems()[j] = state.allocValue()), t2->get()[j]);
        }

        else if (t->is_value()) {
            if (auto val = t->as<int64_t>())
                mkInt(v, val->get());
            else if (auto val = t->as<NixFloat>())
                mkFloat(v, val->get());
            else if (auto val = t->as<bool>())
                mkBool(v, val->get());
            else if (auto val = t->as<std::string>())
                mkString(v, val->get());
            else
                throw EvalError("unsupported value type in TOML");
        }

        else abort();
    };

    try {
        visit(v, parser(tomlStream).parse());
    } catch (std::runtime_error & e) {
        throw EvalError("while parsing a TOML string at %s: %s", pos, e.what());
    }
}

static RegisterPrimOp r("fromTOML", 1, prim_fromTOML);

}
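
A small sketch of the same cpptoml entry points outside the evaluator, using only the calls exercised above (the document and helper are invented):

// Sum all top-level integer values of a TOML document.
static int64_t tomlSumInts(const std::string & doc)
{
    std::istringstream stream(doc);
    std::shared_ptr<cpptoml::base> root = cpptoml::parser(stream).parse();
    int64_t sum = 0;
    if (auto t = root->as_table())
        for (auto & kv : *t)
            if (auto v = kv.second->as<int64_t>())
                sum += v->get();
    return sum; // e.g. "a = 1\nb = 2\n" yields 3
}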
87
third_party/nix/src/libexpr/symbol-table.hh
vendored
Normal file
@@ -0,0 +1,87 @@
#pragma once

#include <map>
#include <unordered_set>

#include "types.hh"

namespace nix {

/* Symbol table used by the parser and evaluator to represent and look
   up identifiers and attributes efficiently. SymbolTable::create()
   converts a string into a symbol. Symbols have the property that
   they can be compared efficiently (using a pointer equality test),
   because the symbol table stores only one copy of each string. */

class Symbol
{
private:
    const string * s; // pointer into SymbolTable
    Symbol(const string * s) : s(s) { };
    friend class SymbolTable;

public:
    Symbol() : s(0) { };

    bool operator == (const Symbol & s2) const
    {
        return s == s2.s;
    }

    bool operator != (const Symbol & s2) const
    {
        return s != s2.s;
    }

    bool operator < (const Symbol & s2) const
    {
        return s < s2.s;
    }

    operator const string & () const
    {
        return *s;
    }

    bool set() const
    {
        return s;
    }

    bool empty() const
    {
        return s->empty();
    }

    friend std::ostream & operator << (std::ostream & str, const Symbol & sym);
};

class SymbolTable
{
private:
    typedef std::unordered_set<string> Symbols;
    Symbols symbols;

public:
    Symbol create(const string & s)
    {
        std::pair<Symbols::iterator, bool> res = symbols.insert(s);
        return Symbol(&*res.first);
    }

    size_t size() const
    {
        return symbols.size();
    }

    size_t totalSize() const;

    template<typename T>
    void dump(T callback)
    {
        for (auto & s : symbols)
            callback(s);
    }
};

}
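
A minimal sketch of the interning property described in the comment above: equal strings intern to the same pointer, so comparisons are pointer tests (the demo function is invented):

static bool symbolDemo()
{
    nix::SymbolTable table;
    nix::Symbol a = table.create("outPath");
    nix::Symbol b = table.create("outPath"); // same interned string as `a'
    nix::Symbol c = table.create("drvPath");
    return a == b && a != c && table.size() == 2;
}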
100
third_party/nix/src/libexpr/value-to-json.cc
vendored
Normal file
@@ -0,0 +1,100 @@
#include "value-to-json.hh"
#include "json.hh"
#include "eval-inline.hh"
#include "util.hh"

#include <cstdlib>
#include <iomanip>


namespace nix {

void printValueAsJSON(EvalState & state, bool strict,
    Value & v, JSONPlaceholder & out, PathSet & context)
{
    checkInterrupt();

    if (strict) state.forceValue(v);

    switch (v.type) {

        case tInt:
            out.write(v.integer);
            break;

        case tBool:
            out.write(v.boolean);
            break;

        case tString:
            copyContext(v, context);
            out.write(v.string.s);
            break;

        case tPath:
            out.write(state.copyPathToStore(context, v.path));
            break;

        case tNull:
            out.write(nullptr);
            break;

        case tAttrs: {
            auto maybeString = state.tryAttrsToString(noPos, v, context, false, false);
            if (maybeString) {
                out.write(*maybeString);
                break;
            }
            auto i = v.attrs->find(state.sOutPath);
            if (i == v.attrs->end()) {
                auto obj(out.object());
                StringSet names;
                for (auto & j : *v.attrs)
                    names.insert(j.name);
                for (auto & j : names) {
                    Attr & a(*v.attrs->find(state.symbols.create(j)));
                    auto placeholder(obj.placeholder(j));
                    printValueAsJSON(state, strict, *a.value, placeholder, context);
                }
            } else
                printValueAsJSON(state, strict, *i->value, out, context);
            break;
        }

        case tList1: case tList2: case tListN: {
            auto list(out.list());
            for (unsigned int n = 0; n < v.listSize(); ++n) {
                auto placeholder(list.placeholder());
                printValueAsJSON(state, strict, *v.listElems()[n], placeholder, context);
            }
            break;
        }

        case tExternal:
            v.external->printValueAsJSON(state, strict, out, context);
            break;

        case tFloat:
            out.write(v.fpoint);
            break;

        default:
            throw TypeError(format("cannot convert %1% to JSON") % showType(v));
    }
}

void printValueAsJSON(EvalState & state, bool strict,
    Value & v, std::ostream & str, PathSet & context)
{
    JSONPlaceholder out(str);
    printValueAsJSON(state, strict, v, out, context);
}

void ExternalValueBase::printValueAsJSON(EvalState & state, bool strict,
    JSONPlaceholder & out, PathSet & context) const
{
    throw TypeError(format("cannot convert %1% to JSON") % showType());
}

}
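
A minimal sketch of the stream-based overload, assuming an initialised EvalState (the driver is invented; mkInt comes from value.hh below):

// Requires <iostream> for std::cout.
void jsonDemo(nix::EvalState & state)
{
    nix::Value v;
    nix::mkInt(v, 42);
    nix::PathSet context; // collects string contexts; unused for an int
    nix::printValueAsJSON(state, true /* strict */, v, std::cout, context);
    // prints: 42
}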
19
third_party/nix/src/libexpr/value-to-json.hh
vendored
Normal file
@@ -0,0 +1,19 @@
#pragma once

#include "nixexpr.hh"
#include "eval.hh"

#include <string>
#include <map>

namespace nix {

class JSONPlaceholder;

void printValueAsJSON(EvalState & state, bool strict,
    Value & v, JSONPlaceholder & out, PathSet & context);

void printValueAsJSON(EvalState & state, bool strict,
    Value & v, std::ostream & str, PathSet & context);

}
178
third_party/nix/src/libexpr/value-to-xml.cc
vendored
Normal file
@@ -0,0 +1,178 @@
#include "value-to-xml.hh"
#include "xml-writer.hh"
#include "eval-inline.hh"
#include "util.hh"

#include <cstdlib>


namespace nix {


static XMLAttrs singletonAttrs(const string & name, const string & value)
{
    XMLAttrs attrs;
    attrs[name] = value;
    return attrs;
}


static void printValueAsXML(EvalState & state, bool strict, bool location,
    Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen);


static void posToXML(XMLAttrs & xmlAttrs, const Pos & pos)
{
    xmlAttrs["path"] = pos.file;
    xmlAttrs["line"] = (format("%1%") % pos.line).str();
    xmlAttrs["column"] = (format("%1%") % pos.column).str();
}


static void showAttrs(EvalState & state, bool strict, bool location,
    Bindings & attrs, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
{
    StringSet names;

    for (auto & i : attrs)
        names.insert(i.name);

    for (auto & i : names) {
        Attr & a(*attrs.find(state.symbols.create(i)));

        XMLAttrs xmlAttrs;
        xmlAttrs["name"] = i;
        if (location && a.pos != &noPos) posToXML(xmlAttrs, *a.pos);

        XMLOpenElement _(doc, "attr", xmlAttrs);
        printValueAsXML(state, strict, location,
            *a.value, doc, context, drvsSeen);
    }
}


static void printValueAsXML(EvalState & state, bool strict, bool location,
    Value & v, XMLWriter & doc, PathSet & context, PathSet & drvsSeen)
{
    checkInterrupt();

    if (strict) state.forceValue(v);

    switch (v.type) {

        case tInt:
            doc.writeEmptyElement("int", singletonAttrs("value", (format("%1%") % v.integer).str()));
            break;

        case tBool:
            doc.writeEmptyElement("bool", singletonAttrs("value", v.boolean ? "true" : "false"));
            break;

        case tString:
            /* !!! show the context? */
            copyContext(v, context);
            doc.writeEmptyElement("string", singletonAttrs("value", v.string.s));
            break;

        case tPath:
            doc.writeEmptyElement("path", singletonAttrs("value", v.path));
            break;

        case tNull:
            doc.writeEmptyElement("null");
            break;

        case tAttrs:
            if (state.isDerivation(v)) {
                XMLAttrs xmlAttrs;

                Bindings::iterator a = v.attrs->find(state.symbols.create("derivation"));

                Path drvPath;
                a = v.attrs->find(state.sDrvPath);
                if (a != v.attrs->end()) {
                    if (strict) state.forceValue(*a->value);
                    if (a->value->type == tString)
                        xmlAttrs["drvPath"] = drvPath = a->value->string.s;
                }

                a = v.attrs->find(state.sOutPath);
                if (a != v.attrs->end()) {
                    if (strict) state.forceValue(*a->value);
                    if (a->value->type == tString)
                        xmlAttrs["outPath"] = a->value->string.s;
                }

                XMLOpenElement _(doc, "derivation", xmlAttrs);

                if (drvPath != "" && drvsSeen.find(drvPath) == drvsSeen.end()) {
                    drvsSeen.insert(drvPath);
                    showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen);
                } else
                    doc.writeEmptyElement("repeated");
            }

            else {
                XMLOpenElement _(doc, "attrs");
                showAttrs(state, strict, location, *v.attrs, doc, context, drvsSeen);
            }

            break;

        case tList1: case tList2: case tListN: {
            XMLOpenElement _(doc, "list");
            for (unsigned int n = 0; n < v.listSize(); ++n)
                printValueAsXML(state, strict, location, *v.listElems()[n], doc, context, drvsSeen);
            break;
        }

        case tLambda: {
            XMLAttrs xmlAttrs;
            if (location) posToXML(xmlAttrs, v.lambda.fun->pos);
            XMLOpenElement _(doc, "function", xmlAttrs);

            if (v.lambda.fun->matchAttrs) {
                XMLAttrs attrs;
                if (!v.lambda.fun->arg.empty()) attrs["name"] = v.lambda.fun->arg;
                if (v.lambda.fun->formals->ellipsis) attrs["ellipsis"] = "1";
                XMLOpenElement _(doc, "attrspat", attrs);
                for (auto & i : v.lambda.fun->formals->formals)
                    doc.writeEmptyElement("attr", singletonAttrs("name", i.name));
            } else
                doc.writeEmptyElement("varpat", singletonAttrs("name", v.lambda.fun->arg));

            break;
        }

        case tExternal:
            v.external->printValueAsXML(state, strict, location, doc, context, drvsSeen);
            break;

        case tFloat:
            doc.writeEmptyElement("float", singletonAttrs("value", (format("%1%") % v.fpoint).str()));
            break;

        default:
            doc.writeEmptyElement("unevaluated");
    }
}


void ExternalValueBase::printValueAsXML(EvalState & state, bool strict,
    bool location, XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const
{
    doc.writeEmptyElement("unevaluated");
}


void printValueAsXML(EvalState & state, bool strict, bool location,
    Value & v, std::ostream & out, PathSet & context)
{
    XMLWriter doc(true, out);
    XMLOpenElement root(doc, "expr");
    PathSet drvsSeen;
    printValueAsXML(state, strict, location, v, doc, context, drvsSeen);
}


}
14
third_party/nix/src/libexpr/value-to-xml.hh
vendored
Normal file
@@ -0,0 +1,14 @@
#pragma once

#include "nixexpr.hh"
#include "eval.hh"

#include <string>
#include <map>

namespace nix {

void printValueAsXML(EvalState & state, bool strict, bool location,
    Value & v, std::ostream & out, PathSet & context);

}
274
third_party/nix/src/libexpr/value.hh
vendored
Normal file
@@ -0,0 +1,274 @@
#pragma once

#include "symbol-table.hh"

#if HAVE_BOEHMGC
#include <gc/gc_allocator.h>
#endif

namespace nix {


typedef enum {
    tInt = 1,
    tBool,
    tString,
    tPath,
    tNull,
    tAttrs,
    tList1,
    tList2,
    tListN,
    tThunk,
    tApp,
    tLambda,
    tBlackhole,
    tPrimOp,
    tPrimOpApp,
    tExternal,
    tFloat
} ValueType;


class Bindings;
struct Env;
struct Expr;
struct ExprLambda;
struct PrimOp;
class Symbol;
struct Pos;
class EvalState;
class XMLWriter;
class JSONPlaceholder;


typedef int64_t NixInt;
typedef double NixFloat;

/* External values must descend from ExternalValueBase, so that
 * type-agnostic nix functions (e.g. showType) can be implemented
 */
class ExternalValueBase
{
    friend std::ostream & operator << (std::ostream & str, const ExternalValueBase & v);
    protected:
    /* Print out the value */
    virtual std::ostream & print(std::ostream & str) const = 0;

    public:
    /* Return a simple string describing the type */
    virtual string showType() const = 0;

    /* Return a string to be used in builtins.typeOf */
    virtual string typeOf() const = 0;

    /* How much space does this value take up */
    virtual size_t valueSize(std::set<const void *> & seen) const = 0;

    /* Coerce the value to a string. Defaults to uncoercable, i.e. throws an
     * error
     */
    virtual string coerceToString(const Pos & pos, PathSet & context, bool copyMore, bool copyToStore) const;

    /* Compare to another value of the same type. Defaults to uncomparable,
     * i.e. always false.
     */
    virtual bool operator==(const ExternalValueBase & b) const;

    /* Print the value as JSON. Defaults to unconvertable, i.e. throws an error */
    virtual void printValueAsJSON(EvalState & state, bool strict,
        JSONPlaceholder & out, PathSet & context) const;

    /* Print the value as XML. Defaults to unevaluated */
    virtual void printValueAsXML(EvalState & state, bool strict, bool location,
        XMLWriter & doc, PathSet & context, PathSet & drvsSeen) const;

    virtual ~ExternalValueBase()
    {
    };
};

std::ostream & operator << (std::ostream & str, const ExternalValueBase & v);


struct Value
{
    ValueType type;
    union
    {
        NixInt integer;
        bool boolean;

        /* Strings in the evaluator carry a so-called `context' which
           is a list of strings representing store paths. This is to
           allow users to write things like

             "--with-freetype2-library=" + freetype + "/lib"

           where `freetype' is a derivation (or a source to be copied
           to the store). If we just concatenated the strings without
           keeping track of the referenced store paths, then if the
           string is used as a derivation attribute, the derivation
           will not have the correct dependencies in its inputDrvs and
           inputSrcs.

           The semantics of the context is as follows: when a string
           with context C is used as a derivation attribute, then the
           derivations in C will be added to the inputDrvs of the
           derivation, and the other store paths in C will be added to
           the inputSrcs of the derivations.

           For canonicity, the store paths should be in sorted order. */
        struct {
            const char * s;
            const char * * context; // must be in sorted order
        } string;

        const char * path;
        Bindings * attrs;
        struct {
            size_t size;
            Value * * elems;
        } bigList;
        Value * smallList[2];
        struct {
            Env * env;
            Expr * expr;
        } thunk;
        struct {
            Value * left, * right;
        } app;
        struct {
            Env * env;
            ExprLambda * fun;
        } lambda;
        PrimOp * primOp;
        struct {
            Value * left, * right;
        } primOpApp;
        ExternalValueBase * external;
        NixFloat fpoint;
    };

    bool isList() const
    {
        return type == tList1 || type == tList2 || type == tListN;
    }

    Value * * listElems()
    {
        return type == tList1 || type == tList2 ? smallList : bigList.elems;
    }

    const Value * const * listElems() const
    {
        return type == tList1 || type == tList2 ? smallList : bigList.elems;
    }

    size_t listSize() const
    {
        return type == tList1 ? 1 : type == tList2 ? 2 : bigList.size;
    }
};


/* After overwriting an app node, be sure to clear pointers in the
   Value to ensure that the target isn't kept alive unnecessarily. */
static inline void clearValue(Value & v)
{
    v.app.left = v.app.right = 0;
}


static inline void mkInt(Value & v, NixInt n)
{
    clearValue(v);
    v.type = tInt;
    v.integer = n;
}


static inline void mkFloat(Value & v, NixFloat n)
{
    clearValue(v);
    v.type = tFloat;
    v.fpoint = n;
}


static inline void mkBool(Value & v, bool b)
{
    clearValue(v);
    v.type = tBool;
    v.boolean = b;
}


static inline void mkNull(Value & v)
{
    clearValue(v);
    v.type = tNull;
}


static inline void mkApp(Value & v, Value & left, Value & right)
{
    v.type = tApp;
    v.app.left = &left;
    v.app.right = &right;
}


static inline void mkPrimOpApp(Value & v, Value & left, Value & right)
{
    v.type = tPrimOpApp;
    v.app.left = &left;
    v.app.right = &right;
}


static inline void mkStringNoCopy(Value & v, const char * s)
{
    v.type = tString;
    v.string.s = s;
    v.string.context = 0;
}


static inline void mkString(Value & v, const Symbol & s)
{
    mkStringNoCopy(v, ((const string &) s).c_str());
}


void mkString(Value & v, const char * s);


static inline void mkPathNoCopy(Value & v, const char * s)
{
    clearValue(v);
    v.type = tPath;
    v.path = s;
}


void mkPath(Value & v, const char * s);


/* Compute the size in bytes of the given value, including all values
   and environments reachable from it. Static expressions (Exprs) are
   not included. */
size_t valueSize(Value & v);


#if HAVE_BOEHMGC
typedef std::vector<Value *, gc_allocator<Value *> > ValueVector;
typedef std::map<Symbol, Value *, std::less<Symbol>, gc_allocator<std::pair<const Symbol, Value *> > > ValueMap;
#else
typedef std::vector<Value *> ValueVector;
typedef std::map<Symbol, Value *> ValueMap;
#endif


}
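
To make the small/big list split concrete, a sketch of what the accessors above see (allocation and GC concerns omitted; illustrative only):

static void listDemo(nix::Value & v, nix::Value & elem)
{
    v.type = nix::tList1;   // one- and two-element lists live inline
    v.smallList[0] = &elem;
    // now v.isList() && v.listSize() == 1 && v.listElems()[0] == &elem;
    // a tListN list would set v.bigList.size and v.bigList.elems instead.
}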
56
third_party/nix/src/libmain/common-args.cc
vendored
Normal file
@@ -0,0 +1,56 @@
#include "common-args.hh"
#include "globals.hh"

namespace nix {

MixCommonArgs::MixCommonArgs(const string & programName)
    : programName(programName)
{
    mkFlag()
        .longName("verbose")
        .shortName('v')
        .description("increase verbosity level")
        .handler([]() { verbosity = (Verbosity) (verbosity + 1); });

    mkFlag()
        .longName("quiet")
        .description("decrease verbosity level")
        .handler([]() { verbosity = verbosity > lvlError ? (Verbosity) (verbosity - 1) : lvlError; });

    mkFlag()
        .longName("debug")
        .description("enable debug output")
        .handler([]() { verbosity = lvlDebug; });

    mkFlag()
        .longName("option")
        .labels({"name", "value"})
        .description("set a Nix configuration option (overriding nix.conf)")
        .arity(2)
        .handler([](std::vector<std::string> ss) {
            try {
                globalConfig.set(ss[0], ss[1]);
            } catch (UsageError & e) {
                warn(e.what());
            }
        });

    mkFlag()
        .longName("max-jobs")
        .shortName('j')
        .label("jobs")
        .description("maximum number of parallel builds")
        .handler([=](std::string s) {
            settings.set("max-jobs", s);
        });

    std::string cat = "config";
    globalConfig.convertToArgs(*this, cat);

    // Backward compatibility hack: nix-env already had a --system flag.
    if (programName == "nix-env") longFlags.erase("system");

    hiddenCategories.insert(cat);
}

}
33
third_party/nix/src/libmain/common-args.hh
vendored
Normal file
@@ -0,0 +1,33 @@
#pragma once

#include "args.hh"

namespace nix {

struct MixCommonArgs : virtual Args
{
    string programName;
    MixCommonArgs(const string & programName);
};

struct MixDryRun : virtual Args
{
    bool dryRun = false;

    MixDryRun()
    {
        mkFlag(0, "dry-run", "show what this command would do without doing it", &dryRun);
    }
};

struct MixJSON : virtual Args
{
    bool json = false;

    MixJSON()
    {
        mkFlag(0, "json", "produce JSON output", &json);
    }
};

}
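
A hedged sketch of how a tool composes these mixins with the fluent mkFlag() builder seen in common-args.cc above (the program name, flag, and struct are invented):

struct DemoArgs : nix::MixCommonArgs, nix::MixDryRun
{
    std::string target;

    DemoArgs() : nix::MixCommonArgs("nix-demo")
    {
        mkFlag()
            .longName("target")
            .label("name")
            .description("what to operate on")
            .dest(&target);
    }
};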
15
third_party/nix/src/libmain/local.mk
vendored
Normal file
@@ -0,0 +1,15 @@
libraries += libmain

libmain_NAME = libnixmain

libmain_DIR := $(d)

libmain_SOURCES := $(wildcard $(d)/*.cc)

libmain_LDFLAGS = $(OPENSSL_LIBS)

libmain_LIBS = libstore libutil

libmain_ALLOW_UNDEFINED = 1

$(eval $(call install-file-in, $(d)/nix-main.pc, $(prefix)/lib/pkgconfig, 0644))
9
third_party/nix/src/libmain/nix-main.pc.in
vendored
Normal file
@@ -0,0 +1,9 @@
prefix=@prefix@
libdir=@libdir@
includedir=@includedir@

Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixmain
Cflags: -I${includedir}/nix -std=c++17
381
third_party/nix/src/libmain/shared.cc
vendored
Normal file
@ -0,0 +1,381 @@
|
|||
#include "globals.hh"
|
||||
#include "shared.hh"
|
||||
#include "store-api.hh"
|
||||
#include "util.hh"
|
||||
|
||||
#include <algorithm>
|
||||
#include <cctype>
|
||||
#include <exception>
|
||||
#include <iostream>
|
||||
#include <mutex>
|
||||
|
||||
#include <cstdlib>
|
||||
#include <sys/time.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
#include <signal.h>
|
||||
|
||||
#include <openssl/crypto.h>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static bool gcWarning = true;
|
||||
|
||||
void printGCWarning()
|
||||
{
|
||||
if (!gcWarning) return;
|
||||
static bool haveWarned = false;
|
||||
warnOnce(haveWarned,
|
||||
"you did not specify '--add-root'; "
|
||||
"the result might be removed by the garbage collector");
|
||||
}
|
||||
|
||||
|
||||
void printMissing(ref<Store> store, const PathSet & paths, Verbosity lvl)
|
||||
{
|
||||
unsigned long long downloadSize, narSize;
|
||||
PathSet willBuild, willSubstitute, unknown;
|
||||
store->queryMissing(paths, willBuild, willSubstitute, unknown, downloadSize, narSize);
|
||||
printMissing(store, willBuild, willSubstitute, unknown, downloadSize, narSize, lvl);
|
||||
}
|
||||
|
||||
|
||||
void printMissing(ref<Store> store, const PathSet & willBuild,
|
||||
const PathSet & willSubstitute, const PathSet & unknown,
|
||||
unsigned long long downloadSize, unsigned long long narSize, Verbosity lvl)
|
||||
{
|
||||
if (!willBuild.empty()) {
|
||||
printMsg(lvl, "these derivations will be built:");
|
||||
Paths sorted = store->topoSortPaths(willBuild);
|
||||
reverse(sorted.begin(), sorted.end());
|
||||
for (auto & i : sorted)
|
||||
printMsg(lvl, fmt(" %s", i));
|
||||
}
|
||||
|
||||
if (!willSubstitute.empty()) {
|
||||
printMsg(lvl, fmt("these paths will be fetched (%.2f MiB download, %.2f MiB unpacked):",
|
||||
downloadSize / (1024.0 * 1024.0),
|
||||
narSize / (1024.0 * 1024.0)));
|
||||
for (auto & i : willSubstitute)
|
||||
printMsg(lvl, fmt(" %s", i));
|
||||
}
|
||||
|
||||
if (!unknown.empty()) {
|
||||
printMsg(lvl, fmt("don't know how to build these paths%s:",
|
||||
(settings.readOnlyMode ? " (may be caused by read-only store access)" : "")));
|
||||
for (auto & i : unknown)
|
||||
printMsg(lvl, fmt(" %s", i));
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
string getArg(const string & opt,
|
||||
Strings::iterator & i, const Strings::iterator & end)
|
||||
{
|
||||
++i;
|
||||
if (i == end) throw UsageError(format("'%1%' requires an argument") % opt);
|
||||
return *i;
|
||||
}
|
||||
|
||||
|
||||
#if OPENSSL_VERSION_NUMBER < 0x10101000L
|
||||
/* OpenSSL is not thread-safe by default - it will randomly crash
|
||||
unless the user supplies a mutex locking function. So let's do
|
||||
that. */
|
||||
static std::vector<std::mutex> opensslLocks;
|
||||
|
||||
static void opensslLockCallback(int mode, int type, const char * file, int line)
|
||||
{
|
||||
if (mode & CRYPTO_LOCK)
|
||||
opensslLocks[type].lock();
|
||||
else
|
||||
opensslLocks[type].unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
static void sigHandler(int signo) { }
|
||||
|
||||
|
||||
void initNix()
{
    /* Turn on buffering for cerr. */
#if HAVE_PUBSETBUF
    static char buf[1024];
    std::cerr.rdbuf()->pubsetbuf(buf, sizeof(buf));
#endif

#if OPENSSL_VERSION_NUMBER < 0x10101000L
    /* Initialise OpenSSL locking. */
    opensslLocks = std::vector<std::mutex>(CRYPTO_num_locks());
    CRYPTO_set_locking_callback(opensslLockCallback);
#endif

    loadConfFile();

    startSignalHandlerThread();

    /* Reset SIGCHLD to its default. */
    struct sigaction act;
    sigemptyset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    if (sigaction(SIGCHLD, &act, 0))
        throw SysError("resetting SIGCHLD");

    /* Install a dummy SIGUSR1 handler for use with pthread_kill(). */
    act.sa_handler = sigHandler;
    if (sigaction(SIGUSR1, &act, 0)) throw SysError("handling SIGUSR1");

#if __APPLE__
    /* HACK: on darwin, we can't use sigprocmask with SIGWINCH.
     * Instead, add a dummy sigaction handler, and signalHandlerThread
     * can handle the rest. */
    struct sigaction sa;
    sa.sa_handler = sigHandler;
    if (sigaction(SIGWINCH, &sa, 0)) throw SysError("handling SIGWINCH");
#endif

    /* Register a SIGSEGV handler to detect stack overflows. */
    detectStackOverflow();

    /* There is no privacy in the Nix system ;-) At least not for
       now. In particular, store objects should be readable by
       everybody. */
    umask(0022);

    /* Initialise the PRNG. */
    struct timeval tv;
    gettimeofday(&tv, 0);
    srandom(tv.tv_usec);

    /* On macOS, don't use the per-session TMPDIR (as set e.g. by
       sshd). This breaks build users because they don't have access
       to the TMPDIR, in particular in ‘nix-store --serve’. */
#if __APPLE__
    if (getuid() == 0 && hasPrefix(getEnv("TMPDIR"), "/var/folders/"))
        unsetenv("TMPDIR");
#endif
}


LegacyArgs::LegacyArgs(const std::string & programName,
    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
    : MixCommonArgs(programName), parseArg(parseArg)
{
    mkFlag()
        .longName("no-build-output")
        .shortName('Q')
        .description("do not show build output")
        .set(&settings.verboseBuild, false);

    mkFlag()
        .longName("keep-failed")
        .shortName('K')
        .description("keep temporary directories of failed builds")
        .set(&(bool&) settings.keepFailed, true);

    mkFlag()
        .longName("keep-going")
        .shortName('k')
        .description("keep going after a build fails")
        .set(&(bool&) settings.keepGoing, true);

    mkFlag()
        .longName("fallback")
        .description("build from source if substitution fails")
        .set(&(bool&) settings.tryFallback, true);

    auto intSettingAlias = [&](char shortName, const std::string & longName,
        const std::string & description, const std::string & dest) {
        mkFlag<unsigned int>(shortName, longName, description, [=](unsigned int n) {
            settings.set(dest, std::to_string(n));
        });
    };

    intSettingAlias(0, "cores", "maximum number of CPU cores to use inside a build", "cores");
    intSettingAlias(0, "max-silent-time", "number of seconds of silence before a build is killed", "max-silent-time");
    intSettingAlias(0, "timeout", "number of seconds before a build is killed", "timeout");

    mkFlag(0, "readonly-mode", "do not write to the Nix store",
        &settings.readOnlyMode);

    mkFlag(0, "no-gc-warning", "disable warning about not using '--add-root'",
        &gcWarning, false);

    mkFlag()
        .longName("store")
        .label("store-uri")
        .description("URI of the Nix store to use")
        .dest(&(std::string&) settings.storeUri);
}


bool LegacyArgs::processFlag(Strings::iterator & pos, Strings::iterator end)
{
    if (MixCommonArgs::processFlag(pos, end)) return true;
    bool res = parseArg(pos, end);
    if (res) ++pos;
    return res;
}


bool LegacyArgs::processArgs(const Strings & args, bool finish)
{
    if (args.empty()) return true;
    assert(args.size() == 1);
    Strings ss(args);
    auto pos = ss.begin();
    if (!parseArg(pos, ss.end()))
        throw UsageError(format("unexpected argument '%1%'") % args.front());
    return true;
}


void parseCmdLine(int argc, char * * argv,
    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
{
    parseCmdLine(baseNameOf(argv[0]), argvToStrings(argc, argv), parseArg);
}


void parseCmdLine(const string & programName, const Strings & args,
    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg)
{
    LegacyArgs(programName, parseArg).parseCmdline(args);
}


void printVersion(const string & programName)
{
    std::cout << format("%1% (Nix) %2%") % programName % nixVersion << std::endl;
    if (verbosity > lvlInfo) {
        Strings cfg;
#if HAVE_BOEHMGC
        cfg.push_back("gc");
#endif
#if HAVE_SODIUM
        cfg.push_back("signed-caches");
#endif
        std::cout << "Features: " << concatStringsSep(", ", cfg) << "\n";
        std::cout << "Configuration file: " << settings.nixConfDir + "/nix.conf" << "\n";
        std::cout << "Store directory: " << settings.nixStore << "\n";
        std::cout << "State directory: " << settings.nixStateDir << "\n";
    }
    throw Exit();
}


void showManPage(const string & name)
{
    restoreSignals();
    setenv("MANPATH", settings.nixManDir.c_str(), 1);
    execlp("man", "man", name.c_str(), nullptr);
    throw SysError(format("command 'man %1%' failed") % name.c_str());
}


int handleExceptions(const string & programName, std::function<void()> fun)
{
    ReceiveInterrupts receiveInterrupts; // FIXME: need better place for this

    string error = ANSI_RED "error:" ANSI_NORMAL " ";
    try {
        try {
            fun();
        } catch (...) {
            /* Subtle: we have to make sure that any `interrupted'
               condition is discharged before we reach printMsg()
               below, since otherwise it will throw an (uncaught)
               exception. */
            setInterruptThrown();
            throw;
        }
    } catch (Exit & e) {
        return e.status;
    } catch (UsageError & e) {
        printError(
            format(error + "%1%\nTry '%2% --help' for more information.")
            % e.what() % programName);
        return 1;
    } catch (BaseError & e) {
        printError(format(error + "%1%%2%") % (settings.showTrace ? e.prefix() : "") % e.msg());
        if (e.prefix() != "" && !settings.showTrace)
            printError("(use '--show-trace' to show detailed location information)");
        return e.status;
    } catch (std::bad_alloc & e) {
        printError(error + "out of memory");
        return 1;
    } catch (std::exception & e) {
        printError(error + e.what());
        return 1;
    }

    return 0;
}
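
/* Illustrative sketch, not part of the vendored file: a typical legacy
   Nix entry point funnels its whole body through handleExceptions() so
   that Exit, UsageError and BaseError are mapped to exit codes as
   above. The function name "exampleMain" is a made-up placeholder. */
static int exampleMain(int argc, char * * argv)
{
    return handleExceptions(argv[0], [&]() {
        initNix();
        /* ... parse argv and do the actual work ... */
    });
}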


RunPager::RunPager()
{
    if (!isatty(STDOUT_FILENO)) return;
    char * pager = getenv("NIX_PAGER");
    if (!pager) pager = getenv("PAGER");
    if (pager && ((string) pager == "" || (string) pager == "cat")) return;

    Pipe toPager;
    toPager.create();

    pid = startProcess([&]() {
        if (dup2(toPager.readSide.get(), STDIN_FILENO) == -1)
            throw SysError("dupping stdin");
        if (!getenv("LESS"))
            setenv("LESS", "FRSXMK", 1);
        restoreSignals();
        if (pager)
            execl("/bin/sh", "sh", "-c", pager, nullptr);
        execlp("pager", "pager", nullptr);
        execlp("less", "less", nullptr);
        execlp("more", "more", nullptr);
        throw SysError(format("executing '%1%'") % pager);
    });

    pid.setKillSignal(SIGINT);

    if (dup2(toPager.writeSide.get(), STDOUT_FILENO) == -1)
        throw SysError("dupping stdout");
}


RunPager::~RunPager()
{
    try {
        if (pid != -1) {
            std::cout.flush();
            close(STDOUT_FILENO);
            pid.wait();
        }
    } catch (...) {
        ignoreException();
    }
}


string showBytes(unsigned long long bytes)
{
    return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
}


PrintFreed::~PrintFreed()
{
    if (show)
        std::cout << format("%1% store paths deleted, %2% freed\n")
            % results.paths.size()
            % showBytes(results.bytesFreed);
}

Exit::~Exit() { }

}
126
third_party/nix/src/libmain/shared.hh
vendored
Normal file

@@ -0,0 +1,126 @@
#pragma once

#include "util.hh"
#include "args.hh"
#include "common-args.hh"

#include <signal.h>

#include <locale>


namespace nix {

class Exit : public std::exception
{
public:
    int status;
    Exit() : status(0) { }
    Exit(int status) : status(status) { }
    virtual ~Exit();
};

int handleExceptions(const string & programName, std::function<void()> fun);

/* Don't forget to call initPlugins() after settings are initialized! */
void initNix();

void parseCmdLine(int argc, char * * argv,
    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg);

void parseCmdLine(const string & programName, const Strings & args,
    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg);

void printVersion(const string & programName);

/* Ugh. No better place to put this. */
void printGCWarning();

class Store;

void printMissing(ref<Store> store, const PathSet & paths, Verbosity lvl = lvlInfo);

void printMissing(ref<Store> store, const PathSet & willBuild,
    const PathSet & willSubstitute, const PathSet & unknown,
    unsigned long long downloadSize, unsigned long long narSize, Verbosity lvl = lvlInfo);

string getArg(const string & opt,
    Strings::iterator & i, const Strings::iterator & end);

template<class N> N getIntArg(const string & opt,
    Strings::iterator & i, const Strings::iterator & end, bool allowUnit)
{
    ++i;
    if (i == end) throw UsageError(format("'%1%' requires an argument") % opt);
    string s = *i;
    N multiplier = 1;
    if (allowUnit && !s.empty()) {
        char u = std::toupper(*s.rbegin());
        if (std::isalpha(u)) {
            if (u == 'K') multiplier = 1ULL << 10;
            else if (u == 'M') multiplier = 1ULL << 20;
            else if (u == 'G') multiplier = 1ULL << 30;
            else if (u == 'T') multiplier = 1ULL << 40;
            else throw UsageError(format("invalid unit specifier '%1%'") % u);
            s.resize(s.size() - 1);
        }
    }
    N n;
    if (!string2Int(s, n))
        throw UsageError(format("'%1%' requires an integer argument") % opt);
    return n * multiplier;
}
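
/* Illustrative usage, not part of the vendored file: with allowUnit
   set, getIntArg() accepts binary-unit suffixes, so "16K" parses to
   16384. The option name and argument list below are made up. */
inline size_t exampleGetIntArg()
{
    Strings args = {"--example-size", "16K"};
    auto i = args.begin();
    /* getIntArg() advances the iterator past the option to its value. */
    return getIntArg<size_t>(*i, i, args.end(), true); /* == 16384 */
}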


struct LegacyArgs : public MixCommonArgs
{
    std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg;

    LegacyArgs(const std::string & programName,
        std::function<bool(Strings::iterator & arg, const Strings::iterator & end)> parseArg);

    bool processFlag(Strings::iterator & pos, Strings::iterator end) override;

    bool processArgs(const Strings & args, bool finish) override;
};


/* Show the manual page for the specified program. */
void showManPage(const string & name);

/* The constructor of this class starts a pager if stdout is a
   terminal and $PAGER is set. Stdout is redirected to the pager. */
class RunPager
{
public:
    RunPager();
    ~RunPager();

private:
    Pid pid;
};
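
/* Illustrative sketch, not part of the vendored file: constructing a
   RunPager redirects stdout into the pager for the lifetime of the
   scope; the destructor flushes stdout and waits for the pager. */
inline void examplePagedOutput()
{
    RunPager pager;
    std::cout << "this line may be shown through $PAGER\n";
} /* ~RunPager() runs here */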

extern volatile ::sig_atomic_t blockInt;


/* GC helpers. */

string showBytes(unsigned long long bytes);

struct GCResults;

struct PrintFreed
{
    bool show;
    const GCResults & results;
    PrintFreed(bool show, const GCResults & results)
        : show(show), results(results) { }
    ~PrintFreed();
};


/* Install a SIGSEGV handler to detect stack overflows. */
void detectStackOverflow();


}
71
third_party/nix/src/libmain/stack.cc
vendored
Normal file

@@ -0,0 +1,71 @@
#include "types.hh"
|
||||
|
||||
#include <cstring>
|
||||
#include <cstddef>
|
||||
#include <cstdlib>
|
||||
|
||||
#include <unistd.h>
|
||||
#include <signal.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static void sigsegvHandler(int signo, siginfo_t * info, void * ctx)
|
||||
{
|
||||
/* Detect stack overflows by comparing the faulting address with
|
||||
the stack pointer. Unfortunately, getting the stack pointer is
|
||||
not portable. */
|
||||
bool haveSP = true;
|
||||
char * sp = 0;
|
||||
#if defined(__x86_64__) && defined(REG_RSP)
|
||||
sp = (char *) ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_RSP];
|
||||
#elif defined(REG_ESP)
|
||||
sp = (char *) ((ucontext_t *) ctx)->uc_mcontext.gregs[REG_ESP];
|
||||
#else
|
||||
haveSP = false;
|
||||
#endif
|
||||
|
||||
if (haveSP) {
|
||||
ptrdiff_t diff = (char *) info->si_addr - sp;
|
||||
if (diff < 0) diff = -diff;
|
||||
if (diff < 4096) {
|
||||
char msg[] = "error: stack overflow (possible infinite recursion)\n";
|
||||
[[gnu::unused]] auto res = write(2, msg, strlen(msg));
|
||||
_exit(1); // maybe abort instead?
|
||||
}
|
||||
}
|
||||
|
||||
/* Restore default behaviour (i.e. segfault and dump core). */
|
||||
struct sigaction act;
|
||||
sigfillset(&act.sa_mask);
|
||||
act.sa_handler = SIG_DFL;
|
||||
act.sa_flags = 0;
|
||||
if (sigaction(SIGSEGV, &act, 0)) abort();
|
||||
}
|
||||
|
||||
|
||||
void detectStackOverflow()
|
||||
{
|
||||
#if defined(SA_SIGINFO) && defined (SA_ONSTACK)
|
||||
/* Install a SIGSEGV handler to detect stack overflows. This
|
||||
requires an alternative stack, otherwise the signal cannot be
|
||||
delivered when we're out of stack space. */
|
||||
stack_t stack;
|
||||
stack.ss_size = 4096 * 4 + MINSIGSTKSZ;
|
||||
static auto stackBuf = std::make_unique<std::vector<char>>(stack.ss_size);
|
||||
stack.ss_sp = stackBuf->data();
|
||||
if (!stack.ss_sp) throw Error("cannot allocate alternative stack");
|
||||
stack.ss_flags = 0;
|
||||
if (sigaltstack(&stack, 0) == -1) throw SysError("cannot set alternative stack");
|
||||
|
||||
struct sigaction act;
|
||||
sigfillset(&act.sa_mask);
|
||||
act.sa_sigaction = sigsegvHandler;
|
||||
act.sa_flags = SA_SIGINFO | SA_ONSTACK;
|
||||
if (sigaction(SIGSEGV, &act, 0))
|
||||
throw SysError("resetting SIGSEGV");
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
361
third_party/nix/src/libstore/binary-cache-store.cc
vendored
Normal file

@@ -0,0 +1,361 @@
#include "archive.hh"
|
||||
#include "binary-cache-store.hh"
|
||||
#include "compression.hh"
|
||||
#include "derivations.hh"
|
||||
#include "fs-accessor.hh"
|
||||
#include "globals.hh"
|
||||
#include "nar-info.hh"
|
||||
#include "sync.hh"
|
||||
#include "remote-fs-accessor.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
#include "nar-accessor.hh"
|
||||
#include "json.hh"
|
||||
|
||||
#include <chrono>
|
||||
|
||||
#include <future>
|
||||
|
||||
namespace nix {
|
||||
|
||||
BinaryCacheStore::BinaryCacheStore(const Params & params)
|
||||
: Store(params)
|
||||
{
|
||||
if (secretKeyFile != "")
|
||||
secretKey = std::unique_ptr<SecretKey>(new SecretKey(readFile(secretKeyFile)));
|
||||
|
||||
StringSink sink;
|
||||
sink << narVersionMagic1;
|
||||
narMagic = *sink.s;
|
||||
}
|
||||
|
||||
void BinaryCacheStore::init()
|
||||
{
|
||||
std::string cacheInfoFile = "nix-cache-info";
|
||||
|
||||
auto cacheInfo = getFile(cacheInfoFile);
|
||||
if (!cacheInfo) {
|
||||
upsertFile(cacheInfoFile, "StoreDir: " + storeDir + "\n", "text/x-nix-cache-info");
|
||||
} else {
|
||||
for (auto & line : tokenizeString<Strings>(*cacheInfo, "\n")) {
|
||||
size_t colon = line.find(':');
|
||||
if (colon == std::string::npos) continue;
|
||||
auto name = line.substr(0, colon);
|
||||
auto value = trim(line.substr(colon + 1, std::string::npos));
|
||||
if (name == "StoreDir") {
|
||||
if (value != storeDir)
|
||||
throw Error(format("binary cache '%s' is for Nix stores with prefix '%s', not '%s'")
|
||||
% getUri() % value % storeDir);
|
||||
} else if (name == "WantMassQuery") {
|
||||
wantMassQuery_ = value == "1";
|
||||
} else if (name == "Priority") {
|
||||
string2Int(value, priority);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
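
/* Illustrative example, not part of the vendored file: a typical
   "nix-cache-info" file, as parsed above, looks like

       StoreDir: /nix/store
       WantMassQuery: 1
       Priority: 40

   where a Priority of 40 would rank this cache ahead of the default
   priority of 50 set below in the header. */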

void BinaryCacheStore::getFile(const std::string & path,
    Callback<std::shared_ptr<std::string>> callback) noexcept
{
    try {
        callback(getFile(path));
    } catch (...) { callback.rethrow(); }
}

void BinaryCacheStore::getFile(const std::string & path, Sink & sink)
{
    std::promise<std::shared_ptr<std::string>> promise;
    getFile(path,
        {[&](std::future<std::shared_ptr<std::string>> result) {
            try {
                promise.set_value(result.get());
            } catch (...) {
                promise.set_exception(std::current_exception());
            }
        }});
    auto data = promise.get_future().get();
    sink((unsigned char *) data->data(), data->size());
}

std::shared_ptr<std::string> BinaryCacheStore::getFile(const std::string & path)
{
    StringSink sink;
    try {
        getFile(path, sink);
    } catch (NoSuchBinaryCacheFile &) {
        return nullptr;
    }
    return sink.s;
}

Path BinaryCacheStore::narInfoFileFor(const Path & storePath)
{
    assertStorePath(storePath);
    return storePathToHash(storePath) + ".narinfo";
}

void BinaryCacheStore::writeNarInfo(ref<NarInfo> narInfo)
{
    auto narInfoFile = narInfoFileFor(narInfo->path);

    upsertFile(narInfoFile, narInfo->to_string(), "text/x-nix-narinfo");

    auto hashPart = storePathToHash(narInfo->path);

    {
        auto state_(state.lock());
        state_->pathInfoCache.upsert(hashPart, std::shared_ptr<NarInfo>(narInfo));
    }

    if (diskCache)
        diskCache->upsertNarInfo(getUri(), hashPart, std::shared_ptr<NarInfo>(narInfo));
}
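
/* Illustrative example, not part of the vendored file: the .narinfo
   written above is a key/value file along these lines (the hashes,
   sizes and signature here are made up):

       StorePath: /nix/store/<hash>-example-1.0
       URL: nar/<filehash>.nar.xz
       Compression: xz
       FileHash: sha256:<filehash>
       FileSize: 4242
       NarHash: sha256:<narhash>
       NarSize: 13370
       References: <hash>-example-1.0 <hash>-libfoo-2.1
       Sig: cache.example.org-1:<base64 signature>
*/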

void BinaryCacheStore::addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
    RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
{
    if (!repair && isValidPath(info.path)) return;

    /* Verify that all references are valid. This may do some .narinfo
       reads, but typically they'll already be cached. */
    for (auto & ref : info.references)
        try {
            if (ref != info.path)
                queryPathInfo(ref);
        } catch (InvalidPath &) {
            throw Error(format("cannot add '%s' to the binary cache because the reference '%s' is not valid")
                % info.path % ref);
        }

    assert(nar->compare(0, narMagic.size(), narMagic) == 0);

    auto narInfo = make_ref<NarInfo>(info);

    narInfo->narSize = nar->size();
    narInfo->narHash = hashString(htSHA256, *nar);

    if (info.narHash && info.narHash != narInfo->narHash)
        throw Error(format("refusing to copy corrupted path '%1%' to binary cache") % info.path);

    auto accessor_ = std::dynamic_pointer_cast<RemoteFSAccessor>(accessor);

    /* Optionally write a JSON file containing a listing of the
       contents of the NAR. */
    if (writeNARListing) {
        std::ostringstream jsonOut;

        {
            JSONObject jsonRoot(jsonOut);
            jsonRoot.attr("version", 1);

            auto narAccessor = makeNarAccessor(nar);

            if (accessor_)
                accessor_->addToCache(info.path, *nar, narAccessor);

            {
                auto res = jsonRoot.placeholder("root");
                listNar(res, narAccessor, "", true);
            }
        }

        upsertFile(storePathToHash(info.path) + ".ls", jsonOut.str(), "application/json");
    }

    else {
        if (accessor_)
            accessor_->addToCache(info.path, *nar, makeNarAccessor(nar));
    }

    /* Compress the NAR. */
    narInfo->compression = compression;
    auto now1 = std::chrono::steady_clock::now();
    auto narCompressed = compress(compression, *nar, parallelCompression);
    auto now2 = std::chrono::steady_clock::now();
    narInfo->fileHash = hashString(htSHA256, *narCompressed);
    narInfo->fileSize = narCompressed->size();

    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();
    printMsg(lvlTalkative, format("copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% ms) to binary cache")
        % narInfo->path % narInfo->narSize
        % ((1.0 - (double) narCompressed->size() / nar->size()) * 100.0)
        % duration);

    /* Atomically write the NAR file. */
    narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar"
        + (compression == "xz" ? ".xz" :
           compression == "bzip2" ? ".bz2" :
           compression == "br" ? ".br" :
           "");
    if (repair || !fileExists(narInfo->url)) {
        stats.narWrite++;
        upsertFile(narInfo->url, *narCompressed, "application/x-nix-nar");
    } else
        stats.narWriteAverted++;

    stats.narWriteBytes += nar->size();
    stats.narWriteCompressedBytes += narCompressed->size();
    stats.narWriteCompressionTimeMs += duration;

    /* Atomically write the NAR info file.*/
    if (secretKey) narInfo->sign(*secretKey);

    writeNarInfo(narInfo);

    stats.narInfoWrite++;
}

bool BinaryCacheStore::isValidPathUncached(const Path & storePath)
{
    // FIXME: this only checks whether a .narinfo with a matching hash
    // part exists. So ‘f4kb...-foo’ matches ‘f4kb...-bar’, even
    // though they shouldn't. Not easily fixed.
    return fileExists(narInfoFileFor(storePath));
}

void BinaryCacheStore::narFromPath(const Path & storePath, Sink & sink)
{
    auto info = queryPathInfo(storePath).cast<const NarInfo>();

    uint64_t narSize = 0;

    LambdaSink wrapperSink([&](const unsigned char * data, size_t len) {
        sink(data, len);
        narSize += len;
    });

    auto decompressor = makeDecompressionSink(info->compression, wrapperSink);

    try {
        getFile(info->url, *decompressor);
    } catch (NoSuchBinaryCacheFile & e) {
        throw SubstituteGone(e.what());
    }

    decompressor->finish();

    stats.narRead++;
    //stats.narReadCompressedBytes += nar->size(); // FIXME
    stats.narReadBytes += narSize;
}

void BinaryCacheStore::queryPathInfoUncached(const Path & storePath,
    Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
{
    auto uri = getUri();
    auto act = std::make_shared<Activity>(*logger, lvlTalkative, actQueryPathInfo,
        fmt("querying info about '%s' on '%s'", storePath, uri), Logger::Fields{storePath, uri});
    PushActivity pact(act->id);

    auto narInfoFile = narInfoFileFor(storePath);

    auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

    getFile(narInfoFile,
        {[=](std::future<std::shared_ptr<std::string>> fut) {
            try {
                auto data = fut.get();

                if (!data) return (*callbackPtr)(nullptr);

                stats.narInfoRead++;

                (*callbackPtr)((std::shared_ptr<ValidPathInfo>)
                    std::make_shared<NarInfo>(*this, *data, narInfoFile));

                (void) act; // force Activity into this lambda to ensure it stays alive
            } catch (...) {
                callbackPtr->rethrow();
            }
        }});
}

Path BinaryCacheStore::addToStore(const string & name, const Path & srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
{
    // FIXME: some cut&paste from LocalStore::addToStore().

    /* Read the whole path into memory. This is not a very scalable
       method for very large paths, but `copyPath' is mainly used for
       small files. */
    StringSink sink;
    Hash h;
    if (recursive) {
        dumpPath(srcPath, sink, filter);
        h = hashString(hashAlgo, *sink.s);
    } else {
        auto s = readFile(srcPath);
        dumpString(s, sink);
        h = hashString(hashAlgo, s);
    }

    ValidPathInfo info;
    info.path = makeFixedOutputPath(recursive, h, name);

    addToStore(info, sink.s, repair, CheckSigs, nullptr);

    return info.path;
}

Path BinaryCacheStore::addTextToStore(const string & name, const string & s,
    const PathSet & references, RepairFlag repair)
{
    ValidPathInfo info;
    info.path = computeStorePathForText(name, s, references);
    info.references = references;

    if (repair || !isValidPath(info.path)) {
        StringSink sink;
        dumpString(s, sink);
        addToStore(info, sink.s, repair, CheckSigs, nullptr);
    }

    return info.path;
}

ref<FSAccessor> BinaryCacheStore::getFSAccessor()
{
    return make_ref<RemoteFSAccessor>(ref<Store>(shared_from_this()), localNarCache);
}

void BinaryCacheStore::addSignatures(const Path & storePath, const StringSet & sigs)
{
    /* Note: this is inherently racy since there is no locking on
       binary caches. In particular, with S3 this is unreliable, even
       when addSignatures() is called sequentially on a path, because
       S3 might return an outdated cached version. */

    auto narInfo = make_ref<NarInfo>((NarInfo &) *queryPathInfo(storePath));

    narInfo->sigs.insert(sigs.begin(), sigs.end());

    auto narInfoFile = narInfoFileFor(narInfo->path);

    writeNarInfo(narInfo);
}

std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path & path)
{
    Path drvPath;

    if (isDerivation(path))
        drvPath = path;
    else {
        try {
            auto info = queryPathInfo(path);
            // FIXME: add a "Log" field to .narinfo
            if (info->deriver == "") return nullptr;
            drvPath = info->deriver;
        } catch (InvalidPath &) {
            return nullptr;
        }
    }

    auto logPath = "log/" + baseNameOf(drvPath);

    debug("fetching build log from binary cache '%s/%s'", getUri(), logPath);

    return getFile(logPath);
}

}
115
third_party/nix/src/libstore/binary-cache-store.hh
vendored
Normal file

@@ -0,0 +1,115 @@
#pragma once

#include "crypto.hh"
#include "store-api.hh"

#include "pool.hh"

#include <atomic>

namespace nix {

struct NarInfo;

class BinaryCacheStore : public Store
{
public:

    const Setting<std::string> compression{this, "xz", "compression", "NAR compression method ('xz', 'bzip2', or 'none')"};
    const Setting<bool> writeNARListing{this, false, "write-nar-listing", "whether to write a JSON file listing the files in each NAR"};
    const Setting<Path> secretKeyFile{this, "", "secret-key", "path to secret key used to sign the binary cache"};
    const Setting<Path> localNarCache{this, "", "local-nar-cache", "path to a local cache of NARs"};
    const Setting<bool> parallelCompression{this, false, "parallel-compression",
        "enable multi-threaded compression (currently available for xz only)"};

private:

    std::unique_ptr<SecretKey> secretKey;

protected:

    BinaryCacheStore(const Params & params);

public:

    virtual bool fileExists(const std::string & path) = 0;

    virtual void upsertFile(const std::string & path,
        const std::string & data,
        const std::string & mimeType) = 0;

    /* Note: subclasses must implement at least one of the two
       following getFile() methods. */

    /* Dump the contents of the specified file to a sink. */
    virtual void getFile(const std::string & path, Sink & sink);

    /* Fetch the specified file and call the specified callback with
       the result. A subclass may implement this asynchronously. */
    virtual void getFile(const std::string & path,
        Callback<std::shared_ptr<std::string>> callback) noexcept;

    std::shared_ptr<std::string> getFile(const std::string & path);

protected:

    bool wantMassQuery_ = false;
    int priority = 50;

public:

    virtual void init();

private:

    std::string narMagic;

    std::string narInfoFileFor(const Path & storePath);

    void writeNarInfo(ref<NarInfo> narInfo);

public:

    bool isValidPathUncached(const Path & path) override;

    void queryPathInfoUncached(const Path & path,
        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;

    Path queryPathFromHashPart(const string & hashPart) override
    { unsupported("queryPathFromHashPart"); }

    bool wantMassQuery() override { return wantMassQuery_; }

    void addToStore(const ValidPathInfo & info, const ref<std::string> & nar,
        RepairFlag repair, CheckSigsFlag checkSigs,
        std::shared_ptr<FSAccessor> accessor) override;

    Path addToStore(const string & name, const Path & srcPath,
        bool recursive, HashType hashAlgo,
        PathFilter & filter, RepairFlag repair) override;

    Path addTextToStore(const string & name, const string & s,
        const PathSet & references, RepairFlag repair) override;

    void narFromPath(const Path & path, Sink & sink) override;

    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
        BuildMode buildMode) override
    { unsupported("buildDerivation"); }

    void ensurePath(const Path & path) override
    { unsupported("ensurePath"); }

    ref<FSAccessor> getFSAccessor() override;

    void addSignatures(const Path & storePath, const StringSet & sigs) override;

    std::shared_ptr<std::string> getBuildLog(const Path & path) override;

    int getPriority() override { return priority; }

};

MakeError(NoSuchBinaryCacheFile, Error);

}
4708
third_party/nix/src/libstore/build.cc
vendored
Normal file

File diff suppressed because it is too large

11
third_party/nix/src/libstore/builtins.hh
vendored
Normal file

@@ -0,0 +1,11 @@
#pragma once

#include "derivations.hh"

namespace nix {

// TODO: make pluggable.
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData);
void builtinBuildenv(const BasicDerivation & drv);

}
204
third_party/nix/src/libstore/builtins/buildenv.cc
vendored
Normal file

@@ -0,0 +1,204 @@
#include "builtins.hh"
|
||||
|
||||
#include <sys/stat.h>
|
||||
#include <sys/types.h>
|
||||
#include <fcntl.h>
|
||||
#include <algorithm>
|
||||
|
||||
namespace nix {
|
||||
|
||||
typedef std::map<Path,int> Priorities;
|
||||
|
||||
// FIXME: change into local variables.
|
||||
|
||||
static Priorities priorities;
|
||||
|
||||
static unsigned long symlinks;
|
||||
|
||||
/* For each activated package, create symlinks */
|
||||
static void createLinks(const Path & srcDir, const Path & dstDir, int priority)
|
||||
{
|
||||
DirEntries srcFiles;
|
||||
|
||||
try {
|
||||
srcFiles = readDirectory(srcDir);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == ENOTDIR) {
|
||||
printError("warning: not including '%s' in the user environment because it's not a directory", srcDir);
|
||||
return;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
for (const auto & ent : srcFiles) {
|
||||
if (ent.name[0] == '.')
|
||||
/* not matched by glob */
|
||||
continue;
|
||||
auto srcFile = srcDir + "/" + ent.name;
|
||||
auto dstFile = dstDir + "/" + ent.name;
|
||||
|
||||
struct stat srcSt;
|
||||
try {
|
||||
if (stat(srcFile.c_str(), &srcSt) == -1)
|
||||
throw SysError("getting status of '%1%'", srcFile);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == ENOENT || e.errNo == ENOTDIR) {
|
||||
printError("warning: skipping dangling symlink '%s'", dstFile);
|
||||
continue;
|
||||
}
|
||||
throw;
|
||||
}
|
||||
|
||||
        /* The files below are special-cased so that they don't show up
         * in user profiles, either because they are useless, or
         * because they would cause pointless collisions (e.g., each
         * Python package brings its own
         * `$out/lib/pythonX.Y/site-packages/easy-install.pth'.)
         */
        if (hasSuffix(srcFile, "/propagated-build-inputs") ||
            hasSuffix(srcFile, "/nix-support") ||
            hasSuffix(srcFile, "/perllocal.pod") ||
            hasSuffix(srcFile, "/info/dir") ||
            hasSuffix(srcFile, "/log"))
            continue;

        else if (S_ISDIR(srcSt.st_mode)) {
            struct stat dstSt;
            auto res = lstat(dstFile.c_str(), &dstSt);
            if (res == 0) {
                if (S_ISDIR(dstSt.st_mode)) {
                    createLinks(srcFile, dstFile, priority);
                    continue;
                } else if (S_ISLNK(dstSt.st_mode)) {
                    auto target = canonPath(dstFile, true);
                    if (!S_ISDIR(lstat(target).st_mode))
                        throw Error("collision between '%1%' and non-directory '%2%'", srcFile, target);
                    if (unlink(dstFile.c_str()) == -1)
                        throw SysError(format("unlinking '%1%'") % dstFile);
                    if (mkdir(dstFile.c_str(), 0755) == -1)
                        throw SysError(format("creating directory '%1%'") % dstFile);
                    createLinks(target, dstFile, priorities[dstFile]);
                    createLinks(srcFile, dstFile, priority);
                    continue;
                }
            } else if (errno != ENOENT)
                throw SysError(format("getting status of '%1%'") % dstFile);
        }

        else {
            struct stat dstSt;
            auto res = lstat(dstFile.c_str(), &dstSt);
            if (res == 0) {
                if (S_ISLNK(dstSt.st_mode)) {
                    auto prevPriority = priorities[dstFile];
                    if (prevPriority == priority)
                        throw Error(
                            "packages '%1%' and '%2%' have the same priority %3%; "
                            "use 'nix-env --set-flag priority NUMBER INSTALLED_PKGNAME' "
                            "to change the priority of one of the conflicting packages"
                            " (0 being the highest priority)",
                            srcFile, readLink(dstFile), priority);
                    if (prevPriority < priority)
                        continue;
                    if (unlink(dstFile.c_str()) == -1)
                        throw SysError(format("unlinking '%1%'") % dstFile);
                } else if (S_ISDIR(dstSt.st_mode))
                    throw Error("collision between non-directory '%1%' and directory '%2%'", srcFile, dstFile);
            } else if (errno != ENOENT)
                throw SysError(format("getting status of '%1%'") % dstFile);
        }

        createSymlink(srcFile, dstFile);
        priorities[dstFile] = priority;
        symlinks++;
    }
}

typedef std::set<Path> FileProp;

static FileProp done;
static FileProp postponed = FileProp{};

static Path out;

static void addPkg(const Path & pkgDir, int priority)
{
    if (done.count(pkgDir)) return;
    done.insert(pkgDir);
    createLinks(pkgDir, out, priority);

    try {
        for (const auto & p : tokenizeString<std::vector<string>>(
                readFile(pkgDir + "/nix-support/propagated-user-env-packages"), " \n"))
            if (!done.count(p))
                postponed.insert(p);
    } catch (SysError & e) {
        if (e.errNo != ENOENT && e.errNo != ENOTDIR) throw;
    }
}

struct Package {
    Path path;
    bool active;
    int priority;
    Package(Path path, bool active, int priority) : path{path}, active{active}, priority{priority} {}
};

typedef std::vector<Package> Packages;

void builtinBuildenv(const BasicDerivation & drv)
{
    auto getAttr = [&](const string & name) {
        auto i = drv.env.find(name);
        if (i == drv.env.end()) throw Error("attribute '%s' missing", name);
        return i->second;
    };

    out = getAttr("out");
    createDirs(out);

    /* Convert the stuff we get from the environment back into a
     * coherent data type. */
    Packages pkgs;
    auto derivations = tokenizeString<Strings>(getAttr("derivations"));
    while (!derivations.empty()) {
        /* !!! We're trusting the caller to structure derivations env var correctly */
        auto active = derivations.front(); derivations.pop_front();
        auto priority = stoi(derivations.front()); derivations.pop_front();
        auto outputs = stoi(derivations.front()); derivations.pop_front();
        for (auto n = 0; n < outputs; n++) {
            auto path = derivations.front(); derivations.pop_front();
            pkgs.emplace_back(path, active != "false", priority);
        }
    }
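
    /* Illustrative example, not part of the vendored file: the
     * "derivations" attribute parsed above is a flat list of
     * (active, priority, output-count, paths...) records, e.g.
     *
     *     true 5 1 /nix/store/<hash>-hello-2.10
     *     false 9 2 /nix/store/<hash>-foo /nix/store/<hash>-foo-dev
     *
     * meaning an active "hello" at priority 5 and an inactive
     * two-output "foo" at priority 9 (the names are made up). */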

    /* Symlink to the packages that have been installed explicitly by the
     * user. Process in priority order to reduce unnecessary
     * symlink/unlink steps.
     */
    std::sort(pkgs.begin(), pkgs.end(), [](const Package & a, const Package & b) {
        return a.priority < b.priority || (a.priority == b.priority && a.path < b.path);
    });
    for (const auto & pkg : pkgs)
        if (pkg.active)
            addPkg(pkg.path, pkg.priority);

    /* Symlink to the packages that have been "propagated" by packages
     * installed by the user (i.e., package X declares that it wants Y
     * installed as well). We do these later because they have a lower
     * priority in case of collisions.
     */
    auto priorityCounter = 1000;
    while (!postponed.empty()) {
        auto pkgDirs = postponed;
        postponed = FileProp{};
        for (const auto & pkgDir : pkgDirs)
            addPkg(pkgDir, priorityCounter++);
    }

    printError("created %d symlinks in user environment", symlinks);

    createSymlink(getAttr("manifest"), out + "/manifest.nix");
}

}
78
third_party/nix/src/libstore/builtins/fetchurl.cc
vendored
Normal file

@@ -0,0 +1,78 @@
#include "builtins.hh"
|
||||
#include "download.hh"
|
||||
#include "store-api.hh"
|
||||
#include "archive.hh"
|
||||
#include "compression.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
void builtinFetchurl(const BasicDerivation & drv, const std::string & netrcData)
|
||||
{
|
||||
/* Make the host's netrc data available. Too bad curl requires
|
||||
this to be stored in a file. It would be nice if we could just
|
||||
pass a pointer to the data. */
|
||||
if (netrcData != "") {
|
||||
settings.netrcFile = "netrc";
|
||||
writeFile(settings.netrcFile, netrcData, 0600);
|
||||
}
|
||||
|
||||
auto getAttr = [&](const string & name) {
|
||||
auto i = drv.env.find(name);
|
||||
if (i == drv.env.end()) throw Error(format("attribute '%s' missing") % name);
|
||||
return i->second;
|
||||
};
|
||||
|
||||
Path storePath = getAttr("out");
|
||||
auto mainUrl = getAttr("url");
|
||||
bool unpack = get(drv.env, "unpack", "") == "1";
|
||||
|
||||
/* Note: have to use a fresh downloader here because we're in
|
||||
a forked process. */
|
||||
auto downloader = makeDownloader();
|
||||
|
||||
auto fetch = [&](const std::string & url) {
|
||||
|
||||
auto source = sinkToSource([&](Sink & sink) {
|
||||
|
||||
/* No need to do TLS verification, because we check the hash of
|
||||
the result anyway. */
|
||||
DownloadRequest request(url);
|
||||
request.verifyTLS = false;
|
||||
request.decompress = false;
|
||||
|
||||
auto decompressor = makeDecompressionSink(
|
||||
unpack && hasSuffix(mainUrl, ".xz") ? "xz" : "none", sink);
|
||||
downloader->download(std::move(request), *decompressor);
|
||||
decompressor->finish();
|
||||
});
|
||||
|
||||
if (unpack)
|
||||
restorePath(storePath, *source);
|
||||
else
|
||||
writeFile(storePath, *source);
|
||||
|
||||
auto executable = drv.env.find("executable");
|
||||
if (executable != drv.env.end() && executable->second == "1") {
|
||||
if (chmod(storePath.c_str(), 0755) == -1)
|
||||
throw SysError(format("making '%1%' executable") % storePath);
|
||||
}
|
||||
};
|
||||
|
||||
/* Try the hashed mirrors first. */
|
||||
if (getAttr("outputHashMode") == "flat")
|
||||
for (auto hashedMirror : settings.hashedMirrors.get())
|
||||
try {
|
||||
if (!hasSuffix(hashedMirror, "/")) hashedMirror += '/';
|
||||
auto ht = parseHashType(getAttr("outputHashAlgo"));
|
||||
auto h = Hash(getAttr("outputHash"), ht);
|
||||
fetch(hashedMirror + printHashType(h.type) + "/" + h.to_string(Base16, false));
|
||||
return;
|
||||
} catch (Error & e) {
|
||||
debug(e.what());
|
||||
}
|
||||
|
||||
/* Otherwise try the specified URL. */
|
||||
fetch(mainUrl);
|
||||
}
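
/* Illustrative example, not part of the vendored file: for a flat,
   SHA-256 fixed-output derivation, the hashed-mirror URL constructed
   above has the shape

       http://tarballs.nixos.org/sha256/<base16 hash>

   assuming the default value of the 'hashed-mirrors' setting. */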

}
126
third_party/nix/src/libstore/crypto.cc
vendored
Normal file

@@ -0,0 +1,126 @@
#include "crypto.hh"
|
||||
#include "util.hh"
|
||||
#include "globals.hh"
|
||||
|
||||
#if HAVE_SODIUM
|
||||
#include <sodium.h>
|
||||
#endif
|
||||
|
||||
namespace nix {
|
||||
|
||||
static std::pair<std::string, std::string> split(const string & s)
|
||||
{
|
||||
size_t colon = s.find(':');
|
||||
if (colon == std::string::npos || colon == 0)
|
||||
return {"", ""};
|
||||
return {std::string(s, 0, colon), std::string(s, colon + 1)};
|
||||
}
|
||||
|
||||
Key::Key(const string & s)
|
||||
{
|
||||
auto ss = split(s);
|
||||
|
||||
name = ss.first;
|
||||
key = ss.second;
|
||||
|
||||
if (name == "" || key == "")
|
||||
throw Error("secret key is corrupt");
|
||||
|
||||
key = base64Decode(key);
|
||||
}
|
||||
|
||||
SecretKey::SecretKey(const string & s)
|
||||
: Key(s)
|
||||
{
|
||||
#if HAVE_SODIUM
|
||||
if (key.size() != crypto_sign_SECRETKEYBYTES)
|
||||
throw Error("secret key is not valid");
|
||||
#endif
|
||||
}
|
||||
|
||||
#if !HAVE_SODIUM
|
||||
[[noreturn]] static void noSodium()
|
||||
{
|
||||
throw Error("Nix was not compiled with libsodium, required for signed binary cache support");
|
||||
}
|
||||
#endif
|
||||
|
||||
std::string SecretKey::signDetached(const std::string & data) const
|
||||
{
|
||||
#if HAVE_SODIUM
|
||||
unsigned char sig[crypto_sign_BYTES];
|
||||
unsigned long long sigLen;
|
||||
crypto_sign_detached(sig, &sigLen, (unsigned char *) data.data(), data.size(),
|
||||
(unsigned char *) key.data());
|
||||
return name + ":" + base64Encode(std::string((char *) sig, sigLen));
|
||||
#else
|
||||
noSodium();
|
||||
#endif
|
||||
}
|
||||
|
||||
PublicKey SecretKey::toPublicKey() const
|
||||
{
|
||||
#if HAVE_SODIUM
|
||||
unsigned char pk[crypto_sign_PUBLICKEYBYTES];
|
||||
crypto_sign_ed25519_sk_to_pk(pk, (unsigned char *) key.data());
|
||||
return PublicKey(name, std::string((char *) pk, crypto_sign_PUBLICKEYBYTES));
|
||||
#else
|
||||
noSodium();
|
||||
#endif
|
||||
}
|
||||
|
||||
PublicKey::PublicKey(const string & s)
|
||||
: Key(s)
|
||||
{
|
||||
#if HAVE_SODIUM
|
||||
if (key.size() != crypto_sign_PUBLICKEYBYTES)
|
||||
throw Error("public key is not valid");
|
||||
#endif
|
||||
}
|
||||
|
||||
bool verifyDetached(const std::string & data, const std::string & sig,
|
||||
const PublicKeys & publicKeys)
|
||||
{
|
||||
#if HAVE_SODIUM
|
||||
auto ss = split(sig);
|
||||
|
||||
auto key = publicKeys.find(ss.first);
|
||||
if (key == publicKeys.end()) return false;
|
||||
|
||||
auto sig2 = base64Decode(ss.second);
|
||||
if (sig2.size() != crypto_sign_BYTES)
|
||||
throw Error("signature is not valid");
|
||||
|
||||
return crypto_sign_verify_detached((unsigned char *) sig2.data(),
|
||||
(unsigned char *) data.data(), data.size(),
|
||||
(unsigned char *) key->second.key.data()) == 0;
|
||||
#else
|
||||
noSodium();
|
||||
#endif
|
||||
}
|
||||
|
||||
PublicKeys getDefaultPublicKeys()
|
||||
{
|
||||
PublicKeys publicKeys;
|
||||
|
||||
// FIXME: filter duplicates
|
||||
|
||||
for (auto s : settings.trustedPublicKeys.get()) {
|
||||
PublicKey key(s);
|
||||
publicKeys.emplace(key.name, key);
|
||||
}
|
||||
|
||||
for (auto secretKeyFile : settings.secretKeyFiles.get()) {
|
||||
try {
|
||||
SecretKey secretKey(readFile(secretKeyFile));
|
||||
publicKeys.emplace(secretKey.name, secretKey.toPublicKey());
|
||||
} catch (SysError & e) {
|
||||
/* Ignore unreadable key files. That's normal in a
|
||||
multi-user installation. */
|
||||
}
|
||||
}
|
||||
|
||||
return publicKeys;
|
||||
}
|
||||
|
||||
}
|
||||
54
third_party/nix/src/libstore/crypto.hh
vendored
Normal file

@@ -0,0 +1,54 @@
#pragma once

#include "types.hh"

#include <map>

namespace nix {

struct Key
{
    std::string name;
    std::string key;

    /* Construct Key from a string in the format
       ‘<name>:<key-in-base64>’. */
    Key(const std::string & s);

protected:
    Key(const std::string & name, const std::string & key)
        : name(name), key(key) { }
};
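
/* Illustrative example, not part of the vendored file: keys are
   rendered as '<name>:<key-in-base64>', so a binary cache public key
   looks something like

       cache.example.org-1:<base64-encoded ed25519 key>

   (the name here is made up; the base64 payload is the raw key bytes
   as decoded by Key::Key above). */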

struct PublicKey;

struct SecretKey : Key
{
    SecretKey(const std::string & s);

    /* Return a detached signature of the given string. */
    std::string signDetached(const std::string & s) const;

    PublicKey toPublicKey() const;
};

struct PublicKey : Key
{
    PublicKey(const std::string & data);

private:
    PublicKey(const std::string & name, const std::string & key)
        : Key(name, key) { }
    friend struct SecretKey;
};

typedef std::map<std::string, PublicKey> PublicKeys;

/* Return true iff ‘sig’ is a correct signature over ‘data’ using one
   of the given public keys. */
bool verifyDetached(const std::string & data, const std::string & sig,
    const PublicKeys & publicKeys);

PublicKeys getDefaultPublicKeys();

}
416
third_party/nix/src/libstore/derivations.cc
vendored
Normal file

@@ -0,0 +1,416 @@
#include "derivations.hh"
|
||||
#include "store-api.hh"
|
||||
#include "globals.hh"
|
||||
#include "util.hh"
|
||||
#include "worker-protocol.hh"
|
||||
#include "fs-accessor.hh"
|
||||
#include "istringstream_nocopy.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
void DerivationOutput::parseHashInfo(bool & recursive, Hash & hash) const
|
||||
{
|
||||
recursive = false;
|
||||
string algo = hashAlgo;
|
||||
|
||||
if (string(algo, 0, 2) == "r:") {
|
||||
recursive = true;
|
||||
algo = string(algo, 2);
|
||||
}
|
||||
|
||||
HashType hashType = parseHashType(algo);
|
||||
if (hashType == htUnknown)
|
||||
throw Error(format("unknown hash algorithm '%1%'") % algo);
|
||||
|
||||
hash = Hash(this->hash, hashType);
|
||||
}
|
||||
|
||||
|
||||
Path BasicDerivation::findOutput(const string & id) const
|
||||
{
|
||||
auto i = outputs.find(id);
|
||||
if (i == outputs.end())
|
||||
throw Error(format("derivation has no output '%1%'") % id);
|
||||
return i->second.path;
|
||||
}
|
||||
|
||||
|
||||
bool BasicDerivation::isBuiltin() const
|
||||
{
|
||||
return string(builder, 0, 8) == "builtin:";
|
||||
}
|
||||
|
||||
|
||||
Path writeDerivation(ref<Store> store,
|
||||
const Derivation & drv, const string & name, RepairFlag repair)
|
||||
{
|
||||
PathSet references;
|
||||
references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
|
||||
for (auto & i : drv.inputDrvs)
|
||||
references.insert(i.first);
|
||||
/* Note that the outputs of a derivation are *not* references
|
||||
(that can be missing (of course) and should not necessarily be
|
||||
held during a garbage collection). */
|
||||
string suffix = name + drvExtension;
|
||||
string contents = drv.unparse();
|
||||
return settings.readOnlyMode
|
||||
? store->computeStorePathForText(suffix, contents, references)
|
||||
: store->addTextToStore(suffix, contents, references, repair);
|
||||
}
|
||||
|
||||
|
||||
/* Read string `s' from stream `str'. */
|
||||
static void expect(std::istream & str, const string & s)
|
||||
{
|
||||
char s2[s.size()];
|
||||
str.read(s2, s.size());
|
||||
if (string(s2, s.size()) != s)
|
||||
throw FormatError(format("expected string '%1%'") % s);
|
||||
}
|
||||
|
||||
|
||||
/* Read a C-style string from stream `str'. */
|
||||
static string parseString(std::istream & str)
|
||||
{
|
||||
string res;
|
||||
expect(str, "\"");
|
||||
int c;
|
||||
while ((c = str.get()) != '"')
|
||||
if (c == '\\') {
|
||||
c = str.get();
|
||||
if (c == 'n') res += '\n';
|
||||
else if (c == 'r') res += '\r';
|
||||
else if (c == 't') res += '\t';
|
||||
else res += c;
|
||||
}
|
||||
else res += c;
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
static Path parsePath(std::istream & str)
|
||||
{
|
||||
string s = parseString(str);
|
||||
if (s.size() == 0 || s[0] != '/')
|
||||
throw FormatError(format("bad path '%1%' in derivation") % s);
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
static bool endOfList(std::istream & str)
|
||||
{
|
||||
if (str.peek() == ',') {
|
||||
str.get();
|
||||
return false;
|
||||
}
|
||||
if (str.peek() == ']') {
|
||||
str.get();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
static StringSet parseStrings(std::istream & str, bool arePaths)
|
||||
{
|
||||
StringSet res;
|
||||
while (!endOfList(str))
|
||||
res.insert(arePaths ? parsePath(str) : parseString(str));
|
||||
return res;
|
||||
}
|
||||
|
||||
|
||||
static Derivation parseDerivation(const string & s)
|
||||
{
|
||||
Derivation drv;
|
||||
istringstream_nocopy str(s);
|
||||
expect(str, "Derive([");
|
||||
|
||||
/* Parse the list of outputs. */
|
||||
while (!endOfList(str)) {
|
||||
DerivationOutput out;
|
||||
expect(str, "("); string id = parseString(str);
|
||||
expect(str, ","); out.path = parsePath(str);
|
||||
expect(str, ","); out.hashAlgo = parseString(str);
|
||||
expect(str, ","); out.hash = parseString(str);
|
||||
expect(str, ")");
|
||||
drv.outputs[id] = out;
|
||||
}
|
||||
|
||||
/* Parse the list of input derivations. */
|
||||
expect(str, ",[");
|
||||
while (!endOfList(str)) {
|
||||
expect(str, "(");
|
||||
Path drvPath = parsePath(str);
|
||||
expect(str, ",[");
|
||||
drv.inputDrvs[drvPath] = parseStrings(str, false);
|
||||
expect(str, ")");
|
||||
}
|
||||
|
||||
expect(str, ",["); drv.inputSrcs = parseStrings(str, true);
|
||||
expect(str, ","); drv.platform = parseString(str);
|
||||
expect(str, ","); drv.builder = parseString(str);
|
||||
|
||||
/* Parse the builder arguments. */
|
||||
expect(str, ",[");
|
||||
while (!endOfList(str))
|
||||
drv.args.push_back(parseString(str));
|
||||
|
||||
/* Parse the environment variables. */
|
||||
expect(str, ",[");
|
||||
while (!endOfList(str)) {
|
||||
expect(str, "("); string name = parseString(str);
|
||||
expect(str, ","); string value = parseString(str);
|
||||
expect(str, ")");
|
||||
drv.env[name] = value;
|
||||
}
|
||||
|
||||
expect(str, ")");
|
||||
return drv;
|
||||
}
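
/* Illustrative example, not part of the vendored file: the ATerm
   representation consumed by parseDerivation() above (and produced by
   Derivation::unparse() below) looks like this for a tiny
   single-output derivation, with store hashes elided:

   Derive([("out","/nix/store/<hash>-example","","")],
          [("/nix/store/<hash>-dep.drv",["out"])],
          ["/nix/store/<hash>-builder.sh"],
          "x86_64-linux","/bin/sh",["-e","builder.sh"],
          [("out","/nix/store/<hash>-example")])
*/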
|
||||
|
||||
|
||||
Derivation readDerivation(const Path & drvPath)
|
||||
{
|
||||
try {
|
||||
return parseDerivation(readFile(drvPath));
|
||||
} catch (FormatError & e) {
|
||||
throw Error(format("error parsing derivation '%1%': %2%") % drvPath % e.msg());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
Derivation Store::derivationFromPath(const Path & drvPath)
|
||||
{
|
||||
assertStorePath(drvPath);
|
||||
ensurePath(drvPath);
|
||||
auto accessor = getFSAccessor();
|
||||
try {
|
||||
return parseDerivation(accessor->readFile(drvPath));
|
||||
} catch (FormatError & e) {
|
||||
throw Error(format("error parsing derivation '%1%': %2%") % drvPath % e.msg());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void printString(string & res, const string & s)
|
||||
{
|
||||
res += '"';
|
||||
for (const char * i = s.c_str(); *i; i++)
|
||||
if (*i == '\"' || *i == '\\') { res += "\\"; res += *i; }
|
||||
else if (*i == '\n') res += "\\n";
|
||||
else if (*i == '\r') res += "\\r";
|
||||
else if (*i == '\t') res += "\\t";
|
||||
else res += *i;
|
||||
res += '"';
|
||||
}
|
||||
|
||||
|
||||
template<class ForwardIterator>
|
||||
static void printStrings(string & res, ForwardIterator i, ForwardIterator j)
|
||||
{
|
||||
res += '[';
|
||||
bool first = true;
|
||||
for ( ; i != j; ++i) {
|
||||
if (first) first = false; else res += ',';
|
||||
printString(res, *i);
|
||||
}
|
||||
res += ']';
|
||||
}
|
||||
|
||||
|
||||
string Derivation::unparse() const
|
||||
{
|
||||
string s;
|
||||
s.reserve(65536);
|
||||
s += "Derive([";
|
||||
|
||||
bool first = true;
|
||||
for (auto & i : outputs) {
|
||||
if (first) first = false; else s += ',';
|
||||
s += '('; printString(s, i.first);
|
||||
s += ','; printString(s, i.second.path);
|
||||
s += ','; printString(s, i.second.hashAlgo);
|
||||
s += ','; printString(s, i.second.hash);
|
||||
s += ')';
|
||||
}
|
||||
|
||||
s += "],[";
|
||||
first = true;
|
||||
for (auto & i : inputDrvs) {
|
||||
if (first) first = false; else s += ',';
|
||||
s += '('; printString(s, i.first);
|
||||
s += ','; printStrings(s, i.second.begin(), i.second.end());
|
||||
s += ')';
|
||||
}
|
||||
|
||||
s += "],";
|
||||
printStrings(s, inputSrcs.begin(), inputSrcs.end());
|
||||
|
||||
s += ','; printString(s, platform);
|
||||
s += ','; printString(s, builder);
|
||||
s += ','; printStrings(s, args.begin(), args.end());
|
||||
|
||||
s += ",[";
|
||||
first = true;
|
||||
for (auto & i : env) {
|
||||
if (first) first = false; else s += ',';
|
||||
s += '('; printString(s, i.first);
|
||||
s += ','; printString(s, i.second);
|
||||
s += ')';
|
||||
}
|
||||
|
||||
s += "])";
|
||||
|
||||
return s;
|
||||
}
|
||||


bool isDerivation(const string & fileName)
{
    return hasSuffix(fileName, drvExtension);
}


bool BasicDerivation::isFixedOutput() const
{
    return outputs.size() == 1 &&
        outputs.begin()->first == "out" &&
        outputs.begin()->second.hash != "";
}


DrvHashes drvHashes;


/* Returns the hash of a derivation modulo fixed-output
   subderivations. A fixed-output derivation is a derivation with one
   output (`out') for which an expected hash and hash algorithm are
   specified (using the `outputHash' and `outputHashAlgo'
   attributes). We don't want changes to such derivations to
   propagate upwards through the dependency graph, changing output
   paths everywhere.

   For instance, if we change the url in a call to the `fetchurl'
   function, we do not want to rebuild everything depending on it
   (after all, (the hash of) the file being downloaded is unchanged).
   So the *output paths* should not change. On the other hand, the
   *derivation paths* should change to reflect the new dependency
   graph.

   That's what this function does: it returns a hash which is just the
   hash of the derivation ATerm, except that any input derivation
   paths have been replaced by the result of a recursive call to this
   function, and that for fixed-output derivations we return a hash of
   its output path. */
Hash hashDerivationModulo(Store & store, Derivation drv)
{
    /* Return a fixed hash for fixed-output derivations. */
    if (drv.isFixedOutput()) {
        DerivationOutputs::const_iterator i = drv.outputs.begin();
        return hashString(htSHA256, "fixed:out:"
            + i->second.hashAlgo + ":"
            + i->second.hash + ":"
            + i->second.path);
    }

    /* For other derivations, replace the input paths with recursive
       calls to this function. */
    DerivationInputs inputs2;
    for (auto & i : drv.inputDrvs) {
        Hash h = drvHashes[i.first];
        if (!h) {
            assert(store.isValidPath(i.first));
            Derivation drv2 = readDerivation(store.toRealPath(i.first));
            h = hashDerivationModulo(store, drv2);
            drvHashes[i.first] = h;
        }
        inputs2[h.to_string(Base16, false)] = i.second;
    }
    drv.inputDrvs = inputs2;

    return hashString(htSHA256, drv.unparse());
}
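
/* Sketch of the fixed-output case above, with made-up values: for a
   derivation whose single output "out" has hashAlgo "sha256",
   expected hash <h> and path /nix/store/<p>, the result is simply

     hashString(htSHA256, "fixed:out:sha256:<h>:/nix/store/<p>")

   so two derivations producing the same file hash to the same value
   regardless of how they fetch it. */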


DrvPathWithOutputs parseDrvPathWithOutputs(const string & s)
{
    size_t n = s.find("!");
    return n == s.npos
        ? DrvPathWithOutputs(s, std::set<string>())
        : DrvPathWithOutputs(string(s, 0, n), tokenizeString<std::set<string> >(string(s, n + 1), ","));
}
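
/* Example (made-up path):

     parseDrvPathWithOutputs("/nix/store/abc-foo.drv!out,dev")
       == { "/nix/store/abc-foo.drv", { "dev", "out" } }

   makeDrvPathWithOutputs below is its inverse. */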


Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs)
{
    return outputs.empty()
        ? drvPath
        : drvPath + "!" + concatStringsSep(",", outputs);
}


bool wantOutput(const string & output, const std::set<string> & wanted)
{
    return wanted.empty() || wanted.find(output) != wanted.end();
}


PathSet BasicDerivation::outputPaths() const
{
    PathSet paths;
    for (auto & i : outputs)
        paths.insert(i.second.path);
    return paths;
}


Source & readDerivation(Source & in, Store & store, BasicDerivation & drv)
{
    drv.outputs.clear();
    auto nr = readNum<size_t>(in);
    for (size_t n = 0; n < nr; n++) {
        auto name = readString(in);
        DerivationOutput o;
        in >> o.path >> o.hashAlgo >> o.hash;
        store.assertStorePath(o.path);
        drv.outputs[name] = o;
    }

    drv.inputSrcs = readStorePaths<PathSet>(store, in);
    in >> drv.platform >> drv.builder;
    drv.args = readStrings<Strings>(in);

    nr = readNum<size_t>(in);
    for (size_t n = 0; n < nr; n++) {
        auto key = readString(in);
        auto value = readString(in);
        drv.env[key] = value;
    }

    return in;
}


Sink & operator << (Sink & out, const BasicDerivation & drv)
{
    out << drv.outputs.size();
    for (auto & i : drv.outputs)
        out << i.first << i.second.path << i.second.hashAlgo << i.second.hash;
    out << drv.inputSrcs << drv.platform << drv.builder << drv.args;
    out << drv.env.size();
    for (auto & i : drv.env)
        out << i.first << i.second;
    return out;
}


std::string hashPlaceholder(const std::string & outputName)
{
    // FIXME: memoize?
    return "/" + hashString(htSHA256, "nix-output:" + outputName).to_string(Base32, false);
}


}
117
third_party/nix/src/libstore/derivations.hh
vendored
Normal file
@@ -0,0 +1,117 @@
#pragma once

#include "types.hh"
#include "hash.hh"
#include "store-api.hh"

#include <map>


namespace nix {


/* Extension of derivations in the Nix store. */
const string drvExtension = ".drv";


/* Abstract syntax of derivations. */

struct DerivationOutput
{
    Path path;
    string hashAlgo; /* hash used for expected hash computation */
    string hash; /* expected hash, may be null */
    DerivationOutput()
    {
    }
    DerivationOutput(Path path, string hashAlgo, string hash)
    {
        this->path = path;
        this->hashAlgo = hashAlgo;
        this->hash = hash;
    }
    void parseHashInfo(bool & recursive, Hash & hash) const;
};

typedef std::map<string, DerivationOutput> DerivationOutputs;

/* For inputs that are sub-derivations, we specify exactly which
   output IDs we are interested in. */
typedef std::map<Path, StringSet> DerivationInputs;

typedef std::map<string, string> StringPairs;

struct BasicDerivation
{
    DerivationOutputs outputs; /* keyed on symbolic IDs */
    PathSet inputSrcs; /* inputs that are sources */
    string platform;
    Path builder;
    Strings args;
    StringPairs env;

    virtual ~BasicDerivation() { };

    /* Return the path corresponding to the output identifier `id' in
       the given derivation. */
    Path findOutput(const string & id) const;

    bool isBuiltin() const;

    /* Return true iff this is a fixed-output derivation. */
    bool isFixedOutput() const;

    /* Return the output paths of a derivation. */
    PathSet outputPaths() const;

};

struct Derivation : BasicDerivation
{
    DerivationInputs inputDrvs; /* inputs that are sub-derivations */

    /* Print a derivation. */
    std::string unparse() const;
};


class Store;


/* Write a derivation to the Nix store, and return its path. */
Path writeDerivation(ref<Store> store,
    const Derivation & drv, const string & name, RepairFlag repair = NoRepair);

/* Read a derivation from a file. */
Derivation readDerivation(const Path & drvPath);

/* Check whether a file name ends with the extension for
   derivations. */
bool isDerivation(const string & fileName);

Hash hashDerivationModulo(Store & store, Derivation drv);

/* Memoisation of hashDerivationModulo(). */
typedef std::map<Path, Hash> DrvHashes;

extern DrvHashes drvHashes; // FIXME: global, not thread-safe

/* Split a string specifying a derivation and a set of outputs
   (/nix/store/hash-foo!out1,out2,...) into the derivation path and
   the outputs. */
typedef std::pair<string, std::set<string> > DrvPathWithOutputs;
DrvPathWithOutputs parseDrvPathWithOutputs(const string & s);

Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs);

bool wantOutput(const string & output, const std::set<string> & wanted);

struct Source;
struct Sink;

Source & readDerivation(Source & in, Store & store, BasicDerivation & drv);
Sink & operator << (Sink & out, const BasicDerivation & drv);

std::string hashPlaceholder(const std::string & outputName);

}
946
third_party/nix/src/libstore/download.cc
vendored
Normal file
@@ -0,0 +1,946 @@
#include "download.hh"
|
||||
#include "util.hh"
|
||||
#include "globals.hh"
|
||||
#include "hash.hh"
|
||||
#include "store-api.hh"
|
||||
#include "archive.hh"
|
||||
#include "s3.hh"
|
||||
#include "compression.hh"
|
||||
#include "pathlocks.hh"
|
||||
#include "finally.hh"
|
||||
|
||||
#ifdef ENABLE_S3
|
||||
#include <aws/core/client/ClientConfiguration.h>
|
||||
#endif
|
||||
|
||||
#include <unistd.h>
|
||||
#include <fcntl.h>
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
#include <queue>
|
||||
#include <random>
|
||||
#include <thread>
|
||||
|
||||
using namespace std::string_literals;
|
||||
|
||||
namespace nix {
|
||||
|
||||
DownloadSettings downloadSettings;
|
||||
|
||||
static GlobalConfig::Register r1(&downloadSettings);
|
||||
|
||||
std::string resolveUri(const std::string & uri)
|
||||
{
|
||||
if (uri.compare(0, 8, "channel:") == 0)
|
||||
return "https://nixos.org/channels/" + std::string(uri, 8) + "/nixexprs.tar.xz";
|
||||
else
|
||||
return uri;
|
||||
}
|
||||
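
/* E.g. resolveUri("channel:nixos-unstable") yields
   "https://nixos.org/channels/nixos-unstable/nixexprs.tar.xz";
   any other URI is returned unchanged. */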

struct CurlDownloader : public Downloader
{
    CURLM * curlm = 0;

    std::random_device rd;
    std::mt19937 mt19937;

    struct DownloadItem : public std::enable_shared_from_this<DownloadItem>
    {
        CurlDownloader & downloader;
        DownloadRequest request;
        DownloadResult result;
        Activity act;
        bool done = false; // whether either the success or failure function has been called
        Callback<DownloadResult> callback;
        CURL * req = 0;
        bool active = false; // whether the handle has been added to the multi object
        std::string status;

        unsigned int attempt = 0;

        /* Don't start this download until the specified time point
           has been reached. */
        std::chrono::steady_clock::time_point embargo;

        struct curl_slist * requestHeaders = 0;

        std::string encoding;

        bool acceptRanges = false;

        curl_off_t writtenToSink = 0;

        DownloadItem(CurlDownloader & downloader,
            const DownloadRequest & request,
            Callback<DownloadResult> && callback)
            : downloader(downloader)
            , request(request)
            , act(*logger, lvlTalkative, actDownload,
                fmt(request.data ? "uploading '%s'" : "downloading '%s'", request.uri),
                {request.uri}, request.parentAct)
            , callback(std::move(callback))
            , finalSink([this](const unsigned char * data, size_t len) {
                if (this->request.dataCallback) {
                    long httpStatus = 0;
                    curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);

                    /* Only write data to the sink if this is a
                       successful response. */
                    if (httpStatus == 0 || httpStatus == 200 || httpStatus == 201 || httpStatus == 206) {
                        writtenToSink += len;
                        this->request.dataCallback((char *) data, len);
                    }
                } else
                    this->result.data->append((char *) data, len);
              })
        {
            if (!request.expectedETag.empty())
                requestHeaders = curl_slist_append(requestHeaders, ("If-None-Match: " + request.expectedETag).c_str());
            if (!request.mimeType.empty())
                requestHeaders = curl_slist_append(requestHeaders, ("Content-Type: " + request.mimeType).c_str());
        }

        ~DownloadItem()
        {
            if (req) {
                if (active)
                    curl_multi_remove_handle(downloader.curlm, req);
                curl_easy_cleanup(req);
            }
            if (requestHeaders) curl_slist_free_all(requestHeaders);
            try {
                if (!done)
                    fail(DownloadError(Interrupted, format("download of '%s' was interrupted") % request.uri));
            } catch (...) {
                ignoreException();
            }
        }

        void failEx(std::exception_ptr ex)
        {
            assert(!done);
            done = true;
            callback.rethrow(ex);
        }

        template<class T>
        void fail(const T & e)
        {
            failEx(std::make_exception_ptr(e));
        }

        LambdaSink finalSink;
        std::shared_ptr<CompressionSink> decompressionSink;

        std::exception_ptr writeException;

        size_t writeCallback(void * contents, size_t size, size_t nmemb)
        {
            try {
                size_t realSize = size * nmemb;
                result.bodySize += realSize;

                if (!decompressionSink)
                    decompressionSink = makeDecompressionSink(encoding, finalSink);

                (*decompressionSink)((unsigned char *) contents, realSize);

                return realSize;
            } catch (...) {
                writeException = std::current_exception();
                return 0;
            }
        }

        static size_t writeCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
        {
            return ((DownloadItem *) userp)->writeCallback(contents, size, nmemb);
        }

        size_t headerCallback(void * contents, size_t size, size_t nmemb)
        {
            size_t realSize = size * nmemb;
            std::string line((char *) contents, realSize);
            printMsg(lvlVomit, format("got header for '%s': %s") % request.uri % trim(line));
            if (line.compare(0, 5, "HTTP/") == 0) { // new response starts
                result.etag = "";
                auto ss = tokenizeString<vector<string>>(line, " ");
                status = ss.size() >= 2 ? ss[1] : "";
                result.data = std::make_shared<std::string>();
                result.bodySize = 0;
                acceptRanges = false;
                encoding = "";
            } else {
                auto i = line.find(':');
                if (i != string::npos) {
                    string name = toLower(trim(string(line, 0, i)));
                    if (name == "etag") {
                        result.etag = trim(string(line, i + 1));
                        /* Hack to work around a GitHub bug: it sends
                           ETags, but ignores If-None-Match. So if we get
                           the expected ETag on a 200 response, then shut
                           down the connection because we already have the
                           data. */
                        if (result.etag == request.expectedETag && status == "200") {
                            debug(format("shutting down on 200 HTTP response with expected ETag"));
                            return 0;
                        }
                    } else if (name == "content-encoding")
                        encoding = trim(string(line, i + 1));
                    else if (name == "accept-ranges" && toLower(trim(std::string(line, i + 1))) == "bytes")
                        acceptRanges = true;
                }
            }
            return realSize;
        }

        static size_t headerCallbackWrapper(void * contents, size_t size, size_t nmemb, void * userp)
        {
            return ((DownloadItem *) userp)->headerCallback(contents, size, nmemb);
        }

        int progressCallback(double dltotal, double dlnow)
        {
            try {
                act.progress(dlnow, dltotal);
            } catch (nix::Interrupted &) {
                assert(_isInterrupted);
            }
            return _isInterrupted;
        }

        static int progressCallbackWrapper(void * userp, double dltotal, double dlnow, double ultotal, double ulnow)
        {
            return ((DownloadItem *) userp)->progressCallback(dltotal, dlnow);
        }

        static int debugCallback(CURL * handle, curl_infotype type, char * data, size_t size, void * userptr)
        {
            if (type == CURLINFO_TEXT)
                vomit("curl: %s", chomp(std::string(data, size)));
            return 0;
        }

        size_t readOffset = 0;
        size_t readCallback(char *buffer, size_t size, size_t nitems)
        {
            if (readOffset == request.data->length())
                return 0;
            auto count = std::min(size * nitems, request.data->length() - readOffset);
            assert(count);
            memcpy(buffer, request.data->data() + readOffset, count);
            readOffset += count;
            return count;
        }

        static size_t readCallbackWrapper(char *buffer, size_t size, size_t nitems, void * userp)
        {
            return ((DownloadItem *) userp)->readCallback(buffer, size, nitems);
        }

        void init()
        {
            if (!req) req = curl_easy_init();

            curl_easy_reset(req);

            if (verbosity >= lvlVomit) {
                curl_easy_setopt(req, CURLOPT_VERBOSE, 1);
                curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, DownloadItem::debugCallback);
            }

            curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str());
            curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L);
            curl_easy_setopt(req, CURLOPT_MAXREDIRS, 10);
            curl_easy_setopt(req, CURLOPT_NOSIGNAL, 1);
            curl_easy_setopt(req, CURLOPT_USERAGENT,
                ("curl/" LIBCURL_VERSION " Nix/" + nixVersion +
                    (downloadSettings.userAgentSuffix != "" ? " " + downloadSettings.userAgentSuffix.get() : "")).c_str());
#if LIBCURL_VERSION_NUM >= 0x072b00
            curl_easy_setopt(req, CURLOPT_PIPEWAIT, 1);
#endif
#if LIBCURL_VERSION_NUM >= 0x072f00
            if (downloadSettings.enableHttp2)
                curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2TLS);
            else
                curl_easy_setopt(req, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1);
#endif
            curl_easy_setopt(req, CURLOPT_WRITEFUNCTION, DownloadItem::writeCallbackWrapper);
            curl_easy_setopt(req, CURLOPT_WRITEDATA, this);
            curl_easy_setopt(req, CURLOPT_HEADERFUNCTION, DownloadItem::headerCallbackWrapper);
            curl_easy_setopt(req, CURLOPT_HEADERDATA, this);

            curl_easy_setopt(req, CURLOPT_PROGRESSFUNCTION, progressCallbackWrapper);
            curl_easy_setopt(req, CURLOPT_PROGRESSDATA, this);
            curl_easy_setopt(req, CURLOPT_NOPROGRESS, 0);

            curl_easy_setopt(req, CURLOPT_HTTPHEADER, requestHeaders);

            if (request.head)
                curl_easy_setopt(req, CURLOPT_NOBODY, 1);

            if (request.data) {
                curl_easy_setopt(req, CURLOPT_UPLOAD, 1L);
                curl_easy_setopt(req, CURLOPT_READFUNCTION, readCallbackWrapper);
                curl_easy_setopt(req, CURLOPT_READDATA, this);
                curl_easy_setopt(req, CURLOPT_INFILESIZE_LARGE, (curl_off_t) request.data->length());
            }

            if (request.verifyTLS) {
                if (settings.caFile != "")
                    curl_easy_setopt(req, CURLOPT_CAINFO, settings.caFile.c_str());
            } else {
                curl_easy_setopt(req, CURLOPT_SSL_VERIFYPEER, 0);
                curl_easy_setopt(req, CURLOPT_SSL_VERIFYHOST, 0);
            }

            curl_easy_setopt(req, CURLOPT_CONNECTTIMEOUT, downloadSettings.connectTimeout.get());

            curl_easy_setopt(req, CURLOPT_LOW_SPEED_LIMIT, 1L);
            curl_easy_setopt(req, CURLOPT_LOW_SPEED_TIME, downloadSettings.stalledDownloadTimeout.get());

            /* If no file exists at the specified path, curl continues to work
               anyway as if netrc support was disabled. */
            curl_easy_setopt(req, CURLOPT_NETRC_FILE, settings.netrcFile.get().c_str());
            curl_easy_setopt(req, CURLOPT_NETRC, CURL_NETRC_OPTIONAL);

            if (writtenToSink)
                curl_easy_setopt(req, CURLOPT_RESUME_FROM_LARGE, writtenToSink);

            result.data = std::make_shared<std::string>();
            result.bodySize = 0;
        }

        void finish(CURLcode code)
        {
            long httpStatus = 0;
            curl_easy_getinfo(req, CURLINFO_RESPONSE_CODE, &httpStatus);

            char * effectiveUriCStr;
            curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr);
            if (effectiveUriCStr)
                result.effectiveUri = effectiveUriCStr;

            debug("finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d bytes",
                request.verb(), request.uri, code, httpStatus, result.bodySize);

            if (decompressionSink) {
                try {
                    decompressionSink->finish();
                } catch (...) {
                    writeException = std::current_exception();
                }
            }

            if (code == CURLE_WRITE_ERROR && result.etag == request.expectedETag) {
                code = CURLE_OK;
                httpStatus = 304;
            }

            if (writeException)
                failEx(writeException);

            else if (code == CURLE_OK &&
                (httpStatus == 200 || httpStatus == 201 || httpStatus == 204 || httpStatus == 206 || httpStatus == 304 || httpStatus == 226 /* FTP */ || httpStatus == 0 /* other protocol */))
            {
                result.cached = httpStatus == 304;
                act.progress(result.bodySize, result.bodySize);
                done = true;
                callback(std::move(result));
            }

            else {
                // We treat most errors as transient, but won't retry when hopeless
                Error err = Transient;

                if (httpStatus == 404 || httpStatus == 410 || code == CURLE_FILE_COULDNT_READ_FILE) {
                    // The file is definitely not there
                    err = NotFound;
                } else if (httpStatus == 401 || httpStatus == 403 || httpStatus == 407) {
                    // Don't retry on authentication/authorization failures
                    err = Forbidden;
                } else if (httpStatus >= 400 && httpStatus < 500 && httpStatus != 408 && httpStatus != 429) {
                    // Most 4xx errors are client errors and are probably not worth retrying:
                    //   * 408 means the server timed out waiting for us, so we try again
                    //   * 429 means too many requests, so we retry (with a delay)
                    err = Misc;
                } else if (httpStatus == 501 || httpStatus == 505 || httpStatus == 511) {
                    // Let's treat most 5xx (server) errors as transient, except for a handful:
                    //   * 501 not implemented
                    //   * 505 http version not supported
                    //   * 511 we're behind a captive portal
                    err = Misc;
                } else {
                    // Don't bother retrying on certain cURL errors either
                    switch (code) {
                        case CURLE_FAILED_INIT:
                        case CURLE_URL_MALFORMAT:
                        case CURLE_NOT_BUILT_IN:
                        case CURLE_REMOTE_ACCESS_DENIED:
                        case CURLE_FILE_COULDNT_READ_FILE:
                        case CURLE_FUNCTION_NOT_FOUND:
                        case CURLE_ABORTED_BY_CALLBACK:
                        case CURLE_BAD_FUNCTION_ARGUMENT:
                        case CURLE_INTERFACE_FAILED:
                        case CURLE_UNKNOWN_OPTION:
                        case CURLE_SSL_CACERT_BADFILE:
                        case CURLE_TOO_MANY_REDIRECTS:
                        case CURLE_WRITE_ERROR:
                        case CURLE_UNSUPPORTED_PROTOCOL:
                            err = Misc;
                            break;
                        default: // Shut up warnings
                            break;
                    }
                }

                attempt++;

                auto exc =
                    code == CURLE_ABORTED_BY_CALLBACK && _isInterrupted
                    ? DownloadError(Interrupted, fmt("%s of '%s' was interrupted", request.verb(), request.uri))
                    : httpStatus != 0
                    ? DownloadError(err,
                        fmt("unable to %s '%s': HTTP error %d",
                            request.verb(), request.uri, httpStatus)
                        + (code == CURLE_OK ? "" : fmt(" (curl error: %s)", curl_easy_strerror(code)))
                        )
                    : DownloadError(err,
                        fmt("unable to %s '%s': %s (%d)",
                            request.verb(), request.uri, curl_easy_strerror(code), code));

                /* If this is a transient error, then maybe retry the
                   download after a while. If we're writing to a
                   sink, we can only retry if the server supports
                   ranged requests. */
                if (err == Transient
                    && attempt < request.tries
                    && (!this->request.dataCallback
                        || writtenToSink == 0
                        || (acceptRanges && encoding.empty())))
                {
                    int ms = request.baseRetryTimeMs * std::pow(2.0f, attempt - 1 + std::uniform_real_distribution<>(0.0, 0.5)(downloader.mt19937));
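                    /* Exponential backoff with jitter: with the default
                       baseRetryTimeMs of 250, successive retries wait
                       roughly 250 ms, 500 ms, 1 s, 2 s, ..., each
                       multiplied by a random factor in [1.0, ~1.41). */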
                    if (writtenToSink)
                        warn("%s; retrying from offset %d in %d ms", exc.what(), writtenToSink, ms);
                    else
                        warn("%s; retrying in %d ms", exc.what(), ms);
                    embargo = std::chrono::steady_clock::now() + std::chrono::milliseconds(ms);
                    downloader.enqueueItem(shared_from_this());
                }
                else
                    fail(exc);
            }
        }
    };

    struct State
    {
        struct EmbargoComparator {
            bool operator() (const std::shared_ptr<DownloadItem> & i1, const std::shared_ptr<DownloadItem> & i2) {
                return i1->embargo > i2->embargo;
            }
        };
        bool quit = false;
        std::priority_queue<std::shared_ptr<DownloadItem>, std::vector<std::shared_ptr<DownloadItem>>, EmbargoComparator> incoming;
    };

    Sync<State> state_;

    /* We can't use a std::condition_variable to wake up the curl
       thread, because it only monitors file descriptors. So use a
       pipe instead. */
    Pipe wakeupPipe;

    std::thread workerThread;

    CurlDownloader()
        : mt19937(rd())
    {
        static std::once_flag globalInit;
        std::call_once(globalInit, curl_global_init, CURL_GLOBAL_ALL);

        curlm = curl_multi_init();

#if LIBCURL_VERSION_NUM >= 0x072b00 // Multiplex requires >= 7.43.0
        curl_multi_setopt(curlm, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX);
#endif
#if LIBCURL_VERSION_NUM >= 0x071e00 // Max connections requires >= 7.30.0
        curl_multi_setopt(curlm, CURLMOPT_MAX_TOTAL_CONNECTIONS,
            downloadSettings.httpConnections.get());
#endif

        wakeupPipe.create();
        fcntl(wakeupPipe.readSide.get(), F_SETFL, O_NONBLOCK);

        workerThread = std::thread([&]() { workerThreadEntry(); });
    }

    ~CurlDownloader()
    {
        stopWorkerThread();

        workerThread.join();

        if (curlm) curl_multi_cleanup(curlm);
    }

    void stopWorkerThread()
    {
        /* Signal the worker thread to exit. */
        {
            auto state(state_.lock());
            state->quit = true;
        }
        writeFull(wakeupPipe.writeSide.get(), " ", false);
    }

    void workerThreadMain()
    {
        /* Cause this thread to be notified on SIGINT. */
        auto callback = createInterruptCallback([&]() {
            stopWorkerThread();
        });

        std::map<CURL *, std::shared_ptr<DownloadItem>> items;

        bool quit = false;

        std::chrono::steady_clock::time_point nextWakeup;

        while (!quit) {
            checkInterrupt();

            /* Let curl do its thing. */
            int running;
            CURLMcode mc = curl_multi_perform(curlm, &running);
            if (mc != CURLM_OK)
                throw nix::Error(format("unexpected error from curl_multi_perform(): %s") % curl_multi_strerror(mc));

            /* Set the promises of any finished requests. */
            CURLMsg * msg;
            int left;
            while ((msg = curl_multi_info_read(curlm, &left))) {
                if (msg->msg == CURLMSG_DONE) {
                    auto i = items.find(msg->easy_handle);
                    assert(i != items.end());
                    i->second->finish(msg->data.result);
                    curl_multi_remove_handle(curlm, i->second->req);
                    i->second->active = false;
                    items.erase(i);
                }
            }

            /* Wait for activity, including wakeup events. */
            int numfds = 0;
            struct curl_waitfd extraFDs[1];
            extraFDs[0].fd = wakeupPipe.readSide.get();
            extraFDs[0].events = CURL_WAIT_POLLIN;
            extraFDs[0].revents = 0;
            long maxSleepTimeMs = items.empty() ? 10000 : 100;
            auto sleepTimeMs =
                nextWakeup != std::chrono::steady_clock::time_point()
                ? std::max(0, (int) std::chrono::duration_cast<std::chrono::milliseconds>(nextWakeup - std::chrono::steady_clock::now()).count())
                : maxSleepTimeMs;
            vomit("download thread waiting for %d ms", sleepTimeMs);
            mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds);
            if (mc != CURLM_OK)
                throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % curl_multi_strerror(mc));

            nextWakeup = std::chrono::steady_clock::time_point();

            /* Add new curl requests from the incoming requests queue,
               except for requests that are embargoed (waiting for a
               retry timeout to expire). */
            if (extraFDs[0].revents & CURL_WAIT_POLLIN) {
                char buf[1024];
                auto res = read(extraFDs[0].fd, buf, sizeof(buf));
                if (res == -1 && errno != EINTR)
                    throw SysError("reading curl wakeup socket");
            }

            std::vector<std::shared_ptr<DownloadItem>> incoming;
            auto now = std::chrono::steady_clock::now();

            {
                auto state(state_.lock());
                while (!state->incoming.empty()) {
                    auto item = state->incoming.top();
                    if (item->embargo <= now) {
                        incoming.push_back(item);
                        state->incoming.pop();
                    } else {
                        if (nextWakeup == std::chrono::steady_clock::time_point()
                            || item->embargo < nextWakeup)
                            nextWakeup = item->embargo;
                        break;
                    }
                }
                quit = state->quit;
            }

            for (auto & item : incoming) {
                debug("starting %s of %s", item->request.verb(), item->request.uri);
                item->init();
                curl_multi_add_handle(curlm, item->req);
                item->active = true;
                items[item->req] = item;
            }
        }

        debug("download thread shutting down");
    }

    void workerThreadEntry()
    {
        try {
            workerThreadMain();
        } catch (nix::Interrupted & e) {
        } catch (std::exception & e) {
            printError("unexpected error in download thread: %s", e.what());
        }

        {
            auto state(state_.lock());
            while (!state->incoming.empty()) state->incoming.pop();
            state->quit = true;
        }
    }

    void enqueueItem(std::shared_ptr<DownloadItem> item)
    {
        if (item->request.data
            && !hasPrefix(item->request.uri, "http://")
            && !hasPrefix(item->request.uri, "https://"))
            throw nix::Error("uploading to '%s' is not supported", item->request.uri);

        {
            auto state(state_.lock());
            if (state->quit)
                throw nix::Error("cannot enqueue download request because the download thread is shutting down");
            state->incoming.push(item);
        }
        writeFull(wakeupPipe.writeSide.get(), " ");
    }

#ifdef ENABLE_S3
    std::tuple<std::string, std::string, Store::Params> parseS3Uri(std::string uri)
    {
        auto [path, params] = splitUriAndParams(uri);

        auto slash = path.find('/', 5); // 5 is the length of "s3://" prefix
        if (slash == std::string::npos)
            throw nix::Error("bad S3 URI '%s'", path);

        std::string bucketName(path, 5, slash - 5);
        std::string key(path, slash + 1);

        return {bucketName, key, params};
    }
#endif

    void enqueueDownload(const DownloadRequest & request,
        Callback<DownloadResult> callback) override
    {
        /* Ugly hack to support s3:// URIs. */
        if (hasPrefix(request.uri, "s3://")) {
            // FIXME: do this on a worker thread
            try {
#ifdef ENABLE_S3
                auto [bucketName, key, params] = parseS3Uri(request.uri);

                std::string profile = get(params, "profile", "");
                std::string region = get(params, "region", Aws::Region::US_EAST_1);
                std::string scheme = get(params, "scheme", "");
                std::string endpoint = get(params, "endpoint", "");

                S3Helper s3Helper(profile, region, scheme, endpoint);

                // FIXME: implement ETag
                auto s3Res = s3Helper.getObject(bucketName, key);
                DownloadResult res;
                if (!s3Res.data)
                    throw DownloadError(NotFound, fmt("S3 object '%s' does not exist", request.uri));
                res.data = s3Res.data;
                callback(std::move(res));
#else
                throw nix::Error("cannot download '%s' because Nix is not built with S3 support", request.uri);
#endif
            } catch (...) { callback.rethrow(); }
            return;
        }

        enqueueItem(std::make_shared<DownloadItem>(*this, request, std::move(callback)));
    }
};

ref<Downloader> getDownloader()
{
    static ref<Downloader> downloader = makeDownloader();
    return downloader;
}

ref<Downloader> makeDownloader()
{
    return make_ref<CurlDownloader>();
}

std::future<DownloadResult> Downloader::enqueueDownload(const DownloadRequest & request)
{
    auto promise = std::make_shared<std::promise<DownloadResult>>();
    enqueueDownload(request,
        {[promise](std::future<DownloadResult> fut) {
            try {
                promise->set_value(fut.get());
            } catch (...) {
                promise->set_exception(std::current_exception());
            }
        }});
    return promise->get_future();
}

DownloadResult Downloader::download(const DownloadRequest & request)
{
    return enqueueDownload(request).get();
}

void Downloader::download(DownloadRequest && request, Sink & sink)
{
    /* Note: we can't call 'sink' via request.dataCallback, because
       that would cause the sink to execute on the downloader
       thread. If 'sink' is a coroutine, this will fail. Also, if the
       sink is expensive (e.g. one that does decompression and writing
       to the Nix store), it would stall the download thread too much.
       Therefore we use a buffer to communicate data between the
       download thread and the calling thread. */

    struct State {
        bool quit = false;
        std::exception_ptr exc;
        std::string data;
        std::condition_variable avail, request;
    };

    auto _state = std::make_shared<Sync<State>>();

    /* In case of an exception, wake up the download thread. FIXME:
       abort the download request. */
    Finally finally([&]() {
        auto state(_state->lock());
        state->quit = true;
        state->request.notify_one();
    });

    request.dataCallback = [_state](char * buf, size_t len) {

        auto state(_state->lock());

        if (state->quit) return;

        /* If the buffer is full, then go to sleep until the calling
           thread wakes us up (i.e. when it has removed data from the
           buffer). We don't wait forever to prevent stalling the
           download thread. (Hopefully sleeping will throttle the
           sender.) */
        if (state->data.size() > 1024 * 1024) {
            debug("download buffer is full; going to sleep");
            state.wait_for(state->request, std::chrono::seconds(10));
        }

        /* Append data to the buffer and wake up the calling
           thread. */
        state->data.append(buf, len);
        state->avail.notify_one();
    };

    enqueueDownload(request,
        {[_state](std::future<DownloadResult> fut) {
            auto state(_state->lock());
            state->quit = true;
            try {
                fut.get();
            } catch (...) {
                state->exc = std::current_exception();
            }
            state->avail.notify_one();
            state->request.notify_one();
        }});

    while (true) {
        checkInterrupt();

        std::string chunk;

        /* Grab data if available, otherwise wait for the download
           thread to wake us up. */
        {
            auto state(_state->lock());

            while (state->data.empty()) {

                if (state->quit) {
                    if (state->exc) std::rethrow_exception(state->exc);
                    return;
                }

                state.wait(state->avail);
            }

            chunk = std::move(state->data);

            state->request.notify_one();
        }

        /* Flush the data to the sink and wake up the download thread
           if it's blocked on a full buffer. We don't hold the state
           lock while doing this to prevent blocking the download
           thread if sink() takes a long time. */
        sink((unsigned char *) chunk.data(), chunk.size());
    }
}
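
/* A typical caller of the sink variant, as a sketch with a made-up
   URL; the lambda runs on the calling thread, chunk by chunk:

     LambdaSink sink([](const unsigned char * data, size_t len) {
         // consume 'len' bytes at 'data'
     });
     DownloadRequest req("https://example.org/big-file");
     getDownloader()->download(std::move(req), sink);
*/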

CachedDownloadResult Downloader::downloadCached(
    ref<Store> store, const CachedDownloadRequest & request)
{
    auto url = resolveUri(request.uri);

    auto name = request.name;
    if (name == "") {
        auto p = url.rfind('/');
        if (p != string::npos) name = string(url, p + 1);
    }

    Path expectedStorePath;
    if (request.expectedHash) {
        expectedStorePath = store->makeFixedOutputPath(request.unpack, request.expectedHash, name);
        if (store->isValidPath(expectedStorePath)) {
            CachedDownloadResult result;
            result.storePath = expectedStorePath;
            result.path = store->toRealPath(expectedStorePath);
            return result;
        }
    }

    Path cacheDir = getCacheDir() + "/nix/tarballs";
    createDirs(cacheDir);

    string urlHash = hashString(htSHA256, name + std::string("\0"s) + url).to_string(Base32, false);

    Path dataFile = cacheDir + "/" + urlHash + ".info";
    Path fileLink = cacheDir + "/" + urlHash + "-file";

    PathLocks lock({fileLink}, fmt("waiting for lock on '%1%'...", fileLink));

    Path storePath;

    string expectedETag;

    bool skip = false;

    CachedDownloadResult result;

    if (pathExists(fileLink) && pathExists(dataFile)) {
        storePath = readLink(fileLink);
        store->addTempRoot(storePath);
        if (store->isValidPath(storePath)) {
            auto ss = tokenizeString<vector<string>>(readFile(dataFile), "\n");
            if (ss.size() >= 3 && ss[0] == url) {
                time_t lastChecked;
                if (string2Int(ss[2], lastChecked) && (uint64_t) lastChecked + request.ttl >= (uint64_t) time(0)) {
                    skip = true;
                    result.effectiveUri = request.uri;
                    result.etag = ss[1];
                } else if (!ss[1].empty()) {
                    debug(format("verifying previous ETag '%1%'") % ss[1]);
                    expectedETag = ss[1];
                }
            }
        } else
            storePath = "";
    }

    if (!skip) {

        try {
            DownloadRequest request2(url);
            request2.expectedETag = expectedETag;
            auto res = download(request2);
            result.effectiveUri = res.effectiveUri;
            result.etag = res.etag;

            if (!res.cached) {
                ValidPathInfo info;
                StringSink sink;
                dumpString(*res.data, sink);
                Hash hash = hashString(request.expectedHash ? request.expectedHash.type : htSHA256, *res.data);
                info.path = store->makeFixedOutputPath(false, hash, name);
                info.narHash = hashString(htSHA256, *sink.s);
                info.narSize = sink.s->size();
                info.ca = makeFixedOutputCA(false, hash);
                store->addToStore(info, sink.s, NoRepair, NoCheckSigs);
                storePath = info.path;
            }

            assert(!storePath.empty());
            replaceSymlink(storePath, fileLink);

            writeFile(dataFile, url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n");
        } catch (DownloadError & e) {
            if (storePath.empty()) throw;
            warn("warning: %s; using cached result", e.msg());
            result.etag = expectedETag;
        }
    }

    if (request.unpack) {
        Path unpackedLink = cacheDir + "/" + baseNameOf(storePath) + "-unpacked";
        PathLocks lock2({unpackedLink}, fmt("waiting for lock on '%1%'...", unpackedLink));
        Path unpackedStorePath;
        if (pathExists(unpackedLink)) {
            unpackedStorePath = readLink(unpackedLink);
            store->addTempRoot(unpackedStorePath);
            if (!store->isValidPath(unpackedStorePath))
                unpackedStorePath = "";
        }
        if (unpackedStorePath.empty()) {
            printInfo(format("unpacking '%1%'...") % url);
            Path tmpDir = createTempDir();
            AutoDelete autoDelete(tmpDir, true);
            // FIXME: this requires GNU tar for decompression.
            runProgram("tar", true, {"xf", store->toRealPath(storePath), "-C", tmpDir, "--strip-components", "1"});
            unpackedStorePath = store->addToStore(name, tmpDir, true, htSHA256, defaultPathFilter, NoRepair);
        }
        replaceSymlink(unpackedStorePath, unpackedLink);
        storePath = unpackedStorePath;
    }

    if (expectedStorePath != "" && storePath != expectedStorePath) {
        unsigned int statusCode = 102;
        Hash gotHash = request.unpack
            ? hashPath(request.expectedHash.type, store->toRealPath(storePath)).first
            : hashFile(request.expectedHash.type, store->toRealPath(storePath));
        throw nix::Error(statusCode, "hash mismatch in file downloaded from '%s':\n  wanted: %s\n  got:    %s",
            url, request.expectedHash.to_string(), gotHash.to_string());
    }

    result.storePath = storePath;
    result.path = store->toRealPath(storePath);
    return result;
}
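
/* Usage sketch (hypothetical URL): fetch a tarball at most once per
   'tarball-ttl' seconds and get its unpacked store path back:

     CachedDownloadRequest req("https://example.org/src.tar.xz");
     req.unpack = true;
     auto result = getDownloader()->downloadCached(store, req);
     // result.storePath is the unpacked tree in the Nix store

   The on-disk cache entry (<urlHash>.info above) stores three lines:
   the URL, the ETag and the last-checked timestamp. */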


bool isUri(const string & s)
{
    if (s.compare(0, 8, "channel:") == 0) return true;
    size_t pos = s.find("://");
    if (pos == string::npos) return false;
    string scheme(s, 0, pos);
    return scheme == "http" || scheme == "https" || scheme == "file" || scheme == "channel" || scheme == "git" || scheme == "s3" || scheme == "ssh";
}


}
138
third_party/nix/src/libstore/download.hh
vendored
Normal file
@@ -0,0 +1,138 @@
#pragma once

#include "types.hh"
#include "hash.hh"
#include "globals.hh"

#include <string>
#include <future>

namespace nix {

struct DownloadSettings : Config
{
    Setting<bool> enableHttp2{this, true, "http2",
        "Whether to enable HTTP/2 support."};

    Setting<std::string> userAgentSuffix{this, "", "user-agent-suffix",
        "String appended to the user agent in HTTP requests."};

    Setting<size_t> httpConnections{this, 25, "http-connections",
        "Number of parallel HTTP connections.",
        {"binary-caches-parallel-connections"}};

    Setting<unsigned long> connectTimeout{this, 0, "connect-timeout",
        "Timeout for connecting to servers during downloads. 0 means use curl's builtin default."};

    Setting<unsigned long> stalledDownloadTimeout{this, 300, "stalled-download-timeout",
        "Timeout (in seconds) for receiving data from servers during download. Nix cancels idle downloads after this timeout's duration."};

    Setting<unsigned int> tries{this, 5, "download-attempts",
        "How often Nix will attempt to download a file before giving up."};
};

extern DownloadSettings downloadSettings;

struct DownloadRequest
{
    std::string uri;
    std::string expectedETag;
    bool verifyTLS = true;
    bool head = false;
    size_t tries = downloadSettings.tries;
    unsigned int baseRetryTimeMs = 250;
    ActivityId parentAct;
    bool decompress = true;
    std::shared_ptr<std::string> data;
    std::string mimeType;
    std::function<void(char *, size_t)> dataCallback;

    DownloadRequest(const std::string & uri)
        : uri(uri), parentAct(getCurActivity()) { }

    std::string verb()
    {
        return data ? "upload" : "download";
    }
};

struct DownloadResult
{
    bool cached = false;
    std::string etag;
    std::string effectiveUri;
    std::shared_ptr<std::string> data;
    uint64_t bodySize = 0;
};

struct CachedDownloadRequest
{
    std::string uri;
    bool unpack = false;
    std::string name;
    Hash expectedHash;
    unsigned int ttl = settings.tarballTtl;

    CachedDownloadRequest(const std::string & uri)
        : uri(uri) { }
};

struct CachedDownloadResult
{
    // Note: 'storePath' may be different from 'path' when using a
    // chroot store.
    Path storePath;
    Path path;
    std::optional<std::string> etag;
    std::string effectiveUri;
};

class Store;

struct Downloader
{
    virtual ~Downloader() { }

    /* Enqueue a download request, returning a future to the result of
       the download. The future may throw a DownloadError
       exception. */
    virtual void enqueueDownload(const DownloadRequest & request,
        Callback<DownloadResult> callback) = 0;

    std::future<DownloadResult> enqueueDownload(const DownloadRequest & request);

    /* Synchronously download a file. */
    DownloadResult download(const DownloadRequest & request);

    /* Download a file, writing its data to a sink. The sink will be
       invoked on the thread of the caller. */
    void download(DownloadRequest && request, Sink & sink);

    /* Check if the specified file is already in ~/.cache/nix/tarballs
       and is more recent than ‘tarball-ttl’ seconds. Otherwise,
       use the recorded ETag to verify if the server has a more
       recent version, and if so, download it to the Nix store. */
    CachedDownloadResult downloadCached(ref<Store> store, const CachedDownloadRequest & request);

    enum Error { NotFound, Forbidden, Misc, Transient, Interrupted };
};

/* Return a shared Downloader object. Using this object is preferred
   because it enables connection reuse and HTTP/2 multiplexing. */
ref<Downloader> getDownloader();
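
/* Minimal synchronous use, as a sketch with a made-up URL:

     auto res = getDownloader()->download(
         DownloadRequest("https://example.org/file.txt"));
     std::cout << *res.data;
*/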

/* Return a new Downloader object. */
ref<Downloader> makeDownloader();

class DownloadError : public Error
{
public:
    Downloader::Error error;
    DownloadError(Downloader::Error error, const FormatOrString & fs)
        : Error(fs), error(error)
    { }
};

bool isUri(const string & s);

}
106
third_party/nix/src/libstore/export-import.cc
vendored
Normal file
@@ -0,0 +1,106 @@
#include "store-api.hh"
|
||||
#include "archive.hh"
|
||||
#include "worker-protocol.hh"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct HashAndWriteSink : Sink
|
||||
{
|
||||
Sink & writeSink;
|
||||
HashSink hashSink;
|
||||
HashAndWriteSink(Sink & writeSink) : writeSink(writeSink), hashSink(htSHA256)
|
||||
{
|
||||
}
|
||||
virtual void operator () (const unsigned char * data, size_t len)
|
||||
{
|
||||
writeSink(data, len);
|
||||
hashSink(data, len);
|
||||
}
|
||||
Hash currentHash()
|
||||
{
|
||||
return hashSink.currentHash().first;
|
||||
}
|
||||
};
|
||||
|
||||
void Store::exportPaths(const Paths & paths, Sink & sink)
|
||||
{
|
||||
Paths sorted = topoSortPaths(PathSet(paths.begin(), paths.end()));
|
||||
std::reverse(sorted.begin(), sorted.end());
|
||||
|
||||
std::string doneLabel("paths exported");
|
||||
//logger->incExpected(doneLabel, sorted.size());
|
||||
|
||||
for (auto & path : sorted) {
|
||||
//Activity act(*logger, lvlInfo, format("exporting path '%s'") % path);
|
||||
sink << 1;
|
||||
exportPath(path, sink);
|
||||
//logger->incProgress(doneLabel);
|
||||
}
|
||||
|
||||
sink << 0;
|
||||
}
|
||||
|
||||
void Store::exportPath(const Path & path, Sink & sink)
|
||||
{
|
||||
auto info = queryPathInfo(path);
|
||||
|
||||
HashAndWriteSink hashAndWriteSink(sink);
|
||||
|
||||
narFromPath(path, hashAndWriteSink);
|
||||
|
||||
/* Refuse to export paths that have changed. This prevents
|
||||
filesystem corruption from spreading to other machines.
|
||||
Don't complain if the stored hash is zero (unknown). */
|
||||
Hash hash = hashAndWriteSink.currentHash();
|
||||
if (hash != info->narHash && info->narHash != Hash(info->narHash.type))
|
||||
throw Error(format("hash of path '%1%' has changed from '%2%' to '%3%'!") % path
|
||||
% info->narHash.to_string() % hash.to_string());
|
||||
|
||||
hashAndWriteSink << exportMagic << path << info->references << info->deriver << 0;
|
||||
}
|
||||
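
/* Per exported path the wire format is thus: the NAR dump, followed
   by exportMagic, the store path, its references, its deriver and a
   trailing 0 (no signature); exportPaths() brackets each entry with
   a leading 1 and terminates the whole stream with a 0, which is
   what importPaths() below expects. */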

Paths Store::importPaths(Source & source, std::shared_ptr<FSAccessor> accessor, CheckSigsFlag checkSigs)
{
    Paths res;
    while (true) {
        auto n = readNum<uint64_t>(source);
        if (n == 0) break;
        if (n != 1) throw Error("input doesn't look like something created by 'nix-store --export'");

        /* Extract the NAR from the source. */
        TeeSink tee(source);
        parseDump(tee, tee.source);

        uint32_t magic = readInt(source);
        if (magic != exportMagic)
            throw Error("Nix archive cannot be imported; wrong format");

        ValidPathInfo info;

        info.path = readStorePath(*this, source);

        //Activity act(*logger, lvlInfo, format("importing path '%s'") % info.path);

        info.references = readStorePaths<PathSet>(*this, source);

        info.deriver = readString(source);
        if (info.deriver != "") assertStorePath(info.deriver);

        info.narHash = hashString(htSHA256, *tee.source.data);
        info.narSize = tee.source.data->size();

        // Ignore optional legacy signature.
        if (readInt(source) == 1)
            readString(source);

        addToStore(info, tee.source.data, NoRepair, checkSigs, accessor);

        res.push_back(info.path);
    }

    return res;
}

}
33
third_party/nix/src/libstore/fs-accessor.hh
vendored
Normal file
@@ -0,0 +1,33 @@
#pragma once

#include "types.hh"

namespace nix {

/* An abstract class for accessing a filesystem-like structure, such
   as a (possibly remote) Nix store or the contents of a NAR file. */
class FSAccessor
{
public:
    enum Type { tMissing, tRegular, tSymlink, tDirectory };

    struct Stat
    {
        Type type = tMissing;
        uint64_t fileSize = 0; // regular files only
        bool isExecutable = false; // regular files only
        uint64_t narOffset = 0; // regular files only
    };

    virtual ~FSAccessor() { }

    virtual Stat stat(const Path & path) = 0;

    virtual StringSet readDirectory(const Path & path) = 0;

    virtual std::string readFile(const Path & path) = 0;

    virtual std::string readLink(const Path & path) = 0;
};

}
948
third_party/nix/src/libstore/gc.cc
vendored
Normal file
@@ -0,0 +1,948 @@
#include "derivations.hh"
|
||||
#include "globals.hh"
|
||||
#include "local-store.hh"
|
||||
#include "finally.hh"
|
||||
|
||||
#include <functional>
|
||||
#include <queue>
|
||||
#include <algorithm>
|
||||
#include <regex>
|
||||
#include <random>
|
||||
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <sys/statvfs.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <unistd.h>
|
||||
#include <climits>
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
static string gcLockName = "gc.lock";
|
||||
static string gcRootsDir = "gcroots";
|
||||
|
||||
|
||||
/* Acquire the global GC lock. This is used to prevent new Nix
|
||||
processes from starting after the temporary root files have been
|
||||
read. To be precise: when they try to create a new temporary root
|
||||
file, they will block until the garbage collector has finished /
|
||||
yielded the GC lock. */
|
||||
AutoCloseFD LocalStore::openGCLock(LockType lockType)
|
||||
{
|
||||
Path fnGCLock = (format("%1%/%2%")
|
||||
% stateDir % gcLockName).str();
|
||||
|
||||
debug(format("acquiring global GC lock '%1%'") % fnGCLock);
|
||||
|
||||
AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600);
|
||||
if (!fdGCLock)
|
||||
throw SysError(format("opening global GC lock '%1%'") % fnGCLock);
|
||||
|
||||
if (!lockFile(fdGCLock.get(), lockType, false)) {
|
||||
printError(format("waiting for the big garbage collector lock..."));
|
||||
lockFile(fdGCLock.get(), lockType, true);
|
||||
}
|
||||
|
||||
/* !!! Restrict read permission on the GC root. Otherwise any
|
||||
process that can open the file for reading can DoS the
|
||||
collector. */
|
||||
|
||||
return fdGCLock;
|
||||
}
|
||||
|
||||
|
||||
static void makeSymlink(const Path & link, const Path & target)
|
||||
{
|
||||
/* Create directories up to `gcRoot'. */
|
||||
createDirs(dirOf(link));
|
||||
|
||||
/* Create the new symlink. */
|
||||
Path tempLink = (format("%1%.tmp-%2%-%3%")
|
||||
% link % getpid() % random()).str();
|
||||
createSymlink(target, tempLink);
|
||||
|
||||
/* Atomically replace the old one. */
|
||||
if (rename(tempLink.c_str(), link.c_str()) == -1)
|
||||
throw SysError(format("cannot rename '%1%' to '%2%'")
|
||||
% tempLink % link);
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::syncWithGC()
|
||||
{
|
||||
AutoCloseFD fdGCLock = openGCLock(ltRead);
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::addIndirectRoot(const Path & path)
|
||||
{
|
||||
string hash = hashString(htSHA1, path).to_string(Base32, false);
|
||||
Path realRoot = canonPath((format("%1%/%2%/auto/%3%")
|
||||
% stateDir % gcRootsDir % hash).str());
|
||||
makeSymlink(realRoot, path);
|
||||
}
|
||||
|
||||
|
||||
Path LocalFSStore::addPermRoot(const Path & _storePath,
|
||||
const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
|
||||
{
|
||||
Path storePath(canonPath(_storePath));
|
||||
Path gcRoot(canonPath(_gcRoot));
|
||||
assertStorePath(storePath);
|
||||
|
||||
if (isInStore(gcRoot))
|
||||
throw Error(format(
|
||||
"creating a garbage collector root (%1%) in the Nix store is forbidden "
|
||||
"(are you running nix-build inside the store?)") % gcRoot);
|
||||
|
||||
if (indirect) {
|
||||
/* Don't clobber the link if it already exists and doesn't
|
||||
point to the Nix store. */
|
||||
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
|
||||
throw Error(format("cannot create symlink '%1%'; already exists") % gcRoot);
|
||||
makeSymlink(gcRoot, storePath);
|
||||
addIndirectRoot(gcRoot);
|
||||
}
|
||||
|
||||
else {
|
||||
if (!allowOutsideRootsDir) {
|
||||
Path rootsDir = canonPath((format("%1%/%2%") % stateDir % gcRootsDir).str());
|
||||
|
||||
if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
|
||||
throw Error(format(
|
||||
"path '%1%' is not a valid garbage collector root; "
|
||||
"it's not in the directory '%2%'")
|
||||
% gcRoot % rootsDir);
|
||||
}
|
||||
|
||||
if (baseNameOf(gcRoot) == baseNameOf(storePath))
|
||||
writeFile(gcRoot, "");
|
||||
else
|
||||
makeSymlink(gcRoot, storePath);
|
||||
}
|
||||
|
||||
/* Check that the root can be found by the garbage collector.
|
||||
!!! This can be very slow on machines that have many roots.
|
||||
Instead of reading all the roots, it would be more efficient to
|
||||
check if the root is in a directory in or linked from the
|
||||
gcroots directory. */
|
||||
if (settings.checkRootReachability) {
|
||||
Roots roots = findRoots(false);
|
||||
if (roots[storePath].count(gcRoot) == 0)
|
||||
printError(
|
||||
format(
|
||||
"warning: '%1%' is not in a directory where the garbage collector looks for roots; "
|
||||
"therefore, '%2%' might be removed by the garbage collector")
|
||||
% gcRoot % storePath);
|
||||
}
|
||||
|
||||
/* Grab the global GC root, causing us to block while a GC is in
|
||||
progress. This prevents the set of permanent roots from
|
||||
increasing while a GC is in progress. */
|
||||
syncWithGC();
|
||||
|
||||
return gcRoot;
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::addTempRoot(const Path & path)
|
||||
{
|
||||
auto state(_state.lock());
|
||||
|
||||
/* Create the temporary roots file for this process. */
|
||||
if (!state->fdTempRoots) {
|
||||
|
||||
while (1) {
|
||||
AutoCloseFD fdGCLock = openGCLock(ltRead);
|
||||
|
||||
if (pathExists(fnTempRoots))
|
||||
/* It *must* be stale, since there can be no two
|
||||
processes with the same pid. */
|
||||
unlink(fnTempRoots.c_str());
|
||||
|
||||
state->fdTempRoots = openLockFile(fnTempRoots, true);
|
||||
|
||||
fdGCLock = -1;
|
||||
|
||||
debug(format("acquiring read lock on '%1%'") % fnTempRoots);
|
||||
lockFile(state->fdTempRoots.get(), ltRead, true);
|
||||
|
||||
/* Check whether the garbage collector didn't get in our
|
||||
way. */
|
||||
struct stat st;
|
||||
if (fstat(state->fdTempRoots.get(), &st) == -1)
|
||||
throw SysError(format("statting '%1%'") % fnTempRoots);
|
||||
if (st.st_size == 0) break;
|
||||
|
||||
/* The garbage collector deleted this file before we could
|
||||
get a lock. (It won't delete the file after we get a
|
||||
lock.) Try again. */
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/* Upgrade the lock to a write lock. This will cause us to block
|
||||
if the garbage collector is holding our lock. */
|
||||
debug(format("acquiring write lock on '%1%'") % fnTempRoots);
|
||||
lockFile(state->fdTempRoots.get(), ltWrite, true);
|
||||
|
||||
string s = path + '\0';
|
||||
writeFull(state->fdTempRoots.get(), s);
|
||||
|
||||
/* Downgrade to a read lock. */
|
||||
debug(format("downgrading to read lock on '%1%'") % fnTempRoots);
|
||||
lockFile(state->fdTempRoots.get(), ltRead, true);
|
||||
}
|
||||
|
||||
|
||||
static std::string censored = "{censored}";
|
||||
|
||||
|
||||
void LocalStore::findTempRoots(FDs & fds, Roots & tempRoots, bool censor)
|
||||
{
|
||||
/* Read the `temproots' directory for per-process temporary root
|
||||
files. */
|
||||
for (auto & i : readDirectory(tempRootsDir)) {
|
||||
Path path = tempRootsDir + "/" + i.name;
|
||||
|
||||
pid_t pid = std::stoi(i.name);
|
||||
|
||||
debug(format("reading temporary root file '%1%'") % path);
|
||||
FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666)));
|
||||
if (!*fd) {
|
||||
/* It's okay if the file has disappeared. */
|
||||
if (errno == ENOENT) continue;
|
||||
throw SysError(format("opening temporary roots file '%1%'") % path);
|
||||
}
|
||||
|
||||
/* This should work, but doesn't, for some reason. */
|
||||
//FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
|
||||
//if (*fd == -1) continue;
|
||||
|
||||
/* Try to acquire a write lock without blocking. This can
|
||||
only succeed if the owning process has died. In that case
|
||||
we don't care about its temporary roots. */
|
||||
if (lockFile(fd->get(), ltWrite, false)) {
|
||||
printError(format("removing stale temporary roots file '%1%'") % path);
|
||||
unlink(path.c_str());
|
||||
writeFull(fd->get(), "d");
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Acquire a read lock. This will prevent the owning process
|
||||
from upgrading to a write lock, therefore it will block in
|
||||
addTempRoot(). */
|
||||
debug(format("waiting for read lock on '%1%'") % path);
|
||||
lockFile(fd->get(), ltRead, true);
|
||||
|
||||
/* Read the entire file. */
|
||||
string contents = readFile(fd->get());
|
||||
|
||||
/* Extract the roots. */
|
||||
string::size_type pos = 0, end;
|
||||
|
||||
while ((end = contents.find((char) 0, pos)) != string::npos) {
|
||||
Path root(contents, pos, end - pos);
|
||||
debug("got temporary root '%s'", root);
|
||||
assertStorePath(root);
|
||||
tempRoots[root].emplace(censor ? censored : fmt("{temp:%d}", pid));
|
||||
pos = end + 1;
|
||||
}
|
||||
|
||||
fds.push_back(fd); /* keep open */
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::findRoots(const Path & path, unsigned char type, Roots & roots)
|
||||
{
|
||||
auto foundRoot = [&](const Path & path, const Path & target) {
|
||||
Path storePath = toStorePath(target);
|
||||
if (isStorePath(storePath) && isValidPath(storePath))
|
||||
roots[storePath].emplace(path);
|
||||
else
|
||||
printInfo(format("skipping invalid root from '%1%' to '%2%'") % path % storePath);
|
||||
};
|
||||
|
||||
try {
|
||||
|
||||
if (type == DT_UNKNOWN)
|
||||
type = getFileType(path);
|
||||
|
||||
if (type == DT_DIR) {
|
||||
for (auto & i : readDirectory(path))
|
||||
findRoots(path + "/" + i.name, i.type, roots);
|
||||
}
|
||||
|
||||
else if (type == DT_LNK) {
|
||||
Path target = readLink(path);
|
||||
if (isInStore(target))
|
||||
foundRoot(path, target);
|
||||
|
||||
/* Handle indirect roots. */
|
||||
else {
|
||||
target = absPath(target, dirOf(path));
|
||||
if (!pathExists(target)) {
|
||||
if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) {
|
||||
printInfo(format("removing stale link from '%1%' to '%2%'") % path % target);
|
||||
unlink(path.c_str());
|
||||
}
|
||||
} else {
|
||||
struct stat st2 = lstat(target);
|
||||
if (!S_ISLNK(st2.st_mode)) return;
|
||||
Path target2 = readLink(target);
|
||||
if (isInStore(target2)) foundRoot(target, target2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
else if (type == DT_REG) {
|
||||
Path storePath = storeDir + "/" + baseNameOf(path);
|
||||
if (isStorePath(storePath) && isValidPath(storePath))
|
||||
roots[storePath].emplace(path);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
catch (SysError & e) {
|
||||
/* We only ignore permanent failures. */
|
||||
if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
|
||||
printInfo(format("cannot read potential root '%1%'") % path);
|
||||
else
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void LocalStore::findRootsNoTemp(Roots & roots, bool censor)
|
||||
{
|
||||
/* Process direct roots in {gcroots,profiles}. */
|
||||
findRoots(stateDir + "/" + gcRootsDir, DT_UNKNOWN, roots);
|
||||
findRoots(stateDir + "/profiles", DT_UNKNOWN, roots);
|
||||
|
||||
/* Add additional roots returned by different platforms-specific
|
||||
heuristics. This is typically used to add running programs to
|
||||
the set of roots (to prevent them from being garbage collected). */
|
||||
findRuntimeRoots(roots, censor);
|
||||
}
|
||||
|
||||
|
||||
Roots LocalStore::findRoots(bool censor)
|
||||
{
|
||||
Roots roots;
|
||||
findRootsNoTemp(roots, censor);
|
||||
|
||||
FDs fds;
|
||||
findTempRoots(fds, roots, censor);
|
||||
|
||||
return roots;
|
||||
}
|
||||
|
||||
static void readProcLink(const string & file, Roots & roots)
|
||||
{
|
||||
/* 64 is the starting buffer size gnu readlink uses... */
|
||||
auto bufsiz = ssize_t{64};
|
||||
try_again:
|
||||
char buf[bufsiz];
|
||||
auto res = readlink(file.c_str(), buf, bufsiz);
|
||||
if (res == -1) {
|
||||
if (errno == ENOENT || errno == EACCES || errno == ESRCH)
|
||||
return;
|
||||
throw SysError("reading symlink");
|
||||
}
|
||||
if (res == bufsiz) {
|
||||
if (SSIZE_MAX / 2 < bufsiz)
|
||||
throw Error("stupidly long symlink");
|
||||
bufsiz *= 2;
|
||||
goto try_again;
|
||||
}
|
||||
if (res > 0 && buf[0] == '/')
|
||||
roots[std::string(static_cast<char *>(buf), res)]
|
||||
.emplace(file);
|
||||
}
|
||||
|
||||
static string quoteRegexChars(const string & raw)
|
||||
{
|
||||
static auto specialRegex = std::regex(R"([.^$\\*+?()\[\]{}|])");
|
||||
return std::regex_replace(raw, specialRegex, R"(\$&)");
|
||||
}
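
quoteRegexChars() leans on std::regex_replace's `$&` back-reference, which substitutes the whole match; prefixing it with a backslash therefore escapes each metacharacter in turn. A standalone check (the input string and expected output are mine, for illustration):

// Demonstrates the R"(\$&)" replacement trick used by quoteRegexChars():
// '$&' expands to the matched character, so every regex metacharacter
// ends up with a backslash in front of it.
#include <iostream>
#include <regex>
#include <string>

int main() {
    static const std::regex special(R"([.^$\\*+?()\[\]{}|])");
    std::string raw = "/nix/store (v2.x)";
    std::string quoted = std::regex_replace(raw, special, R"(\$&)");
    std::cout << quoted << "\n";  // prints: /nix/store \(v2\.x\)
    return 0;
}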


static void readFileRoots(const char * path, Roots & roots)
{
    try {
        roots[readFile(path)].emplace(path);
    } catch (SysError & e) {
        if (e.errNo != ENOENT && e.errNo != EACCES)
            throw;
    }
}


void LocalStore::findRuntimeRoots(Roots & roots, bool censor)
{
    Roots unchecked;

    auto procDir = AutoCloseDir{opendir("/proc")};
    if (procDir) {
        struct dirent * ent;
        auto digitsRegex = std::regex(R"(^\d+$)");
        auto mapRegex = std::regex(R"(^\s*\S+\s+\S+\s+\S+\s+\S+\s+\S+\s+(/\S+)\s*$)");
        auto storePathRegex = std::regex(quoteRegexChars(storeDir) + R"(/[0-9a-z]+[0-9a-zA-Z\+\-\._\?=]*)");
        while (errno = 0, ent = readdir(procDir.get())) {
            checkInterrupt();
            if (std::regex_match(ent->d_name, digitsRegex)) {
                readProcLink(fmt("/proc/%s/exe", ent->d_name), unchecked);
                readProcLink(fmt("/proc/%s/cwd", ent->d_name), unchecked);

                auto fdStr = fmt("/proc/%s/fd", ent->d_name);
                auto fdDir = AutoCloseDir(opendir(fdStr.c_str()));
                if (!fdDir) {
                    if (errno == ENOENT || errno == EACCES)
                        continue;
                    throw SysError(format("opening %1%") % fdStr);
                }
                struct dirent * fd_ent;
                while (errno = 0, fd_ent = readdir(fdDir.get())) {
                    if (fd_ent->d_name[0] != '.')
                        readProcLink(fmt("%s/%s", fdStr, fd_ent->d_name), unchecked);
                }
                if (errno) {
                    if (errno == ESRCH)
                        continue;
                    throw SysError(format("iterating /proc/%1%/fd") % ent->d_name);
                }
                fdDir.reset();

                try {
                    auto mapFile = fmt("/proc/%s/maps", ent->d_name);
                    auto mapLines = tokenizeString<std::vector<string>>(readFile(mapFile, true), "\n");
                    for (const auto & line : mapLines) {
                        auto match = std::smatch{};
                        if (std::regex_match(line, match, mapRegex))
                            unchecked[match[1]].emplace(mapFile);
                    }

                    auto envFile = fmt("/proc/%s/environ", ent->d_name);
                    auto envString = readFile(envFile, true);
                    auto env_end = std::sregex_iterator{};
                    for (auto i = std::sregex_iterator{envString.begin(), envString.end(), storePathRegex}; i != env_end; ++i)
                        unchecked[i->str()].emplace(envFile);
                } catch (SysError & e) {
                    if (e.errNo == ENOENT || e.errNo == EACCES || e.errNo == ESRCH)
                        continue;
                    throw;
                }
            }
        }
        if (errno)
            throw SysError("iterating /proc");
    }

#if !defined(__linux__)
    // lsof is really slow on OS X.  This actually causes the
    // gc-concurrent.sh test to fail.
    // See: https://github.com/NixOS/nix/issues/3011
    // Because of this we disable lsof when running the tests.
    if (getEnv("_NIX_TEST_NO_LSOF") == "") {
        try {
            std::regex lsofRegex(R"(^n(/.*)$)");
            auto lsofLines =
                tokenizeString<std::vector<string>>(runProgram(LSOF, true, { "-n", "-w", "-F", "n" }), "\n");
            for (const auto & line : lsofLines) {
                std::smatch match;
                if (std::regex_match(line, match, lsofRegex))
                    unchecked[match[1]].emplace("{lsof}");
            }
        } catch (ExecError & e) {
            /* lsof is not installed, or lsof failed. */
        }
    }
#endif

#if defined(__linux__)
    readFileRoots("/proc/sys/kernel/modprobe", unchecked);
    readFileRoots("/proc/sys/kernel/fbsplash", unchecked);
    readFileRoots("/proc/sys/kernel/poweroff_cmd", unchecked);
#endif

    for (auto & [target, links] : unchecked) {
        if (isInStore(target)) {
            Path path = toStorePath(target);
            if (isStorePath(path) && isValidPath(path)) {
                debug(format("got additional root '%1%'") % path);
                if (censor)
                    roots[path].insert(censored);
                else
                    roots[path].insert(links.begin(), links.end());
            }
        }
    }
}
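
The /proc walk above can be exercised in isolation. A minimal Linux-only sketch that applies the same readlink-over-/proc idea, scoped to /proc/self/fd so it needs no privileges:

// Resolve every symlink in /proc/self/fd, the same primitive
// findRuntimeRoots() applies to every process under /proc.
#include <dirent.h>
#include <unistd.h>
#include <climits>
#include <cstdio>
#include <string>

int main() {
    DIR * dir = opendir("/proc/self/fd");
    if (!dir) { perror("opendir"); return 1; }
    struct dirent * ent;
    while ((ent = readdir(dir))) {
        if (ent->d_name[0] == '.') continue;
        std::string link = std::string("/proc/self/fd/") + ent->d_name;
        char buf[PATH_MAX];
        ssize_t n = readlink(link.c_str(), buf, sizeof(buf) - 1);
        if (n < 0) continue;            // fd may have been closed meanwhile
        buf[n] = '\0';
        printf("%s -> %s\n", link.c_str(), buf);
    }
    closedir(dir);
    return 0;
}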


struct GCLimitReached { };


struct LocalStore::GCState
{
    GCOptions options;
    GCResults & results;
    PathSet roots;
    PathSet tempRoots;
    PathSet dead;
    PathSet alive;
    bool gcKeepOutputs;
    bool gcKeepDerivations;
    unsigned long long bytesInvalidated;
    bool moveToTrash = true;
    bool shouldDelete;
    GCState(GCResults & results_) : results(results_), bytesInvalidated(0) { }
};


bool LocalStore::isActiveTempFile(const GCState & state,
    const Path & path, const string & suffix)
{
    return hasSuffix(path, suffix)
        && state.tempRoots.find(string(path, 0, path.size() - suffix.size())) != state.tempRoots.end();
}


void LocalStore::deleteGarbage(GCState & state, const Path & path)
{
    unsigned long long bytesFreed;
    deletePath(path, bytesFreed);
    state.results.bytesFreed += bytesFreed;
}


void LocalStore::deletePathRecursive(GCState & state, const Path & path)
{
    checkInterrupt();

    unsigned long long size = 0;

    if (isStorePath(path) && isValidPath(path)) {
        PathSet referrers;
        queryReferrers(path, referrers);
        for (auto & i : referrers)
            if (i != path) deletePathRecursive(state, i);
        size = queryPathInfo(path)->narSize;
        invalidatePathChecked(path);
    }

    Path realPath = realStoreDir + "/" + baseNameOf(path);

    struct stat st;
    if (lstat(realPath.c_str(), &st)) {
        if (errno == ENOENT) return;
        throw SysError(format("getting status of %1%") % realPath);
    }

    printInfo(format("deleting '%1%'") % path);

    state.results.paths.insert(path);

    /* If the path is not a regular file or symlink, move it to the
       trash directory.  The move is to ensure that later (when we're
       not holding the global GC lock) we can delete the path without
       being afraid that the path has become alive again.  Otherwise
       delete it right away. */
    if (state.moveToTrash && S_ISDIR(st.st_mode)) {
        // Estimate the amount freed using the narSize field.  FIXME:
        // if the path was not valid, need to determine the actual
        // size.
        try {
            if (chmod(realPath.c_str(), st.st_mode | S_IWUSR) == -1)
                throw SysError(format("making '%1%' writable") % realPath);
            Path tmp = trashDir + "/" + baseNameOf(path);
            if (rename(realPath.c_str(), tmp.c_str()))
                throw SysError(format("unable to rename '%1%' to '%2%'") % realPath % tmp);
            state.bytesInvalidated += size;
        } catch (SysError & e) {
            if (e.errNo == ENOSPC) {
                printInfo(format("note: can't move '%1%': %2%") % realPath % e.msg());
                deleteGarbage(state, realPath);
            }
        }
    } else
        deleteGarbage(state, realPath);

    if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
        printInfo(format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
        throw GCLimitReached();
    }
}


bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & path)
{
    if (visited.count(path)) return false;

    if (state.alive.count(path)) return true;

    if (state.dead.count(path)) return false;

    if (state.roots.count(path)) {
        debug(format("cannot delete '%1%' because it's a root") % path);
        state.alive.insert(path);
        return true;
    }

    visited.insert(path);

    if (!isStorePath(path) || !isValidPath(path)) return false;

    PathSet incoming;

    /* Don't delete this path if any of its referrers are alive. */
    queryReferrers(path, incoming);

    /* If keep-derivations is set and this is a derivation, then
       don't delete the derivation if any of the outputs are alive. */
    if (state.gcKeepDerivations && isDerivation(path)) {
        PathSet outputs = queryDerivationOutputs(path);
        for (auto & i : outputs)
            if (isValidPath(i) && queryPathInfo(i)->deriver == path)
                incoming.insert(i);
    }

    /* If keep-outputs is set, then don't delete this path if there
       are derivers of this path that are not garbage. */
    if (state.gcKeepOutputs) {
        PathSet derivers = queryValidDerivers(path);
        for (auto & i : derivers)
            incoming.insert(i);
    }

    for (auto & i : incoming)
        if (i != path)
            if (canReachRoot(state, visited, i)) {
                state.alive.insert(path);
                return true;
            }

    return false;
}


void LocalStore::tryToDelete(GCState & state, const Path & path)
{
    checkInterrupt();

    auto realPath = realStoreDir + "/" + baseNameOf(path);
    if (realPath == linksDir || realPath == trashDir) return;

    //Activity act(*logger, lvlDebug, format("considering whether to delete '%1%'") % path);

    if (!isStorePath(path) || !isValidPath(path)) {
        /* A lock file belonging to a path that we're building right
           now isn't garbage. */
        if (isActiveTempFile(state, path, ".lock")) return;

        /* Don't delete .chroot directories for derivations that are
           currently being built. */
        if (isActiveTempFile(state, path, ".chroot")) return;

        /* Don't delete .check directories for derivations that are
           currently being built, because we may need to run
           diff-hook. */
        if (isActiveTempFile(state, path, ".check")) return;
    }

    PathSet visited;

    if (canReachRoot(state, visited, path)) {
        debug(format("cannot delete '%1%' because it's still reachable") % path);
    } else {
        /* No path we visited was a root, so everything is garbage.
           But we only delete ‘path’ and its referrers here so that
           ‘nix-store --delete’ doesn't have the unexpected effect of
           recursing into derivations and outputs. */
        state.dead.insert(visited.begin(), visited.end());
        if (state.shouldDelete)
            deletePathRecursive(state, path);
    }
}


/* Unlink all files in /nix/store/.links that have a link count of 1,
   which indicates that there are no other links and so they can be
   safely deleted.  FIXME: race condition with optimisePath(): we
   might see a link count of 1 just before optimisePath() increases
   the link count. */
void LocalStore::removeUnusedLinks(const GCState & state)
{
    AutoCloseDir dir(opendir(linksDir.c_str()));
    if (!dir) throw SysError(format("opening directory '%1%'") % linksDir);

    long long actualSize = 0, unsharedSize = 0;

    struct dirent * dirent;
    while (errno = 0, dirent = readdir(dir.get())) {
        checkInterrupt();
        string name = dirent->d_name;
        if (name == "." || name == "..") continue;
        Path path = linksDir + "/" + name;

        struct stat st;
        if (lstat(path.c_str(), &st) == -1)
            throw SysError(format("statting '%1%'") % path);

        if (st.st_nlink != 1) {
            actualSize += st.st_size;
            unsharedSize += (st.st_nlink - 1) * st.st_size;
            continue;
        }

        printMsg(lvlTalkative, format("deleting unused link '%1%'") % path);

        if (unlink(path.c_str()) == -1)
            throw SysError(format("deleting '%1%'") % path);

        state.results.bytesFreed += st.st_size;
    }

    struct stat st;
    if (stat(linksDir.c_str(), &st) == -1)
        throw SysError(format("statting '%1%'") % linksDir);
    long long overhead = st.st_blocks * 512ULL;

    printInfo(format("note: currently hard linking saves %.2f MiB")
        % ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
}
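
The savings figure printed above is unsharedSize - actualSize - overhead: for each multiply-linked file, what a non-deduplicated store would cost, minus what is actually stored, minus the .links directory's own disk usage. A toy check of the arithmetic (all numbers invented for illustration):

// Toy check of the hard-link savings formula printed by
// removeUnusedLinks(). Numbers are invented for illustration.
#include <cstdio>

int main() {
    // One 100 MiB file with st_nlink == 3: the .links entry plus two
    // store paths sharing its contents.
    long long stSize = 100LL << 20, stNlink = 3;
    long long actualSize = stSize;                    // stored once
    long long unsharedSize = (stNlink - 1) * stSize;  // what copies would cost
    long long overhead = 4096;                        // .links dir blocks
    double savedMiB = (unsharedSize - actualSize - overhead) / (1024.0 * 1024.0);
    printf("currently hard linking saves %.2f MiB\n", savedMiB);  // ~100.00
    return 0;
}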


void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    GCState state(results);
    state.options = options;
    state.gcKeepOutputs = settings.gcKeepOutputs;
    state.gcKeepDerivations = settings.gcKeepDerivations;

    /* Using `--ignore-liveness' with `--delete' can have unintended
       consequences if `keep-outputs' or `keep-derivations' are true
       (the garbage collector will recurse into deleting the outputs
       or derivers, respectively).  So disable them. */
    if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
        state.gcKeepOutputs = false;
        state.gcKeepDerivations = false;
    }

    state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;

    if (state.shouldDelete)
        deletePath(reservedPath);

    /* Acquire the global GC root.  This prevents
       a) New roots from being added.
       b) Processes from creating new temporary root files. */
    AutoCloseFD fdGCLock = openGCLock(ltWrite);

    /* Find the roots.  Since we've grabbed the GC lock, the set of
       permanent roots cannot increase now. */
    printError(format("finding garbage collector roots..."));
    Roots rootMap;
    if (!options.ignoreLiveness)
        findRootsNoTemp(rootMap, true);

    for (auto & i : rootMap) state.roots.insert(i.first);

    /* Read the temporary roots.  This acquires read locks on all
       per-process temporary root files.  So after this point no paths
       can be added to the set of temporary roots. */
    FDs fds;
    Roots tempRoots;
    findTempRoots(fds, tempRoots, true);
    for (auto & root : tempRoots)
        state.tempRoots.insert(root.first);
    state.roots.insert(state.tempRoots.begin(), state.tempRoots.end());

    /* After this point the set of roots or temporary roots cannot
       increase, since we hold locks on everything.  So everything
       that is not reachable from `roots' is garbage. */

    if (state.shouldDelete) {
        if (pathExists(trashDir)) deleteGarbage(state, trashDir);
        try {
            createDirs(trashDir);
        } catch (SysError & e) {
            if (e.errNo == ENOSPC) {
                printInfo(format("note: can't create trash directory: %1%") % e.msg());
                state.moveToTrash = false;
            }
        }
    }

    /* Now either delete all garbage paths, or just the specified
       paths (for gcDeleteSpecific). */

    if (options.action == GCOptions::gcDeleteSpecific) {

        for (auto & i : options.pathsToDelete) {
            assertStorePath(i);
            tryToDelete(state, i);
            if (state.dead.find(i) == state.dead.end())
                throw Error(format("cannot delete path '%1%' since it is still alive") % i);
        }

    } else if (options.maxFreed > 0) {

        if (state.shouldDelete)
            printError(format("deleting garbage..."));
        else
            printError(format("determining live/dead paths..."));

        try {

            AutoCloseDir dir(opendir(realStoreDir.c_str()));
            if (!dir) throw SysError(format("opening directory '%1%'") % realStoreDir);

            /* Read the store and immediately delete all paths that
               aren't valid.  When using --max-freed etc., deleting
               invalid paths is preferred over deleting unreachable
               paths, since unreachable paths could become reachable
               again.  We don't use readDirectory() here so that GCing
               can start faster. */
            Paths entries;
            struct dirent * dirent;
            while (errno = 0, dirent = readdir(dir.get())) {
                checkInterrupt();
                string name = dirent->d_name;
                if (name == "." || name == "..") continue;
                Path path = storeDir + "/" + name;
                if (isStorePath(path) && isValidPath(path))
                    entries.push_back(path);
                else
                    tryToDelete(state, path);
            }

            dir.reset();

            /* Now delete the unreachable valid paths.  Randomise the
               order in which we delete entries to make the collector
               less biased towards deleting paths that come
               alphabetically first (e.g. /nix/store/000...).  This
               matters when using --max-freed etc. */
            vector<Path> entries_(entries.begin(), entries.end());
            std::mt19937 gen(1);
            std::shuffle(entries_.begin(), entries_.end(), gen);

            for (auto & i : entries_)
                tryToDelete(state, i);

        } catch (GCLimitReached & e) {
        }
    }

    if (state.options.action == GCOptions::gcReturnLive) {
        state.results.paths = state.alive;
        return;
    }

    if (state.options.action == GCOptions::gcReturnDead) {
        state.results.paths = state.dead;
        return;
    }

    /* Allow other processes to add to the store from here on. */
    fdGCLock = -1;
    fds.clear();

    /* Delete the trash directory. */
    printInfo(format("deleting '%1%'") % trashDir);
    deleteGarbage(state, trashDir);

    /* Clean up the links directory. */
    if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
        printError(format("deleting unused links..."));
        removeUnusedLinks(state);
    }

    /* While we're at it, vacuum the database. */
    //if (options.action == GCOptions::gcDeleteDead) vacuumDB();
}
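
For orientation, collectGarbage() is normally driven through the Store API with a GCOptions value. A sketch of a caller, assuming the openStore()/GCOptions/GCResults declarations from this tree's store-api.hh (not a program from the repo itself):

// Sketch: invoking the collector through the Store API in this tree.
#include "store-api.hh"

using namespace nix;

void runGC()
{
    auto store = openStore();  // the local store by default

    GCOptions options;
    options.action = GCOptions::gcDeleteDead;  // delete everything unreachable
    options.maxFreed = 1ULL << 30;             // stop after ~1 GiB is freed

    GCResults results;
    store->collectGarbage(options, results);

    printInfo("freed %d bytes across %d paths",
        results.bytesFreed, results.paths.size());
}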


void LocalStore::autoGC(bool sync)
{
    static auto fakeFreeSpaceFile = getEnv("_NIX_TEST_FREE_SPACE_FILE", "");

    auto getAvail = [this]() -> uint64_t {
        if (!fakeFreeSpaceFile.empty())
            return std::stoll(readFile(fakeFreeSpaceFile));

        struct statvfs st;
        if (statvfs(realStoreDir.c_str(), &st))
            throw SysError("getting filesystem info about '%s'", realStoreDir);

        return (uint64_t) st.f_bavail * st.f_bsize;
    };

    std::shared_future<void> future;

    {
        auto state(_state.lock());

        if (state->gcRunning) {
            future = state->gcFuture;
            debug("waiting for auto-GC to finish");
            goto sync;
        }

        auto now = std::chrono::steady_clock::now();

        if (now < state->lastGCCheck + std::chrono::seconds(settings.minFreeCheckInterval)) return;

        auto avail = getAvail();

        state->lastGCCheck = now;

        if (avail >= settings.minFree || avail >= settings.maxFree) return;

        if (avail > state->availAfterGC * 0.97) return;

        state->gcRunning = true;

        std::promise<void> promise;
        future = state->gcFuture = promise.get_future().share();

        std::thread([promise{std::move(promise)}, this, avail, getAvail]() mutable {

            try {

                /* Wake up any threads waiting for the auto-GC to finish. */
                Finally wakeup([&]() {
                    auto state(_state.lock());
                    state->gcRunning = false;
                    state->lastGCCheck = std::chrono::steady_clock::now();
                    promise.set_value();
                });

                GCOptions options;
                options.maxFreed = settings.maxFree - avail;

                printInfo("running auto-GC to free %d bytes", options.maxFreed);

                GCResults results;

                collectGarbage(options, results);

                _state.lock()->availAfterGC = getAvail();

            } catch (...) {
                // FIXME: we could propagate the exception to the
                // future, but we don't really care.
                ignoreException();
            }

        }).detach();
    }

 sync:
    // Wait for the future outside of the state lock.
    if (sync) future.get();
}


}
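
The free-space probe that drives autoGC() is plain statvfs(); a standalone version (path hard-coded to "/" for illustration):

// Standalone version of autoGC()'s free-space probe: bytes available
// to unprivileged users are f_bavail * f_bsize.
#include <sys/statvfs.h>
#include <cstdint>
#include <cstdio>

int main() {
    struct statvfs st;
    if (statvfs("/", &st) != 0) { perror("statvfs"); return 1; }
    uint64_t avail = (uint64_t) st.f_bavail * st.f_bsize;
    printf("available: %llu bytes\n", (unsigned long long) avail);
    return 0;
}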
187 third_party/nix/src/libstore/globals.cc vendored Normal file

@@ -0,0 +1,187 @@
#include "globals.hh"
|
||||
#include "util.hh"
|
||||
#include "archive.hh"
|
||||
#include "args.hh"
|
||||
|
||||
#include <algorithm>
|
||||
#include <map>
|
||||
#include <thread>
|
||||
#include <dlfcn.h>
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
/* The default location of the daemon socket, relative to nixStateDir.
|
||||
The socket is in a directory to allow you to control access to the
|
||||
Nix daemon by setting the mode/ownership of the directory
|
||||
appropriately. (This wouldn't work on the socket itself since it
|
||||
must be deleted and recreated on startup.) */
|
||||
#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"
|
||||
|
||||
/* chroot-like behavior from Apple's sandbox */
|
||||
#if __APPLE__
|
||||
#define DEFAULT_ALLOWED_IMPURE_PREFIXES "/System/Library /usr/lib /dev /bin/sh"
|
||||
#else
|
||||
#define DEFAULT_ALLOWED_IMPURE_PREFIXES ""
|
||||
#endif
|
||||
|
||||
Settings settings;
|
||||
|
||||
static GlobalConfig::Register r1(&settings);
|
||||
|
||||
Settings::Settings()
|
||||
: nixPrefix(NIX_PREFIX)
|
||||
, nixStore(canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR))))
|
||||
, nixDataDir(canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR)))
|
||||
, nixLogDir(canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR)))
|
||||
, nixStateDir(canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR)))
|
||||
, nixConfDir(canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR)))
|
||||
, nixLibexecDir(canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR)))
|
||||
, nixBinDir(canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR)))
|
||||
, nixManDir(canonPath(NIX_MAN_DIR))
|
||||
, nixDaemonSocketFile(canonPath(nixStateDir + DEFAULT_SOCKET_PATH))
|
||||
{
|
||||
buildUsersGroup = getuid() == 0 ? "nixbld" : "";
|
||||
lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
|
||||
|
||||
caFile = getEnv("NIX_SSL_CERT_FILE", getEnv("SSL_CERT_FILE", ""));
|
||||
if (caFile == "") {
|
||||
for (auto & fn : {"/etc/ssl/certs/ca-certificates.crt", "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt"})
|
||||
if (pathExists(fn)) {
|
||||
caFile = fn;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Backwards compatibility. */
|
||||
auto s = getEnv("NIX_REMOTE_SYSTEMS");
|
||||
if (s != "") {
|
||||
Strings ss;
|
||||
for (auto & p : tokenizeString<Strings>(s, ":"))
|
||||
ss.push_back("@" + p);
|
||||
builders = concatStringsSep(" ", ss);
|
||||
}
|
||||
|
||||
#if defined(__linux__) && defined(SANDBOX_SHELL)
|
||||
sandboxPaths = tokenizeString<StringSet>("/bin/sh=" SANDBOX_SHELL);
|
||||
#endif
|
||||
|
||||
allowedImpureHostPrefixes = tokenizeString<StringSet>(DEFAULT_ALLOWED_IMPURE_PREFIXES);
|
||||
}
|
||||
|
||||
void loadConfFile()
|
||||
{
|
||||
globalConfig.applyConfigFile(settings.nixConfDir + "/nix.conf");
|
||||
|
||||
/* We only want to send overrides to the daemon, i.e. stuff from
|
||||
~/.nix/nix.conf or the command line. */
|
||||
globalConfig.resetOverriden();
|
||||
|
||||
auto dirs = getConfigDirs();
|
||||
// Iterate over them in reverse so that the ones appearing first in the path take priority
|
||||
for (auto dir = dirs.rbegin(); dir != dirs.rend(); dir++) {
|
||||
globalConfig.applyConfigFile(*dir + "/nix/nix.conf");
|
||||
}
|
||||
}
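
loadConfFile() walks getConfigDirs() in reverse so that the highest-priority file is applied last and wins. The same "first in $XDG_CONFIG_DIRS wins" rule can be shown standalone (a sketch, not repo code; it assumes the usual XDG fallback of /etc/xdg):

// Sketch of the config-dir precedence used by loadConfFile(): apply
// lowest-priority directories first so earlier entries in
// $XDG_CONFIG_DIRS override them.
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
    const char * env = std::getenv("XDG_CONFIG_DIRS");
    std::string dirs = env ? env : "/etc/xdg";  // XDG default fallback
    std::vector<std::string> order;
    std::stringstream ss(dirs);
    for (std::string d; std::getline(ss, d, ':');)
        if (!d.empty()) order.push_back(d);
    // Reverse iteration: the last file applied wins, so apply the
    // highest-priority directory last.
    for (auto it = order.rbegin(); it != order.rend(); ++it)
        std::cout << "would apply " << *it << "/nix/nix.conf\n";
    return 0;
}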

unsigned int Settings::getDefaultCores()
{
    return std::max(1U, std::thread::hardware_concurrency());
}

StringSet Settings::getDefaultSystemFeatures()
{
    /* For backwards compatibility, accept some "features" that are
       used in Nixpkgs to route builds to certain machines but don't
       actually require anything special on the machines. */
    StringSet features{"nixos-test", "benchmark", "big-parallel"};

#if __linux__
    if (access("/dev/kvm", R_OK | W_OK) == 0)
        features.insert("kvm");
#endif

    return features;
}

const string nixVersion = PACKAGE_VERSION;

template<> void BaseSetting<SandboxMode>::set(const std::string & str)
{
    if (str == "true") value = smEnabled;
    else if (str == "relaxed") value = smRelaxed;
    else if (str == "false") value = smDisabled;
    else throw UsageError("option '%s' has invalid value '%s'", name, str);
}

template<> std::string BaseSetting<SandboxMode>::to_string()
{
    if (value == smEnabled) return "true";
    else if (value == smRelaxed) return "relaxed";
    else if (value == smDisabled) return "false";
    else abort();
}

template<> void BaseSetting<SandboxMode>::toJSON(JSONPlaceholder & out)
{
    AbstractSetting::toJSON(out);
}

template<> void BaseSetting<SandboxMode>::convertToArg(Args & args, const std::string & category)
{
    args.mkFlag()
        .longName(name)
        .description("Enable sandboxing.")
        .handler([=](std::vector<std::string> ss) { override(smEnabled); })
        .category(category);
    args.mkFlag()
        .longName("no-" + name)
        .description("Disable sandboxing.")
        .handler([=](std::vector<std::string> ss) { override(smDisabled); })
        .category(category);
    args.mkFlag()
        .longName("relaxed-" + name)
        .description("Enable sandboxing, but allow builds to disable it.")
        .handler([=](std::vector<std::string> ss) { override(smRelaxed); })
        .category(category);
}

void MaxBuildJobsSetting::set(const std::string & str)
{
    if (str == "auto") value = std::max(1U, std::thread::hardware_concurrency());
    else if (!string2Int(str, value))
        throw UsageError("configuration setting '%s' should be 'auto' or an integer", name);
}


void initPlugins()
{
    for (const auto & pluginFile : settings.pluginFiles.get()) {
        Paths pluginFiles;
        try {
            auto ents = readDirectory(pluginFile);
            for (const auto & ent : ents)
                pluginFiles.emplace_back(pluginFile + "/" + ent.name);
        } catch (SysError & e) {
            if (e.errNo != ENOTDIR)
                throw;
            pluginFiles.emplace_back(pluginFile);
        }
        for (const auto & file : pluginFiles) {
            /* handle is purposefully leaked as there may be state in the
               DSO needed by the action of the plugin. */
            void *handle =
                dlopen(file.c_str(), RTLD_LAZY | RTLD_LOCAL);
            if (!handle)
                throw Error("could not dynamically open plugin file '%s': %s", file, dlerror());
        }
    }

    /* Since plugins can add settings, try to re-apply previously
       unknown settings. */
    globalConfig.reapplyUnknownSettings();
    globalConfig.warnUnknownSettings();
}

}
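
Plugins here are plain shared objects whose static constructors run at dlopen() time, which is why the handle is deliberately never closed. A minimal standalone loader showing the same call (the plugin path is a placeholder, not something shipped in this tree):

// Minimal counterpart to initPlugins(): dlopen() a shared object and
// rely on its static constructors for side effects.
#include <dlfcn.h>
#include <cstdio>

int main() {
    void * handle = dlopen("./plugin.so", RTLD_LAZY | RTLD_LOCAL);
    if (!handle) {
        fprintf(stderr, "could not open plugin: %s\n", dlerror());
        return 1;
    }
    // Deliberately leaked, as in initPlugins(): the DSO may hold state
    // (e.g. registered settings) that must outlive this scope.
    return 0;
}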
370 third_party/nix/src/libstore/globals.hh vendored Normal file

@@ -0,0 +1,370 @@
#pragma once

#include "types.hh"
#include "config.hh"
#include "util.hh"

#include <map>
#include <limits>

#include <sys/types.h>

namespace nix {

typedef enum { smEnabled, smRelaxed, smDisabled } SandboxMode;

struct MaxBuildJobsSetting : public BaseSetting<unsigned int>
{
    MaxBuildJobsSetting(Config * options,
        unsigned int def,
        const std::string & name,
        const std::string & description,
        const std::set<std::string> & aliases = {})
        : BaseSetting<unsigned int>(def, name, description, aliases)
    {
        options->addSetting(this);
    }

    void set(const std::string & str) override;
};

class Settings : public Config {

    unsigned int getDefaultCores();

    StringSet getDefaultSystemFeatures();

public:

    Settings();

    Path nixPrefix;

    /* The directory where we store sources and derived files. */
    Path nixStore;

    Path nixDataDir; /* !!! fix */

    /* The directory where we log various operations. */
    Path nixLogDir;

    /* The directory where state is stored. */
    Path nixStateDir;

    /* The directory where configuration files are stored. */
    Path nixConfDir;

    /* The directory where internal helper programs are stored. */
    Path nixLibexecDir;

    /* The directory where the main programs are stored. */
    Path nixBinDir;

    /* The directory where the man pages are stored. */
    Path nixManDir;

    /* File name of the socket the daemon listens to. */
    Path nixDaemonSocketFile;

    Setting<std::string> storeUri{this, getEnv("NIX_REMOTE", "auto"), "store",
        "The default Nix store to use."};

    Setting<bool> keepFailed{this, false, "keep-failed",
        "Whether to keep temporary directories of failed builds."};

    Setting<bool> keepGoing{this, false, "keep-going",
        "Whether to keep building derivations when another build fails."};

    Setting<bool> tryFallback{this, false, "fallback",
        "Whether to fall back to building when substitution fails.",
        {"build-fallback"}};

    /* Whether to show build log output in real time. */
    bool verboseBuild = true;

    Setting<size_t> logLines{this, 10, "log-lines",
        "If verbose-build is false, the number of lines of the tail of "
        "the log to show if a build fails."};

    MaxBuildJobsSetting maxBuildJobs{this, 1, "max-jobs",
        "Maximum number of parallel build jobs. \"auto\" means use number of cores.",
        {"build-max-jobs"}};

    Setting<unsigned int> buildCores{this, getDefaultCores(), "cores",
        "Number of CPU cores to utilize in parallel within a build, "
        "i.e. by passing this number to Make via '-j'. 0 means that the "
        "number of actual CPU cores on the local host ought to be "
        "auto-detected.", {"build-cores"}};

    /* Read-only mode.  Don't copy stuff to the store, don't change
       the database. */
    bool readOnlyMode = false;

    Setting<std::string> thisSystem{this, SYSTEM, "system",
        "The canonical Nix system name."};

    Setting<time_t> maxSilentTime{this, 0, "max-silent-time",
        "The maximum time in seconds that a builder can go without "
        "producing any output on stdout/stderr before it is killed. "
        "0 means infinity.",
        {"build-max-silent-time"}};

    Setting<time_t> buildTimeout{this, 0, "timeout",
        "The maximum duration in seconds that a builder can run. "
        "0 means infinity.", {"build-timeout"}};

    PathSetting buildHook{this, true, nixLibexecDir + "/nix/build-remote", "build-hook",
        "The path of the helper program that executes builds to remote machines."};

    Setting<std::string> builders{this, "@" + nixConfDir + "/machines", "builders",
        "A semicolon-separated list of build machines, in the format of nix.machines."};

    Setting<bool> buildersUseSubstitutes{this, false, "builders-use-substitutes",
        "Whether build machines should use their own substitutes for obtaining "
        "build dependencies if possible, rather than waiting for this host to "
        "upload them."};

    Setting<off_t> reservedSize{this, 8 * 1024 * 1024, "gc-reserved-space",
        "Amount of reserved disk space for the garbage collector."};

    Setting<bool> fsyncMetadata{this, true, "fsync-metadata",
        "Whether SQLite should use fsync()."};

    Setting<bool> useSQLiteWAL{this, true, "use-sqlite-wal",
        "Whether SQLite should use WAL mode."};

    Setting<bool> syncBeforeRegistering{this, false, "sync-before-registering",
        "Whether to call sync() before registering a path as valid."};

    Setting<bool> useSubstitutes{this, true, "substitute",
        "Whether to use substitutes.",
        {"build-use-substitutes"}};

    Setting<std::string> buildUsersGroup{this, "", "build-users-group",
        "The Unix group that contains the build users."};

    Setting<bool> impersonateLinux26{this, false, "impersonate-linux-26",
        "Whether to impersonate a Linux 2.6 machine on newer kernels.",
        {"build-impersonate-linux-26"}};

    Setting<bool> keepLog{this, true, "keep-build-log",
        "Whether to store build logs.",
        {"build-keep-log"}};

    Setting<bool> compressLog{this, true, "compress-build-log",
        "Whether to compress logs.",
        {"build-compress-log"}};

    Setting<unsigned long> maxLogSize{this, 0, "max-build-log-size",
        "Maximum number of bytes a builder can write to stdout/stderr "
        "before being killed (0 means no limit).",
        {"build-max-log-size"}};

    /* When buildRepeat > 0 and verboseBuild == true, whether to print
       repeated builds (i.e. builds other than the first one) to
       stderr.  Hack to prevent Hydra logs from being polluted. */
    bool printRepeatedBuilds = true;

    Setting<unsigned int> pollInterval{this, 5, "build-poll-interval",
        "How often (in seconds) to poll for locks."};

    Setting<bool> checkRootReachability{this, false, "gc-check-reachability",
        "Whether to check if new GC roots can in fact be found by the "
        "garbage collector."};

    Setting<bool> gcKeepOutputs{this, false, "keep-outputs",
        "Whether the garbage collector should keep outputs of live derivations.",
        {"gc-keep-outputs"}};

    Setting<bool> gcKeepDerivations{this, true, "keep-derivations",
        "Whether the garbage collector should keep derivers of live paths.",
        {"gc-keep-derivations"}};

    Setting<bool> autoOptimiseStore{this, false, "auto-optimise-store",
        "Whether to automatically replace files with identical contents with hard links."};

    Setting<bool> envKeepDerivations{this, false, "keep-env-derivations",
        "Whether to add derivations as a dependency of user environments "
        "(to prevent them from being GCed).",
        {"env-keep-derivations"}};

    /* Whether to lock the Nix client and worker to the same CPU. */
    bool lockCPU;

    /* Whether to show a stack trace if Nix evaluation fails. */
    Setting<bool> showTrace{this, false, "show-trace",
        "Whether to show a stack trace on evaluation errors."};

    Setting<SandboxMode> sandboxMode{this,
#if __linux__
        smEnabled
#else
        smDisabled
#endif
        , "sandbox",
        "Whether to enable sandboxed builds. Can be \"true\", \"false\" or \"relaxed\".",
        {"build-use-chroot", "build-use-sandbox"}};

    Setting<PathSet> sandboxPaths{this, {}, "sandbox-paths",
        "The paths to make available inside the build sandbox.",
        {"build-chroot-dirs", "build-sandbox-paths"}};

    Setting<bool> sandboxFallback{this, true, "sandbox-fallback",
        "Whether to disable sandboxing when the kernel doesn't allow it."};

    Setting<PathSet> extraSandboxPaths{this, {}, "extra-sandbox-paths",
        "Additional paths to make available inside the build sandbox.",
        {"build-extra-chroot-dirs", "build-extra-sandbox-paths"}};

    Setting<size_t> buildRepeat{this, 0, "repeat",
        "The number of times to repeat a build in order to verify determinism.",
        {"build-repeat"}};

#if __linux__
    Setting<std::string> sandboxShmSize{this, "50%", "sandbox-dev-shm-size",
        "The size of /dev/shm in the build sandbox."};

    Setting<Path> sandboxBuildDir{this, "/build", "sandbox-build-dir",
        "The build directory inside the sandbox."};
#endif

    Setting<PathSet> allowedImpureHostPrefixes{this, {}, "allowed-impure-host-deps",
        "Which prefixes to allow derivations to ask for access to (primarily for Darwin)."};

#if __APPLE__
    Setting<bool> darwinLogSandboxViolations{this, false, "darwin-log-sandbox-violations",
        "Whether to log Darwin sandbox access violations to the system log."};
#endif

    Setting<bool> runDiffHook{this, false, "run-diff-hook",
        "Whether to run the program specified by the diff-hook setting "
        "when repeated builds produce a different result. Typically used to "
        "plug in diffoscope."};

    PathSetting diffHook{this, true, "", "diff-hook",
        "A program that prints out the differences between the two paths "
        "specified on its command line."};

    Setting<bool> enforceDeterminism{this, true, "enforce-determinism",
        "Whether to fail if repeated builds produce different output."};

    Setting<Strings> trustedPublicKeys{this,
        {"cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY="},
        "trusted-public-keys",
        "Trusted public keys for secure substitution.",
        {"binary-cache-public-keys"}};

    Setting<Strings> secretKeyFiles{this, {}, "secret-key-files",
        "Secret keys with which to sign local builds."};

    Setting<unsigned int> tarballTtl{this, 60 * 60, "tarball-ttl",
        "How long downloaded files are considered up-to-date."};

    Setting<bool> requireSigs{this, true, "require-sigs",
        "Whether to check that any non-content-addressed path added to the "
        "Nix store has a valid signature (that is, one signed using a key "
        "listed in 'trusted-public-keys')."};

    Setting<StringSet> extraPlatforms{this,
        std::string{SYSTEM} == "x86_64-linux" ? StringSet{"i686-linux"} : StringSet{},
        "extra-platforms",
        "Additional platforms that can be built on the local system. "
        "These may be supported natively (e.g. armv7 on some aarch64 CPUs) "
        "or using hacks like qemu-user."};

    Setting<StringSet> systemFeatures{this, getDefaultSystemFeatures(),
        "system-features",
        "Optional features that this system implements (like \"kvm\")."};

    Setting<Strings> substituters{this,
        nixStore == "/nix/store" ? Strings{"https://cache.nixos.org/"} : Strings(),
        "substituters",
        "The URIs of substituters (such as https://cache.nixos.org/).",
        {"binary-caches"}};

    // FIXME: provide a way to add to option values.
    Setting<Strings> extraSubstituters{this, {}, "extra-substituters",
        "Additional URIs of substituters.",
        {"extra-binary-caches"}};

    Setting<StringSet> trustedSubstituters{this, {}, "trusted-substituters",
        "Disabled substituters that may be enabled via the substituters option by untrusted users.",
        {"trusted-binary-caches"}};

    Setting<Strings> trustedUsers{this, {"root"}, "trusted-users",
        "Which users or groups are trusted to ask the daemon to do unsafe things."};

    Setting<unsigned int> ttlNegativeNarInfoCache{this, 3600, "narinfo-cache-negative-ttl",
        "The TTL in seconds for negative lookups in the disk cache, i.e. binary cache lookups that "
        "return an invalid path result."};

    Setting<unsigned int> ttlPositiveNarInfoCache{this, 30 * 24 * 3600, "narinfo-cache-positive-ttl",
        "The TTL in seconds for positive lookups in the disk cache, i.e. binary cache lookups that "
        "return a valid path result."};

    /* Who we trust to use the daemon in safe ways. */
    Setting<Strings> allowedUsers{this, {"*"}, "allowed-users",
        "Which users or groups are allowed to connect to the daemon."};

    Setting<bool> printMissing{this, true, "print-missing",
        "Whether to print what paths need to be built or downloaded."};

    Setting<std::string> preBuildHook{this,
#if __APPLE__
        nixLibexecDir + "/nix/resolve-system-dependencies",
#else
        "",
#endif
        "pre-build-hook",
        "A program to run just before a build to set derivation-specific build settings."};

    Setting<std::string> postBuildHook{this, "", "post-build-hook",
        "A program to run just after each successful build."};

    Setting<std::string> netrcFile{this, fmt("%s/%s", nixConfDir, "netrc"), "netrc-file",
        "Path to the netrc file used to obtain usernames/passwords for downloads."};

    /* Path to the SSL CA file used. */
    Path caFile;

#if __linux__
    Setting<bool> filterSyscalls{this, true, "filter-syscalls",
        "Whether to prevent certain dangerous system calls, such as "
        "creation of setuid/setgid files or adding ACLs or extended "
        "attributes. Only disable this if you're aware of the "
        "security implications."};

    Setting<bool> allowNewPrivileges{this, false, "allow-new-privileges",
        "Whether builders can acquire new privileges by calling programs with "
        "setuid/setgid bits or with file capabilities."};
#endif

    Setting<Strings> hashedMirrors{this, {"http://tarballs.nixos.org/"}, "hashed-mirrors",
        "A list of servers used by builtins.fetchurl to fetch files by hash."};

    Setting<uint64_t> minFree{this, 0, "min-free",
        "Automatically run the garbage collector when free disk space drops below the specified amount."};

    Setting<uint64_t> maxFree{this, std::numeric_limits<uint64_t>::max(), "max-free",
        "Stop deleting garbage when free disk space is above the specified amount."};

    Setting<uint64_t> minFreeCheckInterval{this, 5, "min-free-check-interval",
        "Number of seconds between checking free disk space."};

    Setting<Paths> pluginFiles{this, {}, "plugin-files",
        "Plugins to dynamically load at nix initialization time."};
};


// FIXME: don't use a global variable.
extern Settings settings;

/* This should be called after settings are initialized, but before
   anything else. */
void initPlugins();

void loadConfFile();

extern const string nixVersion;

}
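
The Setting<T> fields above behave like typed values with names and aliases. A sketch of reading and overriding them against the declarations in this header (not a program from the repo):

// Sketch: reading and overriding Settings fields declared above.
#include "globals.hh"

#include <iostream>

using namespace nix;

int main() {
    loadConfFile();  // apply nix.conf and the XDG config files

    // Setting<T> converts to T implicitly; .get() is the explicit form.
    std::cout << "max-jobs: " << settings.maxBuildJobs.get() << "\n";
    std::cout << "system:   " << settings.thisSystem.get() << "\n";

    // Override by name, as a command-line or daemon override would.
    settings.set("keep-going", "true");
    std::cout << "keep-going: " << (settings.keepGoing ? "yes" : "no") << "\n";
    return 0;
}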
173 third_party/nix/src/libstore/http-binary-cache-store.cc vendored Normal file

@@ -0,0 +1,173 @@
#include "binary-cache-store.hh"
|
||||
#include "download.hh"
|
||||
#include "globals.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
MakeError(UploadToHTTP, Error);
|
||||
|
||||
class HttpBinaryCacheStore : public BinaryCacheStore
|
||||
{
|
||||
private:
|
||||
|
||||
Path cacheUri;
|
||||
|
||||
struct State
|
||||
{
|
||||
bool enabled = true;
|
||||
std::chrono::steady_clock::time_point disabledUntil;
|
||||
};
|
||||
|
||||
Sync<State> _state;
|
||||
|
||||
public:
|
||||
|
||||
HttpBinaryCacheStore(
|
||||
const Params & params, const Path & _cacheUri)
|
||||
: BinaryCacheStore(params)
|
||||
, cacheUri(_cacheUri)
|
||||
{
|
||||
if (cacheUri.back() == '/')
|
||||
cacheUri.pop_back();
|
||||
|
||||
diskCache = getNarInfoDiskCache();
|
||||
}
|
||||
|
||||
std::string getUri() override
|
||||
{
|
||||
return cacheUri;
|
||||
}
|
||||
|
||||
void init() override
|
||||
{
|
||||
// FIXME: do this lazily?
|
||||
if (!diskCache->cacheExists(cacheUri, wantMassQuery_, priority)) {
|
||||
try {
|
||||
BinaryCacheStore::init();
|
||||
} catch (UploadToHTTP &) {
|
||||
throw Error("'%s' does not appear to be a binary cache", cacheUri);
|
||||
}
|
||||
diskCache->createCache(cacheUri, storeDir, wantMassQuery_, priority);
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
void maybeDisable()
|
||||
{
|
||||
auto state(_state.lock());
|
||||
if (state->enabled && settings.tryFallback) {
|
||||
int t = 60;
|
||||
printError("disabling binary cache '%s' for %s seconds", getUri(), t);
|
||||
state->enabled = false;
|
||||
state->disabledUntil = std::chrono::steady_clock::now() + std::chrono::seconds(t);
|
||||
}
|
||||
}
|
||||
|
||||
void checkEnabled()
|
||||
{
|
||||
auto state(_state.lock());
|
||||
if (state->enabled) return;
|
||||
if (std::chrono::steady_clock::now() > state->disabledUntil) {
|
||||
state->enabled = true;
|
||||
debug("re-enabling binary cache '%s'", getUri());
|
||||
return;
|
||||
}
|
||||
throw SubstituterDisabled("substituter '%s' is disabled", getUri());
|
||||
}
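
    // maybeDisable()/checkEnabled() together form a small time-based
    // circuit breaker. The pattern in miniature, with illustrative
    // names rather than the store API:
    //
    //   #include <chrono>
    //   #include <stdexcept>
    //
    //   struct Breaker {
    //       bool enabled = true;
    //       std::chrono::steady_clock::time_point disabledUntil;
    //
    //       void trip(std::chrono::seconds cooldown) {
    //           enabled = false;
    //           disabledUntil = std::chrono::steady_clock::now() + cooldown;
    //       }
    //
    //       void check() {
    //           if (enabled) return;
    //           if (std::chrono::steady_clock::now() > disabledUntil) {
    //               enabled = true;  // cooldown elapsed: retry again
    //               return;
    //           }
    //           throw std::runtime_error("endpoint temporarily disabled");
    //       }
    //   };
    //
    //   int main() {
    //       Breaker b;
    //       b.trip(std::chrono::seconds(60));
    //       try { b.check(); } catch (const std::exception &) { /* disabled */ }
    //       return 0;
    //   }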

    bool fileExists(const std::string & path) override
    {
        checkEnabled();

        try {
            DownloadRequest request(cacheUri + "/" + path);
            request.head = true;
            getDownloader()->download(request);
            return true;
        } catch (DownloadError & e) {
            /* S3 buckets return 403 if a file doesn't exist and the
               bucket is unlistable, so treat 403 as 404. */
            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                return false;
            maybeDisable();
            throw;
        }
    }

    void upsertFile(const std::string & path,
        const std::string & data,
        const std::string & mimeType) override
    {
        auto req = DownloadRequest(cacheUri + "/" + path);
        req.data = std::make_shared<string>(data); // FIXME: inefficient
        req.mimeType = mimeType;
        try {
            getDownloader()->download(req);
        } catch (DownloadError & e) {
            throw UploadToHTTP("while uploading to HTTP binary cache at '%s': %s", cacheUri, e.msg());
        }
    }

    DownloadRequest makeRequest(const std::string & path)
    {
        DownloadRequest request(cacheUri + "/" + path);
        return request;
    }

    void getFile(const std::string & path, Sink & sink) override
    {
        checkEnabled();
        auto request(makeRequest(path));
        try {
            getDownloader()->download(std::move(request), sink);
        } catch (DownloadError & e) {
            if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
            maybeDisable();
            throw;
        }
    }

    void getFile(const std::string & path,
        Callback<std::shared_ptr<std::string>> callback) noexcept override
    {
        checkEnabled();

        auto request(makeRequest(path));

        auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback));

        getDownloader()->enqueueDownload(request,
            {[callbackPtr, this](std::future<DownloadResult> result) {
                try {
                    (*callbackPtr)(result.get().data);
                } catch (DownloadError & e) {
                    if (e.error == Downloader::NotFound || e.error == Downloader::Forbidden)
                        return (*callbackPtr)(std::shared_ptr<std::string>());
                    maybeDisable();
                    callbackPtr->rethrow();
                } catch (...) {
                    callbackPtr->rethrow();
                }
            }});
    }

};

static RegisterStoreImplementation regStore([](
    const std::string & uri, const Store::Params & params)
    -> std::shared_ptr<Store>
{
    if (std::string(uri, 0, 7) != "http://" &&
        std::string(uri, 0, 8) != "https://" &&
        (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") != "1" || std::string(uri, 0, 7) != "file://")
        ) return 0;
    auto store = std::make_shared<HttpBinaryCacheStore>(params, uri);
    store->init();
    return store;
});

}
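
RegisterStoreImplementation relies on static-initialization-time self-registration: each translation unit appends a factory to a global list that openStore() later probes with the URI. The pattern in miniature (all names illustrative, not this repo's types):

// Self-registration pattern behind RegisterStoreImplementation.
#include <functional>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct Store { virtual ~Store() = default; };

using Factory = std::function<std::shared_ptr<Store>(const std::string &)>;

static std::vector<Factory> & factories() {
    static std::vector<Factory> fs;  // function-local: safe init order
    return fs;
}

struct Register { Register(Factory f) { factories().push_back(std::move(f)); } };

struct HttpStore : Store {};

// A translation unit registers itself at static-init time:
static Register regHttp([](const std::string & uri) -> std::shared_ptr<Store> {
    if (uri.rfind("http://", 0) != 0) return nullptr;  // not our scheme
    return std::make_shared<HttpStore>();
});

int main() {
    for (auto & f : factories())
        if (auto s = f("http://cache.example.org")) {
            std::cout << "factory matched\n";
            break;
        }
    return 0;
}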
293 third_party/nix/src/libstore/legacy-ssh-store.cc vendored Normal file

@@ -0,0 +1,293 @@
#include "archive.hh"
|
||||
#include "pool.hh"
|
||||
#include "remote-store.hh"
|
||||
#include "serve-protocol.hh"
|
||||
#include "store-api.hh"
|
||||
#include "worker-protocol.hh"
|
||||
#include "ssh.hh"
|
||||
#include "derivations.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
static std::string uriScheme = "ssh://";
|
||||
|
||||
struct LegacySSHStore : public Store
|
||||
{
|
||||
const Setting<int> maxConnections{this, 1, "max-connections", "maximum number of concurrent SSH connections"};
|
||||
const Setting<Path> sshKey{this, "", "ssh-key", "path to an SSH private key"};
|
||||
const Setting<bool> compress{this, false, "compress", "whether to compress the connection"};
|
||||
const Setting<Path> remoteProgram{this, "nix-store", "remote-program", "path to the nix-store executable on the remote system"};
|
||||
const Setting<std::string> remoteStore{this, "", "remote-store", "URI of the store on the remote system"};
|
||||
|
||||
// Hack for getting remote build log output.
|
||||
const Setting<int> logFD{this, -1, "log-fd", "file descriptor to which SSH's stderr is connected"};
|
||||
|
||||
struct Connection
|
||||
{
|
||||
std::unique_ptr<SSHMaster::Connection> sshConn;
|
||||
FdSink to;
|
||||
FdSource from;
|
||||
int remoteVersion;
|
||||
bool good = true;
|
||||
};
|
||||
|
||||
std::string host;
|
||||
|
||||
ref<Pool<Connection>> connections;
|
||||
|
||||
SSHMaster master;
|
||||
|
||||
LegacySSHStore(const string & host, const Params & params)
|
||||
: Store(params)
|
||||
, host(host)
|
||||
, connections(make_ref<Pool<Connection>>(
|
||||
std::max(1, (int) maxConnections),
|
||||
[this]() { return openConnection(); },
|
||||
[](const ref<Connection> & r) { return r->good; }
|
||||
))
|
||||
, master(
|
||||
host,
|
||||
sshKey,
|
||||
// Use SSH master only if using more than 1 connection.
|
||||
connections->capacity() > 1,
|
||||
compress,
|
||||
logFD)
|
||||
{
|
||||
}
|
||||
|
||||
ref<Connection> openConnection()
|
||||
{
|
||||
auto conn = make_ref<Connection>();
|
||||
conn->sshConn = master.startCommand(
|
||||
fmt("%s --serve --write", remoteProgram)
|
||||
+ (remoteStore.get() == "" ? "" : " --store " + shellEscape(remoteStore.get())));
|
||||
conn->to = FdSink(conn->sshConn->in.get());
|
||||
conn->from = FdSource(conn->sshConn->out.get());
|
||||
|
||||
try {
|
||||
conn->to << SERVE_MAGIC_1 << SERVE_PROTOCOL_VERSION;
|
||||
conn->to.flush();
|
||||
|
||||
unsigned int magic = readInt(conn->from);
|
||||
if (magic != SERVE_MAGIC_2)
|
||||
throw Error("protocol mismatch with 'nix-store --serve' on '%s'", host);
|
||||
conn->remoteVersion = readInt(conn->from);
|
||||
if (GET_PROTOCOL_MAJOR(conn->remoteVersion) != 0x200)
|
||||
throw Error("unsupported 'nix-store --serve' protocol version on '%s'", host);
|
||||
|
||||
} catch (EndOfFile & e) {
|
||||
throw Error("cannot connect to '%1%'", host);
|
||||
}
|
||||
|
||||
return conn;
|
||||
};
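
    // The handshake above packs the protocol version into one integer
    // and checks only the major half via GET_PROTOCOL_MAJOR. The
    // masking in miniature (the macro definitions mirror the usual
    // serve-protocol convention of major version in the high byte;
    // treat the exact values as illustrative, not authoritative):
    //
    //   #include <cstdio>
    //
    //   #define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
    //   #define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
    //
    //   int main() {
    //       unsigned int remoteVersion = 0x205;  // e.g. major 2, minor 5
    //       if (GET_PROTOCOL_MAJOR(remoteVersion) != 0x200) {
    //           fprintf(stderr, "unsupported protocol version\n");
    //           return 1;
    //       }
    //       printf("minor: %u\n", GET_PROTOCOL_MINOR(remoteVersion));  // 5
    //       return 0;
    //   }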
|
||||
|
||||
string getUri() override
|
||||
{
|
||||
return uriScheme + host;
|
||||
}
|
||||
|
||||
void queryPathInfoUncached(const Path & path,
|
||||
Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override
|
||||
{
|
||||
try {
|
||||
auto conn(connections->get());
|
||||
|
||||
debug("querying remote host '%s' for info on '%s'", host, path);
|
||||
|
||||
conn->to << cmdQueryPathInfos << PathSet{path};
|
||||
conn->to.flush();
|
||||
|
||||
auto info = std::make_shared<ValidPathInfo>();
|
||||
conn->from >> info->path;
|
||||
if (info->path.empty()) return callback(nullptr);
|
||||
assert(path == info->path);
|
||||
|
||||
PathSet references;
|
||||
conn->from >> info->deriver;
|
||||
info->references = readStorePaths<PathSet>(*this, conn->from);
|
||||
readLongLong(conn->from); // download size
|
||||
info->narSize = readLongLong(conn->from);
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 4) {
|
||||
auto s = readString(conn->from);
|
||||
info->narHash = s.empty() ? Hash() : Hash(s);
|
||||
conn->from >> info->ca;
|
||||
info->sigs = readStrings<StringSet>(conn->from);
|
||||
}
|
||||
|
||||
auto s = readString(conn->from);
|
||||
assert(s == "");
|
||||
|
||||
callback(std::move(info));
|
||||
} catch (...) { callback.rethrow(); }
|
||||
}
|
||||
|
||||
void addToStore(const ValidPathInfo & info, Source & source,
|
||||
RepairFlag repair, CheckSigsFlag checkSigs,
|
||||
std::shared_ptr<FSAccessor> accessor) override
|
||||
{
|
||||
debug("adding path '%s' to remote host '%s'", info.path, host);
|
||||
|
||||
auto conn(connections->get());
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 5) {
|
||||
|
||||
conn->to
|
||||
<< cmdAddToStoreNar
|
||||
<< info.path
|
||||
<< info.deriver
|
||||
<< info.narHash.to_string(Base16, false)
|
||||
<< info.references
|
||||
<< info.registrationTime
|
||||
<< info.narSize
|
||||
<< info.ultimate
|
||||
<< info.sigs
|
||||
<< info.ca;
|
||||
try {
|
||||
copyNAR(source, conn->to);
|
||||
} catch (...) {
|
||||
conn->good = false;
|
||||
throw;
|
||||
}
|
||||
conn->to.flush();
|
||||
|
||||
} else {
|
||||
|
||||
conn->to
|
||||
<< cmdImportPaths
|
||||
<< 1;
|
||||
try {
|
||||
copyNAR(source, conn->to);
|
||||
} catch (...) {
|
||||
conn->good = false;
|
||||
throw;
|
||||
}
|
||||
conn->to
|
||||
<< exportMagic
|
||||
<< info.path
|
||||
<< info.references
|
||||
<< info.deriver
|
||||
<< 0
|
||||
<< 0;
|
||||
conn->to.flush();
|
||||
|
||||
}
|
||||
|
||||
        if (readInt(conn->from) != 1)
            throw Error("failed to add path '%s' to remote host '%s'", info.path, host);
    }

    void narFromPath(const Path & path, Sink & sink) override
    {
        auto conn(connections->get());

        conn->to << cmdDumpStorePath << path;
        conn->to.flush();
        copyNAR(conn->from, sink);
    }

    Path queryPathFromHashPart(const string & hashPart) override
    { unsupported("queryPathFromHashPart"); }

    Path addToStore(const string & name, const Path & srcPath,
        bool recursive, HashType hashAlgo,
        PathFilter & filter, RepairFlag repair) override
    { unsupported("addToStore"); }

    Path addTextToStore(const string & name, const string & s,
        const PathSet & references, RepairFlag repair) override
    { unsupported("addTextToStore"); }

    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
        BuildMode buildMode) override
    {
        auto conn(connections->get());

        conn->to
            << cmdBuildDerivation
            << drvPath
            << drv
            << settings.maxSilentTime
            << settings.buildTimeout;
        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 2)
            conn->to
                << settings.maxLogSize;
        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
            conn->to
                << settings.buildRepeat
                << settings.enforceDeterminism;

        conn->to.flush();

        BuildResult status;
        status.status = (BuildResult::Status) readInt(conn->from);
        conn->from >> status.errorMsg;

        if (GET_PROTOCOL_MINOR(conn->remoteVersion) >= 3)
            conn->from >> status.timesBuilt >> status.isNonDeterministic >> status.startTime >> status.stopTime;

        return status;
    }

    void ensurePath(const Path & path) override
    { unsupported("ensurePath"); }

    void computeFSClosure(const PathSet & paths,
        PathSet & out, bool flipDirection = false,
        bool includeOutputs = false, bool includeDerivers = false) override
    {
        if (flipDirection || includeDerivers) {
            Store::computeFSClosure(paths, out, flipDirection, includeOutputs, includeDerivers);
            return;
        }

        auto conn(connections->get());

        conn->to
            << cmdQueryClosure
            << includeOutputs
            << paths;
        conn->to.flush();

        auto res = readStorePaths<PathSet>(*this, conn->from);

        out.insert(res.begin(), res.end());
    }

    PathSet queryValidPaths(const PathSet & paths,
        SubstituteFlag maybeSubstitute = NoSubstitute) override
    {
        auto conn(connections->get());

        conn->to
            << cmdQueryValidPaths
            << false // lock
            << maybeSubstitute
            << paths;
        conn->to.flush();

        return readStorePaths<PathSet>(*this, conn->from);
    }

    void connect() override
    {
        auto conn(connections->get());
    }

    unsigned int getProtocol() override
    {
        auto conn(connections->get());
        return conn->remoteVersion;
    }
};

static RegisterStoreImplementation regStore([](
    const std::string & uri, const Store::Params & params)
    -> std::shared_ptr<Store>
{
    if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
    return std::make_shared<LegacySSHStore>(std::string(uri, uriScheme.size()), params);
});

}
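For orientation only (not part of the commit): the regStore registration above means callers never construct LegacySSHStore directly; they go through the generic openStore() factory. A minimal sketch, assuming the usual libstore headers; the builder host name is made up.

#include "store-api.hh"

// Sketch only: a URI matching uriScheme ("ssh://") resolves to LegacySSHStore.
void exampleLegacySsh()
{
    auto store = nix::openStore("ssh://builder.example.com");
    // The first use of the connection pool runs openConnection(),
    // i.e. the SERVE_MAGIC_1 / SERVE_MAGIC_2 handshake shown above.
    store->connect();
}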
103 third_party/nix/src/libstore/local-binary-cache-store.cc vendored Normal file
@@ -0,0 +1,103 @@
#include "binary-cache-store.hh"
|
||||
#include "globals.hh"
|
||||
#include "nar-info-disk-cache.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
class LocalBinaryCacheStore : public BinaryCacheStore
|
||||
{
|
||||
private:
|
||||
|
||||
Path binaryCacheDir;
|
||||
|
||||
public:
|
||||
|
||||
LocalBinaryCacheStore(
|
||||
const Params & params, const Path & binaryCacheDir)
|
||||
: BinaryCacheStore(params)
|
||||
, binaryCacheDir(binaryCacheDir)
|
||||
{
|
||||
}
|
||||
|
||||
void init() override;
|
||||
|
||||
std::string getUri() override
|
||||
{
|
||||
return "file://" + binaryCacheDir;
|
||||
}
|
||||
|
||||
protected:
|
||||
|
||||
bool fileExists(const std::string & path) override;
|
||||
|
||||
void upsertFile(const std::string & path,
|
||||
const std::string & data,
|
||||
const std::string & mimeType) override;
|
||||
|
||||
void getFile(const std::string & path, Sink & sink) override
|
||||
{
|
||||
try {
|
||||
readFile(binaryCacheDir + "/" + path, sink);
|
||||
} catch (SysError & e) {
|
||||
if (e.errNo == ENOENT)
|
||||
throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache", path);
|
||||
}
|
||||
}
|
||||
|
||||
PathSet queryAllValidPaths() override
|
||||
{
|
||||
PathSet paths;
|
||||
|
||||
for (auto & entry : readDirectory(binaryCacheDir)) {
|
||||
if (entry.name.size() != 40 ||
|
||||
!hasSuffix(entry.name, ".narinfo"))
|
||||
continue;
|
||||
paths.insert(storeDir + "/" + entry.name.substr(0, entry.name.size() - 8));
|
||||
}
|
||||
|
||||
return paths;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
void LocalBinaryCacheStore::init()
|
||||
{
|
||||
createDirs(binaryCacheDir + "/nar");
|
||||
BinaryCacheStore::init();
|
||||
}
|
||||
|
||||
static void atomicWrite(const Path & path, const std::string & s)
|
||||
{
|
||||
Path tmp = path + ".tmp." + std::to_string(getpid());
|
||||
AutoDelete del(tmp, false);
|
||||
writeFile(tmp, s);
|
||||
if (rename(tmp.c_str(), path.c_str()))
|
||||
throw SysError(format("renaming '%1%' to '%2%'") % tmp % path);
|
||||
del.cancel();
|
||||
}
|
||||
|
||||
bool LocalBinaryCacheStore::fileExists(const std::string & path)
|
||||
{
|
||||
return pathExists(binaryCacheDir + "/" + path);
|
||||
}
|
||||
|
||||
void LocalBinaryCacheStore::upsertFile(const std::string & path,
|
||||
const std::string & data,
|
||||
const std::string & mimeType)
|
||||
{
|
||||
atomicWrite(binaryCacheDir + "/" + path, data);
|
||||
}
|
||||
|
||||
static RegisterStoreImplementation regStore([](
|
||||
const std::string & uri, const Store::Params & params)
|
||||
-> std::shared_ptr<Store>
|
||||
{
|
||||
if (getEnv("_NIX_FORCE_HTTP_BINARY_CACHE_STORE") == "1" ||
|
||||
std::string(uri, 0, 7) != "file://")
|
||||
return 0;
|
||||
auto store = std::make_shared<LocalBinaryCacheStore>(params, std::string(uri, 7));
|
||||
store->init();
|
||||
return store;
|
||||
});
|
||||
|
||||
}
|
||||
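A usage sketch for the registration above (not part of the commit; the directory is illustrative): any file:// URI resolves to this store type.

#include "store-api.hh"

// Sketch only: init() creates the nar/ subdirectory; paths copied to
// this store land as <dir>/<hash>.narinfo plus compressed NARs in nar/.
void exampleLocalBinaryCache()
{
    auto store = nix::openStore("file:///tmp/example-binary-cache");
}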
131 third_party/nix/src/libstore/local-fs-store.cc vendored Normal file
@@ -0,0 +1,131 @@
#include "archive.hh"
|
||||
#include "fs-accessor.hh"
|
||||
#include "store-api.hh"
|
||||
#include "globals.hh"
|
||||
#include "compression.hh"
|
||||
#include "derivations.hh"
|
||||
|
||||
namespace nix {
|
||||
|
||||
LocalFSStore::LocalFSStore(const Params & params)
|
||||
: Store(params)
|
||||
{
|
||||
}
|
||||
|
||||
struct LocalStoreAccessor : public FSAccessor
|
||||
{
|
||||
ref<LocalFSStore> store;
|
||||
|
||||
LocalStoreAccessor(ref<LocalFSStore> store) : store(store) { }
|
||||
|
||||
Path toRealPath(const Path & path)
|
||||
{
|
||||
Path storePath = store->toStorePath(path);
|
||||
if (!store->isValidPath(storePath))
|
||||
throw InvalidPath(format("path '%1%' is not a valid store path") % storePath);
|
||||
return store->getRealStoreDir() + std::string(path, store->storeDir.size());
|
||||
}
|
||||
|
||||
FSAccessor::Stat stat(const Path & path) override
|
||||
{
|
||||
auto realPath = toRealPath(path);
|
||||
|
||||
struct stat st;
|
||||
if (lstat(realPath.c_str(), &st)) {
|
||||
if (errno == ENOENT || errno == ENOTDIR) return {Type::tMissing, 0, false};
|
||||
throw SysError(format("getting status of '%1%'") % path);
|
||||
}
|
||||
|
||||
if (!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode))
|
||||
throw Error(format("file '%1%' has unsupported type") % path);
|
||||
|
||||
return {
|
||||
S_ISREG(st.st_mode) ? Type::tRegular :
|
||||
S_ISLNK(st.st_mode) ? Type::tSymlink :
|
||||
Type::tDirectory,
|
||||
S_ISREG(st.st_mode) ? (uint64_t) st.st_size : 0,
|
||||
S_ISREG(st.st_mode) && st.st_mode & S_IXUSR};
|
||||
}
|
||||
|
||||
StringSet readDirectory(const Path & path) override
|
||||
{
|
||||
auto realPath = toRealPath(path);
|
||||
|
||||
auto entries = nix::readDirectory(realPath);
|
||||
|
||||
StringSet res;
|
||||
for (auto & entry : entries)
|
||||
res.insert(entry.name);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
std::string readFile(const Path & path) override
|
||||
{
|
||||
return nix::readFile(toRealPath(path));
|
||||
}
|
||||
|
||||
std::string readLink(const Path & path) override
|
||||
{
|
||||
return nix::readLink(toRealPath(path));
|
||||
}
|
||||
};
|
||||
|
||||
ref<FSAccessor> LocalFSStore::getFSAccessor()
|
||||
{
|
||||
return make_ref<LocalStoreAccessor>(ref<LocalFSStore>(
|
||||
std::dynamic_pointer_cast<LocalFSStore>(shared_from_this())));
|
||||
}
|
||||
|
||||
void LocalFSStore::narFromPath(const Path & path, Sink & sink)
|
||||
{
|
||||
if (!isValidPath(path))
|
||||
throw Error(format("path '%s' is not valid") % path);
|
||||
dumpPath(getRealStoreDir() + std::string(path, storeDir.size()), sink);
|
||||
}
|
||||
|
||||
const string LocalFSStore::drvsLogDir = "drvs";
|
||||
|
||||
|
||||
|
||||
std::shared_ptr<std::string> LocalFSStore::getBuildLog(const Path & path_)
|
||||
{
|
||||
auto path(path_);
|
||||
|
||||
assertStorePath(path);
|
||||
|
||||
|
||||
if (!isDerivation(path)) {
|
||||
try {
|
||||
path = queryPathInfo(path)->deriver;
|
||||
} catch (InvalidPath &) {
|
||||
return nullptr;
|
||||
}
|
||||
if (path == "") return nullptr;
|
||||
}
|
||||
|
||||
string baseName = baseNameOf(path);
|
||||
|
||||
for (int j = 0; j < 2; j++) {
|
||||
|
||||
Path logPath =
|
||||
j == 0
|
||||
? fmt("%s/%s/%s/%s", logDir, drvsLogDir, string(baseName, 0, 2), string(baseName, 2))
|
||||
: fmt("%s/%s/%s", logDir, drvsLogDir, baseName);
|
||||
Path logBz2Path = logPath + ".bz2";
|
||||
|
||||
if (pathExists(logPath))
|
||||
return std::make_shared<std::string>(readFile(logPath));
|
||||
|
||||
else if (pathExists(logBz2Path)) {
|
||||
try {
|
||||
return decompress("bzip2", readFile(logBz2Path));
|
||||
} catch (Error &) { }
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
}
|
||||
1453 third_party/nix/src/libstore/local-store.cc vendored Normal file
File diff suppressed because it is too large
322 third_party/nix/src/libstore/local-store.hh vendored Normal file
@@ -0,0 +1,322 @@
#pragma once

#include "sqlite.hh"

#include "pathlocks.hh"
#include "store-api.hh"
#include "sync.hh"
#include "util.hh"

#include <chrono>
#include <future>
#include <string>
#include <unordered_set>

namespace nix {

/* Nix store and database schema version. Version 1 (or 0) was Nix <=
   0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
   Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
   Nix 1.0. Version 7 is Nix 1.3. Version 10 is 2.0. */
const int nixSchemaVersion = 10;

struct Derivation;

struct OptimiseStats
{
    unsigned long filesLinked = 0;
    unsigned long long bytesFreed = 0;
    unsigned long long blocksFreed = 0;
};

class LocalStore : public LocalFSStore
{
private:

    /* Lock file used for upgrading. */
    AutoCloseFD globalLock;

    struct State
    {
        /* The SQLite database object. */
        SQLite db;

        /* Some precompiled SQLite statements. */
        SQLiteStmt stmtRegisterValidPath;
        SQLiteStmt stmtUpdatePathInfo;
        SQLiteStmt stmtAddReference;
        SQLiteStmt stmtQueryPathInfo;
        SQLiteStmt stmtQueryReferences;
        SQLiteStmt stmtQueryReferrers;
        SQLiteStmt stmtInvalidatePath;
        SQLiteStmt stmtAddDerivationOutput;
        SQLiteStmt stmtQueryValidDerivers;
        SQLiteStmt stmtQueryDerivationOutputs;
        SQLiteStmt stmtQueryPathFromHashPart;
        SQLiteStmt stmtQueryValidPaths;

        /* The file to which we write our temporary roots. */
        AutoCloseFD fdTempRoots;

        /* The last time we checked whether to do an auto-GC, or an
           auto-GC finished. */
        std::chrono::time_point<std::chrono::steady_clock> lastGCCheck;

        /* Whether auto-GC is running. If so, get gcFuture to wait for
           the GC to finish. */
        bool gcRunning = false;
        std::shared_future<void> gcFuture;

        /* How much disk space was available after the previous
           auto-GC. If the current available disk space is below
           minFree but not much below availAfterGC, then there is no
           point in starting a new GC. */
        uint64_t availAfterGC = std::numeric_limits<uint64_t>::max();

        std::unique_ptr<PublicKeys> publicKeys;
    };

    Sync<State, std::recursive_mutex> _state;

public:

    PathSetting realStoreDir_;

    const Path realStoreDir;
    const Path dbDir;
    const Path linksDir;
    const Path reservedPath;
    const Path schemaPath;
    const Path trashDir;
    const Path tempRootsDir;
    const Path fnTempRoots;

private:

    Setting<bool> requireSigs{(Store*) this,
        settings.requireSigs,
        "require-sigs", "whether store paths should have a trusted signature on import"};

    const PublicKeys & getPublicKeys();

public:

    // Hack for build-remote.cc.
    PathSet locksHeld = tokenizeString<PathSet>(getEnv("NIX_HELD_LOCKS"));

    /* Initialise the local store, upgrading the schema if
       necessary. */
    LocalStore(const Params & params);

    ~LocalStore();

    /* Implementations of abstract store API methods. */

    std::string getUri() override;

    bool isValidPathUncached(const Path & path) override;

    PathSet queryValidPaths(const PathSet & paths,
        SubstituteFlag maybeSubstitute = NoSubstitute) override;

    PathSet queryAllValidPaths() override;

    void queryPathInfoUncached(const Path & path,
        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;

    void queryReferrers(const Path & path, PathSet & referrers) override;

    PathSet queryValidDerivers(const Path & path) override;

    PathSet queryDerivationOutputs(const Path & path) override;

    StringSet queryDerivationOutputNames(const Path & path) override;

    Path queryPathFromHashPart(const string & hashPart) override;

    PathSet querySubstitutablePaths(const PathSet & paths) override;

    void querySubstitutablePathInfos(const PathSet & paths,
        SubstitutablePathInfos & infos) override;

    void addToStore(const ValidPathInfo & info, Source & source,
        RepairFlag repair, CheckSigsFlag checkSigs,
        std::shared_ptr<FSAccessor> accessor) override;

    Path addToStore(const string & name, const Path & srcPath,
        bool recursive, HashType hashAlgo,
        PathFilter & filter, RepairFlag repair) override;

    /* Like addToStore(), but the contents of the path are contained
       in `dump', which is either a NAR serialisation (if recursive ==
       true) or simply the contents of a regular file (if recursive ==
       false). */
    Path addToStoreFromDump(const string & dump, const string & name,
        bool recursive = true, HashType hashAlgo = htSHA256, RepairFlag repair = NoRepair);

    Path addTextToStore(const string & name, const string & s,
        const PathSet & references, RepairFlag repair) override;

    void buildPaths(const PathSet & paths, BuildMode buildMode) override;

    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
        BuildMode buildMode) override;

    void ensurePath(const Path & path) override;

    void addTempRoot(const Path & path) override;

    void addIndirectRoot(const Path & path) override;

    void syncWithGC() override;

private:

    typedef std::shared_ptr<AutoCloseFD> FDPtr;
    typedef list<FDPtr> FDs;

    void findTempRoots(FDs & fds, Roots & roots, bool censor);

public:

    Roots findRoots(bool censor) override;

    void collectGarbage(const GCOptions & options, GCResults & results) override;

    /* Optimise the disk space usage of the Nix store by hard-linking
       files with the same contents. */
    void optimiseStore(OptimiseStats & stats);

    void optimiseStore() override;

    /* Optimise a single store path. */
    void optimisePath(const Path & path);

    bool verifyStore(bool checkContents, RepairFlag repair) override;

    /* Register the validity of a path, i.e., that `path' exists, that
       the paths referenced by it exists, and in the case of an output
       path of a derivation, that it has been produced by a successful
       execution of the derivation (or something equivalent). Also
       register the hash of the file system contents of the path. The
       hash must be a SHA-256 hash. */
    void registerValidPath(const ValidPathInfo & info);

    void registerValidPaths(const ValidPathInfos & infos);

    unsigned int getProtocol() override;

    void vacuumDB();

    /* Repair the contents of the given path by redownloading it using
       a substituter (if available). */
    void repairPath(const Path & path);

    void addSignatures(const Path & storePath, const StringSet & sigs) override;

    /* If free disk space in /nix/store is below minFree, delete
       garbage until it exceeds maxFree. */
    void autoGC(bool sync = true);

private:

    int getSchema();

    void openDB(State & state, bool create);

    void makeStoreWritable();

    uint64_t queryValidPathId(State & state, const Path & path);

    uint64_t addValidPath(State & state, const ValidPathInfo & info, bool checkOutputs = true);

    void invalidatePath(State & state, const Path & path);

    /* Delete a path from the Nix store. */
    void invalidatePathChecked(const Path & path);

    void verifyPath(const Path & path, const PathSet & store,
        PathSet & done, PathSet & validPaths, RepairFlag repair, bool & errors);

    void updatePathInfo(State & state, const ValidPathInfo & info);

    void upgradeStore6();
    void upgradeStore7();
    PathSet queryValidPathsOld();
    ValidPathInfo queryPathInfoOld(const Path & path);

    struct GCState;

    void deleteGarbage(GCState & state, const Path & path);

    void tryToDelete(GCState & state, const Path & path);

    bool canReachRoot(GCState & state, PathSet & visited, const Path & path);

    void deletePathRecursive(GCState & state, const Path & path);

    bool isActiveTempFile(const GCState & state,
        const Path & path, const string & suffix);

    AutoCloseFD openGCLock(LockType lockType);

    void findRoots(const Path & path, unsigned char type, Roots & roots);

    void findRootsNoTemp(Roots & roots, bool censor);

    void findRuntimeRoots(Roots & roots, bool censor);

    void removeUnusedLinks(const GCState & state);

    Path createTempDirInStore();

    void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);

    typedef std::unordered_set<ino_t> InodeHash;

    InodeHash loadInodeHash();
    Strings readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash);
    void optimisePath_(Activity * act, OptimiseStats & stats, const Path & path, InodeHash & inodeHash);

    // Internal versions that are not wrapped in retry_sqlite.
    bool isValidPath_(State & state, const Path & path);
    void queryReferrers(State & state, const Path & path, PathSet & referrers);

    /* Add signatures to a ValidPathInfo using the secret keys
       specified by the ‘secret-key-files’ option. */
    void signPathInfo(ValidPathInfo & info);

    Path getRealStoreDir() override { return realStoreDir; }

    void createUser(const std::string & userName, uid_t userId) override;

    friend class DerivationGoal;
    friend class SubstitutionGoal;
};

typedef std::pair<dev_t, ino_t> Inode;
typedef set<Inode> InodesSeen;

/* "Fix", or canonicalise, the meta-data of the files in a store path
   after it has been built. In particular:
   - the last modification date on each file is set to 1 (i.e.,
     00:00:01 1/1/1970 UTC)
   - the permissions are set to 444 or 555 (i.e., read-only with or
     without execute permission; setuid bits etc. are cleared)
   - the owner and group are set to the Nix user and group, if we're
     running as root. */
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
void canonicalisePathMetaData(const Path & path, uid_t fromUid);

void canonicaliseTimestampAndPermissions(const Path & path);

MakeError(PathInUse, Error);

}
60 third_party/nix/src/libstore/local.mk vendored Normal file
@@ -0,0 +1,60 @@
libraries += libstore

libstore_NAME = libnixstore

libstore_DIR := $(d)

libstore_SOURCES := $(wildcard $(d)/*.cc $(d)/builtins/*.cc)

libstore_LIBS = libutil

libstore_LDFLAGS = $(SQLITE3_LIBS) -lbz2 $(LIBCURL_LIBS) $(SODIUM_LIBS) -pthread
ifneq ($(OS), FreeBSD)
 libstore_LDFLAGS += -ldl
endif

libstore_FILES = sandbox-defaults.sb sandbox-minimal.sb sandbox-network.sb

$(foreach file,$(libstore_FILES),$(eval $(call install-data-in,$(d)/$(file),$(datadir)/nix/sandbox)))

ifeq ($(ENABLE_S3), 1)
 libstore_LDFLAGS += -laws-cpp-sdk-transfer -laws-cpp-sdk-s3 -laws-cpp-sdk-core
endif

ifeq ($(OS), SunOS)
 libstore_LDFLAGS += -lsocket
endif

ifeq ($(HAVE_SECCOMP), 1)
 libstore_LDFLAGS += -lseccomp
endif

libstore_CXXFLAGS = \
 -DNIX_PREFIX=\"$(prefix)\" \
 -DNIX_STORE_DIR=\"$(storedir)\" \
 -DNIX_DATA_DIR=\"$(datadir)\" \
 -DNIX_STATE_DIR=\"$(localstatedir)/nix\" \
 -DNIX_LOG_DIR=\"$(localstatedir)/log/nix\" \
 -DNIX_CONF_DIR=\"$(sysconfdir)/nix\" \
 -DNIX_LIBEXEC_DIR=\"$(libexecdir)\" \
 -DNIX_BIN_DIR=\"$(bindir)\" \
 -DNIX_MAN_DIR=\"$(mandir)\" \
 -DLSOF=\"$(lsof)\"

ifneq ($(sandbox_shell),)
libstore_CXXFLAGS += -DSANDBOX_SHELL="\"$(sandbox_shell)\""
endif

$(d)/local-store.cc: $(d)/schema.sql.gen.hh

$(d)/build.cc:

%.gen.hh: %
	@echo 'R"foo(' >> $@.tmp
	$(trace-gen) cat $< >> $@.tmp
	@echo ')foo"' >> $@.tmp
	@mv $@.tmp $@

clean-files += $(d)/schema.sql.gen.hh

$(eval $(call install-file-in, $(d)/nix-store.pc, $(prefix)/lib/pkgconfig, 0644))
100 third_party/nix/src/libstore/machines.cc vendored Normal file
@@ -0,0 +1,100 @@
#include "machines.hh"
|
||||
#include "util.hh"
|
||||
#include "globals.hh"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
namespace nix {
|
||||
|
||||
Machine::Machine(decltype(storeUri) storeUri,
|
||||
decltype(systemTypes) systemTypes,
|
||||
decltype(sshKey) sshKey,
|
||||
decltype(maxJobs) maxJobs,
|
||||
decltype(speedFactor) speedFactor,
|
||||
decltype(supportedFeatures) supportedFeatures,
|
||||
decltype(mandatoryFeatures) mandatoryFeatures,
|
||||
decltype(sshPublicHostKey) sshPublicHostKey) :
|
||||
storeUri(
|
||||
// Backwards compatibility: if the URI is a hostname,
|
||||
// prepend ssh://.
|
||||
storeUri.find("://") != std::string::npos
|
||||
|| hasPrefix(storeUri, "local")
|
||||
|| hasPrefix(storeUri, "remote")
|
||||
|| hasPrefix(storeUri, "auto")
|
||||
|| hasPrefix(storeUri, "/")
|
||||
? storeUri
|
||||
: "ssh://" + storeUri),
|
||||
systemTypes(systemTypes),
|
||||
sshKey(sshKey),
|
||||
maxJobs(maxJobs),
|
||||
speedFactor(std::max(1U, speedFactor)),
|
||||
supportedFeatures(supportedFeatures),
|
||||
mandatoryFeatures(mandatoryFeatures),
|
||||
sshPublicHostKey(sshPublicHostKey)
|
||||
{}
|
||||
|
||||
bool Machine::allSupported(const std::set<string> & features) const {
|
||||
return std::all_of(features.begin(), features.end(),
|
||||
[&](const string & feature) {
|
||||
return supportedFeatures.count(feature) ||
|
||||
mandatoryFeatures.count(feature);
|
||||
});
|
||||
}
|
||||
|
||||
bool Machine::mandatoryMet(const std::set<string> & features) const {
|
||||
return std::all_of(mandatoryFeatures.begin(), mandatoryFeatures.end(),
|
||||
[&](const string & feature) {
|
||||
return features.count(feature);
|
||||
});
|
||||
}
|
||||
|
||||
void parseMachines(const std::string & s, Machines & machines)
|
||||
{
|
||||
for (auto line : tokenizeString<std::vector<string>>(s, "\n;")) {
|
||||
trim(line);
|
||||
line.erase(std::find(line.begin(), line.end(), '#'), line.end());
|
||||
if (line.empty()) continue;
|
||||
|
||||
if (line[0] == '@') {
|
||||
auto file = trim(std::string(line, 1));
|
||||
try {
|
||||
parseMachines(readFile(file), machines);
|
||||
} catch (const SysError & e) {
|
||||
if (e.errNo != ENOENT)
|
||||
throw;
|
||||
debug("cannot find machines file '%s'", file);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
auto tokens = tokenizeString<std::vector<string>>(line);
|
||||
auto sz = tokens.size();
|
||||
if (sz < 1)
|
||||
throw FormatError("bad machine specification '%s'", line);
|
||||
|
||||
auto isSet = [&](size_t n) {
|
||||
return tokens.size() > n && tokens[n] != "" && tokens[n] != "-";
|
||||
};
|
||||
|
||||
machines.emplace_back(tokens[0],
|
||||
isSet(1) ? tokenizeString<std::vector<string>>(tokens[1], ",") : std::vector<string>{settings.thisSystem},
|
||||
isSet(2) ? tokens[2] : "",
|
||||
isSet(3) ? std::stoull(tokens[3]) : 1LL,
|
||||
isSet(4) ? std::stoull(tokens[4]) : 1LL,
|
||||
isSet(5) ? tokenizeString<std::set<string>>(tokens[5], ",") : std::set<string>{},
|
||||
isSet(6) ? tokenizeString<std::set<string>>(tokens[6], ",") : std::set<string>{},
|
||||
isSet(7) ? tokens[7] : "");
|
||||
}
|
||||
}
|
||||
|
||||
Machines getMachines()
|
||||
{
|
||||
static auto machines = [&]() {
|
||||
Machines machines;
|
||||
parseMachines(settings.builders, machines);
|
||||
return machines;
|
||||
}();
|
||||
return machines;
|
||||
}
|
||||
|
||||
}
|
||||
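As a reading aid for parseMachines above (not part of the commit): each non-comment line of the builders setting carries up to eight whitespace-separated fields, with "-" or omission meaning "unset". A hedged sketch; the host, key path, and feature names are invented.

#include "machines.hh"

// Sketch only: fields are URI, system types, ssh key, maxJobs,
// speedFactor, supported features, mandatory features, host key.
void exampleParseBuilders()
{
    nix::Machines machines;
    nix::parseMachines(
        "builder.example.com x86_64-linux,i686-linux /etc/nix/key 8 2 kvm - -",
        machines);
    // machines.front().storeUri == "ssh://builder.example.com"; the
    // prefix is added by the Machine constructor's compatibility rule.
}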
39 third_party/nix/src/libstore/machines.hh vendored Normal file
@@ -0,0 +1,39 @@
#pragma once

#include "types.hh"

namespace nix {

struct Machine {

    const string storeUri;
    const std::vector<string> systemTypes;
    const string sshKey;
    const unsigned int maxJobs;
    const unsigned int speedFactor;
    const std::set<string> supportedFeatures;
    const std::set<string> mandatoryFeatures;
    const std::string sshPublicHostKey;
    bool enabled = true;

    bool allSupported(const std::set<string> & features) const;

    bool mandatoryMet(const std::set<string> & features) const;

    Machine(decltype(storeUri) storeUri,
        decltype(systemTypes) systemTypes,
        decltype(sshKey) sshKey,
        decltype(maxJobs) maxJobs,
        decltype(speedFactor) speedFactor,
        decltype(supportedFeatures) supportedFeatures,
        decltype(mandatoryFeatures) mandatoryFeatures,
        decltype(sshPublicHostKey) sshPublicHostKey);
};

typedef std::vector<Machine> Machines;

void parseMachines(const std::string & s, Machines & machines);

Machines getMachines();

}
282 third_party/nix/src/libstore/misc.cc vendored Normal file
@@ -0,0 +1,282 @@
#include "derivations.hh"
|
||||
#include "parsed-derivations.hh"
|
||||
#include "globals.hh"
|
||||
#include "local-store.hh"
|
||||
#include "store-api.hh"
|
||||
#include "thread-pool.hh"
|
||||
|
||||
|
||||
namespace nix {
|
||||
|
||||
|
||||
void Store::computeFSClosure(const PathSet & startPaths,
|
||||
PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
|
||||
{
|
||||
struct State
|
||||
{
|
||||
size_t pending;
|
||||
PathSet & paths;
|
||||
std::exception_ptr exc;
|
||||
};
|
||||
|
||||
Sync<State> state_(State{0, paths_, 0});
|
||||
|
||||
std::function<void(const Path &)> enqueue;
|
||||
|
||||
std::condition_variable done;
|
||||
|
||||
enqueue = [&](const Path & path) -> void {
|
||||
{
|
||||
auto state(state_.lock());
|
||||
if (state->exc) return;
|
||||
if (state->paths.count(path)) return;
|
||||
state->paths.insert(path);
|
||||
state->pending++;
|
||||
}
|
||||
|
||||
queryPathInfo(path, {[&, path](std::future<ref<ValidPathInfo>> fut) {
|
||||
// FIXME: calls to isValidPath() should be async
|
||||
|
||||
try {
|
||||
auto info = fut.get();
|
||||
|
||||
if (flipDirection) {
|
||||
|
||||
PathSet referrers;
|
||||
queryReferrers(path, referrers);
|
||||
for (auto & ref : referrers)
|
||||
if (ref != path)
|
||||
enqueue(ref);
|
||||
|
||||
if (includeOutputs)
|
||||
for (auto & i : queryValidDerivers(path))
|
||||
enqueue(i);
|
||||
|
||||
if (includeDerivers && isDerivation(path))
|
||||
for (auto & i : queryDerivationOutputs(path))
|
||||
if (isValidPath(i) && queryPathInfo(i)->deriver == path)
|
||||
enqueue(i);
|
||||
|
||||
} else {
|
||||
|
||||
for (auto & ref : info->references)
|
||||
if (ref != path)
|
||||
enqueue(ref);
|
||||
|
||||
if (includeOutputs && isDerivation(path))
|
||||
for (auto & i : queryDerivationOutputs(path))
|
||||
if (isValidPath(i)) enqueue(i);
|
||||
|
||||
if (includeDerivers && isValidPath(info->deriver))
|
||||
enqueue(info->deriver);
|
||||
|
||||
}
|
||||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
assert(state->pending);
|
||||
if (!--state->pending) done.notify_one();
|
||||
}
|
||||
|
||||
} catch (...) {
|
||||
auto state(state_.lock());
|
||||
if (!state->exc) state->exc = std::current_exception();
|
||||
assert(state->pending);
|
||||
if (!--state->pending) done.notify_one();
|
||||
};
|
||||
}});
|
||||
};
|
||||
|
||||
for (auto & startPath : startPaths)
|
||||
enqueue(startPath);
|
||||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
while (state->pending) state.wait(done);
|
||||
if (state->exc) std::rethrow_exception(state->exc);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void Store::computeFSClosure(const Path & startPath,
|
||||
PathSet & paths_, bool flipDirection, bool includeOutputs, bool includeDerivers)
|
||||
{
|
||||
computeFSClosure(PathSet{startPath}, paths_, flipDirection, includeOutputs, includeDerivers);
|
||||
}
|
||||
|
||||
|
||||
void Store::queryMissing(const PathSet & targets,
|
||||
PathSet & willBuild_, PathSet & willSubstitute_, PathSet & unknown_,
|
||||
unsigned long long & downloadSize_, unsigned long long & narSize_)
|
||||
{
|
||||
Activity act(*logger, lvlDebug, actUnknown, "querying info about missing paths");
|
||||
|
||||
downloadSize_ = narSize_ = 0;
|
||||
|
||||
ThreadPool pool;
|
||||
|
||||
struct State
|
||||
{
|
||||
PathSet done;
|
||||
PathSet & unknown, & willSubstitute, & willBuild;
|
||||
unsigned long long & downloadSize;
|
||||
unsigned long long & narSize;
|
||||
};
|
||||
|
||||
struct DrvState
|
||||
{
|
||||
size_t left;
|
||||
bool done = false;
|
||||
PathSet outPaths;
|
||||
DrvState(size_t left) : left(left) { }
|
||||
};
|
||||
|
||||
Sync<State> state_(State{PathSet(), unknown_, willSubstitute_, willBuild_, downloadSize_, narSize_});
|
||||
|
||||
std::function<void(Path)> doPath;
|
||||
|
||||
auto mustBuildDrv = [&](const Path & drvPath, const Derivation & drv) {
|
||||
{
|
||||
auto state(state_.lock());
|
||||
state->willBuild.insert(drvPath);
|
||||
}
|
||||
|
||||
for (auto & i : drv.inputDrvs)
|
||||
pool.enqueue(std::bind(doPath, makeDrvPathWithOutputs(i.first, i.second)));
|
||||
};
|
||||
|
||||
auto checkOutput = [&](
|
||||
const Path & drvPath, ref<Derivation> drv, const Path & outPath, ref<Sync<DrvState>> drvState_)
|
||||
{
|
||||
if (drvState_->lock()->done) return;
|
||||
|
||||
SubstitutablePathInfos infos;
|
||||
querySubstitutablePathInfos({outPath}, infos);
|
||||
|
||||
if (infos.empty()) {
|
||||
drvState_->lock()->done = true;
|
||||
mustBuildDrv(drvPath, *drv);
|
||||
} else {
|
||||
{
|
||||
auto drvState(drvState_->lock());
|
||||
if (drvState->done) return;
|
||||
assert(drvState->left);
|
||||
drvState->left--;
|
||||
drvState->outPaths.insert(outPath);
|
||||
if (!drvState->left) {
|
||||
for (auto & path : drvState->outPaths)
|
||||
pool.enqueue(std::bind(doPath, path));
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
doPath = [&](const Path & path) {
|
||||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
if (state->done.count(path)) return;
|
||||
state->done.insert(path);
|
||||
}
|
||||
|
||||
DrvPathWithOutputs i2 = parseDrvPathWithOutputs(path);
|
||||
|
||||
if (isDerivation(i2.first)) {
|
||||
if (!isValidPath(i2.first)) {
|
||||
// FIXME: we could try to substitute the derivation.
|
||||
auto state(state_.lock());
|
||||
state->unknown.insert(path);
|
||||
return;
|
||||
}
|
||||
|
||||
Derivation drv = derivationFromPath(i2.first);
|
||||
ParsedDerivation parsedDrv(i2.first, drv);
|
||||
|
||||
PathSet invalid;
|
||||
for (auto & j : drv.outputs)
|
||||
if (wantOutput(j.first, i2.second)
|
||||
&& !isValidPath(j.second.path))
|
||||
invalid.insert(j.second.path);
|
||||
if (invalid.empty()) return;
|
||||
|
||||
if (settings.useSubstitutes && parsedDrv.substitutesAllowed()) {
|
||||
auto drvState = make_ref<Sync<DrvState>>(DrvState(invalid.size()));
|
||||
for (auto & output : invalid)
|
||||
pool.enqueue(std::bind(checkOutput, i2.first, make_ref<Derivation>(drv), output, drvState));
|
||||
} else
|
||||
mustBuildDrv(i2.first, drv);
|
||||
|
||||
} else {
|
||||
|
||||
if (isValidPath(path)) return;
|
||||
|
||||
SubstitutablePathInfos infos;
|
||||
querySubstitutablePathInfos({path}, infos);
|
||||
|
||||
if (infos.empty()) {
|
||||
auto state(state_.lock());
|
||||
state->unknown.insert(path);
|
||||
return;
|
||||
}
|
||||
|
||||
auto info = infos.find(path);
|
||||
assert(info != infos.end());
|
||||
|
||||
{
|
||||
auto state(state_.lock());
|
||||
state->willSubstitute.insert(path);
|
||||
state->downloadSize += info->second.downloadSize;
|
||||
state->narSize += info->second.narSize;
|
||||
}
|
||||
|
||||
for (auto & ref : info->second.references)
|
||||
pool.enqueue(std::bind(doPath, ref));
|
||||
}
|
||||
};
|
||||
|
||||
for (auto & path : targets)
|
||||
pool.enqueue(std::bind(doPath, path));
|
||||
|
||||
pool.process();
|
||||
}
|
||||
|
||||
|
||||
Paths Store::topoSortPaths(const PathSet & paths)
|
||||
{
|
||||
Paths sorted;
|
||||
PathSet visited, parents;
|
||||
|
||||
std::function<void(const Path & path, const Path * parent)> dfsVisit;
|
||||
|
||||
dfsVisit = [&](const Path & path, const Path * parent) {
|
||||
if (parents.find(path) != parents.end())
|
||||
throw BuildError(format("cycle detected in the references of '%1%' from '%2%'") % path % *parent);
|
||||
|
||||
if (visited.find(path) != visited.end()) return;
|
||||
visited.insert(path);
|
||||
parents.insert(path);
|
||||
|
||||
PathSet references;
|
||||
try {
|
||||
references = queryPathInfo(path)->references;
|
||||
} catch (InvalidPath &) {
|
||||
}
|
||||
|
||||
for (auto & i : references)
|
||||
/* Don't traverse into paths that don't exist. That can
|
||||
happen due to substitutes for non-existent paths. */
|
||||
if (i != path && paths.find(i) != paths.end())
|
||||
dfsVisit(i, &path);
|
||||
|
||||
sorted.push_front(path);
|
||||
parents.erase(path);
|
||||
};
|
||||
|
||||
for (auto & i : paths)
|
||||
dfsVisit(i, nullptr);
|
||||
|
||||
return sorted;
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
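To make the traversal direction concrete (not part of the commit), a short sketch of calling computeFSClosure as defined above; the path argument is a placeholder supplied by the caller.

#include "store-api.hh"

// Sketch only: collect a path plus everything it references.
void exampleClosure(nix::ref<nix::Store> store, const nix::Path & path)
{
    nix::PathSet closure;
    store->computeFSClosure(path, closure);
    // With flipDirection = true the same call walks referrers
    // instead of references.
}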
266 third_party/nix/src/libstore/nar-accessor.cc vendored Normal file
@@ -0,0 +1,266 @@
#include "nar-accessor.hh"
|
||||
#include "archive.hh"
|
||||
#include "json.hh"
|
||||
|
||||
#include <map>
|
||||
#include <stack>
|
||||
#include <algorithm>
|
||||
|
||||
#include <nlohmann/json.hpp>
|
||||
|
||||
namespace nix {
|
||||
|
||||
struct NarMember
|
||||
{
|
||||
FSAccessor::Type type = FSAccessor::Type::tMissing;
|
||||
|
||||
bool isExecutable = false;
|
||||
|
||||
/* If this is a regular file, position of the contents of this
|
||||
file in the NAR. */
|
||||
size_t start = 0, size = 0;
|
||||
|
||||
std::string target;
|
||||
|
||||
/* If this is a directory, all the children of the directory. */
|
||||
std::map<std::string, NarMember> children;
|
||||
};
|
||||
|
||||
struct NarAccessor : public FSAccessor
|
||||
{
|
||||
std::shared_ptr<const std::string> nar;
|
||||
|
||||
GetNarBytes getNarBytes;
|
||||
|
||||
NarMember root;
|
||||
|
||||
struct NarIndexer : ParseSink, StringSource
|
||||
{
|
||||
NarAccessor & acc;
|
||||
|
||||
std::stack<NarMember *> parents;
|
||||
|
||||
std::string currentStart;
|
||||
bool isExec = false;
|
||||
|
||||
NarIndexer(NarAccessor & acc, const std::string & nar)
|
||||
: StringSource(nar), acc(acc)
|
||||
{ }
|
||||
|
||||
void createMember(const Path & path, NarMember member) {
|
||||
size_t level = std::count(path.begin(), path.end(), '/');
|
||||
while (parents.size() > level) parents.pop();
|
||||
|
||||
if (parents.empty()) {
|
||||
acc.root = std::move(member);
|
||||
parents.push(&acc.root);
|
||||
} else {
|
||||
if (parents.top()->type != FSAccessor::Type::tDirectory)
|
||||
throw Error("NAR file missing parent directory of path '%s'", path);
|
||||
auto result = parents.top()->children.emplace(baseNameOf(path), std::move(member));
|
||||
parents.push(&result.first->second);
|
||||
}
|
||||
}
|
||||
|
||||
void createDirectory(const Path & path) override
|
||||
{
|
||||
createMember(path, {FSAccessor::Type::tDirectory, false, 0, 0});
|
||||
}
|
||||
|
||||
void createRegularFile(const Path & path) override
|
||||
{
|
||||
createMember(path, {FSAccessor::Type::tRegular, false, 0, 0});
|
||||
}
|
||||
|
||||
void isExecutable() override
|
||||
{
|
||||
parents.top()->isExecutable = true;
|
||||
}
|
||||
|
||||
void preallocateContents(unsigned long long size) override
|
||||
{
|
||||
currentStart = string(s, pos, 16);
|
||||
assert(size <= std::numeric_limits<size_t>::max());
|
||||
parents.top()->size = (size_t)size;
|
||||
parents.top()->start = pos;
|
||||
}
|
||||
|
||||
void receiveContents(unsigned char * data, unsigned int len) override
|
||||
{
|
||||
// Sanity check
|
||||
if (!currentStart.empty()) {
|
||||
assert(len < 16 || currentStart == string((char *) data, 16));
|
||||
currentStart.clear();
|
||||
}
|
||||
}
|
||||
|
||||
void createSymlink(const Path & path, const string & target) override
|
||||
{
|
||||
createMember(path,
|
||||
NarMember{FSAccessor::Type::tSymlink, false, 0, 0, target});
|
||||
}
|
||||
};
|
||||
|
||||
NarAccessor(ref<const std::string> nar) : nar(nar)
|
||||
{
|
||||
NarIndexer indexer(*this, *nar);
|
||||
parseDump(indexer, indexer);
|
||||
}
|
||||
|
||||
NarAccessor(const std::string & listing, GetNarBytes getNarBytes)
|
||||
: getNarBytes(getNarBytes)
|
||||
{
|
||||
using json = nlohmann::json;
|
||||
|
||||
std::function<void(NarMember &, json &)> recurse;
|
||||
|
||||
recurse = [&](NarMember & member, json & v) {
|
||||
std::string type = v["type"];
|
||||
|
||||
if (type == "directory") {
|
||||
member.type = FSAccessor::Type::tDirectory;
|
||||
for (auto i = v["entries"].begin(); i != v["entries"].end(); ++i) {
|
||||
std::string name = i.key();
|
||||
recurse(member.children[name], i.value());
|
||||
}
|
||||
} else if (type == "regular") {
|
||||
member.type = FSAccessor::Type::tRegular;
|
||||
member.size = v["size"];
|
||||
member.isExecutable = v.value("executable", false);
|
||||
member.start = v["narOffset"];
|
||||
} else if (type == "symlink") {
|
||||
member.type = FSAccessor::Type::tSymlink;
|
||||
member.target = v.value("target", "");
|
||||
} else return;
|
||||
};
|
||||
|
||||
json v = json::parse(listing);
|
||||
recurse(root, v);
|
||||
}
|
||||
|
||||
NarMember * find(const Path & path)
|
||||
{
|
||||
Path canon = path == "" ? "" : canonPath(path);
|
||||
NarMember * current = &root;
|
||||
auto end = path.end();
|
||||
for (auto it = path.begin(); it != end; ) {
|
||||
// because it != end, the remaining component is non-empty so we need
|
||||
// a directory
|
||||
if (current->type != FSAccessor::Type::tDirectory) return nullptr;
|
||||
|
||||
// skip slash (canonPath above ensures that this is always a slash)
|
||||
assert(*it == '/');
|
||||
it += 1;
|
||||
|
||||
// lookup current component
|
||||
auto next = std::find(it, end, '/');
|
||||
auto child = current->children.find(std::string(it, next));
|
||||
if (child == current->children.end()) return nullptr;
|
||||
current = &child->second;
|
||||
|
||||
it = next;
|
||||
}
|
||||
|
||||
return current;
|
||||
}
|
||||
|
||||
NarMember & get(const Path & path) {
|
||||
auto result = find(path);
|
||||
if (result == nullptr)
|
||||
throw Error("NAR file does not contain path '%1%'", path);
|
||||
return *result;
|
||||
}
|
||||
|
||||
Stat stat(const Path & path) override
|
||||
{
|
||||
auto i = find(path);
|
||||
if (i == nullptr)
|
||||
return {FSAccessor::Type::tMissing, 0, false};
|
||||
return {i->type, i->size, i->isExecutable, i->start};
|
||||
}
|
||||
|
||||
StringSet readDirectory(const Path & path) override
|
||||
{
|
||||
auto i = get(path);
|
||||
|
||||
if (i.type != FSAccessor::Type::tDirectory)
|
||||
throw Error(format("path '%1%' inside NAR file is not a directory") % path);
|
||||
|
||||
StringSet res;
|
||||
for (auto & child : i.children)
|
||||
res.insert(child.first);
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
std::string readFile(const Path & path) override
|
||||
{
|
||||
auto i = get(path);
|
||||
if (i.type != FSAccessor::Type::tRegular)
|
||||
throw Error(format("path '%1%' inside NAR file is not a regular file") % path);
|
||||
|
||||
if (getNarBytes) return getNarBytes(i.start, i.size);
|
||||
|
||||
assert(nar);
|
||||
return std::string(*nar, i.start, i.size);
|
||||
}
|
||||
|
||||
std::string readLink(const Path & path) override
|
||||
{
|
||||
auto i = get(path);
|
||||
if (i.type != FSAccessor::Type::tSymlink)
|
||||
throw Error(format("path '%1%' inside NAR file is not a symlink") % path);
|
||||
return i.target;
|
||||
}
|
||||
};
|
||||
|
||||
ref<FSAccessor> makeNarAccessor(ref<const std::string> nar)
|
||||
{
|
||||
return make_ref<NarAccessor>(nar);
|
||||
}
|
||||
|
||||
ref<FSAccessor> makeLazyNarAccessor(const std::string & listing,
|
||||
GetNarBytes getNarBytes)
|
||||
{
|
||||
return make_ref<NarAccessor>(listing, getNarBytes);
|
||||
}
|
||||
|
||||
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
|
||||
const Path & path, bool recurse)
|
||||
{
|
||||
auto st = accessor->stat(path);
|
||||
|
||||
auto obj = res.object();
|
||||
|
||||
switch (st.type) {
|
||||
case FSAccessor::Type::tRegular:
|
||||
obj.attr("type", "regular");
|
||||
obj.attr("size", st.fileSize);
|
||||
if (st.isExecutable)
|
||||
obj.attr("executable", true);
|
||||
if (st.narOffset)
|
||||
obj.attr("narOffset", st.narOffset);
|
||||
break;
|
||||
case FSAccessor::Type::tDirectory:
|
||||
obj.attr("type", "directory");
|
||||
{
|
||||
auto res2 = obj.object("entries");
|
||||
for (auto & name : accessor->readDirectory(path)) {
|
||||
if (recurse) {
|
||||
auto res3 = res2.placeholder(name);
|
||||
listNar(res3, accessor, path + "/" + name, true);
|
||||
} else
|
||||
res2.object(name);
|
||||
}
|
||||
}
|
||||
break;
|
||||
case FSAccessor::Type::tSymlink:
|
||||
obj.attr("type", "symlink");
|
||||
obj.attr("target", accessor->readLink(path));
|
||||
break;
|
||||
default:
|
||||
throw Error("path '%s' does not exist in NAR", path);
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
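A small usage sketch for the accessor above (not part of the commit), assuming the NAR bytes are already in memory and the NAR's root is a directory.

#include "nar-accessor.hh"

#include <iostream>

// Sketch only: index an in-memory NAR and list its root entries.
void exampleNarListing(nix::ref<const std::string> nar)
{
    auto accessor = nix::makeNarAccessor(nar);
    for (auto & name : accessor->readDirectory(""))
        std::cout << name << "\n";
}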
30 third_party/nix/src/libstore/nar-accessor.hh vendored Normal file
@@ -0,0 +1,30 @@
#pragma once

#include <functional>

#include "fs-accessor.hh"

namespace nix {

/* Return an object that provides access to the contents of a NAR
   file. */
ref<FSAccessor> makeNarAccessor(ref<const std::string> nar);

/* Create a NAR accessor from a NAR listing (in the format produced by
   listNar()). The callback getNarBytes(offset, length) is used by the
   readFile() method of the accessor to get the contents of files
   inside the NAR. */
typedef std::function<std::string(uint64_t, uint64_t)> GetNarBytes;

ref<FSAccessor> makeLazyNarAccessor(
    const std::string & listing,
    GetNarBytes getNarBytes);

class JSONPlaceholder;

/* Write a JSON representation of the contents of a NAR (except file
   contents). */
void listNar(JSONPlaceholder & res, ref<FSAccessor> accessor,
    const Path & path, bool recurse);

}
267 third_party/nix/src/libstore/nar-info-disk-cache.cc vendored Normal file
@@ -0,0 +1,267 @@
#include "nar-info-disk-cache.hh"
|
||||
#include "sync.hh"
|
||||
#include "sqlite.hh"
|
||||
#include "globals.hh"
|
||||
|
||||
#include <sqlite3.h>
|
||||
|
||||
namespace nix {
|
||||
|
||||
static const char * schema = R"sql(
|
||||
|
||||
create table if not exists BinaryCaches (
|
||||
id integer primary key autoincrement not null,
|
||||
url text unique not null,
|
||||
timestamp integer not null,
|
||||
storeDir text not null,
|
||||
wantMassQuery integer not null,
|
||||
priority integer not null
|
||||
);
|
||||
|
||||
create table if not exists NARs (
|
||||
cache integer not null,
|
||||
hashPart text not null,
|
||||
namePart text,
|
||||
url text,
|
||||
compression text,
|
||||
fileHash text,
|
||||
fileSize integer,
|
||||
narHash text,
|
||||
narSize integer,
|
||||
refs text,
|
||||
deriver text,
|
||||
sigs text,
|
||||
ca text,
|
||||
timestamp integer not null,
|
||||
present integer not null,
|
||||
primary key (cache, hashPart),
|
||||
foreign key (cache) references BinaryCaches(id) on delete cascade
|
||||
);
|
||||
|
||||
create table if not exists LastPurge (
|
||||
dummy text primary key,
|
||||
value integer
|
||||
);
|
||||
|
||||
)sql";
|
||||
|
||||
class NarInfoDiskCacheImpl : public NarInfoDiskCache
|
||||
{
|
||||
public:
|
||||
|
||||
/* How often to purge expired entries from the cache. */
|
||||
const int purgeInterval = 24 * 3600;
|
||||
|
||||
struct Cache
|
||||
{
|
||||
int id;
|
||||
Path storeDir;
|
||||
bool wantMassQuery;
|
||||
int priority;
|
||||
};
|
||||
|
||||
struct State
|
||||
{
|
||||
SQLite db;
|
||||
SQLiteStmt insertCache, queryCache, insertNAR, insertMissingNAR, queryNAR, purgeCache;
|
||||
std::map<std::string, Cache> caches;
|
||||
};
|
||||
|
||||
Sync<State> _state;
|
||||
|
||||
NarInfoDiskCacheImpl()
|
||||
{
|
||||
auto state(_state.lock());
|
||||
|
||||
Path dbPath = getCacheDir() + "/nix/binary-cache-v6.sqlite";
|
||||
createDirs(dirOf(dbPath));
|
||||
|
||||
state->db = SQLite(dbPath);
|
||||
|
||||
if (sqlite3_busy_timeout(state->db, 60 * 60 * 1000) != SQLITE_OK)
|
||||
throwSQLiteError(state->db, "setting timeout");
|
||||
|
||||
// We can always reproduce the cache.
|
||||
state->db.exec("pragma synchronous = off");
|
||||
state->db.exec("pragma main.journal_mode = truncate");
|
||||
|
||||
state->db.exec(schema);
|
||||
|
||||
state->insertCache.create(state->db,
|
||||
"insert or replace into BinaryCaches(url, timestamp, storeDir, wantMassQuery, priority) values (?, ?, ?, ?, ?)");
|
||||
|
||||
state->queryCache.create(state->db,
|
||||
"select id, storeDir, wantMassQuery, priority from BinaryCaches where url = ?");
|
||||
|
||||
state->insertNAR.create(state->db,
|
||||
"insert or replace into NARs(cache, hashPart, namePart, url, compression, fileHash, fileSize, narHash, "
|
||||
"narSize, refs, deriver, sigs, ca, timestamp, present) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, 1)");
|
||||
|
||||
state->insertMissingNAR.create(state->db,
|
||||
"insert or replace into NARs(cache, hashPart, timestamp, present) values (?, ?, ?, 0)");
|
||||
|
||||
state->queryNAR.create(state->db,
|
||||
"select present, namePart, url, compression, fileHash, fileSize, narHash, narSize, refs, deriver, sigs, ca from NARs where cache = ? and hashPart = ? and ((present = 0 and timestamp > ?) or (present = 1 and timestamp > ?))");
|
||||
|
||||
/* Periodically purge expired entries from the database. */
|
||||
retrySQLite<void>([&]() {
|
||||
auto now = time(0);
|
||||
|
||||
SQLiteStmt queryLastPurge(state->db, "select value from LastPurge");
|
||||
auto queryLastPurge_(queryLastPurge.use());
|
||||
|
||||
if (!queryLastPurge_.next() || queryLastPurge_.getInt(0) < now - purgeInterval) {
|
||||
SQLiteStmt(state->db,
|
||||
"delete from NARs where ((present = 0 and timestamp < ?) or (present = 1 and timestamp < ?))")
|
||||
.use()
|
||||
(now - settings.ttlNegativeNarInfoCache)
|
||||
(now - settings.ttlPositiveNarInfoCache)
|
||||
.exec();
|
||||
|
||||
debug("deleted %d entries from the NAR info disk cache", sqlite3_changes(state->db));
|
||||
|
||||
SQLiteStmt(state->db,
|
||||
"insert or replace into LastPurge(dummy, value) values ('', ?)")
|
||||
.use()(now).exec();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Cache & getCache(State & state, const std::string & uri)
|
||||
{
|
||||
auto i = state.caches.find(uri);
|
||||
if (i == state.caches.end()) abort();
|
||||
return i->second;
|
||||
}
|
||||
|
||||
void createCache(const std::string & uri, const Path & storeDir, bool wantMassQuery, int priority) override
|
||||
{
|
||||
retrySQLite<void>([&]() {
|
||||
auto state(_state.lock());
|
||||
|
||||
// FIXME: race
|
||||
|
||||
state->insertCache.use()(uri)(time(0))(storeDir)(wantMassQuery)(priority).exec();
|
||||
assert(sqlite3_changes(state->db) == 1);
|
||||
state->caches[uri] = Cache{(int) sqlite3_last_insert_rowid(state->db), storeDir, wantMassQuery, priority};
|
||||
});
|
||||
}
|
||||
|
||||
bool cacheExists(const std::string & uri,
|
||||
bool & wantMassQuery, int & priority) override
|
||||
{
|
||||
return retrySQLite<bool>([&]() {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto i = state->caches.find(uri);
|
||||
if (i == state->caches.end()) {
|
||||
auto queryCache(state->queryCache.use()(uri));
|
||||
if (!queryCache.next()) return false;
|
||||
state->caches.emplace(uri,
|
||||
Cache{(int) queryCache.getInt(0), queryCache.getStr(1), queryCache.getInt(2) != 0, (int) queryCache.getInt(3)});
|
||||
}
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
wantMassQuery = cache.wantMassQuery;
|
||||
priority = cache.priority;
|
||||
|
||||
return true;
|
||||
});
|
||||
}
|
||||
|
||||
std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
|
||||
const std::string & uri, const std::string & hashPart) override
|
||||
{
|
||||
return retrySQLite<std::pair<Outcome, std::shared_ptr<NarInfo>>>(
|
||||
[&]() -> std::pair<Outcome, std::shared_ptr<NarInfo>> {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
auto now = time(0);
|
||||
|
||||
auto queryNAR(state->queryNAR.use()
|
||||
(cache.id)
|
||||
(hashPart)
|
||||
(now - settings.ttlNegativeNarInfoCache)
|
||||
(now - settings.ttlPositiveNarInfoCache));
|
||||
|
||||
if (!queryNAR.next())
|
||||
return {oUnknown, 0};
|
||||
|
||||
if (!queryNAR.getInt(0))
|
||||
return {oInvalid, 0};
|
||||
|
||||
auto narInfo = make_ref<NarInfo>();
|
||||
|
||||
auto namePart = queryNAR.getStr(1);
|
||||
narInfo->path = cache.storeDir + "/" +
|
||||
hashPart + (namePart.empty() ? "" : "-" + namePart);
|
||||
narInfo->url = queryNAR.getStr(2);
|
||||
narInfo->compression = queryNAR.getStr(3);
|
||||
if (!queryNAR.isNull(4))
|
||||
narInfo->fileHash = Hash(queryNAR.getStr(4));
|
||||
narInfo->fileSize = queryNAR.getInt(5);
|
||||
narInfo->narHash = Hash(queryNAR.getStr(6));
|
||||
narInfo->narSize = queryNAR.getInt(7);
|
||||
for (auto & r : tokenizeString<Strings>(queryNAR.getStr(8), " "))
|
||||
narInfo->references.insert(cache.storeDir + "/" + r);
|
||||
if (!queryNAR.isNull(9))
|
||||
narInfo->deriver = cache.storeDir + "/" + queryNAR.getStr(9);
|
||||
for (auto & sig : tokenizeString<Strings>(queryNAR.getStr(10), " "))
|
||||
narInfo->sigs.insert(sig);
|
||||
narInfo->ca = queryNAR.getStr(11);
|
||||
|
||||
return {oValid, narInfo};
|
||||
});
|
||||
}
|
||||
|
||||
void upsertNarInfo(
|
||||
const std::string & uri, const std::string & hashPart,
|
||||
std::shared_ptr<ValidPathInfo> info) override
|
||||
{
|
||||
retrySQLite<void>([&]() {
|
||||
auto state(_state.lock());
|
||||
|
||||
auto & cache(getCache(*state, uri));
|
||||
|
||||
if (info) {
|
||||
|
||||
auto narInfo = std::dynamic_pointer_cast<NarInfo>(info);
|
||||
|
||||
assert(hashPart == storePathToHash(info->path));
|
||||
|
||||
state->insertNAR.use()
|
||||
(cache.id)
|
||||
(hashPart)
|
||||
(storePathToName(info->path))
|
||||
(narInfo ? narInfo->url : "", narInfo != 0)
|
||||
(narInfo ? narInfo->compression : "", narInfo != 0)
|
||||
(narInfo && narInfo->fileHash ? narInfo->fileHash.to_string() : "", narInfo && narInfo->fileHash)
|
||||
(narInfo ? narInfo->fileSize : 0, narInfo != 0 && narInfo->fileSize)
|
||||
(info->narHash.to_string())
|
||||
(info->narSize)
|
||||
(concatStringsSep(" ", info->shortRefs()))
|
||||
(info->deriver != "" ? baseNameOf(info->deriver) : "", info->deriver != "")
|
||||
(concatStringsSep(" ", info->sigs))
|
||||
(info->ca)
|
||||
(time(0)).exec();
|
||||
|
||||
} else {
|
||||
state->insertMissingNAR.use()
|
||||
(cache.id)
|
||||
(hashPart)
|
||||
(time(0)).exec();
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
ref<NarInfoDiskCache> getNarInfoDiskCache()
|
||||
{
|
||||
static ref<NarInfoDiskCache> cache = make_ref<NarInfoDiskCacheImpl>();
|
||||
return cache;
|
||||
}
|
||||
|
||||
}
|
||||
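For orientation (not part of the commit): a sketch of how a binary cache consults this disk cache before hitting the network. The URI, store directory, priority, and hash part below are placeholders.

#include "nar-info-disk-cache.hh"

// Sketch only: a positive or negative answer within the TTL windows
// avoids a round trip to the remote binary cache.
void exampleDiskCacheLookup()
{
    auto cache = nix::getNarInfoDiskCache();
    bool wantMassQuery; int priority;
    // The cache row must exist before lookups; getCache() aborts otherwise.
    if (!cache->cacheExists("https://cache.example.org", wantMassQuery, priority))
        cache->createCache("https://cache.example.org", "/nix/store", true, 50);
    auto res = cache->lookupNarInfo(
        "https://cache.example.org", "0000000000000000000000000000000a");
    if (res.first == nix::NarInfoDiskCache::oValid) {
        // res.second holds the cached NarInfo.
    }
}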
31 third_party/nix/src/libstore/nar-info-disk-cache.hh vendored Normal file
@@ -0,0 +1,31 @@
#pragma once

#include "ref.hh"
#include "nar-info.hh"

namespace nix {

class NarInfoDiskCache
{
public:
    typedef enum { oValid, oInvalid, oUnknown } Outcome;

    virtual void createCache(const std::string & uri, const Path & storeDir,
        bool wantMassQuery, int priority) = 0;

    virtual bool cacheExists(const std::string & uri,
        bool & wantMassQuery, int & priority) = 0;

    virtual std::pair<Outcome, std::shared_ptr<NarInfo>> lookupNarInfo(
        const std::string & uri, const std::string & hashPart) = 0;

    virtual void upsertNarInfo(
        const std::string & uri, const std::string & hashPart,
        std::shared_ptr<ValidPathInfo> info) = 0;
};

/* Return a singleton cache object that can be used concurrently by
   multiple threads. */
ref<NarInfoDiskCache> getNarInfoDiskCache();

}
116
third_party/nix/src/libstore/nar-info.cc
vendored
Normal file

@@ -0,0 +1,116 @@
#include "globals.hh"
#include "nar-info.hh"

namespace nix {

NarInfo::NarInfo(const Store & store, const std::string & s, const std::string & whence)
{
    auto corrupt = [&]() {
        throw Error(format("NAR info file '%1%' is corrupt") % whence);
    };

    auto parseHashField = [&](const string & s) {
        try {
            return Hash(s);
        } catch (BadHash &) {
            corrupt();
            return Hash(); // never reached
        }
    };

    size_t pos = 0;
    while (pos < s.size()) {

        size_t colon = s.find(':', pos);
        if (colon == std::string::npos) corrupt();

        std::string name(s, pos, colon - pos);

        size_t eol = s.find('\n', colon + 2);
        if (eol == std::string::npos) corrupt();

        std::string value(s, colon + 2, eol - colon - 2);

        if (name == "StorePath") {
            if (!store.isStorePath(value)) corrupt();
            path = value;
        }
        else if (name == "URL")
            url = value;
        else if (name == "Compression")
            compression = value;
        else if (name == "FileHash")
            fileHash = parseHashField(value);
        else if (name == "FileSize") {
            if (!string2Int(value, fileSize)) corrupt();
        }
        else if (name == "NarHash")
            narHash = parseHashField(value);
        else if (name == "NarSize") {
            if (!string2Int(value, narSize)) corrupt();
        }
        else if (name == "References") {
            auto refs = tokenizeString<Strings>(value, " ");
            if (!references.empty()) corrupt();
            for (auto & r : refs) {
                auto r2 = store.storeDir + "/" + r;
                if (!store.isStorePath(r2)) corrupt();
                references.insert(r2);
            }
        }
        else if (name == "Deriver") {
            if (value != "unknown-deriver") {
                auto p = store.storeDir + "/" + value;
                if (!store.isStorePath(p)) corrupt();
                deriver = p;
            }
        }
        else if (name == "System")
            system = value;
        else if (name == "Sig")
            sigs.insert(value);
        else if (name == "CA") {
            if (!ca.empty()) corrupt();
            ca = value;
        }

        pos = eol + 1;
    }

    if (compression == "") compression = "bzip2";

    if (path.empty() || url.empty() || narSize == 0 || !narHash) corrupt();
}
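/* Illustrative example of the text format parsed above (all values are
   made up and the hashes are abbreviated):

     StorePath: /nix/store/7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-hello-2.10
     URL: nar/1w1fff338fvdw53sq...nar.xz
     Compression: xz
     FileHash: sha256:1w1fff338fvdw53sq...
     FileSize: 41104
     NarHash: sha256:1impfw8zdgisxkghq...
     NarSize: 206104
     References: 7h7qgvs4kgzsn8a6rb273saxyqh4jxlz-hello-2.10
     Sig: cache.nixos.org-1:...

   Keys that the if/else chain does not recognise are silently skipped,
   so the format is forward-compatible. */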
std::string NarInfo::to_string() const
{
    std::string res;
    res += "StorePath: " + path + "\n";
    res += "URL: " + url + "\n";
    assert(compression != "");
    res += "Compression: " + compression + "\n";
    assert(fileHash.type == htSHA256);
    res += "FileHash: " + fileHash.to_string(Base32) + "\n";
    res += "FileSize: " + std::to_string(fileSize) + "\n";
    assert(narHash.type == htSHA256);
    res += "NarHash: " + narHash.to_string(Base32) + "\n";
    res += "NarSize: " + std::to_string(narSize) + "\n";

    res += "References: " + concatStringsSep(" ", shortRefs()) + "\n";

    if (!deriver.empty())
        res += "Deriver: " + baseNameOf(deriver) + "\n";

    if (!system.empty())
        res += "System: " + system + "\n";

    for (auto sig : sigs)
        res += "Sig: " + sig + "\n";

    if (!ca.empty())
        res += "CA: " + ca + "\n";

    return res;
}

}
24
third_party/nix/src/libstore/nar-info.hh
vendored
Normal file

@@ -0,0 +1,24 @@
#pragma once

#include "types.hh"
#include "hash.hh"
#include "store-api.hh"

namespace nix {

struct NarInfo : ValidPathInfo
{
    std::string url;
    std::string compression;
    Hash fileHash;
    uint64_t fileSize = 0;
    std::string system;

    NarInfo() { }
    NarInfo(const ValidPathInfo & info) : ValidPathInfo(info) { }
    NarInfo(const Store & store, const std::string & s, const std::string & whence);

    std::string to_string() const;
};

}
9
third_party/nix/src/libstore/nix-store.pc.in
vendored
Normal file

@@ -0,0 +1,9 @@
prefix=@prefix@
libdir=@libdir@
includedir=@includedir@

Name: Nix
Description: Nix Package Manager
Version: @PACKAGE_VERSION@
Libs: -L${libdir} -lnixstore -lnixutil
Cflags: -I${includedir}/nix -std=c++17
302
third_party/nix/src/libstore/optimise-store.cc
vendored
Normal file

@@ -0,0 +1,302 @@
#include "util.hh"
#include "local-store.hh"
#include "globals.hh"

#include <cstdlib>
#include <cstring>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>
#include <regex>


namespace nix {


static void makeWritable(const Path & path)
{
    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path '%1%'") % path);
    if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
        throw SysError(format("changing writability of '%1%'") % path);
}


struct MakeReadOnly
{
    Path path;
    MakeReadOnly(const Path & path) : path(path) { }
    ~MakeReadOnly()
    {
        try {
            /* This will make the path read-only. */
            if (path != "") canonicaliseTimestampAndPermissions(path);
        } catch (...) {
            ignoreException();
        }
    }
};


LocalStore::InodeHash LocalStore::loadInodeHash()
{
    debug("loading hash inodes in memory");
    InodeHash inodeHash;

    AutoCloseDir dir(opendir(linksDir.c_str()));
    if (!dir) throw SysError(format("opening directory '%1%'") % linksDir);

    struct dirent * dirent;
    while (errno = 0, dirent = readdir(dir.get())) { /* sic */
        checkInterrupt();
        // We don't care if we hit non-hash files, anything goes
        inodeHash.insert(dirent->d_ino);
    }
    if (errno) throw SysError(format("reading directory '%1%'") % linksDir);

    printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size());

    return inodeHash;
}


Strings LocalStore::readDirectoryIgnoringInodes(const Path & path, const InodeHash & inodeHash)
{
    Strings names;

    AutoCloseDir dir(opendir(path.c_str()));
    if (!dir) throw SysError(format("opening directory '%1%'") % path);

    struct dirent * dirent;
    while (errno = 0, dirent = readdir(dir.get())) { /* sic */
        checkInterrupt();

        if (inodeHash.count(dirent->d_ino)) {
            debug(format("'%1%' is already linked") % dirent->d_name);
            continue;
        }

        string name = dirent->d_name;
        if (name == "." || name == "..") continue;
        names.push_back(name);
    }
    if (errno) throw SysError(format("reading directory '%1%'") % path);

    return names;
}


void LocalStore::optimisePath_(Activity * act, OptimiseStats & stats,
    const Path & path, InodeHash & inodeHash)
{
    checkInterrupt();

    struct stat st;
    if (lstat(path.c_str(), &st))
        throw SysError(format("getting attributes of path '%1%'") % path);

#if __APPLE__
    /* HFS/macOS has some undocumented security feature disabling hardlinking for
       special files within .app dirs. *.app/Contents/PkgInfo and
       *.app/Contents/Resources/\*.lproj seem to be the only paths affected. See
       https://github.com/NixOS/nix/issues/1443 for more discussion. */

    if (std::regex_search(path, std::regex("\\.app/Contents/.+$")))
    {
        debug(format("'%1%' is not allowed to be linked in macOS") % path);
        return;
    }
#endif

    if (S_ISDIR(st.st_mode)) {
        Strings names = readDirectoryIgnoringInodes(path, inodeHash);
        for (auto & i : names)
            optimisePath_(act, stats, path + "/" + i, inodeHash);
        return;
    }

    /* We can hard link regular files and maybe symlinks. */
    if (!S_ISREG(st.st_mode)
#if CAN_LINK_SYMLINK
        && !S_ISLNK(st.st_mode)
#endif
        ) return;

    /* Sometimes SNAFUs can cause files in the Nix store to be
       modified, in particular when running programs as root under
       NixOS (example: $fontconfig/var/cache being modified).  Skip
       those files.  FIXME: check the modification time. */
    if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
        printError(format("skipping suspicious writable file '%1%'") % path);
        return;
    }

    /* This can still happen on top-level files. */
    if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) {
        debug(format("'%1%' is already linked, with %2% other file(s)") % path % (st.st_nlink - 2));
        return;
    }

    /* Hash the file.  Note that hashPath() returns the hash over the
       NAR serialisation, which includes the execute bit on the file.
       Thus, executable and non-executable files with the same
       contents *won't* be linked (which is good because otherwise the
       permissions would be screwed up).

       Also note that if `path' is a symlink, then we're hashing the
       contents of the symlink (i.e. the result of readlink()), not
       the contents of the target (which may not even exist). */
    Hash hash = hashPath(htSHA256, path).first;
    debug(format("'%1%' has hash '%2%'") % path % hash.to_string());

    /* Check if this is a known hash. */
    Path linkPath = linksDir + "/" + hash.to_string(Base32, false);

 retry:
    if (!pathExists(linkPath)) {
        /* Nope, create a hard link in the links directory. */
        if (link(path.c_str(), linkPath.c_str()) == 0) {
            inodeHash.insert(st.st_ino);
            return;
        }

        switch (errno) {
        case EEXIST:
            /* Fall through if another process created 'linkPath' before
               we did. */
            break;

        case ENOSPC:
            /* On ext4, that probably means the directory index is
               full.  When that happens, it's fine to ignore it: we
               just effectively disable deduplication of this
               file. */
            printInfo("cannot link '%s' to '%s': %s", linkPath, path, strerror(errno));
            return;

        default:
            throw SysError("cannot link '%1%' to '%2%'", linkPath, path);
        }
    }

    /* Yes!  We've seen a file with the same contents.  Replace the
       current file with a hard link to that file. */
    struct stat stLink;
    if (lstat(linkPath.c_str(), &stLink))
        throw SysError(format("getting attributes of path '%1%'") % linkPath);

    if (st.st_ino == stLink.st_ino) {
        debug(format("'%1%' is already linked to '%2%'") % path % linkPath);
        return;
    }

    if (st.st_size != stLink.st_size) {
        printError(format("removing corrupted link '%1%'") % linkPath);
        unlink(linkPath.c_str());
        goto retry;
    }

    printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath);

    /* Make the containing directory writable, but only if it's not
       the store itself (we don't want or need to mess with its
       permissions). */
    bool mustToggle = dirOf(path) != realStoreDir;
    if (mustToggle) makeWritable(dirOf(path));

    /* When we're done, make the directory read-only again and reset
       its timestamp back to 0. */
    MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");

    Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
        % realStoreDir % getpid() % random()).str();

    if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
        if (errno == EMLINK) {
            /* Too many links to the same file (>= 32000 on most file
               systems).  This is likely to happen with empty files.
               Just shrug and ignore. */
            if (st.st_size)
                printInfo(format("'%1%' has maximum number of links") % linkPath);
            return;
        }
        throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath);
    }

    /* Atomically replace the old file with the new hard link. */
    if (rename(tempLink.c_str(), path.c_str()) == -1) {
        if (unlink(tempLink.c_str()) == -1)
            printError(format("unable to unlink '%1%'") % tempLink);
        if (errno == EMLINK) {
            /* Some filesystems generate too many links on the rename,
               rather than on the original link.  (Probably it
               temporarily increases the st_nlink field before
               decreasing it again.) */
            debug("'%s' has reached maximum number of links", linkPath);
            return;
        }
        throw SysError(format("cannot rename '%1%' to '%2%'") % tempLink % path);
    }

    stats.filesLinked++;
    stats.bytesFreed += st.st_size;
    stats.blocksFreed += st.st_blocks;

    if (act)
        act->result(resFileLinked, st.st_size, st.st_blocks);
}
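/* Summary of the deduplication scheme implemented above (descriptive
   comment): every eligible file is hashed over its NAR serialisation
   and registered under the links directory as .links/<base32-hash>.
   Any later file with the same hash is replaced by a hard link to that
   entry; the link is first created under a temporary name and then
   rename()d into place, so readers never observe the path missing. */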
void LocalStore::optimiseStore(OptimiseStats & stats)
{
    Activity act(*logger, actOptimiseStore);

    PathSet paths = queryAllValidPaths();
    InodeHash inodeHash = loadInodeHash();

    act.progress(0, paths.size());

    uint64_t done = 0;

    for (auto & i : paths) {
        addTempRoot(i);
        if (!isValidPath(i)) continue; /* path was GC'ed, probably */
        {
            Activity act(*logger, lvlTalkative, actUnknown, fmt("optimising path '%s'", i));
            optimisePath_(&act, stats, realStoreDir + "/" + baseNameOf(i), inodeHash);
        }
        done++;
        act.progress(done, paths.size());
    }
}

static string showBytes(unsigned long long bytes)
{
    return (format("%.2f MiB") % (bytes / (1024.0 * 1024.0))).str();
}

void LocalStore::optimiseStore()
{
    OptimiseStats stats;

    optimiseStore(stats);

    printInfo(
        format("%1% freed by hard-linking %2% files")
        % showBytes(stats.bytesFreed)
        % stats.filesLinked);
}

void LocalStore::optimisePath(const Path & path)
{
    OptimiseStats stats;
    InodeHash inodeHash;

    if (settings.autoOptimiseStore) optimisePath_(nullptr, stats, path, inodeHash);
}


}
116
third_party/nix/src/libstore/parsed-derivations.cc
vendored
Normal file

@@ -0,0 +1,116 @@
#include "parsed-derivations.hh"

namespace nix {

ParsedDerivation::ParsedDerivation(const Path & drvPath, BasicDerivation & drv)
    : drvPath(drvPath), drv(drv)
{
    /* Parse the __json attribute, if any. */
    auto jsonAttr = drv.env.find("__json");
    if (jsonAttr != drv.env.end()) {
        try {
            structuredAttrs = nlohmann::json::parse(jsonAttr->second);
        } catch (std::exception & e) {
            throw Error("cannot process __json attribute of '%s': %s", drvPath, e.what());
        }
    }
}
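/* Illustrative example (the attribute values are made up): a derivation
   built with structured attributes carries its whole attribute set in a
   single __json environment variable, e.g.

       __json = {"name":"hello","requiredSystemFeatures":["kvm"],
                 "preferLocalBuild":true}

   in which case the typed accessors below read from the parsed JSON
   rather than from the flat string environment. */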
std::optional<std::string> ParsedDerivation::getStringAttr(const std::string & name) const
{
    if (structuredAttrs) {
        auto i = structuredAttrs->find(name);
        if (i == structuredAttrs->end())
            return {};
        else {
            if (!i->is_string())
                throw Error("attribute '%s' of derivation '%s' must be a string", name, drvPath);
            return i->get<std::string>();
        }
    } else {
        auto i = drv.env.find(name);
        if (i == drv.env.end())
            return {};
        else
            return i->second;
    }
}

bool ParsedDerivation::getBoolAttr(const std::string & name, bool def) const
{
    if (structuredAttrs) {
        auto i = structuredAttrs->find(name);
        if (i == structuredAttrs->end())
            return def;
        else {
            if (!i->is_boolean())
                throw Error("attribute '%s' of derivation '%s' must be a Boolean", name, drvPath);
            return i->get<bool>();
        }
    } else {
        auto i = drv.env.find(name);
        if (i == drv.env.end())
            return def;
        else
            return i->second == "1";
    }
}

std::optional<Strings> ParsedDerivation::getStringsAttr(const std::string & name) const
{
    if (structuredAttrs) {
        auto i = structuredAttrs->find(name);
        if (i == structuredAttrs->end())
            return {};
        else {
            if (!i->is_array())
                throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
            Strings res;
            for (auto j = i->begin(); j != i->end(); ++j) {
                if (!j->is_string())
                    throw Error("attribute '%s' of derivation '%s' must be a list of strings", name, drvPath);
                res.push_back(j->get<std::string>());
            }
            return res;
        }
    } else {
        auto i = drv.env.find(name);
        if (i == drv.env.end())
            return {};
        else
            return tokenizeString<Strings>(i->second);
    }
}

StringSet ParsedDerivation::getRequiredSystemFeatures() const
{
    StringSet res;
    for (auto & i : getStringsAttr("requiredSystemFeatures").value_or(Strings()))
        res.insert(i);
    return res;
}

bool ParsedDerivation::canBuildLocally() const
{
    if (drv.platform != settings.thisSystem.get()
        && !settings.extraPlatforms.get().count(drv.platform)
        && !drv.isBuiltin())
        return false;

    for (auto & feature : getRequiredSystemFeatures())
        if (!settings.systemFeatures.get().count(feature)) return false;

    return true;
}

bool ParsedDerivation::willBuildLocally() const
{
    return getBoolAttr("preferLocalBuild") && canBuildLocally();
}

bool ParsedDerivation::substitutesAllowed() const
{
    return getBoolAttr("allowSubstitutes", true);
}

}
37
third_party/nix/src/libstore/parsed-derivations.hh
vendored
Normal file

@@ -0,0 +1,37 @@
#include "derivations.hh"

#include <nlohmann/json.hpp>

namespace nix {

class ParsedDerivation
{
    Path drvPath;
    BasicDerivation & drv;
    std::optional<nlohmann::json> structuredAttrs;

public:

    ParsedDerivation(const Path & drvPath, BasicDerivation & drv);

    const std::optional<nlohmann::json> & getStructuredAttrs() const
    {
        return structuredAttrs;
    }

    std::optional<std::string> getStringAttr(const std::string & name) const;

    bool getBoolAttr(const std::string & name, bool def = false) const;

    std::optional<Strings> getStringsAttr(const std::string & name) const;

    StringSet getRequiredSystemFeatures() const;

    bool canBuildLocally() const;

    bool willBuildLocally() const;

    bool substitutesAllowed() const;
};

}
178
third_party/nix/src/libstore/pathlocks.cc
vendored
Normal file

@@ -0,0 +1,178 @@
#include "pathlocks.hh"
#include "util.hh"
#include "sync.hh"

#include <cerrno>
#include <cstdlib>

#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/file.h>


namespace nix {


AutoCloseFD openLockFile(const Path & path, bool create)
{
    AutoCloseFD fd;

    fd = open(path.c_str(), O_CLOEXEC | O_RDWR | (create ? O_CREAT : 0), 0600);
    if (!fd && (create || errno != ENOENT))
        throw SysError(format("opening lock file '%1%'") % path);

    return fd;
}


void deleteLockFile(const Path & path, int fd)
{
    /* Get rid of the lock file.  Have to be careful not to introduce
       races.  Write a (meaningless) token to the file to indicate to
       other processes waiting on this lock that the lock is stale
       (deleted). */
    unlink(path.c_str());
    writeFull(fd, "d");
    /* Note that the result of unlink() is ignored; removing the lock
       file is an optimisation, not a necessity. */
}


bool lockFile(int fd, LockType lockType, bool wait)
{
    int type;
    if (lockType == ltRead) type = LOCK_SH;
    else if (lockType == ltWrite) type = LOCK_EX;
    else if (lockType == ltNone) type = LOCK_UN;
    else abort();

    if (wait) {
        while (flock(fd, type) != 0) {
            checkInterrupt();
            if (errno != EINTR)
                throw SysError(format("acquiring/releasing lock"));
            else
                return false;
        }
    } else {
        while (flock(fd, type | LOCK_NB) != 0) {
            checkInterrupt();
            if (errno == EWOULDBLOCK) return false;
            if (errno != EINTR)
                throw SysError(format("acquiring/releasing lock"));
        }
    }

    return true;
}
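/* Illustrative usage sketch of the two primitives above (the path and
   control flow here are hypothetical, not taken from this file):

       AutoCloseFD fd = openLockFile("/tmp/example.lock", true);
       if (lockFile(fd.get(), ltWrite, true)) {
           // ... critical section ...
       }   // lock released when fd is closed

   flock() locks belong to the open file description, so closing the
   descriptor is sufficient to release the lock. */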
PathLocks::PathLocks()
    : deletePaths(false)
{
}


PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
    : deletePaths(false)
{
    lockPaths(paths, waitMsg);
}


bool PathLocks::lockPaths(const PathSet & paths,
    const string & waitMsg, bool wait)
{
    assert(fds.empty());

    /* Note that `fds' is built incrementally so that the destructor
       will only release those locks that we have already acquired. */

    /* Acquire the lock for each path in sorted order.  This ensures
       that locks are always acquired in the same order, thus
       preventing deadlocks. */
    for (auto & path : paths) {
        checkInterrupt();
        Path lockPath = path + ".lock";

        debug(format("locking path '%1%'") % path);

        AutoCloseFD fd;

        while (1) {

            /* Open/create the lock file. */
            fd = openLockFile(lockPath, true);

            /* Acquire an exclusive lock. */
            if (!lockFile(fd.get(), ltWrite, false)) {
                if (wait) {
                    if (waitMsg != "") printError(waitMsg);
                    lockFile(fd.get(), ltWrite, true);
                } else {
                    /* Failed to lock this path; release all other
                       locks. */
                    unlock();
                    return false;
                }
            }

            debug(format("lock acquired on '%1%'") % lockPath);

            /* Check that the lock file hasn't become stale (i.e.,
               hasn't been unlinked). */
            struct stat st;
            if (fstat(fd.get(), &st) == -1)
                throw SysError(format("statting lock file '%1%'") % lockPath);
            if (st.st_size != 0)
                /* This lock file has been unlinked, so we're holding
                   a lock on a deleted file.  This means that other
                   processes may create and acquire a lock on
                   `lockPath', and proceed.  So we must retry. */
                debug(format("open lock file '%1%' has become stale") % lockPath);
            else
                break;
        }

        /* Use borrow so that the descriptor isn't closed. */
        fds.push_back(FDPair(fd.release(), lockPath));
    }

    return true;
}


PathLocks::~PathLocks()
{
    try {
        unlock();
    } catch (...) {
        ignoreException();
    }
}


void PathLocks::unlock()
{
    for (auto & i : fds) {
        if (deletePaths) deleteLockFile(i.second, i.first);

        if (close(i.first) == -1)
            printError(
                format("error (ignored): cannot close lock file on '%1%'") % i.second);

        debug(format("lock released on '%1%'") % i.second);
    }

    fds.clear();
}


void PathLocks::setDeletion(bool deletePaths)
{
    this->deletePaths = deletePaths;
}


}
38
third_party/nix/src/libstore/pathlocks.hh
vendored
Normal file

@@ -0,0 +1,38 @@
#pragma once

#include "util.hh"

namespace nix {

/* Open (possibly create) a lock file and return the file descriptor.
   -1 is returned if create is false and the lock could not be opened
   because it doesn't exist.  Any other error throws an exception. */
AutoCloseFD openLockFile(const Path & path, bool create);

/* Delete an open lock file. */
void deleteLockFile(const Path & path, int fd);

enum LockType { ltRead, ltWrite, ltNone };

bool lockFile(int fd, LockType lockType, bool wait);

class PathLocks
{
private:
    typedef std::pair<int, Path> FDPair;
    list<FDPair> fds;
    bool deletePaths;

public:
    PathLocks();
    PathLocks(const PathSet & paths,
        const string & waitMsg = "");
    bool lockPaths(const PathSet & _paths,
        const string & waitMsg = "",
        bool wait = true);
    ~PathLocks();
    void unlock();
    void setDeletion(bool deletePaths);
};

}
259
third_party/nix/src/libstore/profiles.cc
vendored
Normal file

@@ -0,0 +1,259 @@
#include "profiles.hh"
#include "store-api.hh"
#include "util.hh"

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>


namespace nix {


static bool cmpGensByNumber(const Generation & a, const Generation & b)
{
    return a.number < b.number;
}


/* Parse a generation name of the format
   `<profilename>-<number>-link'. */
static int parseName(const string & profileName, const string & name)
{
    if (string(name, 0, profileName.size() + 1) != profileName + "-") return -1;
    string s = string(name, profileName.size() + 1);
    string::size_type p = s.find("-link");
    if (p == string::npos) return -1;
    int n;
    if (string2Int(string(s, 0, p), n) && n >= 0)
        return n;
    else
        return -1;
}
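/* Example (illustrative): for profileName = "default", the name
   "default-42-link" parses to generation 42, while "default-foo-link"
   and "other-42-link" both yield -1. */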
Generations findGenerations(Path profile, int & curGen)
{
    Generations gens;

    Path profileDir = dirOf(profile);
    string profileName = baseNameOf(profile);

    for (auto & i : readDirectory(profileDir)) {
        int n;
        if ((n = parseName(profileName, i.name)) != -1) {
            Generation gen;
            gen.path = profileDir + "/" + i.name;
            gen.number = n;
            struct stat st;
            if (lstat(gen.path.c_str(), &st) != 0)
                throw SysError(format("statting '%1%'") % gen.path);
            gen.creationTime = st.st_mtime;
            gens.push_back(gen);
        }
    }

    gens.sort(cmpGensByNumber);

    curGen = pathExists(profile)
        ? parseName(profileName, readLink(profile))
        : -1;

    return gens;
}


static void makeName(const Path & profile, unsigned int num,
    Path & outLink)
{
    Path prefix = (format("%1%-%2%") % profile % num).str();
    outLink = prefix + "-link";
}


Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath)
{
    /* The new generation number should be higher than the previous
       ones. */
    int dummy;
    Generations gens = findGenerations(profile, dummy);

    unsigned int num;
    if (gens.size() > 0) {
        Generation last = gens.back();

        if (readLink(last.path) == outPath) {
            /* We only create a new generation symlink if it differs
               from the last one.

               This helps keeping gratuitous installs/rebuilds from piling
               up uncontrolled numbers of generations, cluttering up the
               UI like grub. */
            return last.path;
        }

        num = gens.back().number;
    } else {
        num = 0;
    }

    /* Create the new generation.  Note that addPermRoot() blocks if
       the garbage collector is running to prevent the stuff we've
       built from moving from the temporary roots (which the GC knows)
       to the permanent roots (of which the GC would have a stale
       view).  If we didn't do it this way, the GC might remove the
       user environment etc. we've just built. */
    Path generation;
    makeName(profile, num + 1, generation);
    store->addPermRoot(outPath, generation, false, true);

    return generation;
}


static void removeFile(const Path & path)
{
    if (remove(path.c_str()) == -1)
        throw SysError(format("cannot unlink '%1%'") % path);
}


void deleteGeneration(const Path & profile, unsigned int gen)
{
    Path generation;
    makeName(profile, gen, generation);
    removeFile(generation);
}


static void deleteGeneration2(const Path & profile, unsigned int gen, bool dryRun)
{
    if (dryRun)
        printInfo(format("would remove generation %1%") % gen);
    else {
        printInfo(format("removing generation %1%") % gen);
        deleteGeneration(profile, gen);
    }
}


void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun)
{
    PathLocks lock;
    lockProfile(lock, profile);

    int curGen;
    Generations gens = findGenerations(profile, curGen);

    if (gensToDelete.find(curGen) != gensToDelete.end())
        throw Error(format("cannot delete current generation of profile '%1%'") % profile);

    for (auto & i : gens) {
        if (gensToDelete.find(i.number) == gensToDelete.end()) continue;
        deleteGeneration2(profile, i.number, dryRun);
    }
}

void deleteGenerationsGreaterThan(const Path & profile, int max, bool dryRun)
{
    PathLocks lock;
    lockProfile(lock, profile);

    int curGen;
    bool fromCurGen = false;
    Generations gens = findGenerations(profile, curGen);
    for (auto i = gens.rbegin(); i != gens.rend(); ++i) {
        if (i->number == curGen) {
            fromCurGen = true;
            max--;
            continue;
        }
        if (fromCurGen) {
            if (max) {
                max--;
                continue;
            }
            deleteGeneration2(profile, i->number, dryRun);
        }
    }
}

void deleteOldGenerations(const Path & profile, bool dryRun)
{
    PathLocks lock;
    lockProfile(lock, profile);

    int curGen;
    Generations gens = findGenerations(profile, curGen);

    for (auto & i : gens)
        if (i.number != curGen)
            deleteGeneration2(profile, i.number, dryRun);
}


void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun)
{
    PathLocks lock;
    lockProfile(lock, profile);

    int curGen;
    Generations gens = findGenerations(profile, curGen);

    bool canDelete = false;
    for (auto i = gens.rbegin(); i != gens.rend(); ++i)
        if (canDelete) {
            assert(i->creationTime < t);
            if (i->number != curGen)
                deleteGeneration2(profile, i->number, dryRun);
        } else if (i->creationTime < t) {
            /* We may now start deleting generations, but we don't
               delete this generation yet, because this generation was
               still the one that was active at the requested point in
               time. */
            canDelete = true;
        }
}


void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun)
{
    time_t curTime = time(0);
    string strDays = string(timeSpec, 0, timeSpec.size() - 1);
    int days;

    if (!string2Int(strDays, days) || days < 1)
        throw Error(format("invalid number of days specifier '%1%'") % timeSpec);

    time_t oldTime = curTime - days * 24 * 3600;

    deleteGenerationsOlderThan(profile, oldTime, dryRun);
}


void switchLink(Path link, Path target)
{
    /* Hacky. */
    if (dirOf(target) == dirOf(link)) target = baseNameOf(target);

    replaceSymlink(target, link);
}


void lockProfile(PathLocks & lock, const Path & profile)
{
    lock.lockPaths({profile}, (format("waiting for lock on profile '%1%'") % profile).str());
    lock.setDeletion(true);
}


string optimisticLockProfile(const Path & profile)
{
    return pathExists(profile) ? readLink(profile) : "";
}


}
67
third_party/nix/src/libstore/profiles.hh
vendored
Normal file

@@ -0,0 +1,67 @@
#pragma once

#include "types.hh"
#include "pathlocks.hh"

#include <time.h>


namespace nix {


struct Generation
{
    int number;
    Path path;
    time_t creationTime;
    Generation()
    {
        number = -1;
    }
    operator bool() const
    {
        return number != -1;
    }
};

typedef list<Generation> Generations;


/* Returns the list of currently present generations for the specified
   profile, sorted by generation number. */
Generations findGenerations(Path profile, int & curGen);

class LocalFSStore;

Path createGeneration(ref<LocalFSStore> store, Path profile, Path outPath);

void deleteGeneration(const Path & profile, unsigned int gen);

void deleteGenerations(const Path & profile, const std::set<unsigned int> & gensToDelete, bool dryRun);

void deleteGenerationsGreaterThan(const Path & profile, const int max, bool dryRun);

void deleteOldGenerations(const Path & profile, bool dryRun);

void deleteGenerationsOlderThan(const Path & profile, time_t t, bool dryRun);

void deleteGenerationsOlderThan(const Path & profile, const string & timeSpec, bool dryRun);

void switchLink(Path link, Path target);

/* Ensure exclusive access to a profile.  Any command that modifies
   the profile first acquires this lock. */
void lockProfile(PathLocks & lock, const Path & profile);

/* Optimistic locking is used by long-running operations like `nix-env
   -i'.  Instead of acquiring the exclusive lock for the entire
   duration of the operation, we just perform the operation
   optimistically (without an exclusive lock), and check at the end
   whether the profile changed while we were busy (i.e., the symlink
   target changed).  If so, the operation is restarted.  Restarting is
   generally cheap, since the build results are still in the Nix
   store.  Most of the time, only the user environment has to be
   rebuilt. */
string optimisticLockProfile(const Path & profile);
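/* Typical restart loop built on top of this primitive (illustrative
   sketch; the variable names and build step are hypothetical):

       while (true) {
           string lockToken = optimisticLockProfile(profile);
           Path newEnv = ...;  // build the new user environment
           if (optimisticLockProfile(profile) != lockToken) continue;
           switchLink(profile, createGeneration(store, profile, newEnv));
           break;
       }
*/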

}
122
third_party/nix/src/libstore/references.cc
vendored
Normal file

@@ -0,0 +1,122 @@
#include "references.hh"
#include "hash.hh"
#include "util.hh"
#include "archive.hh"

#include <map>
#include <cstdlib>


namespace nix {


static unsigned int refLength = 32; /* characters */


static void search(const unsigned char * s, size_t len,
    StringSet & hashes, StringSet & seen)
{
    static bool initialised = false;
    static bool isBase32[256];
    if (!initialised) {
        for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
        for (unsigned int i = 0; i < base32Chars.size(); ++i)
            isBase32[(unsigned char) base32Chars[i]] = true;
        initialised = true;
    }

    for (size_t i = 0; i + refLength <= len; ) {
        int j;
        bool match = true;
        for (j = refLength - 1; j >= 0; --j)
            if (!isBase32[(unsigned char) s[i + j]]) {
                i += j + 1;
                match = false;
                break;
            }
        if (!match) continue;
        string ref((const char *) s + i, refLength);
        if (hashes.find(ref) != hashes.end()) {
            debug(format("found reference to '%1%' at offset '%2%'")
                % ref % i);
            seen.insert(ref);
            hashes.erase(ref);
        }
        ++i;
    }
}


struct RefScanSink : Sink
{
    HashSink hashSink;
    StringSet hashes;
    StringSet seen;

    string tail;

    RefScanSink() : hashSink(htSHA256) { }

    void operator () (const unsigned char * data, size_t len);
};


void RefScanSink::operator () (const unsigned char * data, size_t len)
{
    hashSink(data, len);

    /* It's possible that a reference spans the previous and current
       fragment, so search in the concatenation of the tail of the
       previous fragment and the start of the current fragment. */
    string s = tail + string((const char *) data, len > refLength ? refLength : len);
    search((const unsigned char *) s.data(), s.size(), hashes, seen);

    search(data, len, hashes, seen);

    size_t tailLen = len <= refLength ? len : refLength;
    tail =
        string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
        string((const char *) data + len - tailLen, tailLen);
}
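/* Boundary handling, by example (illustrative): with refLength = 32,
   suppose one fragment ends with the first 10 characters of a hash and
   the next fragment starts with the remaining 22.  Scanning either
   fragment alone misses the reference, but `tail' retains up to 32
   trailing characters of the previous fragment, so the extra scan of
   tail + the first 32 bytes of the new fragment sees the full hash. */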
PathSet scanForReferences(const string & path,
    const PathSet & refs, HashResult & hash)
{
    RefScanSink sink;
    std::map<string, Path> backMap;

    /* For efficiency (and a higher hit rate), just search for the
       hash part of the file name.  (This assumes that all references
       have the form `HASH-bla'.) */
    for (auto & i : refs) {
        string baseName = baseNameOf(i);
        string::size_type pos = baseName.find('-');
        if (pos == string::npos)
            throw Error(format("bad reference '%1%'") % i);
        string s = string(baseName, 0, pos);
        assert(s.size() == refLength);
        assert(backMap.find(s) == backMap.end());
        // parseHash(htSHA256, s);
        sink.hashes.insert(s);
        backMap[s] = i;
    }

    /* Look for the hashes in the NAR dump of the path. */
    dumpPath(path, sink);

    /* Map the hashes found back to their store paths. */
    PathSet found;
    for (auto & i : sink.seen) {
        std::map<string, Path>::iterator j;
        if ((j = backMap.find(i)) == backMap.end()) abort();
        found.insert(j->second);
    }

    hash = sink.hashSink.finish();

    return found;
}


}
11
third_party/nix/src/libstore/references.hh
vendored
Normal file

@@ -0,0 +1,11 @@
#pragma once

#include "types.hh"
#include "hash.hh"

namespace nix {

PathSet scanForReferences(const Path & path, const PathSet & refs,
    HashResult & hash);

}
129
third_party/nix/src/libstore/remote-fs-accessor.cc
vendored
Normal file

@@ -0,0 +1,129 @@
#include "remote-fs-accessor.hh"
#include "nar-accessor.hh"
#include "json.hh"

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

namespace nix {

RemoteFSAccessor::RemoteFSAccessor(ref<Store> store, const Path & cacheDir)
    : store(store)
    , cacheDir(cacheDir)
{
    if (cacheDir != "")
        createDirs(cacheDir);
}

Path RemoteFSAccessor::makeCacheFile(const Path & storePath, const std::string & ext)
{
    assert(cacheDir != "");
    return fmt("%s/%s.%s", cacheDir, storePathToHash(storePath), ext);
}

void RemoteFSAccessor::addToCache(const Path & storePath, const std::string & nar,
    ref<FSAccessor> narAccessor)
{
    nars.emplace(storePath, narAccessor);

    if (cacheDir != "") {
        try {
            std::ostringstream str;
            JSONPlaceholder jsonRoot(str);
            listNar(jsonRoot, narAccessor, "", true);
            writeFile(makeCacheFile(storePath, "ls"), str.str());

            /* FIXME: do this asynchronously. */
            writeFile(makeCacheFile(storePath, "nar"), nar);

        } catch (...) {
            ignoreException();
        }
    }
}

std::pair<ref<FSAccessor>, Path> RemoteFSAccessor::fetch(const Path & path_)
{
    auto path = canonPath(path_);

    auto storePath = store->toStorePath(path);
    std::string restPath = std::string(path, storePath.size());

    if (!store->isValidPath(storePath))
        throw InvalidPath(format("path '%1%' is not a valid store path") % storePath);

    auto i = nars.find(storePath);
    if (i != nars.end()) return {i->second, restPath};

    StringSink sink;
    std::string listing;
    Path cacheFile;

    if (cacheDir != "" && pathExists(cacheFile = makeCacheFile(storePath, "nar"))) {

        try {
            listing = nix::readFile(makeCacheFile(storePath, "ls"));

            auto narAccessor = makeLazyNarAccessor(listing,
                [cacheFile](uint64_t offset, uint64_t length) {

                    AutoCloseFD fd = open(cacheFile.c_str(), O_RDONLY | O_CLOEXEC);
                    if (!fd)
                        throw SysError("opening NAR cache file '%s'", cacheFile);

                    if (lseek(fd.get(), offset, SEEK_SET) != (off_t) offset)
                        throw SysError("seeking in '%s'", cacheFile);

                    std::string buf(length, 0);
                    readFull(fd.get(), (unsigned char *) buf.data(), length);

                    return buf;
                });

            nars.emplace(storePath, narAccessor);
            return {narAccessor, restPath};

        } catch (SysError &) { }

        try {
            *sink.s = nix::readFile(cacheFile);

            auto narAccessor = makeNarAccessor(sink.s);
            nars.emplace(storePath, narAccessor);
            return {narAccessor, restPath};

        } catch (SysError &) { }
    }

    store->narFromPath(storePath, sink);
    auto narAccessor = makeNarAccessor(sink.s);
    addToCache(storePath, *sink.s, narAccessor);
    return {narAccessor, restPath};
}
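/* fetch() resolves a path through three layers, cheapest first
   (descriptive summary): the in-memory `nars' map; then the on-disk
   cache directory, preferring a lazy accessor over the cached .nar
   plus its .ls listing and falling back to reading the whole file;
   and finally a full narFromPath() download from the remote store,
   whose result is then cached for the next lookup. */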
FSAccessor::Stat RemoteFSAccessor::stat(const Path & path)
{
    auto res = fetch(path);
    return res.first->stat(res.second);
}

StringSet RemoteFSAccessor::readDirectory(const Path & path)
{
    auto res = fetch(path);
    return res.first->readDirectory(res.second);
}

std::string RemoteFSAccessor::readFile(const Path & path)
{
    auto res = fetch(path);
    return res.first->readFile(res.second);
}

std::string RemoteFSAccessor::readLink(const Path & path)
{
    auto res = fetch(path);
    return res.first->readLink(res.second);
}

}
40
third_party/nix/src/libstore/remote-fs-accessor.hh
vendored
Normal file

@@ -0,0 +1,40 @@
#pragma once

#include "fs-accessor.hh"
#include "ref.hh"
#include "store-api.hh"

namespace nix {

class RemoteFSAccessor : public FSAccessor
{
    ref<Store> store;

    std::map<Path, ref<FSAccessor>> nars;

    Path cacheDir;

    std::pair<ref<FSAccessor>, Path> fetch(const Path & path_);

    friend class BinaryCacheStore;

    Path makeCacheFile(const Path & storePath, const std::string & ext);

    void addToCache(const Path & storePath, const std::string & nar,
        ref<FSAccessor> narAccessor);

public:

    RemoteFSAccessor(ref<Store> store,
        const /* FIXME: use std::optional */ Path & cacheDir = "");

    Stat stat(const Path & path) override;

    StringSet readDirectory(const Path & path) override;

    std::string readFile(const Path & path) override;

    std::string readLink(const Path & path) override;
};

}
817
third_party/nix/src/libstore/remote-store.cc
vendored
Normal file

@@ -0,0 +1,817 @@
#include "serialise.hh"
#include "util.hh"
#include "remote-store.hh"
#include "worker-protocol.hh"
#include "archive.hh"
#include "affinity.hh"
#include "globals.hh"
#include "derivations.hh"
#include "pool.hh"
#include "finally.hh"

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#include <cstring>

namespace nix {


Path readStorePath(Store & store, Source & from)
{
    Path path = readString(from);
    store.assertStorePath(path);
    return path;
}


template<class T> T readStorePaths(Store & store, Source & from)
{
    T paths = readStrings<T>(from);
    for (auto & i : paths) store.assertStorePath(i);
    return paths;
}

template PathSet readStorePaths(Store & store, Source & from);
template Paths readStorePaths(Store & store, Source & from);

/* TODO: Separate these store impls into different files, give them better names */
RemoteStore::RemoteStore(const Params & params)
    : Store(params)
    , connections(make_ref<Pool<Connection>>(
            std::max(1, (int) maxConnections),
            [this]() { return openConnectionWrapper(); },
            [this](const ref<Connection> & r) {
                return
                    r->to.good()
                    && r->from.good()
                    && std::chrono::duration_cast<std::chrono::seconds>(
                        std::chrono::steady_clock::now() - r->startTime).count() < maxConnectionAge;
            }
            ))
{
}


ref<RemoteStore::Connection> RemoteStore::openConnectionWrapper()
{
    if (failed)
        throw Error("opening a connection to remote store '%s' previously failed", getUri());
    try {
        return openConnection();
    } catch (...) {
        failed = true;
        throw;
    }
}


UDSRemoteStore::UDSRemoteStore(const Params & params)
    : Store(params)
    , LocalFSStore(params)
    , RemoteStore(params)
{
}


UDSRemoteStore::UDSRemoteStore(std::string socket_path, const Params & params)
    : Store(params)
    , LocalFSStore(params)
    , RemoteStore(params)
    , path(socket_path)
{
}


std::string UDSRemoteStore::getUri()
{
    if (path) {
        return std::string("unix://") + *path;
    } else {
        return "daemon";
    }
}


ref<RemoteStore::Connection> UDSRemoteStore::openConnection()
{
    auto conn = make_ref<Connection>();

    /* Connect to a daemon that does the privileged work for us. */
    conn->fd = socket(PF_UNIX, SOCK_STREAM
#ifdef SOCK_CLOEXEC
        | SOCK_CLOEXEC
#endif
        , 0);
    if (!conn->fd)
        throw SysError("cannot create Unix domain socket");
    closeOnExec(conn->fd.get());

    string socketPath = path ? *path : settings.nixDaemonSocketFile;

    struct sockaddr_un addr;
    addr.sun_family = AF_UNIX;
    if (socketPath.size() + 1 >= sizeof(addr.sun_path))
        throw Error(format("socket path '%1%' is too long") % socketPath);
    strcpy(addr.sun_path, socketPath.c_str());

    if (::connect(conn->fd.get(), (struct sockaddr *) &addr, sizeof(addr)) == -1)
        throw SysError(format("cannot connect to daemon at '%1%'") % socketPath);

    conn->from.fd = conn->fd.get();
    conn->to.fd = conn->fd.get();

    conn->startTime = std::chrono::steady_clock::now();

    initConnection(*conn);

    return conn;
}


void RemoteStore::initConnection(Connection & conn)
{
    /* Send the magic greeting, check for the reply. */
    try {
        conn.to << WORKER_MAGIC_1;
        conn.to.flush();
        unsigned int magic = readInt(conn.from);
        if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");

        conn.from >> conn.daemonVersion;
        if (GET_PROTOCOL_MAJOR(conn.daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
            throw Error("Nix daemon protocol version not supported");
        if (GET_PROTOCOL_MINOR(conn.daemonVersion) < 10)
            throw Error("the Nix daemon version is too old");
        conn.to << PROTOCOL_VERSION;

        if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 14) {
            int cpu = sameMachine() && settings.lockCPU ? lockToCurrentCPU() : -1;
            if (cpu != -1)
                conn.to << 1 << cpu;
            else
                conn.to << 0;
        }

        if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 11)
            conn.to << false;

        auto ex = conn.processStderr();
        if (ex) std::rethrow_exception(ex);
    }
    catch (Error & e) {
        throw Error("cannot open connection to remote store '%s': %s", getUri(), e.what());
    }

    setOptions(conn);
}
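/* Handshake sequence, summarised (client on the left; descriptive
   comment only):

       --> WORKER_MAGIC_1
       <-- WORKER_MAGIC_2
       <-- daemon protocol version
       --> PROTOCOL_VERSION (client version)
       --> optional CPU affinity hint   (daemon minor >= 14)
       --> obsolete reserve-space flag, always false (daemon minor >= 11)

   Only the major version must match exactly; the daemon's minor
   version, as seen by the client, gates the optional fields in later
   messages. */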
|
||||
|
||||
void RemoteStore::setOptions(Connection & conn)
|
||||
{
|
||||
conn.to << wopSetOptions
|
||||
<< settings.keepFailed
|
||||
<< settings.keepGoing
|
||||
<< settings.tryFallback
|
||||
<< verbosity
|
||||
<< settings.maxBuildJobs
|
||||
<< settings.maxSilentTime
|
||||
<< true
|
||||
<< (settings.verboseBuild ? lvlError : lvlVomit)
|
||||
<< 0 // obsolete log type
|
||||
<< 0 /* obsolete print build trace */
|
||||
<< settings.buildCores
|
||||
<< settings.useSubstitutes;
|
||||
|
||||
if (GET_PROTOCOL_MINOR(conn.daemonVersion) >= 12) {
|
||||
std::map<std::string, Config::SettingInfo> overrides;
|
||||
globalConfig.getSettings(overrides, true);
|
||||
overrides.erase(settings.keepFailed.name);
|
||||
overrides.erase(settings.keepGoing.name);
|
||||
overrides.erase(settings.tryFallback.name);
|
||||
overrides.erase(settings.maxBuildJobs.name);
|
||||
overrides.erase(settings.maxSilentTime.name);
|
||||
overrides.erase(settings.buildCores.name);
|
||||
overrides.erase(settings.useSubstitutes.name);
|
||||
overrides.erase(settings.showTrace.name);
|
||||
conn.to << overrides.size();
|
||||
for (auto & i : overrides)
|
||||
conn.to << i.first << i.second.value;
|
||||
}
|
||||
|
||||
auto ex = conn.processStderr();
|
||||
if (ex) std::rethrow_exception(ex);
|
||||
}
|
||||
|
||||
|
||||
/* A wrapper around Pool<RemoteStore::Connection>::Handle that marks
|
||||
the connection as bad (causing it to be closed) if a non-daemon
|
||||
exception is thrown before the handle is closed. Such an exception
|
||||
causes a deviation from the expected protocol and therefore a
|
||||
desynchronization between the client and daemon. */
|
||||
struct ConnectionHandle
|
||||
{
|
||||
Pool<RemoteStore::Connection>::Handle handle;
|
||||
bool daemonException = false;
|
||||
|
||||
ConnectionHandle(Pool<RemoteStore::Connection>::Handle && handle)
|
||||
: handle(std::move(handle))
|
||||
{ }
|
||||
|
||||
ConnectionHandle(ConnectionHandle && h)
|
||||
: handle(std::move(h.handle))
|
||||
{ }
|
||||
|
||||
~ConnectionHandle()
|
||||
{
|
||||
if (!daemonException && std::uncaught_exception()) {
|
||||
handle.markBad();
|
||||
debug("closing daemon connection because of an exception");
|
||||
}
|
||||
}
|
||||
|
||||
RemoteStore::Connection * operator -> () { return &*handle; }
|
||||
|
||||
void processStderr(Sink * sink = 0, Source * source = 0)
|
||||
{
|
||||
auto ex = handle->processStderr(sink, source);
|
||||
if (ex) {
|
||||
daemonException = true;
|
||||
std::rethrow_exception(ex);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
ConnectionHandle RemoteStore::getConnection()
|
||||
{
|
||||
return ConnectionHandle(connections->get());
|
||||
}
|
||||
|
||||
|
||||
bool RemoteStore::isValidPathUncached(const Path & path)
|
||||
{
|
||||
auto conn(getConnection());
|
||||
conn->to << wopIsValidPath << path;
|
||||
conn.processStderr();
|
||||
return readInt(conn->from);
|
||||
}
|
||||
|
||||
|
||||
PathSet RemoteStore::queryValidPaths(const PathSet & paths, SubstituteFlag maybeSubstitute)
|
||||
{
|
||||
auto conn(getConnection());
|
||||
if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
|
||||
PathSet res;
|
||||
for (auto & i : paths)
|
||||
if (isValidPath(i)) res.insert(i);
|
||||
return res;
|
||||
} else {
|
||||
conn->to << wopQueryValidPaths << paths;
|
||||
conn.processStderr();
|
||||
return readStorePaths<PathSet>(*this, conn->from);
|
||||
}
|
||||
}


PathSet RemoteStore::queryAllValidPaths()
{
    auto conn(getConnection());
    conn->to << wopQueryAllValidPaths;
    conn.processStderr();
    return readStorePaths<PathSet>(*this, conn->from);
}


PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
{
    auto conn(getConnection());
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {
        PathSet res;
        for (auto & i : paths) {
            conn->to << wopHasSubstitutes << i;
            conn.processStderr();
            if (readInt(conn->from)) res.insert(i);
        }
        return res;
    } else {
        conn->to << wopQuerySubstitutablePaths << paths;
        conn.processStderr();
        return readStorePaths<PathSet>(*this, conn->from);
    }
}


void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
    SubstitutablePathInfos & infos)
{
    if (paths.empty()) return;

    auto conn(getConnection());

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 12) {

        for (auto & i : paths) {
            SubstitutablePathInfo info;
            conn->to << wopQuerySubstitutablePathInfo << i;
            conn.processStderr();
            unsigned int reply = readInt(conn->from);
            if (reply == 0) continue;
            info.deriver = readString(conn->from);
            if (info.deriver != "") assertStorePath(info.deriver);
            info.references = readStorePaths<PathSet>(*this, conn->from);
            info.downloadSize = readLongLong(conn->from);
            info.narSize = readLongLong(conn->from);
            infos[i] = info;
        }

    } else {

        conn->to << wopQuerySubstitutablePathInfos << paths;
        conn.processStderr();
        size_t count = readNum<size_t>(conn->from);
        for (size_t n = 0; n < count; n++) {
            Path path = readStorePath(*this, conn->from);
            SubstitutablePathInfo & info(infos[path]);
            info.deriver = readString(conn->from);
            if (info.deriver != "") assertStorePath(info.deriver);
            info.references = readStorePaths<PathSet>(*this, conn->from);
            info.downloadSize = readLongLong(conn->from);
            info.narSize = readLongLong(conn->from);
        }

    }
}


void RemoteStore::queryPathInfoUncached(const Path & path,
    Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept
{
    try {
        std::shared_ptr<ValidPathInfo> info;
        {
            auto conn(getConnection());
            conn->to << wopQueryPathInfo << path;
            try {
                conn.processStderr();
            } catch (Error & e) {
                // Ugly backwards compatibility hack.
                if (e.msg().find("is not valid") != std::string::npos)
                    throw InvalidPath(e.what());
                throw;
            }
            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 17) {
                bool valid; conn->from >> valid;
                if (!valid) throw InvalidPath(format("path '%s' is not valid") % path);
            }
            info = std::make_shared<ValidPathInfo>();
            info->path = path;
            info->deriver = readString(conn->from);
            if (info->deriver != "") assertStorePath(info->deriver);
            info->narHash = Hash(readString(conn->from), htSHA256);
            info->references = readStorePaths<PathSet>(*this, conn->from);
            conn->from >> info->registrationTime >> info->narSize;
            if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 16) {
                conn->from >> info->ultimate;
                info->sigs = readStrings<StringSet>(conn->from);
                conn->from >> info->ca;
            }
        }
        callback(std::move(info));
    } catch (...) { callback.rethrow(); }
}
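
// Although the Callback signature permits asynchronous implementations
// (e.g. in binary cache stores), this implementation is synchronous:
// the inner scope releases the connection back to the pool before the
// callback is invoked.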


void RemoteStore::queryReferrers(const Path & path,
    PathSet & referrers)
{
    auto conn(getConnection());
    conn->to << wopQueryReferrers << path;
    conn.processStderr();
    PathSet referrers2 = readStorePaths<PathSet>(*this, conn->from);
    referrers.insert(referrers2.begin(), referrers2.end());
}


PathSet RemoteStore::queryValidDerivers(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopQueryValidDerivers << path;
    conn.processStderr();
    return readStorePaths<PathSet>(*this, conn->from);
}


PathSet RemoteStore::queryDerivationOutputs(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopQueryDerivationOutputs << path;
    conn.processStderr();
    return readStorePaths<PathSet>(*this, conn->from);
}


PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopQueryDerivationOutputNames << path;
    conn.processStderr();
    return readStrings<PathSet>(conn->from);
}


Path RemoteStore::queryPathFromHashPart(const string & hashPart)
{
    auto conn(getConnection());
    conn->to << wopQueryPathFromHashPart << hashPart;
    conn.processStderr();
    Path path = readString(conn->from);
    if (!path.empty()) assertStorePath(path);
    return path;
}


void RemoteStore::addToStore(const ValidPathInfo & info, Source & source,
    RepairFlag repair, CheckSigsFlag checkSigs, std::shared_ptr<FSAccessor> accessor)
{
    auto conn(getConnection());

    if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 18) {
        conn->to << wopImportPaths;

        auto source2 = sinkToSource([&](Sink & sink) {
            sink << 1 // == path follows
                ;
            copyNAR(source, sink);
            sink
                << exportMagic
                << info.path
                << info.references
                << info.deriver
                << 0 // == no legacy signature
                << 0 // == no path follows
                ;
        });

        conn.processStderr(0, source2.get());

        auto importedPaths = readStorePaths<PathSet>(*this, conn->from);
        assert(importedPaths.size() <= 1);
    }

    else {
        conn->to << wopAddToStoreNar
                 << info.path << info.deriver << info.narHash.to_string(Base16, false)
                 << info.references << info.registrationTime << info.narSize
                 << info.ultimate << info.sigs << info.ca
                 << repair << !checkSigs;
        bool tunnel = GET_PROTOCOL_MINOR(conn->daemonVersion) >= 21;
        if (!tunnel) copyNAR(source, conn->to);
        conn.processStderr(0, tunnel ? &source : nullptr);
    }
}
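
// For daemons older than protocol minor version 18, the NAR is wrapped
// on the fly in the legacy 'export' framing (a leading 1, the NAR, the
// export magic, the path metadata, and a terminating 0) and sent via
// wopImportPaths. From minor version 21 the daemon instead pulls the
// NAR through the STDERR_READ mechanism ('tunnel') rather than reading
// it inline from the command stream.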


Path RemoteStore::addToStore(const string & name, const Path & _srcPath,
    bool recursive, HashType hashAlgo, PathFilter & filter, RepairFlag repair)
{
    if (repair) throw Error("repairing is not supported when building through the Nix daemon");

    auto conn(getConnection());

    Path srcPath(absPath(_srcPath));

    conn->to << wopAddToStore << name
             << ((hashAlgo == htSHA256 && recursive) ? 0 : 1) /* backwards compatibility hack */
             << (recursive ? 1 : 0)
             << printHashType(hashAlgo);

    try {
        conn->to.written = 0;
        conn->to.warn = true;
        connections->incCapacity();
        {
            Finally cleanup([&]() { connections->decCapacity(); });
            dumpPath(srcPath, conn->to, filter);
        }
        conn->to.warn = false;
        conn.processStderr();
    } catch (SysError & e) {
        /* Daemon closed while we were sending the path. Probably OOM
           or I/O error. */
        if (e.errNo == EPIPE)
            try {
                conn.processStderr();
            } catch (EndOfFile & e) { }
        throw;
    }

    return readStorePath(*this, conn->from);
}


Path RemoteStore::addTextToStore(const string & name, const string & s,
    const PathSet & references, RepairFlag repair)
{
    if (repair) throw Error("repairing is not supported when building through the Nix daemon");

    auto conn(getConnection());
    conn->to << wopAddTextToStore << name << s << references;

    conn.processStderr();
    return readStorePath(*this, conn->from);
}


void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
{
    auto conn(getConnection());
    conn->to << wopBuildPaths;
    if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 13) {
        conn->to << drvPaths;
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) >= 15)
            conn->to << buildMode;
        else
            /* Old daemons did not take a 'buildMode' parameter, so we
               need to validate it here on the client side. */
            if (buildMode != bmNormal)
                throw Error("repairing or checking is not supported when building through the Nix daemon");
    } else {
        /* For backwards compatibility with old daemons, strip output
           identifiers. */
        PathSet drvPaths2;
        for (auto & i : drvPaths)
            drvPaths2.insert(string(i, 0, i.find('!')));
        conn->to << drvPaths2;
    }
    conn.processStderr();
    readInt(conn->from);
}


BuildResult RemoteStore::buildDerivation(const Path & drvPath, const BasicDerivation & drv,
    BuildMode buildMode)
{
    auto conn(getConnection());
    conn->to << wopBuildDerivation << drvPath << drv << buildMode;
    conn.processStderr();
    BuildResult res;
    unsigned int status;
    conn->from >> status >> res.errorMsg;
    res.status = (BuildResult::Status) status;
    return res;
}


void RemoteStore::ensurePath(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopEnsurePath << path;
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::addTempRoot(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopAddTempRoot << path;
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::addIndirectRoot(const Path & path)
{
    auto conn(getConnection());
    conn->to << wopAddIndirectRoot << path;
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::syncWithGC()
{
    auto conn(getConnection());
    conn->to << wopSyncWithGC;
    conn.processStderr();
    readInt(conn->from);
}


Roots RemoteStore::findRoots(bool censor)
{
    auto conn(getConnection());
    conn->to << wopFindRoots;
    conn.processStderr();
    size_t count = readNum<size_t>(conn->from);
    Roots result;
    while (count--) {
        Path link = readString(conn->from);
        Path target = readStorePath(*this, conn->from);
        result[target].emplace(link);
    }
    return result;
}


void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
{
    auto conn(getConnection());

    conn->to
        << wopCollectGarbage << options.action << options.pathsToDelete << options.ignoreLiveness
        << options.maxFreed
        /* removed options */
        << 0 << 0 << 0;

    conn.processStderr();

    results.paths = readStrings<PathSet>(conn->from);
    results.bytesFreed = readLongLong(conn->from);
    readLongLong(conn->from); // obsolete

    {
        auto state_(Store::state.lock());
        state_->pathInfoCache.clear();
    }
}


void RemoteStore::optimiseStore()
{
    auto conn(getConnection());
    conn->to << wopOptimiseStore;
    conn.processStderr();
    readInt(conn->from);
}


bool RemoteStore::verifyStore(bool checkContents, RepairFlag repair)
{
    auto conn(getConnection());
    conn->to << wopVerifyStore << checkContents << repair;
    conn.processStderr();
    return readInt(conn->from);
}


void RemoteStore::addSignatures(const Path & storePath, const StringSet & sigs)
{
    auto conn(getConnection());
    conn->to << wopAddSignatures << storePath << sigs;
    conn.processStderr();
    readInt(conn->from);
}


void RemoteStore::queryMissing(const PathSet & targets,
    PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
    unsigned long long & downloadSize, unsigned long long & narSize)
{
    {
        auto conn(getConnection());
        if (GET_PROTOCOL_MINOR(conn->daemonVersion) < 19)
            // Don't hold the connection handle in the fallback case
            // to prevent a deadlock.
            goto fallback;
        conn->to << wopQueryMissing << targets;
        conn.processStderr();
        willBuild = readStorePaths<PathSet>(*this, conn->from);
        willSubstitute = readStorePaths<PathSet>(*this, conn->from);
        unknown = readStorePaths<PathSet>(*this, conn->from);
        conn->from >> downloadSize >> narSize;
        return;
    }

 fallback:
    return Store::queryMissing(targets, willBuild, willSubstitute,
        unknown, downloadSize, narSize);
}


void RemoteStore::connect()
{
    auto conn(getConnection());
}


unsigned int RemoteStore::getProtocol()
{
    auto conn(connections->get());
    return conn->daemonVersion;
}


void RemoteStore::flushBadConnections()
{
    connections->flushBad();
}


RemoteStore::Connection::~Connection()
{
    try {
        to.flush();
    } catch (...) {
        ignoreException();
    }
}


static Logger::Fields readFields(Source & from)
{
    Logger::Fields fields;
    size_t size = readInt(from);
    for (size_t n = 0; n < size; n++) {
        auto type = (decltype(Logger::Field::type)) readInt(from);
        if (type == Logger::Field::tInt)
            fields.push_back(readNum<uint64_t>(from));
        else if (type == Logger::Field::tString)
            fields.push_back(readString(from));
        else
            throw Error("got unsupported field type %x from Nix daemon", (int) type);
    }
    return fields;
}


std::exception_ptr RemoteStore::Connection::processStderr(Sink * sink, Source * source)
{
    to.flush();

    while (true) {

        auto msg = readNum<uint64_t>(from);

        if (msg == STDERR_WRITE) {
            string s = readString(from);
            if (!sink) throw Error("no sink");
            (*sink)(s);
        }

        else if (msg == STDERR_READ) {
            if (!source) throw Error("no source");
            size_t len = readNum<size_t>(from);
            auto buf = std::make_unique<unsigned char[]>(len);
            writeString(buf.get(), source->read(buf.get(), len), to);
            to.flush();
        }

        else if (msg == STDERR_ERROR) {
            string error = readString(from);
            unsigned int status = readInt(from);
            return std::make_exception_ptr(Error(status, error));
        }

        else if (msg == STDERR_NEXT)
            printError(chomp(readString(from)));

        else if (msg == STDERR_START_ACTIVITY) {
            auto act = readNum<ActivityId>(from);
            auto lvl = (Verbosity) readInt(from);
            auto type = (ActivityType) readInt(from);
            auto s = readString(from);
            auto fields = readFields(from);
            auto parent = readNum<ActivityId>(from);
            logger->startActivity(act, lvl, type, s, fields, parent);
        }

        else if (msg == STDERR_STOP_ACTIVITY) {
            auto act = readNum<ActivityId>(from);
            logger->stopActivity(act);
        }

        else if (msg == STDERR_RESULT) {
            auto act = readNum<ActivityId>(from);
            auto type = (ResultType) readInt(from);
            auto fields = readFields(from);
            logger->result(act, type, fields);
        }

        else if (msg == STDERR_LAST)
            break;

        else
            throw Error("got unknown message type %x from Nix daemon", msg);
    }

    return nullptr;
}
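
// processStderr() drains the daemon's side channel after each request:
// STDERR_WRITE/STDERR_READ shuttle bulk data through the caller-supplied
// sink/source, STDERR_NEXT and the activity/result messages forward log
// output to the local logger, STDERR_ERROR converts a daemon-side
// failure into an exception_ptr for ConnectionHandle to rethrow, and
// STDERR_LAST signals that the operation's actual reply follows.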

static std::string uriScheme = "unix://";

static RegisterStoreImplementation regStore([](
    const std::string & uri, const Store::Params & params)
    -> std::shared_ptr<Store>
{
    if (std::string(uri, 0, uriScheme.size()) != uriScheme) return 0;
    return std::make_shared<UDSRemoteStore>(std::string(uri, uriScheme.size()), params);
});
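
// Store backends self-register at static-initialisation time: each
// RegisterStoreImplementation supplies a factory that inspects the URI
// and either returns a Store for schemes it recognises ('unix://'
// here; the S3 store below registers 's3://' the same way) or a null
// pointer so that the next factory is tried.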

}
161 third_party/nix/src/libstore/remote-store.hh vendored Normal file
@@ -0,0 +1,161 @@
#pragma once

#include <limits>
#include <string>

#include "store-api.hh"


namespace nix {


class Pipe;
class Pid;
struct FdSink;
struct FdSource;
template<typename T> class Pool;
struct ConnectionHandle;


/* FIXME: RemoteStore is a misnomer - should be something like
   DaemonStore. */
class RemoteStore : public virtual Store
{
public:

    const Setting<int> maxConnections{(Store*) this, 1,
            "max-connections", "maximum number of concurrent connections to the Nix daemon"};

    const Setting<unsigned int> maxConnectionAge{(Store*) this, std::numeric_limits<unsigned int>::max(),
            "max-connection-age", "number of seconds to reuse a connection"};

    virtual bool sameMachine() = 0;

    RemoteStore(const Params & params);

    /* Implementations of abstract store API methods. */

    bool isValidPathUncached(const Path & path) override;

    PathSet queryValidPaths(const PathSet & paths,
        SubstituteFlag maybeSubstitute = NoSubstitute) override;

    PathSet queryAllValidPaths() override;

    void queryPathInfoUncached(const Path & path,
        Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept override;

    void queryReferrers(const Path & path, PathSet & referrers) override;

    PathSet queryValidDerivers(const Path & path) override;

    PathSet queryDerivationOutputs(const Path & path) override;

    StringSet queryDerivationOutputNames(const Path & path) override;

    Path queryPathFromHashPart(const string & hashPart) override;

    PathSet querySubstitutablePaths(const PathSet & paths) override;

    void querySubstitutablePathInfos(const PathSet & paths,
        SubstitutablePathInfos & infos) override;

    void addToStore(const ValidPathInfo & info, Source & nar,
        RepairFlag repair, CheckSigsFlag checkSigs,
        std::shared_ptr<FSAccessor> accessor) override;

    Path addToStore(const string & name, const Path & srcPath,
        bool recursive = true, HashType hashAlgo = htSHA256,
        PathFilter & filter = defaultPathFilter, RepairFlag repair = NoRepair) override;

    Path addTextToStore(const string & name, const string & s,
        const PathSet & references, RepairFlag repair) override;

    void buildPaths(const PathSet & paths, BuildMode buildMode) override;

    BuildResult buildDerivation(const Path & drvPath, const BasicDerivation & drv,
        BuildMode buildMode) override;

    void ensurePath(const Path & path) override;

    void addTempRoot(const Path & path) override;

    void addIndirectRoot(const Path & path) override;

    void syncWithGC() override;

    Roots findRoots(bool censor) override;

    void collectGarbage(const GCOptions & options, GCResults & results) override;

    void optimiseStore() override;

    bool verifyStore(bool checkContents, RepairFlag repair) override;

    void addSignatures(const Path & storePath, const StringSet & sigs) override;

    void queryMissing(const PathSet & targets,
        PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
        unsigned long long & downloadSize, unsigned long long & narSize) override;

    void connect() override;

    unsigned int getProtocol() override;

    void flushBadConnections();

protected:

    struct Connection
    {
        AutoCloseFD fd;
        FdSink to;
        FdSource from;
        unsigned int daemonVersion;
        std::chrono::time_point<std::chrono::steady_clock> startTime;

        virtual ~Connection();

        std::exception_ptr processStderr(Sink * sink = 0, Source * source = 0);
    };
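
    // Each Connection records the protocol version negotiated with the
    // daemon (daemonVersion) and its creation time (startTime); the
    // latter presumably lets the connection pool retire connections
    // older than the max-connection-age setting above.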

    ref<Connection> openConnectionWrapper();

    virtual ref<Connection> openConnection() = 0;

    void initConnection(Connection & conn);

    ref<Pool<Connection>> connections;

    virtual void setOptions(Connection & conn);

    ConnectionHandle getConnection();

    friend struct ConnectionHandle;

private:

    std::atomic_bool failed{false};

};

class UDSRemoteStore : public LocalFSStore, public RemoteStore
{
public:

    UDSRemoteStore(const Params & params);
    UDSRemoteStore(std::string path, const Params & params);

    std::string getUri() override;

    bool sameMachine()
    { return true; }

private:

    ref<RemoteStore::Connection> openConnection() override;
    std::optional<std::string> path;
};


}
431 third_party/nix/src/libstore/s3-binary-cache-store.cc vendored Normal file
@@ -0,0 +1,431 @@
#if ENABLE_S3

#include "s3.hh"
#include "s3-binary-cache-store.hh"
#include "nar-info.hh"
#include "nar-info-disk-cache.hh"
#include "globals.hh"
#include "compression.hh"
#include "download.hh"
#include "istringstream_nocopy.hh"

#include <aws/core/Aws.h>
#include <aws/core/VersionConfig.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/core/auth/AWSCredentialsProviderChain.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/DefaultRetryStrategy.h>
#include <aws/core/utils/logging/FormattedLogSystem.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <aws/core/utils/threading/Executor.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/s3/model/ListObjectsRequest.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/transfer/TransferManager.h>

using namespace Aws::Transfer;

namespace nix {

struct S3Error : public Error
{
    Aws::S3::S3Errors err;
    S3Error(Aws::S3::S3Errors err, const FormatOrString & fs)
        : Error(fs), err(err) { };
};

/* Helper: given an Outcome<R, E>, return R in case of success, or
   throw an exception in case of an error. */
template<typename R, typename E>
R && checkAws(const FormatOrString & fs, Aws::Utils::Outcome<R, E> && outcome)
{
    if (!outcome.IsSuccess())
        throw S3Error(
            outcome.GetError().GetErrorType(),
            fs.s + ": " + outcome.GetError().GetMessage());
    return outcome.GetResultWithOwnership();
}

class AwsLogger : public Aws::Utils::Logging::FormattedLogSystem
{
    using Aws::Utils::Logging::FormattedLogSystem::FormattedLogSystem;

    void ProcessFormattedStatement(Aws::String && statement) override
    {
        debug("AWS: %s", chomp(statement));
    }
};

static void initAWS()
{
    static std::once_flag flag;
    std::call_once(flag, []() {
        Aws::SDKOptions options;

        /* We install our own OpenSSL locking function (see
           shared.cc), so don't let aws-sdk-cpp override it. */
        options.cryptoOptions.initAndCleanupOpenSSL = false;

        if (verbosity >= lvlDebug) {
            options.loggingOptions.logLevel =
                verbosity == lvlDebug
                ? Aws::Utils::Logging::LogLevel::Debug
                : Aws::Utils::Logging::LogLevel::Trace;
            options.loggingOptions.logger_create_fn = [options]() {
                return std::make_shared<AwsLogger>(options.loggingOptions.logLevel);
            };
        }

        Aws::InitAPI(options);
    });
}

S3Helper::S3Helper(const string & profile, const string & region, const string & scheme, const string & endpoint)
    : config(makeConfig(region, scheme, endpoint))
    , client(make_ref<Aws::S3::S3Client>(
            profile == ""
            ? std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
                std::make_shared<Aws::Auth::DefaultAWSCredentialsProviderChain>())
            : std::dynamic_pointer_cast<Aws::Auth::AWSCredentialsProvider>(
                std::make_shared<Aws::Auth::ProfileConfigFileAWSCredentialsProvider>(profile.c_str())),
            *config,
            // FIXME: https://github.com/aws/aws-sdk-cpp/issues/759
#if AWS_VERSION_MAJOR == 1 && AWS_VERSION_MINOR < 3
            false,
#else
            Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
#endif
            endpoint.empty()))
{
}

/* Log AWS retries. */
class RetryStrategy : public Aws::Client::DefaultRetryStrategy
{
    bool ShouldRetry(const Aws::Client::AWSError<Aws::Client::CoreErrors>& error, long attemptedRetries) const override
    {
        auto retry = Aws::Client::DefaultRetryStrategy::ShouldRetry(error, attemptedRetries);
        if (retry)
            printError("AWS error '%s' (%s), will retry in %d ms",
                error.GetExceptionName(), error.GetMessage(), CalculateDelayBeforeNextRetry(error, attemptedRetries));
        return retry;
    }
};

ref<Aws::Client::ClientConfiguration> S3Helper::makeConfig(const string & region, const string & scheme, const string & endpoint)
{
    initAWS();
    auto res = make_ref<Aws::Client::ClientConfiguration>();
    res->region = region;
    if (!scheme.empty()) {
        res->scheme = Aws::Http::SchemeMapper::FromString(scheme.c_str());
    }
    if (!endpoint.empty()) {
        res->endpointOverride = endpoint;
    }
    res->requestTimeoutMs = 600 * 1000;
    res->connectTimeoutMs = 5 * 1000;
    res->retryStrategy = std::make_shared<RetryStrategy>();
    res->caFile = settings.caFile;
    return res;
}
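
// The 10-minute request timeout is presumably sized for large NAR
// transfers over slow links; establishing the TCP connection itself
// must still complete within 5 seconds.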

S3Helper::DownloadResult S3Helper::getObject(
    const std::string & bucketName, const std::string & key)
{
    debug("fetching 's3://%s/%s'...", bucketName, key);

    auto request =
        Aws::S3::Model::GetObjectRequest()
        .WithBucket(bucketName)
        .WithKey(key);

    request.SetResponseStreamFactory([&]() {
        return Aws::New<std::stringstream>("STRINGSTREAM");
    });

    DownloadResult res;

    auto now1 = std::chrono::steady_clock::now();

    try {

        auto result = checkAws(fmt("AWS error fetching '%s'", key),
            client->GetObject(request));

        res.data = decompress(result.GetContentEncoding(),
            dynamic_cast<std::stringstream &>(result.GetBody()).str());

    } catch (S3Error & e) {
        if (e.err != Aws::S3::S3Errors::NO_SUCH_KEY) throw;
    }

    auto now2 = std::chrono::steady_clock::now();

    res.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1).count();

    return res;
}
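
// A missing object is not an error at this level: on NO_SUCH_KEY the
// data pointer stays null and callers decide how to react. Response
// bodies are transparently decompressed according to their
// Content-Encoding header.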

struct S3BinaryCacheStoreImpl : public S3BinaryCacheStore
{
    const Setting<std::string> profile{this, "", "profile", "The name of the AWS configuration profile to use."};
    const Setting<std::string> region{this, Aws::Region::US_EAST_1, "region", {"aws-region"}};
    const Setting<std::string> scheme{this, "", "scheme", "The scheme to use for S3 requests, https by default."};
    const Setting<std::string> endpoint{this, "", "endpoint", "An optional override of the endpoint to use when talking to S3."};
    const Setting<std::string> narinfoCompression{this, "", "narinfo-compression", "compression method for .narinfo files"};
    const Setting<std::string> lsCompression{this, "", "ls-compression", "compression method for .ls files"};
    const Setting<std::string> logCompression{this, "", "log-compression", "compression method for log/* files"};
    const Setting<bool> multipartUpload{
        this, false, "multipart-upload", "whether to use multi-part uploads"};
    const Setting<uint64_t> bufferSize{
        this, 5 * 1024 * 1024, "buffer-size", "size (in bytes) of each part in multi-part uploads"};

    std::string bucketName;

    Stats stats;

    S3Helper s3Helper;

    S3BinaryCacheStoreImpl(
        const Params & params, const std::string & bucketName)
        : S3BinaryCacheStore(params)
        , bucketName(bucketName)
        , s3Helper(profile, region, scheme, endpoint)
    {
        diskCache = getNarInfoDiskCache();
    }

    std::string getUri() override
    {
        return "s3://" + bucketName;
    }

    void init() override
    {
        if (!diskCache->cacheExists(getUri(), wantMassQuery_, priority)) {

            BinaryCacheStore::init();

            diskCache->createCache(getUri(), storeDir, wantMassQuery_, priority);
        }
    }

    const Stats & getS3Stats() override
    {
        return stats;
    }

    /* This is a specialisation of isValidPath() that optimistically
       fetches the .narinfo file, rather than first checking for its
       existence via a HEAD request. Since .narinfos are small, doing
       a GET is unlikely to be slower than HEAD. */
    bool isValidPathUncached(const Path & storePath) override
    {
        try {
            queryPathInfo(storePath);
            return true;
        } catch (InvalidPath & e) {
            return false;
        }
    }

    bool fileExists(const std::string & path) override
    {
        stats.head++;

        auto res = s3Helper.client->HeadObject(
            Aws::S3::Model::HeadObjectRequest()
            .WithBucket(bucketName)
            .WithKey(path));

        if (!res.IsSuccess()) {
            auto & error = res.GetError();
            if (error.GetErrorType() == Aws::S3::S3Errors::RESOURCE_NOT_FOUND
                || error.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_KEY
                // If bucket listing is disabled, 404s turn into 403s
                || error.GetErrorType() == Aws::S3::S3Errors::ACCESS_DENIED)
                return false;
            throw Error(format("AWS error fetching '%s': %s") % path % error.GetMessage());
        }

        return true;
    }

    std::shared_ptr<TransferManager> transferManager;
    std::once_flag transferManagerCreated;

    void uploadFile(const std::string & path, const std::string & data,
        const std::string & mimeType,
        const std::string & contentEncoding)
    {
        auto stream = std::make_shared<istringstream_nocopy>(data);

        auto maxThreads = std::thread::hardware_concurrency();

        static std::shared_ptr<Aws::Utils::Threading::PooledThreadExecutor>
            executor = std::make_shared<Aws::Utils::Threading::PooledThreadExecutor>(maxThreads);

        std::call_once(transferManagerCreated, [&]()
        {
            if (multipartUpload) {
                TransferManagerConfiguration transferConfig(executor.get());

                transferConfig.s3Client = s3Helper.client;
                transferConfig.bufferSize = bufferSize;

                transferConfig.uploadProgressCallback =
                    [](const TransferManager *transferManager,
                        const std::shared_ptr<const TransferHandle>
                        &transferHandle)
                    {
                        //FIXME: find a way to properly abort the multipart upload.
                        //checkInterrupt();
                        debug("upload progress ('%s'): '%d' of '%d' bytes",
                            transferHandle->GetKey(),
                            transferHandle->GetBytesTransferred(),
                            transferHandle->GetBytesTotalSize());
                    };

                transferManager = TransferManager::Create(transferConfig);
            }
        });

        auto now1 = std::chrono::steady_clock::now();

        if (transferManager) {

            if (contentEncoding != "")
                throw Error("setting a content encoding is not supported with S3 multi-part uploads");

            std::shared_ptr<TransferHandle> transferHandle =
                transferManager->UploadFile(
                    stream, bucketName, path, mimeType,
                    Aws::Map<Aws::String, Aws::String>(),
                    nullptr /*, contentEncoding */);

            transferHandle->WaitUntilFinished();

            if (transferHandle->GetStatus() == TransferStatus::FAILED)
                throw Error("AWS error: failed to upload 's3://%s/%s': %s",
                    bucketName, path, transferHandle->GetLastError().GetMessage());

            if (transferHandle->GetStatus() != TransferStatus::COMPLETED)
                throw Error("AWS error: transfer status of 's3://%s/%s' in unexpected state",
                    bucketName, path);

        } else {

            auto request =
                Aws::S3::Model::PutObjectRequest()
                .WithBucket(bucketName)
                .WithKey(path);

            request.SetContentType(mimeType);

            if (contentEncoding != "")
                request.SetContentEncoding(contentEncoding);

            auto stream = std::make_shared<istringstream_nocopy>(data);

            request.SetBody(stream);

            auto result = checkAws(fmt("AWS error uploading '%s'", path),
                s3Helper.client->PutObject(request));
        }

        auto now2 = std::chrono::steady_clock::now();

        auto duration =
            std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1)
            .count();

        printInfo(format("uploaded 's3://%1%/%2%' (%3% bytes) in %4% ms") %
            bucketName % path % data.size() % duration);

        stats.putTimeMs += duration;
        stats.putBytes += data.size();
        stats.put++;
    }

    void upsertFile(const std::string & path, const std::string & data,
        const std::string & mimeType) override
    {
        if (narinfoCompression != "" && hasSuffix(path, ".narinfo"))
            uploadFile(path, *compress(narinfoCompression, data), mimeType, narinfoCompression);
        else if (lsCompression != "" && hasSuffix(path, ".ls"))
            uploadFile(path, *compress(lsCompression, data), mimeType, lsCompression);
        else if (logCompression != "" && hasPrefix(path, "log/"))
            uploadFile(path, *compress(logCompression, data), mimeType, logCompression);
        else
            uploadFile(path, data, mimeType, "");
    }

    void getFile(const std::string & path, Sink & sink) override
    {
        stats.get++;

        // FIXME: stream output to sink.
        auto res = s3Helper.getObject(bucketName, path);

        stats.getBytes += res.data ? res.data->size() : 0;
        stats.getTimeMs += res.durationMs;

        if (res.data) {
            printTalkative("downloaded 's3://%s/%s' (%d bytes) in %d ms",
                bucketName, path, res.data->size(), res.durationMs);

            sink((unsigned char *) res.data->data(), res.data->size());
        } else
            throw NoSuchBinaryCacheFile("file '%s' does not exist in binary cache '%s'", path, getUri());
    }

    PathSet queryAllValidPaths() override
    {
        PathSet paths;
        std::string marker;

        do {
            debug(format("listing bucket 's3://%s' from key '%s'...") % bucketName % marker);

            auto res = checkAws(format("AWS error listing bucket '%s'") % bucketName,
                s3Helper.client->ListObjects(
                    Aws::S3::Model::ListObjectsRequest()
                    .WithBucket(bucketName)
                    .WithDelimiter("/")
                    .WithMarker(marker)));

            auto & contents = res.GetContents();

            debug(format("got %d keys, next marker '%s'")
                % contents.size() % res.GetNextMarker());

            for (auto object : contents) {
                auto & key = object.GetKey();
                if (key.size() != 40 || !hasSuffix(key, ".narinfo")) continue;
                paths.insert(storeDir + "/" + key.substr(0, key.size() - 8));
            }

            marker = res.GetNextMarker();
        } while (!marker.empty());

        return paths;
    }
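
    // A .narinfo key is exactly 40 characters: the 32-character store
    // path hash plus the 8-character ".narinfo" suffix. Stripping the
    // suffix and prefixing storeDir reconstructs the hash part of the
    // store path.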

};

static RegisterStoreImplementation regStore([](
    const std::string & uri, const Store::Params & params)
    -> std::shared_ptr<Store>
{
    if (std::string(uri, 0, 5) != "s3://") return 0;
    auto store = std::make_shared<S3BinaryCacheStoreImpl>(params, std::string(uri, 5));
    store->init();
    return store;
});

}

#endif
33 third_party/nix/src/libstore/s3-binary-cache-store.hh vendored Normal file
@@ -0,0 +1,33 @@
#pragma once

#include "binary-cache-store.hh"

#include <atomic>

namespace nix {

class S3BinaryCacheStore : public BinaryCacheStore
{
protected:

    S3BinaryCacheStore(const Params & params)
        : BinaryCacheStore(params)
    { }

public:

    struct Stats
    {
        std::atomic<uint64_t> put{0};
        std::atomic<uint64_t> putBytes{0};
        std::atomic<uint64_t> putTimeMs{0};
        std::atomic<uint64_t> get{0};
        std::atomic<uint64_t> getBytes{0};
        std::atomic<uint64_t> getTimeMs{0};
        std::atomic<uint64_t> head{0};
    };

    virtual const Stats & getS3Stats() = 0;
};

}
33 third_party/nix/src/libstore/s3.hh vendored Normal file
@@ -0,0 +1,33 @@
#pragma once

#if ENABLE_S3

#include "ref.hh"

namespace Aws { namespace Client { class ClientConfiguration; } }
namespace Aws { namespace S3 { class S3Client; } }

namespace nix {

struct S3Helper
{
    ref<Aws::Client::ClientConfiguration> config;
    ref<Aws::S3::S3Client> client;

    S3Helper(const std::string & profile, const std::string & region, const std::string & scheme, const std::string & endpoint);

    ref<Aws::Client::ClientConfiguration> makeConfig(const std::string & region, const std::string & scheme, const std::string & endpoint);

    struct DownloadResult
    {
        std::shared_ptr<std::string> data;
        unsigned int durationMs;
    };

    DownloadResult getObject(
        const std::string & bucketName, const std::string & key);
};

}

#endif
87 third_party/nix/src/libstore/sandbox-defaults.sb vendored Normal file
@@ -0,0 +1,87 @@
(define TMPDIR (param "_GLOBAL_TMP_DIR"))

(deny default)

; Disallow creating setuid/setgid binaries, since that
; would allow breaking build user isolation.
(deny file-write-setugid)

; Allow forking.
(allow process-fork)

; Allow reading system information like #CPUs, etc.
(allow sysctl-read)

; Allow POSIX semaphores and shared memory.
(allow ipc-posix*)

; Allow socket creation.
(allow system-socket)

; Allow sending signals within the sandbox.
(allow signal (target same-sandbox))

; Allow getpwuid.
(allow mach-lookup (global-name "com.apple.system.opendirectoryd.libinfo"))

; Access to /tmp.
; The network-outbound/network-inbound ones are for unix domain sockets, which
; we allow access to in TMPDIR (but if we allow them more broadly, you could in
; theory escape the sandbox)
(allow file* process-exec network-outbound network-inbound
       (literal "/tmp") (subpath TMPDIR))

; Some packages like to read the system version.
(allow file-read* (literal "/System/Library/CoreServices/SystemVersion.plist"))

; Without this line clang cannot write to /dev/null, breaking some configure tests.
(allow file-read-metadata (literal "/dev"))

; Many packages like to do local networking in their test suites, but let's only
; allow it if the package explicitly asks for it.
(if (param "_ALLOW_LOCAL_NETWORKING")
    (begin
      (allow network* (local ip) (local tcp) (local udp))

      ; Allow access to /etc/resolv.conf (which is a symlink to
      ; /private/var/run/resolv.conf).
      ; TODO: deduplicate with sandbox-network.sb
      (allow file-read-metadata
             (literal "/var")
             (literal "/etc")
             (literal "/etc/resolv.conf")
             (literal "/private/etc/resolv.conf"))

      (allow file-read*
             (literal "/private/var/run/resolv.conf"))

      ; Allow DNS lookups. This is even needed for localhost, which lots of tests rely on.
      (allow file-read-metadata (literal "/etc/hosts"))
      (allow file-read* (literal "/private/etc/hosts"))
      (allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))))

; Standard devices.
(allow file*
       (literal "/dev/null")
       (literal "/dev/random")
       (literal "/dev/stdin")
       (literal "/dev/stdout")
       (literal "/dev/tty")
       (literal "/dev/urandom")
       (literal "/dev/zero")
       (subpath "/dev/fd"))

; Does nothing, but reduces build noise.
(allow file* (literal "/dev/dtracehelper"))

; Allow access to zoneinfo since libSystem needs it.
(allow file-read* (subpath "/usr/share/zoneinfo"))

(allow file-read* (subpath "/usr/share/locale"))

; This is mostly to get more specific log messages when builds try to
; access something in /etc or /var.
(allow file-read-metadata
       (literal "/etc")
       (literal "/var")
       (literal "/private/var/tmp"))
5 third_party/nix/src/libstore/sandbox-minimal.sb vendored Normal file
@@ -0,0 +1,5 @@
(allow default)

; Disallow creating setuid/setgid binaries, since that
; would allow breaking build user isolation.
(deny file-write-setugid)
16 third_party/nix/src/libstore/sandbox-network.sb vendored Normal file
@@ -0,0 +1,16 @@
; Allow local and remote network traffic.
(allow network* (local ip) (remote ip))

; Allow access to /etc/resolv.conf (which is a symlink to
; /private/var/run/resolv.conf).
(allow file-read-metadata
       (literal "/var")
       (literal "/etc")
       (literal "/etc/resolv.conf")
       (literal "/private/etc/resolv.conf"))

(allow file-read*
       (literal "/private/var/run/resolv.conf"))

; Allow DNS lookups.
(allow network-outbound (remote unix-socket (path-literal "/private/var/run/mDNSResponder")))
42 third_party/nix/src/libstore/schema.sql vendored Normal file
@@ -0,0 +1,42 @@
create table if not exists ValidPaths (
    id               integer primary key autoincrement not null,
    path             text unique not null,
    hash             text not null,
    registrationTime integer not null,
    deriver          text,
    narSize          integer,
    ultimate         integer, -- null implies "false"
    sigs             text,    -- space-separated
    ca               text     -- if not null, an assertion that the path is content-addressed; see ValidPathInfo
);

create table if not exists Refs (
    referrer  integer not null,
    reference integer not null,
    primary key (referrer, reference),
    foreign key (referrer) references ValidPaths(id) on delete cascade,
    foreign key (reference) references ValidPaths(id) on delete restrict
);

create index if not exists IndexReferrer on Refs(referrer);
create index if not exists IndexReference on Refs(reference);

-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
-- table. This causes a deletion of the corresponding row in
-- ValidPaths to cause a foreign key constraint violation (due to `on
-- delete restrict' on the `reference' column). Therefore, explicitly
-- get rid of self-references.
create trigger if not exists DeleteSelfRefs before delete on ValidPaths
  begin
    delete from Refs where referrer = old.id and reference = old.id;
  end;

create table if not exists DerivationOutputs (
    drv  integer not null,
    id   text not null, -- symbolic output id, usually "out"
    path text not null,
    primary key (drv, id),
    foreign key (drv) references ValidPaths(id) on delete cascade
);

create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
24 third_party/nix/src/libstore/serve-protocol.hh vendored Normal file
@@ -0,0 +1,24 @@
#pragma once

namespace nix {

#define SERVE_MAGIC_1 0x390c9deb
#define SERVE_MAGIC_2 0x5452eecb

#define SERVE_PROTOCOL_VERSION 0x205
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
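
// The version word packs the major version into the high byte and the
// minor into the low byte, so 0x205 denotes serve protocol version 2.5.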

typedef enum {
    cmdQueryValidPaths = 1,
    cmdQueryPathInfos = 2,
    cmdDumpStorePath = 3,
    cmdImportPaths = 4,
    cmdExportPaths = 5,
    cmdBuildPaths = 6,
    cmdQueryClosure = 7,
    cmdBuildDerivation = 8,
    cmdAddToStoreNar = 9,
} ServeCommand;

}
Some files were not shown because too many files have changed in this diff.