refactor(3p/nix/libstore): Replace logging.h with glog

parent ce99ba42df
commit 505b6b044b

25 changed files with 514 additions and 464 deletions
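
The hunks below apply one mechanical substitution: Nix's format-string logging helpers (debug, printMsg, printInfo, warn, printError, vomit) are replaced with glog's stream macros. As a rough guide inferred from the hunks themselves (illustrative only, not part of the diff), the standalone sketch below shows the glog calls in isolation; the example messages and the main() wrapper are hypothetical.

    // Illustrative sketch only (not part of the commit): the glog macros that
    // replace the old Nix logging helpers, with the mapping inferred from the
    // hunks below:
    //   debug() / printMsg(lvlChatty|lvlTalkative|lvlVomit, ...) -> DLOG(INFO)
    //   printInfo()  -> LOG(INFO)
    //   warn()       -> LOG(WARNING)
    //   printError() -> LOG(ERROR)
    #include <glog/logging.h>

    int main(int argc, char** argv) {
      // glog must be initialised once before the first LOG call.
      google::InitGoogleLogging(argv[0]);

      LOG(INFO) << "checking path '" << "/nix/store/example" << "'...";
      LOG(WARNING) << "substituter does not have a valid signature";
      LOG(ERROR) << "cannot build missing derivation";

      // DLOG(INFO) compiles to a no-op in NDEBUG (release) builds, which is
      // why it stands in for the old debug()/verbose-level messages.
      DLOG(INFO) << "entered goal loop";
      return 0;
    }
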
				
			
		|  | @ -6,6 +6,7 @@ | |||
| #include "derivations.hh" | ||||
| #include "fs-accessor.hh" | ||||
| #include "globals.hh" | ||||
| #include "glog/logging.h" | ||||
| #include "json.hh" | ||||
| #include "nar-accessor.hh" | ||||
| #include "nar-info-disk-cache.hh" | ||||
|  | @ -176,12 +177,10 @@ void BinaryCacheStore::addToStore(const ValidPathInfo& info, | |||
|   auto duration = | ||||
|       std::chrono::duration_cast<std::chrono::milliseconds>(now2 - now1) | ||||
|           .count(); | ||||
|   printMsg(lvlTalkative, | ||||
|            format("copying path '%1%' (%2% bytes, compressed %3$.1f%% in %4% " | ||||
|                   "ms) to binary cache") % | ||||
|                narInfo->path % narInfo->narSize % | ||||
|                ((1.0 - (double)narCompressed->size() / nar->size()) * 100.0) % | ||||
|                duration); | ||||
|   DLOG(INFO) << "copying path '" << narInfo->path << "' (" << narInfo->narSize | ||||
|              << " bytes, compressed " | ||||
|              << ((1.0 - (double)narCompressed->size() / nar->size()) * 100.0) | ||||
|              << "% in " << duration << "ms) to binary cache"; | ||||
| 
 | ||||
|   /* Atomically write the NAR file. */ | ||||
|   narInfo->url = "nar/" + narInfo->fileHash.to_string(Base32, false) + ".nar" + | ||||
|  | @ -243,35 +242,28 @@ void BinaryCacheStore::queryPathInfoUncached( | |||
|     const Path& storePath, | ||||
|     Callback<std::shared_ptr<ValidPathInfo>> callback) noexcept { | ||||
|   auto uri = getUri(); | ||||
|   auto act = std::make_shared<Activity>( | ||||
|       *logger, lvlTalkative, actQueryPathInfo, | ||||
|       fmt("querying info about '%s' on '%s'", storePath, uri), | ||||
|       Logger::Fields{storePath, uri}); | ||||
|   PushActivity pact(act->id); | ||||
|   LOG(INFO) << "querying info about '" << storePath << "' on '" << uri << "'"; | ||||
| 
 | ||||
|   auto narInfoFile = narInfoFileFor(storePath); | ||||
| 
 | ||||
|   auto callbackPtr = std::make_shared<decltype(callback)>(std::move(callback)); | ||||
| 
 | ||||
|   getFile( | ||||
|       narInfoFile, {[=](std::future<std::shared_ptr<std::string>> fut) { | ||||
|         try { | ||||
|           auto data = fut.get(); | ||||
|   getFile(narInfoFile, {[=](std::future<std::shared_ptr<std::string>> fut) { | ||||
|             try { | ||||
|               auto data = fut.get(); | ||||
| 
 | ||||
|           if (!data) return (*callbackPtr)(nullptr); | ||||
|               if (!data) return (*callbackPtr)(nullptr); | ||||
| 
 | ||||
|           stats.narInfoRead++; | ||||
|               stats.narInfoRead++; | ||||
| 
 | ||||
|           (*callbackPtr)( | ||||
|               (std::shared_ptr<ValidPathInfo>)std::make_shared<NarInfo>( | ||||
|                   *this, *data, narInfoFile)); | ||||
|               (*callbackPtr)( | ||||
|                   (std::shared_ptr<ValidPathInfo>)std::make_shared<NarInfo>( | ||||
|                       *this, *data, narInfoFile)); | ||||
| 
 | ||||
|           (void) | ||||
|               act;  // force Activity into this lambda to ensure it stays alive
 | ||||
|         } catch (...) { | ||||
|           callbackPtr->rethrow(); | ||||
|         } | ||||
|       }}); | ||||
|             } catch (...) { | ||||
|               callbackPtr->rethrow(); | ||||
|             } | ||||
|           }}); | ||||
| } | ||||
| 
 | ||||
| Path BinaryCacheStore::addToStore(const string& name, const Path& srcPath, | ||||
|  | @ -356,7 +348,8 @@ std::shared_ptr<std::string> BinaryCacheStore::getBuildLog(const Path& path) { | |||
| 
 | ||||
|   auto logPath = "log/" + baseNameOf(drvPath); | ||||
| 
 | ||||
|   debug("fetching build log from binary cache '%s/%s'", getUri(), logPath); | ||||
|   DLOG(INFO) << "fetching build log from binary cache '" << getUri() << "/" | ||||
|              << logPath << "'"; | ||||
| 
 | ||||
|   return getFile(logPath); | ||||
| } | ||||
|  |  | |||
296 third_party/nix/src/libstore/build.cc vendored
							|  | @ -31,6 +31,7 @@ | |||
| #include "download.hh" | ||||
| #include "finally.hh" | ||||
| #include "globals.hh" | ||||
| #include "glog/logging.h" | ||||
| #include "json.hh" | ||||
| #include "local-store.hh" | ||||
| #include "machines.hh" | ||||
|  | @ -226,10 +227,6 @@ class Worker { | |||
|   std::map<Path, bool> pathContentsGoodCache; | ||||
| 
 | ||||
|  public: | ||||
|   const Activity act; | ||||
|   const Activity actDerivations; | ||||
|   const Activity actSubstitutions; | ||||
| 
 | ||||
|   /* Set if at least one derivation had a BuildError (i.e. permanent
 | ||||
|      failure). */ | ||||
|   bool permanentFailure; | ||||
|  | @ -327,16 +324,6 @@ class Worker { | |||
|   bool pathContentsGood(const Path& path); | ||||
| 
 | ||||
|   void markContentsGood(const Path& path); | ||||
| 
 | ||||
|   void updateProgress() { | ||||
|     actDerivations.progress(doneBuilds, expectedBuilds + doneBuilds, | ||||
|                             runningBuilds, failedBuilds); | ||||
|     actSubstitutions.progress(doneSubstitutions, | ||||
|                               expectedSubstitutions + doneSubstitutions, | ||||
|                               runningSubstitutions, failedSubstitutions); | ||||
|     act.setExpected(actDownload, expectedDownloadSize + doneDownloadSize); | ||||
|     act.setExpected(actCopyPath, expectedNarSize + doneNarSize); | ||||
|   } | ||||
| }; | ||||
| 
 | ||||
| //////////////////////////////////////////////////////////////////////
 | ||||
|  | @ -397,7 +384,9 @@ void Goal::amDone(ExitCode result) { | |||
|   worker.removeGoal(shared_from_this()); | ||||
| } | ||||
| 
 | ||||
| void Goal::trace(const FormatOrString& fs) { debug("%1%: %2%", name, fs.s); } | ||||
| void Goal::trace(const FormatOrString& fs) { | ||||
|   DLOG(INFO) << name << ": " << fs.s; | ||||
| } | ||||
| 
 | ||||
| //////////////////////////////////////////////////////////////////////
 | ||||
| 
 | ||||
|  | @ -445,9 +434,11 @@ void handleDiffHook(uid_t uid, uid_t gid, Path tryA, Path tryB, Path drvPath, | |||
|                         fmt("diff-hook program '%1%' %2%", diffHook, | ||||
|                             statusToString(diffRes.first))); | ||||
| 
 | ||||
|       if (diffRes.second != "") printError(chomp(diffRes.second)); | ||||
|       if (diffRes.second != "") { | ||||
|         LOG(ERROR) << chomp(diffRes.second); | ||||
|       } | ||||
|     } catch (Error& error) { | ||||
|       printError("diff hook execution failed: %s", error.what()); | ||||
|       LOG(ERROR) << "diff hook execution failed: " << error.what(); | ||||
|     } | ||||
|   } | ||||
| } | ||||
|  | @ -507,7 +498,7 @@ UserLock::UserLock() { | |||
|   /* Copy the result of getgrnam. */ | ||||
|   Strings users; | ||||
|   for (char** p = gr->gr_mem; *p; ++p) { | ||||
|     debug(format("found build user '%1%'") % *p); | ||||
|     DLOG(INFO) << "found build user " << *p; | ||||
|     users.push_back(*p); | ||||
|   } | ||||
| 
 | ||||
|  | @ -518,7 +509,7 @@ UserLock::UserLock() { | |||
|   /* Find a user account that isn't currently in use for another
 | ||||
|      build. */ | ||||
|   for (auto& i : users) { | ||||
|     debug(format("trying user '%1%'") % i); | ||||
|     DLOG(INFO) << "trying user " << i; | ||||
| 
 | ||||
|     struct passwd* pw = getpwnam(i.c_str()); | ||||
|     if (!pw) | ||||
|  | @ -607,15 +598,13 @@ struct HookInstance { | |||
| 
 | ||||
|   FdSink sink; | ||||
| 
 | ||||
|   std::map<ActivityId, Activity> activities; | ||||
| 
 | ||||
|   HookInstance(); | ||||
| 
 | ||||
|   ~HookInstance(); | ||||
| }; | ||||
| 
 | ||||
| HookInstance::HookInstance() { | ||||
|   debug("starting build hook '%s'", settings.buildHook); | ||||
|   DLOG(INFO) << "starting build hook " << settings.buildHook; | ||||
| 
 | ||||
|   /* Create a pipe to get the output of the child. */ | ||||
|   fromHook.create(); | ||||
|  | @ -630,24 +619,29 @@ HookInstance::HookInstance() { | |||
|   pid = startProcess([&]() { | ||||
|     commonChildInit(fromHook); | ||||
| 
 | ||||
|     if (chdir("/") == -1) throw SysError("changing into /"); | ||||
|     if (chdir("/") == -1) { | ||||
|       throw SysError("changing into /"); | ||||
|     } | ||||
| 
 | ||||
|     /* Dup the communication pipes. */ | ||||
|     if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1) | ||||
|     if (dup2(toHook.readSide.get(), STDIN_FILENO) == -1) { | ||||
|       throw SysError("dupping to-hook read side"); | ||||
|     } | ||||
| 
 | ||||
|     /* Use fd 4 for the builder's stdout/stderr. */ | ||||
|     if (dup2(builderOut.writeSide.get(), 4) == -1) | ||||
|     if (dup2(builderOut.writeSide.get(), 4) == -1) { | ||||
|       throw SysError("dupping builder's stdout/stderr"); | ||||
|     } | ||||
| 
 | ||||
|     /* Hack: pass the read side of that fd to allow build-remote
 | ||||
|        to read SSH error messages. */ | ||||
|     if (dup2(builderOut.readSide.get(), 5) == -1) | ||||
|     if (dup2(builderOut.readSide.get(), 5) == -1) { | ||||
|       throw SysError("dupping builder's stdout/stderr"); | ||||
|     } | ||||
| 
 | ||||
|     Strings args = { | ||||
|         baseNameOf(settings.buildHook), | ||||
|         std::to_string(verbosity), | ||||
|         // std::to_string(verbosity), // TODO(tazjin): what?
 | ||||
|     }; | ||||
| 
 | ||||
|     execv(settings.buildHook.get().c_str(), stringsToCharPtrs(args).data()); | ||||
|  | @ -842,10 +836,6 @@ class DerivationGoal : public Goal { | |||
| 
 | ||||
|   std::unique_ptr<MaintainCount<uint64_t>> mcExpectedBuilds, mcRunningBuilds; | ||||
| 
 | ||||
|   std::unique_ptr<Activity> act; | ||||
| 
 | ||||
|   std::map<ActivityId, Activity> builderActivities; | ||||
| 
 | ||||
|   /* The remote machine on which we're building. */ | ||||
|   std::string machineName; | ||||
| 
 | ||||
|  | @ -972,7 +962,6 @@ DerivationGoal::DerivationGoal(const Path& drvPath, | |||
| 
 | ||||
|   mcExpectedBuilds = | ||||
|       std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds); | ||||
|   worker.updateProgress(); | ||||
| } | ||||
| 
 | ||||
| DerivationGoal::DerivationGoal(const Path& drvPath, const BasicDerivation& drv, | ||||
|  | @ -988,7 +977,6 @@ DerivationGoal::DerivationGoal(const Path& drvPath, const BasicDerivation& drv, | |||
| 
 | ||||
|   mcExpectedBuilds = | ||||
|       std::make_unique<MaintainCount<uint64_t>>(worker.expectedBuilds); | ||||
|   worker.updateProgress(); | ||||
| 
 | ||||
|   /* Prevent the .chroot directory from being
 | ||||
|      garbage-collected. (See isActiveTempFile() in gc.cc.) */ | ||||
|  | @ -1089,7 +1077,7 @@ void DerivationGoal::loadDerivation() { | |||
|   trace("loading derivation"); | ||||
| 
 | ||||
|   if (nrFailed != 0) { | ||||
|     printError(format("cannot build missing derivation '%1%'") % drvPath); | ||||
|     LOG(ERROR) << "cannot build missing derivation '" << drvPath << "'"; | ||||
|     done(BuildResult::MiscFailure); | ||||
|     return; | ||||
|   } | ||||
|  | @ -1241,9 +1229,8 @@ void DerivationGoal::repairClosure() { | |||
|   PathSet broken; | ||||
|   for (auto& i : outputClosure) { | ||||
|     if (worker.pathContentsGood(i)) continue; | ||||
|     printError(format("found corrupted or missing path '%1%' in the output " | ||||
|                       "closure of '%2%'") % | ||||
|                i % drvPath); | ||||
|     LOG(ERROR) << "found corrupted or missing path '" << i | ||||
|                << "' in the output closure of '" << drvPath << "'"; | ||||
|     Path drvPath2 = outputsToDrv[i]; | ||||
|     if (drvPath2 == "") | ||||
|       addWaitee(worker.makeSubstitutionGoal(i, Repair)); | ||||
|  | @ -1274,9 +1261,8 @@ void DerivationGoal::inputsRealised() { | |||
|   if (nrFailed != 0) { | ||||
|     if (!useDerivation) | ||||
|       throw Error(format("some dependencies of '%1%' are missing") % drvPath); | ||||
|     printError(format("cannot build derivation '%1%': %2% dependencies " | ||||
|                       "couldn't be built") % | ||||
|                drvPath % nrFailed); | ||||
|     LOG(ERROR) << "cannot build derivation '" << drvPath << "': " << nrFailed | ||||
|                << " dependencies couldn't be built"; | ||||
|     done(BuildResult::DependencyFailed); | ||||
|     return; | ||||
|   } | ||||
|  | @ -1291,7 +1277,7 @@ void DerivationGoal::inputsRealised() { | |||
| 
 | ||||
|   /* The outputs are referenceable paths. */ | ||||
|   for (auto& i : drv->outputs) { | ||||
|     debug(format("building path '%1%'") % i.second.path); | ||||
|     DLOG(INFO) << "building path " << i.second.path; | ||||
|     allPaths.insert(i.second.path); | ||||
|   } | ||||
| 
 | ||||
|  | @ -1317,7 +1303,7 @@ void DerivationGoal::inputsRealised() { | |||
|   /* Second, the input sources. */ | ||||
|   worker.store.computeFSClosure(drv->inputSrcs, inputPaths); | ||||
| 
 | ||||
|   debug(format("added input paths %1%") % showPaths(inputPaths)); | ||||
|   DLOG(INFO) << "added input paths " << showPaths(inputPaths); | ||||
| 
 | ||||
|   allPaths.insert(inputPaths.begin(), inputPaths.end()); | ||||
| 
 | ||||
|  | @ -1363,8 +1349,8 @@ void DerivationGoal::tryToBuild() { | |||
|      build this derivation, so no further checks are necessary. */ | ||||
|   validPaths = checkPathValidity(true, buildMode == bmRepair); | ||||
|   if (buildMode != bmCheck && validPaths.size() == drv->outputs.size()) { | ||||
|     debug(format("skipping build of derivation '%1%', someone beat us to it") % | ||||
|           drvPath); | ||||
|     DLOG(INFO) << "skipping build of derivation '" << drvPath | ||||
|                << "', someone beat us to it"; | ||||
|     outputLocks.setDeletion(true); | ||||
|     done(BuildResult::AlreadyValid); | ||||
|     return; | ||||
|  | @ -1379,7 +1365,7 @@ void DerivationGoal::tryToBuild() { | |||
|   for (auto& i : drv->outputs) { | ||||
|     Path path = i.second.path; | ||||
|     if (worker.store.isValidPath(path)) continue; | ||||
|     debug(format("removing invalid path '%1%'") % path); | ||||
|     DLOG(INFO) << "removing invalid path " << path; | ||||
|     deletePath(worker.store.toRealPath(path)); | ||||
|   } | ||||
| 
 | ||||
|  | @ -1396,14 +1382,13 @@ void DerivationGoal::tryToBuild() { | |||
|                              : nrRounds > 1 ? "building '%s' (round %d/%d)" | ||||
|                                             : "building '%s'", | ||||
|                    drvPath, curRound, nrRounds); | ||||
|     fmt("building '%s'", drvPath); | ||||
|     if (hook) msg += fmt(" on '%s'", machineName); | ||||
|     act = std::make_unique<Activity>( | ||||
|         *logger, lvlInfo, actBuild, msg, | ||||
|         Logger::Fields{drvPath, hook ? machineName : "", curRound, nrRounds}); | ||||
| 
 | ||||
|     if (hook) { | ||||
|       msg += fmt(" on '%s'", machineName); | ||||
|     } | ||||
|     LOG(INFO) << msg << " [" << drvPath << "]"; | ||||
|     mcRunningBuilds = | ||||
|         std::make_unique<MaintainCount<uint64_t>>(worker.runningBuilds); | ||||
|     worker.updateProgress(); | ||||
|   }; | ||||
| 
 | ||||
|   /* Is the build hook willing to accept this job? */ | ||||
|  | @ -1443,7 +1428,7 @@ void DerivationGoal::tryToBuild() { | |||
|     startBuilder(); | ||||
| 
 | ||||
|   } catch (BuildError& e) { | ||||
|     printError(e.msg()); | ||||
|     LOG(ERROR) << e.msg(); | ||||
|     outputLocks.unlock(); | ||||
|     buildUser.reset(); | ||||
|     worker.permanentFailure = true; | ||||
|  | @ -1487,7 +1472,7 @@ MakeError(NotDeterministic, BuildError) | |||
|      kill it. */ | ||||
|   int status = hook ? hook->pid.kill() : pid.kill(); | ||||
| 
 | ||||
|   debug(format("builder process for '%1%' finished") % drvPath); | ||||
|   DLOG(INFO) << "builder process for '" << drvPath << "' finished"; | ||||
| 
 | ||||
|   result.timesBuilt++; | ||||
|   result.stopTime = time(0); | ||||
|  | @ -1552,10 +1537,11 @@ MakeError(NotDeterministic, BuildError) | |||
|         for (auto& line : logTail) msg += "\n  " + line; | ||||
|       } | ||||
| 
 | ||||
|       if (diskFull) | ||||
|       if (diskFull) { | ||||
|         msg += | ||||
|             "\nnote: build failure may have been caused by lack of free disk " | ||||
|             "space"; | ||||
|       } | ||||
| 
 | ||||
|       throw BuildError(msg); | ||||
|     } | ||||
|  | @ -1565,10 +1551,8 @@ MakeError(NotDeterministic, BuildError) | |||
|     registerOutputs(); | ||||
| 
 | ||||
|     if (settings.postBuildHook != "") { | ||||
|       Activity act(*logger, lvlInfo, actPostBuildHook, | ||||
|                    fmt("running post-build-hook '%s'", settings.postBuildHook), | ||||
|                    Logger::Fields{drvPath}); | ||||
|       PushActivity pact(act.id); | ||||
|       LOG(INFO) << "running post-build-hook '" << settings.postBuildHook | ||||
|                 << "' [" << drvPath << "]"; | ||||
|       auto outputPaths = drv->outputPaths(); | ||||
|       std::map<std::string, std::string> hookEnvironment = getEnv(); | ||||
| 
 | ||||
|  | @ -1580,11 +1564,8 @@ MakeError(NotDeterministic, BuildError) | |||
|       opts.environment = hookEnvironment; | ||||
| 
 | ||||
|       struct LogSink : Sink { | ||||
|         Activity& act; | ||||
|         std::string currentLine; | ||||
| 
 | ||||
|         LogSink(Activity& act) : act(act) {} | ||||
| 
 | ||||
|         void operator()(const unsigned char* data, size_t len) override { | ||||
|           for (size_t i = 0; i < len; i++) { | ||||
|             auto c = data[i]; | ||||
|  | @ -1599,9 +1580,7 @@ MakeError(NotDeterministic, BuildError) | |||
| 
 | ||||
|         void flushLine() { | ||||
|           if (settings.verboseBuild) { | ||||
|             printError("post-build-hook: " + currentLine); | ||||
|           } else { | ||||
|             act.result(resPostBuildLogLine, currentLine); | ||||
|             LOG(ERROR) << "post-build-hook: " << currentLine; | ||||
|           } | ||||
|           currentLine.clear(); | ||||
|         } | ||||
|  | @ -1613,7 +1592,7 @@ MakeError(NotDeterministic, BuildError) | |||
|           } | ||||
|         } | ||||
|       }; | ||||
|       LogSink sink(act); | ||||
|       LogSink sink; | ||||
| 
 | ||||
|       opts.standardOut = &sink; | ||||
|       opts.mergeStderrToStdout = true; | ||||
|  | @ -1649,7 +1628,7 @@ MakeError(NotDeterministic, BuildError) | |||
|     outputLocks.unlock(); | ||||
| 
 | ||||
|   } catch (BuildError& e) { | ||||
|     printError(e.msg()); | ||||
|     LOG(ERROR) << e.msg(); | ||||
| 
 | ||||
|     outputLocks.unlock(); | ||||
| 
 | ||||
|  | @ -1696,38 +1675,37 @@ HookReply DerivationGoal::tryBuildHook() { | |||
|     string reply; | ||||
|     while (true) { | ||||
|       string s = readLine(worker.hook->fromHook.readSide.get()); | ||||
|       if (handleJSONLogMessage(s, worker.act, worker.hook->activities, true)) | ||||
|         ; | ||||
|       else if (string(s, 0, 2) == "# ") { | ||||
|       if (string(s, 0, 2) == "# ") { | ||||
|         reply = string(s, 2); | ||||
|         break; | ||||
|       } else { | ||||
|         s += "\n"; | ||||
|         writeToStderr(s); | ||||
|         std::cerr << s; | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     debug(format("hook reply is '%1%'") % reply); | ||||
|     DLOG(INFO) << "hook reply is " << reply; | ||||
| 
 | ||||
|     if (reply == "decline") | ||||
|     if (reply == "decline") { | ||||
|       return rpDecline; | ||||
|     else if (reply == "decline-permanently") { | ||||
|     } else if (reply == "decline-permanently") { | ||||
|       worker.tryBuildHook = false; | ||||
|       worker.hook = 0; | ||||
|       return rpDecline; | ||||
|     } else if (reply == "postpone") | ||||
|     } else if (reply == "postpone") { | ||||
|       return rpPostpone; | ||||
|     else if (reply != "accept") | ||||
|     } else if (reply != "accept") { | ||||
|       throw Error(format("bad hook reply '%1%'") % reply); | ||||
| 
 | ||||
|     } | ||||
|   } catch (SysError& e) { | ||||
|     if (e.errNo == EPIPE) { | ||||
|       printError("build hook died unexpectedly: %s", | ||||
|                  chomp(drainFD(worker.hook->fromHook.readSide.get()))); | ||||
|       LOG(ERROR) << "build hook died unexpectedly: " | ||||
|                  << chomp(drainFD(worker.hook->fromHook.readSide.get())); | ||||
|       worker.hook = 0; | ||||
|       return rpDecline; | ||||
|     } else | ||||
|     } else { | ||||
|       throw; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   hook = std::move(worker.hook); | ||||
|  | @ -2007,8 +1985,7 @@ void DerivationGoal::startBuilder() { | |||
|     /* Clean up the chroot directory automatically. */ | ||||
|     autoDelChroot = std::make_shared<AutoDelete>(chrootRootDir); | ||||
| 
 | ||||
|     printMsg(lvlChatty, | ||||
|              format("setting up chroot environment in '%1%'") % chrootRootDir); | ||||
|     DLOG(INFO) << "setting up chroot environment in '" << chrootRootDir << "'"; | ||||
| 
 | ||||
|     if (mkdir(chrootRootDir.c_str(), 0750) == -1) | ||||
|       throw SysError(format("cannot create '%1%'") % chrootRootDir); | ||||
|  | @ -2073,7 +2050,7 @@ void DerivationGoal::startBuilder() { | |||
|         dirsInChroot[i] = r; | ||||
|       else { | ||||
|         Path p = chrootRootDir + i; | ||||
|         debug("linking '%1%' to '%2%'", p, r); | ||||
|         DLOG(INFO) << "linking '" << p << "' to '" << r << "'"; | ||||
|         if (link(r.c_str(), p.c_str()) == -1) { | ||||
|           /* Hard-linking fails if we exceed the maximum
 | ||||
|              link count on a file (e.g. 32000 of ext3), | ||||
|  | @ -2132,8 +2109,7 @@ void DerivationGoal::startBuilder() { | |||
| 
 | ||||
|   if (useChroot && settings.preBuildHook != "" && | ||||
|       dynamic_cast<Derivation*>(drv.get())) { | ||||
|     printMsg(lvlChatty, | ||||
|              format("executing pre-build hook '%1%'") % settings.preBuildHook); | ||||
|     DLOG(INFO) << "executing pre-build hook '" << settings.preBuildHook << "'"; | ||||
|     auto args = | ||||
|         useChroot ? Strings({drvPath, chrootRootDir}) : Strings({drvPath}); | ||||
|     enum BuildHookState { stBegin, stExtraChrootDirs }; | ||||
|  | @ -2165,7 +2141,7 @@ void DerivationGoal::startBuilder() { | |||
|   } | ||||
| 
 | ||||
|   /* Run the builder. */ | ||||
|   printMsg(lvlChatty, format("executing builder '%1%'") % drv->builder); | ||||
|   DLOG(INFO) << "executing builder '" << drv->builder << "'"; | ||||
| 
 | ||||
|   /* Create the log file. */ | ||||
|   Path logFile = openLogFile(); | ||||
|  | @ -2364,7 +2340,7 @@ void DerivationGoal::startBuilder() { | |||
|       if (msg.size() == 1) break; | ||||
|       throw Error(string(msg, 1)); | ||||
|     } | ||||
|     debug(msg); | ||||
|     DLOG(INFO) << msg; | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  | @ -2604,10 +2580,10 @@ void setupSeccomp() { | |||
|     throw SysError("unable to add X32 seccomp architecture"); | ||||
| 
 | ||||
|   if (nativeSystem == "aarch64-linux" && | ||||
|       seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0) | ||||
|     printError( | ||||
|         "unable to add ARM seccomp architecture; this may result in spurious " | ||||
|         "build failures if running 32-bit ARM processes"); | ||||
|       seccomp_arch_add(ctx, SCMP_ARCH_ARM) != 0) { | ||||
|     LOG(ERROR) << "unable to add ARM seccomp architecture; this may result in " | ||||
|                << "spurious build failures if running 32-bit ARM processes"; | ||||
|   } | ||||
| 
 | ||||
|   /* Prevent builders from creating setuid/setgid binaries. */ | ||||
|   for (int perm : {S_ISUID, S_ISGID}) { | ||||
|  | @ -2772,7 +2748,7 @@ void DerivationGoal::runChild() { | |||
|          environment. */ | ||||
|       auto doBind = [&](const Path& source, const Path& target, | ||||
|                         bool optional = false) { | ||||
|         debug(format("bind mounting '%1%' to '%2%'") % source % target); | ||||
|         DLOG(INFO) << "bind mounting '" << source << "' to '" << target << "'"; | ||||
|         struct stat st; | ||||
|         if (stat(source.c_str(), &st) == -1) { | ||||
|           if (optional && errno == ENOENT) | ||||
|  | @ -3082,8 +3058,6 @@ void DerivationGoal::runChild() { | |||
|     /* Execute the program.  This should not return. */ | ||||
|     if (drv->isBuiltin()) { | ||||
|       try { | ||||
|         logger = makeJSONLogger(*logger); | ||||
| 
 | ||||
|         BasicDerivation drv2(*drv); | ||||
|         for (auto& e : drv2.env) | ||||
|           e.second = rewriteStrings(e.second, inputRewrites); | ||||
|  | @ -3215,8 +3189,7 @@ void DerivationGoal::registerOutputs() { | |||
|     /* Apply hash rewriting if necessary. */ | ||||
|     bool rewritten = false; | ||||
|     if (!outputRewrites.empty()) { | ||||
|       printError(format("warning: rewriting hashes in '%1%'; cross fingers") % | ||||
|                  path); | ||||
|       LOG(WARNING) << "rewriting hashes in '" << path << "'; cross fingers"; | ||||
| 
 | ||||
|       /* Canonicalise first.  This ensures that the path we're
 | ||||
|          rewriting doesn't contain a hard link to /etc/shadow or | ||||
|  | @ -3300,7 +3273,7 @@ void DerivationGoal::registerOutputs() { | |||
|        contained in it.  Compute the SHA-256 NAR hash at the same | ||||
|        time.  The hash is stored in the database so that we can | ||||
|        verify later on whether nobody has messed with the store. */ | ||||
|     debug("scanning for references inside '%1%'", path); | ||||
|     DLOG(INFO) << "scanning for references inside '" << path << "'"; | ||||
|     HashResult hash; | ||||
|     PathSet references = scanForReferences(actualPath, allPaths, hash); | ||||
| 
 | ||||
|  | @ -3346,9 +3319,9 @@ void DerivationGoal::registerOutputs() { | |||
|     for (auto& i : inputPaths) { | ||||
|       PathSet::iterator j = references.find(i); | ||||
|       if (j == references.end()) | ||||
|         debug(format("unreferenced input: '%1%'") % i); | ||||
|         DLOG(INFO) << "unreferenced input: '" << i << "'"; | ||||
|       else | ||||
|         debug(format("referenced input: '%1%'") % i); | ||||
|         DLOG(INFO) << "referenced input: '" << i << "'"; | ||||
|     } | ||||
| 
 | ||||
|     if (curRound == nrRounds) { | ||||
|  | @ -3397,9 +3370,11 @@ void DerivationGoal::registerOutputs() { | |||
|                        buildUser ? buildUser->getGID() : getgid(), prev, | ||||
|                        i->second.path, drvPath, tmpDir); | ||||
| 
 | ||||
|         if (settings.enforceDeterminism) throw NotDeterministic(msg); | ||||
|         if (settings.enforceDeterminism) { | ||||
|           throw NotDeterministic(msg); | ||||
|         } | ||||
| 
 | ||||
|         printError(msg); | ||||
|         LOG(ERROR) << msg; | ||||
|         curRound = nrRounds;  // we know enough, bail out early
 | ||||
|       } | ||||
|   } | ||||
|  | @ -3644,8 +3619,7 @@ void DerivationGoal::deleteTmpDir(bool force) { | |||
|     /* Don't keep temporary directories for builtins because they
 | ||||
|        might have privileged stuff (like a copy of netrc). */ | ||||
|     if (settings.keepFailed && !force && !drv->isBuiltin()) { | ||||
|       printError(format("note: keeping build directory '%2%'") % drvPath % | ||||
|                  tmpDir); | ||||
|       LOG(INFO) << "note: keeping build directory '" << tmpDir << "'"; | ||||
|       chmod(tmpDir.c_str(), 0755); | ||||
|     } else | ||||
|       deletePath(tmpDir); | ||||
|  | @ -3658,20 +3632,20 @@ void DerivationGoal::handleChildOutput(int fd, const string& data) { | |||
|       (!hook && fd == builderOut.readSide.get())) { | ||||
|     logSize += data.size(); | ||||
|     if (settings.maxLogSize && logSize > settings.maxLogSize) { | ||||
|       printError( | ||||
|           format("%1% killed after writing more than %2% bytes of log output") % | ||||
|           getName() % settings.maxLogSize); | ||||
|       LOG(ERROR) << getName() << " killed after writing more than " | ||||
|                  << settings.maxLogSize << " bytes of log output"; | ||||
|       killChild(); | ||||
|       done(BuildResult::LogLimitExceeded); | ||||
|       return; | ||||
|     } | ||||
| 
 | ||||
|     for (auto c : data) | ||||
|       if (c == '\r') | ||||
|       if (c == '\r') { | ||||
|         currentLogLinePos = 0; | ||||
|       else if (c == '\n') | ||||
|       } else if (c == '\n') { | ||||
|         flushLine(); | ||||
|       else { | ||||
|       } else { | ||||
|         if (currentLogLinePos >= currentLogLine.size()) | ||||
|           currentLogLine.resize(currentLogLinePos + 1); | ||||
|         currentLogLine[currentLogLinePos++] = c; | ||||
|  | @ -3683,11 +3657,10 @@ void DerivationGoal::handleChildOutput(int fd, const string& data) { | |||
|   if (hook && fd == hook->fromHook.readSide.get()) { | ||||
|     for (auto c : data) | ||||
|       if (c == '\n') { | ||||
|         handleJSONLogMessage(currentHookLine, worker.act, hook->activities, | ||||
|                              true); | ||||
|         currentHookLine.clear(); | ||||
|       } else | ||||
|       } else { | ||||
|         currentHookLine += c; | ||||
|       } | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  | @ -3697,19 +3670,11 @@ void DerivationGoal::handleEOF(int fd) { | |||
| } | ||||
| 
 | ||||
| void DerivationGoal::flushLine() { | ||||
|   if (handleJSONLogMessage(currentLogLine, *act, builderActivities, false)) | ||||
|     ; | ||||
| 
 | ||||
|   if (settings.verboseBuild && (settings.printRepeatedBuilds || curRound == 1)) | ||||
|     LOG(INFO) << currentLogLine; | ||||
|   else { | ||||
|     if (settings.verboseBuild && | ||||
|         (settings.printRepeatedBuilds || curRound == 1)) | ||||
|       printError(currentLogLine); | ||||
|     else { | ||||
|       logTail.push_back(currentLogLine); | ||||
|       if (logTail.size() > settings.logLines) logTail.pop_front(); | ||||
|     } | ||||
| 
 | ||||
|     act->result(resBuildLogLine, currentLogLine); | ||||
|     logTail.push_back(currentLogLine); | ||||
|     if (logTail.size() > settings.logLines) logTail.pop_front(); | ||||
|   } | ||||
| 
 | ||||
|   currentLogLine = ""; | ||||
|  | @ -3719,10 +3684,14 @@ void DerivationGoal::flushLine() { | |||
| PathSet DerivationGoal::checkPathValidity(bool returnValid, bool checkHash) { | ||||
|   PathSet result; | ||||
|   for (auto& i : drv->outputs) { | ||||
|     if (!wantOutput(i.first, wantedOutputs)) continue; | ||||
|     if (!wantOutput(i.first, wantedOutputs)) { | ||||
|       continue; | ||||
|     } | ||||
|     bool good = worker.store.isValidPath(i.second.path) && | ||||
|                 (!checkHash || worker.pathContentsGood(i.second.path)); | ||||
|     if (good == returnValid) result.insert(i.second.path); | ||||
|     if (good == returnValid) { | ||||
|       result.insert(i.second.path); | ||||
|     } | ||||
|   } | ||||
|   return result; | ||||
| } | ||||
|  | @ -3758,8 +3727,6 @@ void DerivationGoal::done(BuildResult::Status status, const string& msg) { | |||
|   } else { | ||||
|     if (status != BuildResult::DependencyFailed) worker.failedBuilds++; | ||||
|   } | ||||
| 
 | ||||
|   worker.updateProgress(); | ||||
| } | ||||
| 
 | ||||
| //////////////////////////////////////////////////////////////////////
 | ||||
|  | @ -3891,9 +3858,9 @@ void SubstitutionGoal::tryNext() { | |||
|   if (subs.size() == 0) { | ||||
|     /* None left.  Terminate this goal and let someone else deal
 | ||||
|        with it. */ | ||||
|     debug(format("path '%1%' is required, but there is no substituter that can " | ||||
|                  "build it") % | ||||
|           storePath); | ||||
|     DLOG(WARNING) | ||||
|         << "path '" << storePath | ||||
|         << "' is required, but there is no substituter that can build it"; | ||||
| 
 | ||||
|     /* Hack: don't indicate failure if there were no substituters.
 | ||||
|        In that case the calling derivation should just do a | ||||
|  | @ -3902,7 +3869,6 @@ void SubstitutionGoal::tryNext() { | |||
| 
 | ||||
|     if (substituterFailed) { | ||||
|       worker.failedSubstitutions++; | ||||
|       worker.updateProgress(); | ||||
|     } | ||||
| 
 | ||||
|     return; | ||||
|  | @ -3930,7 +3896,7 @@ void SubstitutionGoal::tryNext() { | |||
|     throw; | ||||
|   } catch (Error& e) { | ||||
|     if (settings.tryFallback) { | ||||
|       printError(e.what()); | ||||
|       LOG(ERROR) << e.what(); | ||||
|       tryNext(); | ||||
|       return; | ||||
|     } | ||||
|  | @ -3949,17 +3915,14 @@ void SubstitutionGoal::tryNext() { | |||
|                 worker.expectedDownloadSize, narInfo->fileSize) | ||||
|           : nullptr; | ||||
| 
 | ||||
|   worker.updateProgress(); | ||||
| 
 | ||||
|   /* Bail out early if this substituter lacks a valid
 | ||||
|      signature. LocalStore::addToStore() also checks for this, but | ||||
|      only after we've downloaded the path. */ | ||||
|   if (worker.store.requireSigs && !sub->isTrusted && | ||||
|       !info->checkSignatures(worker.store, worker.store.getPublicKeys())) { | ||||
|     printError( | ||||
|         "warning: substituter '%s' does not have a valid signature for path " | ||||
|         "'%s'", | ||||
|         sub->getUri(), storePath); | ||||
|     LOG(WARNING) << "substituter '" << sub->getUri() | ||||
|                  << "' does not have a valid signature for path '" << storePath | ||||
|                  << "'"; | ||||
|     tryNext(); | ||||
|     return; | ||||
|   } | ||||
|  | @ -3980,8 +3943,8 @@ void SubstitutionGoal::referencesValid() { | |||
|   trace("all references realised"); | ||||
| 
 | ||||
|   if (nrFailed > 0) { | ||||
|     debug(format("some references of path '%1%' could not be realised") % | ||||
|           storePath); | ||||
|     DLOG(WARNING) << "some references of path '" << storePath | ||||
|                   << "' could not be realised"; | ||||
|     amDone(nrNoSubstituters > 0 || nrIncompleteClosure > 0 ? ecIncompleteClosure | ||||
|                                                            : ecFailed); | ||||
|     return; | ||||
|  | @ -4010,7 +3973,6 @@ void SubstitutionGoal::tryToRun() { | |||
| 
 | ||||
|   maintainRunningSubstitutions = | ||||
|       std::make_unique<MaintainCount<uint64_t>>(worker.runningSubstitutions); | ||||
|   worker.updateProgress(); | ||||
| 
 | ||||
|   outPipe.create(); | ||||
| 
 | ||||
|  | @ -4021,10 +3983,6 @@ void SubstitutionGoal::tryToRun() { | |||
|       /* Wake up the worker loop when we're done. */ | ||||
|       Finally updateStats([this]() { outPipe.writeSide = -1; }); | ||||
| 
 | ||||
|       Activity act(*logger, actSubstitute, | ||||
|                    Logger::Fields{storePath, sub->getUri()}); | ||||
|       PushActivity pact(act.id); | ||||
| 
 | ||||
|       copyStorePath(ref<Store>(sub), | ||||
|                     ref<Store>(worker.store.shared_from_this()), storePath, | ||||
|                     repair, sub->isTrusted ? NoCheckSigs : CheckSigs); | ||||
|  | @ -4050,7 +4008,7 @@ void SubstitutionGoal::finished() { | |||
|   try { | ||||
|     promise.get_future().get(); | ||||
|   } catch (std::exception& e) { | ||||
|     printError(e.what()); | ||||
|     LOG(ERROR) << e.what(); | ||||
| 
 | ||||
|     /* Cause the parent build to fail unless --fallback is given,
 | ||||
|        or the substitute has disappeared. The latter case behaves | ||||
|  | @ -4071,8 +4029,7 @@ void SubstitutionGoal::finished() { | |||
| 
 | ||||
|   worker.markContentsGood(storePath); | ||||
| 
 | ||||
|   printMsg(lvlChatty, | ||||
|            format("substitution of path '%1%' succeeded") % storePath); | ||||
|   DLOG(INFO) << "substitution of path '" << storePath << "' succeeded"; | ||||
| 
 | ||||
|   maintainRunningSubstitutions.reset(); | ||||
| 
 | ||||
|  | @ -4088,8 +4045,6 @@ void SubstitutionGoal::finished() { | |||
|   worker.doneNarSize += maintainExpectedNar->delta; | ||||
|   maintainExpectedNar.reset(); | ||||
| 
 | ||||
|   worker.updateProgress(); | ||||
| 
 | ||||
|   amDone(ecSuccess); | ||||
| } | ||||
| 
 | ||||
|  | @ -4103,11 +4058,7 @@ void SubstitutionGoal::handleEOF(int fd) { | |||
| 
 | ||||
| static bool working = false; | ||||
| 
 | ||||
| Worker::Worker(LocalStore& store) | ||||
|     : act(*logger, actRealise), | ||||
|       actDerivations(*logger, actBuilds), | ||||
|       actSubstitutions(*logger, actCopyPaths), | ||||
|       store(store) { | ||||
| Worker::Worker(LocalStore& store) : store(store) { | ||||
|   /* Debugging: prevent recursive workers. */ | ||||
|   if (working) abort(); | ||||
|   working = true; | ||||
|  | @ -4242,7 +4193,7 @@ void Worker::childTerminated(Goal* goal, bool wakeSleepers) { | |||
| } | ||||
| 
 | ||||
| void Worker::waitForBuildSlot(GoalPtr goal) { | ||||
|   debug("wait for build slot"); | ||||
|   DLOG(INFO) << "wait for build slot"; | ||||
|   if (getNrLocalBuilds() < settings.maxBuildJobs) | ||||
|     wakeUp(goal); /* we can do it right away */ | ||||
|   else | ||||
|  | @ -4250,19 +4201,19 @@ void Worker::waitForBuildSlot(GoalPtr goal) { | |||
| } | ||||
| 
 | ||||
| void Worker::waitForAnyGoal(GoalPtr goal) { | ||||
|   debug("wait for any goal"); | ||||
|   DLOG(INFO) << "wait for any goal"; | ||||
|   addToWeakGoals(waitingForAnyGoal, goal); | ||||
| } | ||||
| 
 | ||||
| void Worker::waitForAWhile(GoalPtr goal) { | ||||
|   debug("wait for a while"); | ||||
|   DLOG(INFO) << "wait for a while"; | ||||
|   addToWeakGoals(waitingForAWhile, goal); | ||||
| } | ||||
| 
 | ||||
| void Worker::run(const Goals& _topGoals) { | ||||
|   for (auto& i : _topGoals) topGoals.insert(i); | ||||
| 
 | ||||
|   debug("entered goal loop"); | ||||
|   DLOG(INFO) << "entered goal loop"; | ||||
| 
 | ||||
|   while (1) { | ||||
|     checkInterrupt(); | ||||
|  | @ -4308,7 +4259,7 @@ void Worker::run(const Goals& _topGoals) { | |||
| } | ||||
| 
 | ||||
| void Worker::waitForInput() { | ||||
|   printMsg(lvlVomit, "waiting for children"); | ||||
|   DLOG(INFO) << "waiting for children"; | ||||
| 
 | ||||
|   /* Process output from the file descriptors attached to the
 | ||||
|      children, namely log output and output path creation commands. | ||||
|  | @ -4350,7 +4301,7 @@ void Worker::waitForInput() { | |||
|   if (!waitingForAWhile.empty()) { | ||||
|     useTimeout = true; | ||||
|     if (lastWokenUp == steady_time_point::min()) | ||||
|       printError("waiting for locks or build slots..."); | ||||
|       DLOG(WARNING) << "waiting for locks or build slots..."; | ||||
|     if (lastWokenUp == steady_time_point::min() || lastWokenUp > before) | ||||
|       lastWokenUp = before; | ||||
|     timeout.tv_sec = std::max( | ||||
|  | @ -4361,7 +4312,9 @@ void Worker::waitForInput() { | |||
|   } else | ||||
|     lastWokenUp = steady_time_point::min(); | ||||
| 
 | ||||
|   if (useTimeout) vomit("sleeping %d seconds", timeout.tv_sec); | ||||
|   if (useTimeout) { | ||||
|     DLOG(INFO) << "sleeping " << timeout.tv_sec << " seconds"; | ||||
|   } | ||||
| 
 | ||||
|   /* Use select() to wait for the input side of any logger pipe to
 | ||||
|      become `available'.  Note that `available' (i.e., non-blocking) | ||||
|  | @ -4403,15 +4356,14 @@ void Worker::waitForInput() { | |||
|         // FIXME: is there a cleaner way to handle pt close
 | ||||
|         // than EIO? Is this even standard?
 | ||||
|         if (rd == 0 || (rd == -1 && errno == EIO)) { | ||||
|           debug(format("%1%: got EOF") % goal->getName()); | ||||
|           DLOG(WARNING) << goal->getName() << ": got EOF"; | ||||
|           goal->handleEOF(k); | ||||
|           j->fds.erase(k); | ||||
|         } else if (rd == -1) { | ||||
|           if (errno != EINTR) | ||||
|             throw SysError("%s: read failed", goal->getName()); | ||||
|         } else { | ||||
|           printMsg(lvlVomit, | ||||
|                    format("%1%: read %2% bytes") % goal->getName() % rd); | ||||
|           DLOG(INFO) << goal->getName() << ": read " << rd << " bytes"; | ||||
|           string data((char*)buffer.data(), rd); | ||||
|           j->lastOutput = after; | ||||
|           goal->handleChildOutput(k, data); | ||||
|  | @ -4422,8 +4374,8 @@ void Worker::waitForInput() { | |||
|     if (goal->getExitCode() == Goal::ecBusy && 0 != settings.maxSilentTime && | ||||
|         j->respectTimeouts && | ||||
|         after - j->lastOutput >= std::chrono::seconds(settings.maxSilentTime)) { | ||||
|       printError(format("%1% timed out after %2% seconds of silence") % | ||||
|                  goal->getName() % settings.maxSilentTime); | ||||
|       LOG(ERROR) << goal->getName() << " timed out after " | ||||
|                  << settings.maxSilentTime << " seconds of silence"; | ||||
|       goal->timedOut(); | ||||
|     } | ||||
| 
 | ||||
|  | @ -4431,8 +4383,8 @@ void Worker::waitForInput() { | |||
|              0 != settings.buildTimeout && j->respectTimeouts && | ||||
|              after - j->timeStarted >= | ||||
|                  std::chrono::seconds(settings.buildTimeout)) { | ||||
|       printError(format("%1% timed out after %2% seconds") % goal->getName() % | ||||
|                  settings.buildTimeout); | ||||
|       LOG(ERROR) << goal->getName() << " timed out after " | ||||
|                  << settings.buildTimeout << " seconds"; | ||||
|       goal->timedOut(); | ||||
|     } | ||||
|   } | ||||
|  | @ -4473,7 +4425,7 @@ unsigned int Worker::exitStatus() { | |||
| bool Worker::pathContentsGood(const Path& path) { | ||||
|   std::map<Path, bool>::iterator i = pathContentsGoodCache.find(path); | ||||
|   if (i != pathContentsGoodCache.end()) return i->second; | ||||
|   printInfo(format("checking path '%1%'...") % path); | ||||
|   LOG(INFO) << "checking path '" << path << "'..."; | ||||
|   auto info = store.queryPathInfo(path); | ||||
|   bool res; | ||||
|   if (!pathExists(path)) | ||||
|  | @ -4484,7 +4436,9 @@ bool Worker::pathContentsGood(const Path& path) { | |||
|     res = info->narHash == nullHash || info->narHash == current.first; | ||||
|   } | ||||
|   pathContentsGoodCache[path] = res; | ||||
|   if (!res) printError(format("path '%1%' is corrupted or missing!") % path); | ||||
|   if (!res) { | ||||
|     LOG(ERROR) << "path '" << path << "' is corrupted or missing!"; | ||||
|   } | ||||
|   return res; | ||||
| } | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,4 +1,5 @@ | |||
| #include <fcntl.h> | ||||
| #include <glog/logging.h> | ||||
| #include <sys/stat.h> | ||||
| #include <sys/types.h> | ||||
| #include <algorithm> | ||||
|  | @ -22,10 +23,8 @@ static void createLinks(const Path& srcDir, const Path& dstDir, int priority) { | |||
|     srcFiles = readDirectory(srcDir); | ||||
|   } catch (SysError& e) { | ||||
|     if (e.errNo == ENOTDIR) { | ||||
|       printError( | ||||
|           "warning: not including '%s' in the user environment because it's " | ||||
|           "not a directory", | ||||
|           srcDir); | ||||
|       LOG(ERROR) << "warning: not including '" << srcDir | ||||
|                  << "' in the user environment because it's not a directory"; | ||||
|       return; | ||||
|     } | ||||
|     throw; | ||||
|  | @ -43,7 +42,7 @@ static void createLinks(const Path& srcDir, const Path& dstDir, int priority) { | |||
|         throw SysError("getting status of '%1%'", srcFile); | ||||
|     } catch (SysError& e) { | ||||
|       if (e.errNo == ENOENT || e.errNo == ENOTDIR) { | ||||
|         printError("warning: skipping dangling symlink '%s'", dstFile); | ||||
|         LOG(ERROR) << "warning: skipping dangling symlink '" << dstFile << "'"; | ||||
|         continue; | ||||
|       } | ||||
|       throw; | ||||
|  | @ -200,7 +199,7 @@ void builtinBuildenv(const BasicDerivation& drv) { | |||
|     for (const auto& pkgDir : pkgDirs) addPkg(pkgDir, priorityCounter++); | ||||
|   } | ||||
| 
 | ||||
|   printError("created %d symlinks in user environment", symlinks); | ||||
|   LOG(INFO) << "created " << symlinks << " symlinks in user environment"; | ||||
| 
 | ||||
|   createSymlink(getAttr("manifest"), out + "/manifest.nix"); | ||||
| } | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| #include <glog/logging.h> | ||||
| #include "archive.hh" | ||||
| #include "builtins.hh" | ||||
| #include "compression.hh" | ||||
|  | @ -66,7 +67,7 @@ void builtinFetchurl(const BasicDerivation& drv, const std::string& netrcData) { | |||
|               h.to_string(Base16, false)); | ||||
|         return; | ||||
|       } catch (Error& e) { | ||||
|         debug(e.what()); | ||||
|         LOG(ERROR) << e.what(); | ||||
|       } | ||||
| 
 | ||||
|   /* Otherwise try the specified URL. */ | ||||
|  |  | |||
71 third_party/nix/src/libstore/download.cc vendored
							|  | @ -15,6 +15,7 @@ | |||
| 
 | ||||
| #include <curl/curl.h> | ||||
| #include <fcntl.h> | ||||
| #include <glog/logging.h> | ||||
| #include <unistd.h> | ||||
| #include <algorithm> | ||||
| #include <cmath> | ||||
|  | @ -50,7 +51,6 @@ struct CurlDownloader : public Downloader { | |||
|     CurlDownloader& downloader; | ||||
|     DownloadRequest request; | ||||
|     DownloadResult result; | ||||
|     Activity act; | ||||
|     bool done = false;  // whether either the success or failure function has
 | ||||
|                         // been called
 | ||||
|     Callback<DownloadResult> callback; | ||||
|  | @ -77,10 +77,6 @@ struct CurlDownloader : public Downloader { | |||
|                  Callback<DownloadResult>&& callback) | ||||
|         : downloader(downloader), | ||||
|           request(request), | ||||
|           act(*logger, lvlTalkative, actDownload, | ||||
|               fmt(request.data ? "uploading '%s'" : "downloading '%s'", | ||||
|                   request.uri), | ||||
|               {request.uri}, request.parentAct), | ||||
|           callback(std::move(callback)), | ||||
|           finalSink([this](const unsigned char* data, size_t len) { | ||||
|             if (this->request.dataCallback) { | ||||
|  | @ -97,6 +93,9 @@ struct CurlDownloader : public Downloader { | |||
|             } else | ||||
|               this->result.data->append((char*)data, len); | ||||
|           }) { | ||||
|       LOG(INFO) << (request.data ? "uploading '" : "downloading '") | ||||
|                 << request.uri << "'"; | ||||
| 
 | ||||
|       if (!request.expectedETag.empty()) | ||||
|         requestHeaders = curl_slist_append( | ||||
|             requestHeaders, ("If-None-Match: " + request.expectedETag).c_str()); | ||||
|  | @ -162,8 +161,7 @@ struct CurlDownloader : public Downloader { | |||
|     size_t headerCallback(void* contents, size_t size, size_t nmemb) { | ||||
|       size_t realSize = size * nmemb; | ||||
|       std::string line((char*)contents, realSize); | ||||
|       printMsg(lvlVomit, | ||||
|                format("got header for '%s': %s") % request.uri % trim(line)); | ||||
|       DLOG(INFO) << "got header for '" << request.uri << "': " << trim(line); | ||||
|       if (line.compare(0, 5, "HTTP/") == 0) {  // new response starts
 | ||||
|         result.etag = ""; | ||||
|         auto ss = tokenizeString<vector<string>>(line, " "); | ||||
|  | @ -184,8 +182,8 @@ struct CurlDownloader : public Downloader { | |||
|                down the connection because we already have the | ||||
|                data. */ | ||||
|             if (result.etag == request.expectedETag && status == "200") { | ||||
|               debug(format( | ||||
|                   "shutting down on 200 HTTP response with expected ETag")); | ||||
|               DLOG(INFO) | ||||
|                   << "shutting down on 200 HTTP response with expected ETag"; | ||||
|               return 0; | ||||
|             } | ||||
|           } else if (name == "content-encoding") | ||||
|  | @ -205,7 +203,7 @@ struct CurlDownloader : public Downloader { | |||
| 
 | ||||
|     int progressCallback(double dltotal, double dlnow) { | ||||
|       try { | ||||
|         act.progress(dlnow, dltotal); | ||||
|         // TODO(tazjin): this had activity nonsense, clean it up
 | ||||
|       } catch (nix::Interrupted&) { | ||||
|         assert(_isInterrupted); | ||||
|       } | ||||
|  | @ -220,8 +218,9 @@ struct CurlDownloader : public Downloader { | |||
| 
 | ||||
|     static int debugCallback(CURL* handle, curl_infotype type, char* data, | ||||
|                              size_t size, void* userptr) { | ||||
|       if (type == CURLINFO_TEXT) | ||||
|         vomit("curl: %s", chomp(std::string(data, size))); | ||||
|       if (type == CURLINFO_TEXT) { | ||||
|         DLOG(INFO) << "curl: " << chomp(std::string(data, size)); | ||||
|       } | ||||
|       return 0; | ||||
|     } | ||||
| 
 | ||||
|  | @ -245,11 +244,12 @@ struct CurlDownloader : public Downloader { | |||
| 
 | ||||
|       curl_easy_reset(req); | ||||
| 
 | ||||
|       if (verbosity >= lvlVomit) { | ||||
|         curl_easy_setopt(req, CURLOPT_VERBOSE, 1); | ||||
|         curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION, | ||||
|                          DownloadItem::debugCallback); | ||||
|       } | ||||
|       // TODO(tazjin): Add an Abseil flag for this
 | ||||
|       // if (verbosity >= lvlVomit) {
 | ||||
|       //   curl_easy_setopt(req, CURLOPT_VERBOSE, 1);
 | ||||
|       //   curl_easy_setopt(req, CURLOPT_DEBUGFUNCTION,
 | ||||
|       //                    DownloadItem::debugCallback);
 | ||||
|       // }
 | ||||
| 
 | ||||
|       curl_easy_setopt(req, CURLOPT_URL, request.uri.c_str()); | ||||
|       curl_easy_setopt(req, CURLOPT_FOLLOWLOCATION, 1L); | ||||
|  | @ -329,10 +329,10 @@ struct CurlDownloader : public Downloader { | |||
|       curl_easy_getinfo(req, CURLINFO_EFFECTIVE_URL, &effectiveUriCStr); | ||||
|       if (effectiveUriCStr) result.effectiveUri = effectiveUriCStr; | ||||
| 
 | ||||
|       debug( | ||||
|           "finished %s of '%s'; curl status = %d, HTTP status = %d, body = %d " | ||||
|           "bytes", | ||||
|           request.verb(), request.uri, code, httpStatus, result.bodySize); | ||||
|       DLOG(INFO) << "finished " << request.verb() << " of " << request.uri | ||||
|                  << "; curl status = " << code | ||||
|                  << ", HTTP status = " << httpStatus | ||||
|                  << ", body = " << result.bodySize << " bytes"; | ||||
| 
 | ||||
|       if (decompressionSink) { | ||||
|         try { | ||||
|  | @ -356,7 +356,6 @@ struct CurlDownloader : public Downloader { | |||
|                 httpStatus == 226 /* FTP */ || | ||||
|                 httpStatus == 0 /* other protocol */)) { | ||||
|         result.cached = httpStatus == 304; | ||||
|         act.progress(result.bodySize, result.bodySize); | ||||
|         done = true; | ||||
|         callback(std::move(result)); | ||||
|       } | ||||
|  | @ -441,11 +440,12 @@ struct CurlDownloader : public Downloader { | |||
|                    std::pow(2.0f, attempt - 1 + | ||||
|                                       std::uniform_real_distribution<>( | ||||
|                                           0.0, 0.5)(downloader.mt19937)); | ||||
|           if (writtenToSink) | ||||
|             warn("%s; retrying from offset %d in %d ms", exc.what(), | ||||
|                  writtenToSink, ms); | ||||
|           else | ||||
|             warn("%s; retrying in %d ms", exc.what(), ms); | ||||
|           if (writtenToSink) { | ||||
|             LOG(WARNING) << exc.what() << "; retrying from offset " | ||||
|                          << writtenToSink << " in " << ms << "ms"; | ||||
|           } else { | ||||
|             LOG(WARNING) << exc.what() << "; retrying in " << ms << "ms"; | ||||
|           } | ||||
|           embargo = | ||||
|               std::chrono::steady_clock::now() + std::chrono::milliseconds(ms); | ||||
|           downloader.enqueueItem(shared_from_this()); | ||||
|  | @ -565,7 +565,7 @@ struct CurlDownloader : public Downloader { | |||
|                         nextWakeup - std::chrono::steady_clock::now()) | ||||
|                         .count()) | ||||
|               : maxSleepTimeMs; | ||||
|       vomit("download thread waiting for %d ms", sleepTimeMs); | ||||
|       DLOG(INFO) << "download thread waiting for " << sleepTimeMs << " ms"; | ||||
|       mc = curl_multi_wait(curlm, extraFDs, 1, sleepTimeMs, &numfds); | ||||
|       if (mc != CURLM_OK) | ||||
|         throw nix::Error(format("unexpected error from curl_multi_wait(): %s") % | ||||
|  | @ -604,7 +604,8 @@ struct CurlDownloader : public Downloader { | |||
|       } | ||||
| 
 | ||||
|       for (auto& item : incoming) { | ||||
|         debug("starting %s of %s", item->request.verb(), item->request.uri); | ||||
|         DLOG(INFO) << "starting " << item->request.verb() << " of " | ||||
|                    << item->request.uri; | ||||
|         item->init(); | ||||
|         curl_multi_add_handle(curlm, item->req); | ||||
|         item->active = true; | ||||
|  | @ -612,7 +613,7 @@ struct CurlDownloader : public Downloader { | |||
|       } | ||||
|     } | ||||
| 
 | ||||
|     debug("download thread shutting down"); | ||||
|     DLOG(INFO) << "download thread shutting down"; | ||||
|   } | ||||
| 
 | ||||
|   void workerThreadEntry() { | ||||
|  | @ -620,7 +621,7 @@ struct CurlDownloader : public Downloader { | |||
|       workerThreadMain(); | ||||
|     } catch (nix::Interrupted& e) { | ||||
|     } catch (std::exception& e) { | ||||
|       printError("unexpected error in download thread: %s", e.what()); | ||||
|       LOG(ERROR) << "unexpected error in download thread: " << e.what(); | ||||
|     } | ||||
| 
 | ||||
|     { | ||||
|  | @ -762,7 +763,7 @@ void Downloader::download(DownloadRequest&& request, Sink& sink) { | |||
|        download thread. (Hopefully sleeping will throttle the | ||||
|        sender.) */ | ||||
|     if (state->data.size() > 1024 * 1024) { | ||||
|       debug("download buffer is full; going to sleep"); | ||||
|       DLOG(INFO) << "download buffer is full; going to sleep"; | ||||
|       state.wait_for(state->request, std::chrono::seconds(10)); | ||||
|     } | ||||
| 
 | ||||
|  | @ -870,7 +871,7 @@ CachedDownloadResult Downloader::downloadCached( | |||
|           result.effectiveUri = request.uri; | ||||
|           result.etag = ss[1]; | ||||
|         } else if (!ss[1].empty()) { | ||||
|           debug(format("verifying previous ETag '%1%'") % ss[1]); | ||||
|           DLOG(INFO) << "verifying previous ETag: " << ss[1]; | ||||
|           expectedETag = ss[1]; | ||||
|         } | ||||
|       } | ||||
|  | @ -908,7 +909,7 @@ CachedDownloadResult Downloader::downloadCached( | |||
|                 url + "\n" + res.etag + "\n" + std::to_string(time(0)) + "\n"); | ||||
|     } catch (DownloadError& e) { | ||||
|       if (storePath.empty()) throw; | ||||
|       warn("warning: %s; using cached result", e.msg()); | ||||
|       LOG(WARNING) << e.msg() << "; using cached result"; | ||||
|       result.etag = expectedETag; | ||||
|     } | ||||
|   } | ||||
|  | @ -924,7 +925,7 @@ CachedDownloadResult Downloader::downloadCached( | |||
|       if (!store->isValidPath(unpackedStorePath)) unpackedStorePath = ""; | ||||
|     } | ||||
|     if (unpackedStorePath.empty()) { | ||||
|       printInfo(format("unpacking '%1%'...") % url); | ||||
|       LOG(INFO) << "unpacking '" << url << "' ..."; | ||||
|       Path tmpDir = createTempDir(); | ||||
|       AutoDelete autoDelete(tmpDir, true); | ||||
|       // FIXME: this requires GNU tar for decompression.
 | ||||
|  |  | |||
4 third_party/nix/src/libstore/download.hh vendored
							|  | @ -46,14 +46,12 @@ struct DownloadRequest { | |||
|   bool head = false; | ||||
|   size_t tries = downloadSettings.tries; | ||||
|   unsigned int baseRetryTimeMs = 250; | ||||
|   ActivityId parentAct; | ||||
|   bool decompress = true; | ||||
|   std::shared_ptr<std::string> data; | ||||
|   std::string mimeType; | ||||
|   std::function<void(char*, size_t)> dataCallback; | ||||
| 
 | ||||
|   DownloadRequest(const std::string& uri) | ||||
|       : uri(uri), parentAct(getCurActivity()) {} | ||||
|   DownloadRequest(const std::string& uri) : uri(uri) {} | ||||
| 
 | ||||
|   std::string verb() { return data ? "upload" : "download"; } | ||||
| }; | ||||
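A note on the glog API this commit switches to: LOG(severity) statements are always compiled in, while DLOG(severity) compiles away in NDEBUG builds, which is why the old debug()/lvlTalkative messages in these hunks become DLOG(INFO). glog is normally initialised once at startup before the bulk of logging happens; the sketch below shows the usual setup, but where that initialisation lands for the Nix binaries is not part of this diff, so the main() shown here is only an assumed placement, not the commit's code.

    #include <glog/logging.h>

    int main(int argc, char** argv) {
      // Send log output to stderr instead of glog's default log files.
      FLAGS_logtostderr = true;
      // Run once, before the rest of the program starts logging.
      google::InitGoogleLogging(argv[0]);

      LOG(INFO) << "always compiled in";
      DLOG(INFO) << "compiled out when NDEBUG is defined";
      return 0;
    }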
|  |  | |||
third_party/nix/src/libstore/gc.cc (vendored): 129 changes
|  | @ -13,6 +13,7 @@ | |||
| #include "derivations.hh" | ||||
| #include "finally.hh" | ||||
| #include "globals.hh" | ||||
| #include "glog/logging.h" | ||||
| #include "local-store.hh" | ||||
| 
 | ||||
| namespace nix { | ||||
|  | @ -28,15 +29,16 @@ static string gcRootsDir = "gcroots"; | |||
| AutoCloseFD LocalStore::openGCLock(LockType lockType) { | ||||
|   Path fnGCLock = (format("%1%/%2%") % stateDir % gcLockName).str(); | ||||
| 
 | ||||
|   debug(format("acquiring global GC lock '%1%'") % fnGCLock); | ||||
|   DLOG(INFO) << "acquiring global GC lock " << fnGCLock; | ||||
| 
 | ||||
|   AutoCloseFD fdGCLock = | ||||
|       open(fnGCLock.c_str(), O_RDWR | O_CREAT | O_CLOEXEC, 0600); | ||||
|   if (!fdGCLock) | ||||
|   if (!fdGCLock) { | ||||
|     throw SysError(format("opening global GC lock '%1%'") % fnGCLock); | ||||
|   } | ||||
| 
 | ||||
|   if (!lockFile(fdGCLock.get(), lockType, false)) { | ||||
|     printError(format("waiting for the big garbage collector lock...")); | ||||
|     LOG(ERROR) << "waiting for the big garbage collector lock..."; | ||||
|     lockFile(fdGCLock.get(), lockType, true); | ||||
|   } | ||||
| 
 | ||||
|  | @ -116,12 +118,12 @@ Path LocalFSStore::addPermRoot(const Path& _storePath, const Path& _gcRoot, | |||
|      gcroots directory. */ | ||||
|   if (settings.checkRootReachability) { | ||||
|     Roots roots = findRoots(false); | ||||
|     if (roots[storePath].count(gcRoot) == 0) | ||||
|       printError( | ||||
|           format("warning: '%1%' is not in a directory where the garbage " | ||||
|                  "collector looks for roots; " | ||||
|                  "therefore, '%2%' might be removed by the garbage collector") % | ||||
|           gcRoot % storePath); | ||||
|     if (roots[storePath].count(gcRoot) == 0) { | ||||
|       LOG(ERROR) << "warning: '" << gcRoot | ||||
|                  << "' is not in a directory where the garbage " | ||||
|                  << "collector looks for roots; therefore, '" << storePath | ||||
|                  << "' might be removed by the garbage collector"; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   /* Grab the global GC root, causing us to block while a GC is in
 | ||||
|  | @ -149,7 +151,7 @@ void LocalStore::addTempRoot(const Path& path) { | |||
| 
 | ||||
|       fdGCLock = -1; | ||||
| 
 | ||||
|       debug(format("acquiring read lock on '%1%'") % fnTempRoots); | ||||
|       DLOG(INFO) << "acquiring read lock on " << fnTempRoots; | ||||
|       lockFile(state->fdTempRoots.get(), ltRead, true); | ||||
| 
 | ||||
|       /* Check whether the garbage collector didn't get in our
 | ||||
|  | @ -167,14 +169,14 @@ void LocalStore::addTempRoot(const Path& path) { | |||
| 
 | ||||
|   /* Upgrade the lock to a write lock.  This will cause us to block
 | ||||
|      if the garbage collector is holding our lock. */ | ||||
|   debug(format("acquiring write lock on '%1%'") % fnTempRoots); | ||||
|   DLOG(INFO) << "acquiring write lock on " << fnTempRoots; | ||||
|   lockFile(state->fdTempRoots.get(), ltWrite, true); | ||||
| 
 | ||||
|   string s = path + '\0'; | ||||
|   writeFull(state->fdTempRoots.get(), s); | ||||
| 
 | ||||
|   /* Downgrade to a read lock. */ | ||||
|   debug(format("downgrading to read lock on '%1%'") % fnTempRoots); | ||||
|   DLOG(INFO) << "downgrading to read lock on " << fnTempRoots; | ||||
|   lockFile(state->fdTempRoots.get(), ltRead, true); | ||||
| } | ||||
| 
 | ||||
|  | @ -188,7 +190,7 @@ void LocalStore::findTempRoots(FDs& fds, Roots& tempRoots, bool censor) { | |||
| 
 | ||||
|     pid_t pid = std::stoi(i.name); | ||||
| 
 | ||||
|     debug(format("reading temporary root file '%1%'") % path); | ||||
|     DLOG(INFO) << "reading temporary root file " << path; | ||||
|     FDPtr fd(new AutoCloseFD(open(path.c_str(), O_CLOEXEC | O_RDWR, 0666))); | ||||
|     if (!*fd) { | ||||
|       /* It's okay if the file has disappeared. */ | ||||
|  | @ -204,7 +206,7 @@ void LocalStore::findTempRoots(FDs& fds, Roots& tempRoots, bool censor) { | |||
|        only succeed if the owning process has died.  In that case | ||||
|        we don't care about its temporary roots. */ | ||||
|     if (lockFile(fd->get(), ltWrite, false)) { | ||||
|       printError(format("removing stale temporary roots file '%1%'") % path); | ||||
|       LOG(ERROR) << "removing stale temporary roots file " << path; | ||||
|       unlink(path.c_str()); | ||||
|       writeFull(fd->get(), "d"); | ||||
|       continue; | ||||
|  | @ -213,7 +215,7 @@ void LocalStore::findTempRoots(FDs& fds, Roots& tempRoots, bool censor) { | |||
|     /* Acquire a read lock.  This will prevent the owning process
 | ||||
|        from upgrading to a write lock, therefore it will block in | ||||
|        addTempRoot(). */ | ||||
|     debug(format("waiting for read lock on '%1%'") % path); | ||||
|     DLOG(INFO) << "waiting for read lock on " << path; | ||||
|     lockFile(fd->get(), ltRead, true); | ||||
| 
 | ||||
|     /* Read the entire file. */ | ||||
|  | @ -224,7 +226,7 @@ void LocalStore::findTempRoots(FDs& fds, Roots& tempRoots, bool censor) { | |||
| 
 | ||||
|     while ((end = contents.find((char)0, pos)) != string::npos) { | ||||
|       Path root(contents, pos, end - pos); | ||||
|       debug("got temporary root '%s'", root); | ||||
|       DLOG(INFO) << "got temporary root " << root; | ||||
|       assertStorePath(root); | ||||
|       tempRoots[root].emplace(censor ? censored : fmt("{temp:%d}", pid)); | ||||
|       pos = end + 1; | ||||
|  | @ -240,8 +242,8 @@ void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) { | |||
|     if (isStorePath(storePath) && isValidPath(storePath)) | ||||
|       roots[storePath].emplace(path); | ||||
|     else | ||||
|       printInfo(format("skipping invalid root from '%1%' to '%2%'") % path % | ||||
|                 storePath); | ||||
|       LOG(INFO) << "skipping invalid root from '" << path << "' to '" | ||||
|                 << storePath << "'"; | ||||
|   }; | ||||
| 
 | ||||
|   try { | ||||
|  | @ -261,8 +263,8 @@ void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) { | |||
|         target = absPath(target, dirOf(path)); | ||||
|         if (!pathExists(target)) { | ||||
|           if (isInDir(path, stateDir + "/" + gcRootsDir + "/auto")) { | ||||
|             printInfo(format("removing stale link from '%1%' to '%2%'") % path % | ||||
|                       target); | ||||
|             LOG(INFO) << "removing stale link from '" << path << "' to '" | ||||
|                       << target << "'"; | ||||
|             unlink(path.c_str()); | ||||
|           } | ||||
|         } else { | ||||
|  | @ -284,10 +286,11 @@ void LocalStore::findRoots(const Path& path, unsigned char type, Roots& roots) { | |||
| 
 | ||||
|   catch (SysError& e) { | ||||
|     /* We only ignore permanent failures. */ | ||||
|     if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR) | ||||
|       printInfo(format("cannot read potential root '%1%'") % path); | ||||
|     else | ||||
|     if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR) { | ||||
|       LOG(INFO) << "cannot read potential root '" << path << "'"; | ||||
|     } else { | ||||
|       throw; | ||||
|     } | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  | @ -319,16 +322,21 @@ try_again: | |||
|   char buf[bufsiz]; | ||||
|   auto res = readlink(file.c_str(), buf, bufsiz); | ||||
|   if (res == -1) { | ||||
|     if (errno == ENOENT || errno == EACCES || errno == ESRCH) return; | ||||
|     if (errno == ENOENT || errno == EACCES || errno == ESRCH) { | ||||
|       return; | ||||
|     } | ||||
|     throw SysError("reading symlink"); | ||||
|   } | ||||
|   if (res == bufsiz) { | ||||
|     if (SSIZE_MAX / 2 < bufsiz) throw Error("stupidly long symlink"); | ||||
|     if (SSIZE_MAX / 2 < bufsiz) { | ||||
|       throw Error("stupidly long symlink"); | ||||
|     } | ||||
|     bufsiz *= 2; | ||||
|     goto try_again; | ||||
|   } | ||||
|   if (res > 0 && buf[0] == '/') | ||||
|   if (res > 0 && buf[0] == '/') { | ||||
|     roots[std::string(static_cast<char*>(buf), res)].emplace(file); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| static string quoteRegexChars(const string& raw) { | ||||
|  | @ -340,7 +348,9 @@ static void readFileRoots(const char* path, Roots& roots) { | |||
|   try { | ||||
|     roots[readFile(path)].emplace(path); | ||||
|   } catch (SysError& e) { | ||||
|     if (e.errNo != ENOENT && e.errNo != EACCES) throw; | ||||
|     if (e.errNo != ENOENT && e.errNo != EACCES) { | ||||
|       throw; | ||||
|     } | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  | @ -434,7 +444,7 @@ void LocalStore::findRuntimeRoots(Roots& roots, bool censor) { | |||
|     if (isInStore(target)) { | ||||
|       Path path = toStorePath(target); | ||||
|       if (isStorePath(path) && isValidPath(path)) { | ||||
|         debug(format("got additional root '%1%'") % path); | ||||
|         DLOG(INFO) << "got additional root " << path; | ||||
|         if (censor) | ||||
|           roots[path].insert(censored); | ||||
|         else | ||||
|  | @ -496,7 +506,7 @@ void LocalStore::deletePathRecursive(GCState& state, const Path& path) { | |||
|     throw SysError(format("getting status of %1%") % realPath); | ||||
|   } | ||||
| 
 | ||||
|   printInfo(format("deleting '%1%'") % path); | ||||
|   LOG(INFO) << "deleting '" << path << "'"; | ||||
| 
 | ||||
|   state.results.paths.insert(path); | ||||
| 
 | ||||
|  | @ -519,8 +529,8 @@ void LocalStore::deletePathRecursive(GCState& state, const Path& path) { | |||
|       state.bytesInvalidated += size; | ||||
|     } catch (SysError& e) { | ||||
|       if (e.errNo == ENOSPC) { | ||||
|         printInfo(format("note: can't create move '%1%': %2%") % realPath % | ||||
|                   e.msg()); | ||||
|         LOG(INFO) << "note: can't create move '" << realPath | ||||
|                   << "': " << e.msg(); | ||||
|         deleteGarbage(state, realPath); | ||||
|       } | ||||
|     } | ||||
|  | @ -529,8 +539,8 @@ void LocalStore::deletePathRecursive(GCState& state, const Path& path) { | |||
| 
 | ||||
|   if (state.results.bytesFreed + state.bytesInvalidated > | ||||
|       state.options.maxFreed) { | ||||
|     printInfo(format("deleted or invalidated more than %1% bytes; stopping") % | ||||
|               state.options.maxFreed); | ||||
|     LOG(INFO) << "deleted or invalidated more than " << state.options.maxFreed | ||||
|               << " bytes; stopping"; | ||||
|     throw GCLimitReached(); | ||||
|   } | ||||
| } | ||||
|  | @ -544,7 +554,7 @@ bool LocalStore::canReachRoot(GCState& state, PathSet& visited, | |||
|   if (state.dead.count(path)) return false; | ||||
| 
 | ||||
|   if (state.roots.count(path)) { | ||||
|     debug(format("cannot delete '%1%' because it's a root") % path); | ||||
|     DLOG(INFO) << "cannot delete '" << path << "' because it's a root"; | ||||
|     state.alive.insert(path); | ||||
|     return true; | ||||
|   } | ||||
|  | @ -611,7 +621,7 @@ void LocalStore::tryToDelete(GCState& state, const Path& path) { | |||
|   PathSet visited; | ||||
| 
 | ||||
|   if (canReachRoot(state, visited, path)) { | ||||
|     debug(format("cannot delete '%1%' because it's still reachable") % path); | ||||
|     DLOG(INFO) << "cannot delete '" << path << "' because it's still reachable"; | ||||
|   } else { | ||||
|     /* No path we visited was a root, so everything is garbage.
 | ||||
|        But we only delete ‘path’ and its referrers here so that | ||||
|  | @ -629,7 +639,9 @@ void LocalStore::tryToDelete(GCState& state, const Path& path) { | |||
|    the link count. */ | ||||
| void LocalStore::removeUnusedLinks(const GCState& state) { | ||||
|   AutoCloseDir dir(opendir(linksDir.c_str())); | ||||
|   if (!dir) throw SysError(format("opening directory '%1%'") % linksDir); | ||||
|   if (!dir) { | ||||
|     throw SysError(format("opening directory '%1%'") % linksDir); | ||||
|   } | ||||
| 
 | ||||
|   long long actualSize = 0, unsharedSize = 0; | ||||
| 
 | ||||
|  | @ -650,21 +662,26 @@ void LocalStore::removeUnusedLinks(const GCState& state) { | |||
|       continue; | ||||
|     } | ||||
| 
 | ||||
|     printMsg(lvlTalkative, format("deleting unused link '%1%'") % path); | ||||
|     LOG(INFO) << "deleting unused link " << path; | ||||
| 
 | ||||
|     if (unlink(path.c_str()) == -1) | ||||
|     if (unlink(path.c_str()) == -1) { | ||||
|       throw SysError(format("deleting '%1%'") % path); | ||||
|     } | ||||
| 
 | ||||
|     state.results.bytesFreed += st.st_size; | ||||
|   } | ||||
| 
 | ||||
|   struct stat st; | ||||
|   if (stat(linksDir.c_str(), &st) == -1) | ||||
|   if (stat(linksDir.c_str(), &st) == -1) { | ||||
|     throw SysError(format("statting '%1%'") % linksDir); | ||||
|   } | ||||
| 
 | ||||
|   long long overhead = st.st_blocks * 512ULL; | ||||
| 
 | ||||
|   printInfo(format("note: currently hard linking saves %.2f MiB") % | ||||
|             ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0))); | ||||
|   // TODO(tazjin): absl::StrFormat %.2f
 | ||||
|   LOG(INFO) << "note: currently hard linking saves " | ||||
|             << ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)) | ||||
|             << " MiB"; | ||||
| } | ||||
| 
 | ||||
| void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) { | ||||
|  | @ -694,11 +711,15 @@ void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) { | |||
| 
 | ||||
|   /* Find the roots.  Since we've grabbed the GC lock, the set of
 | ||||
|      permanent roots cannot increase now. */ | ||||
|   printError(format("finding garbage collector roots...")); | ||||
|   LOG(INFO) << "finding garbage collector roots..."; | ||||
|   Roots rootMap; | ||||
|   if (!options.ignoreLiveness) findRootsNoTemp(rootMap, true); | ||||
|   if (!options.ignoreLiveness) { | ||||
|     findRootsNoTemp(rootMap, true); | ||||
|   } | ||||
| 
 | ||||
|   for (auto& i : rootMap) state.roots.insert(i.first); | ||||
|   for (auto& i : rootMap) { | ||||
|     state.roots.insert(i.first); | ||||
|   } | ||||
| 
 | ||||
|   /* Read the temporary roots.  This acquires read locks on all
 | ||||
|      per-process temporary root files.  So after this point no paths | ||||
|  | @ -719,7 +740,7 @@ void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) { | |||
|       createDirs(trashDir); | ||||
|     } catch (SysError& e) { | ||||
|       if (e.errNo == ENOSPC) { | ||||
|         printInfo(format("note: can't create trash directory: %1%") % e.msg()); | ||||
|         LOG(INFO) << "note: can't create trash directory: " << e.msg(); | ||||
|         state.moveToTrash = false; | ||||
|       } | ||||
|     } | ||||
|  | @ -739,14 +760,15 @@ void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) { | |||
| 
 | ||||
|   } else if (options.maxFreed > 0) { | ||||
|     if (state.shouldDelete) | ||||
|       printError(format("deleting garbage...")); | ||||
|       LOG(INFO) << "deleting garbage..."; | ||||
|     else | ||||
|       printError(format("determining live/dead paths...")); | ||||
|       LOG(ERROR) << "determining live/dead paths..."; | ||||
| 
 | ||||
|     try { | ||||
|       AutoCloseDir dir(opendir(realStoreDir.c_str())); | ||||
|       if (!dir) | ||||
|       if (!dir) { | ||||
|         throw SysError(format("opening directory '%1%'") % realStoreDir); | ||||
|       } | ||||
| 
 | ||||
|       /* Read the store and immediately delete all paths that
 | ||||
|          aren't valid.  When using --max-freed etc., deleting | ||||
|  | @ -799,13 +821,13 @@ void LocalStore::collectGarbage(const GCOptions& options, GCResults& results) { | |||
|   fds.clear(); | ||||
| 
 | ||||
|   /* Delete the trash directory. */ | ||||
|   printInfo(format("deleting '%1%'") % trashDir); | ||||
|   LOG(INFO) << "deleting " << trashDir; | ||||
|   deleteGarbage(state, trashDir); | ||||
| 
 | ||||
|   /* Clean up the links directory. */ | ||||
|   if (options.action == GCOptions::gcDeleteDead || | ||||
|       options.action == GCOptions::gcDeleteSpecific) { | ||||
|     printError(format("deleting unused links...")); | ||||
|     LOG(INFO) << "deleting unused links..."; | ||||
|     removeUnusedLinks(state); | ||||
|   } | ||||
| 
 | ||||
|  | @ -821,8 +843,9 @@ void LocalStore::autoGC(bool sync) { | |||
|       return std::stoll(readFile(fakeFreeSpaceFile)); | ||||
| 
 | ||||
|     struct statvfs st; | ||||
|     if (statvfs(realStoreDir.c_str(), &st)) | ||||
|     if (statvfs(realStoreDir.c_str(), &st)) { | ||||
|       throw SysError("getting filesystem info about '%s'", realStoreDir); | ||||
|     } | ||||
| 
 | ||||
|     return (uint64_t)st.f_bavail * st.f_bsize; | ||||
|   }; | ||||
|  | @ -834,7 +857,7 @@ void LocalStore::autoGC(bool sync) { | |||
| 
 | ||||
|     if (state->gcRunning) { | ||||
|       future = state->gcFuture; | ||||
|       debug("waiting for auto-GC to finish"); | ||||
|       DLOG(INFO) << "waiting for auto-GC to finish"; | ||||
|       goto sync; | ||||
|     } | ||||
| 
 | ||||
|  | @ -870,7 +893,7 @@ void LocalStore::autoGC(bool sync) { | |||
|         GCOptions options; | ||||
|         options.maxFreed = settings.maxFree - avail; | ||||
| 
 | ||||
|         printInfo("running auto-GC to free %d bytes", options.maxFreed); | ||||
|         LOG(INFO) << "running auto-GC to free " << options.maxFreed << " bytes"; | ||||
| 
 | ||||
|         GCResults results; | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,3 +1,4 @@ | |||
| #include <glog/logging.h> | ||||
| #include "binary-cache-store.hh" | ||||
| #include "download.hh" | ||||
| #include "globals.hh" | ||||
|  | @ -45,7 +46,8 @@ class HttpBinaryCacheStore : public BinaryCacheStore { | |||
|     auto state(_state.lock()); | ||||
|     if (state->enabled && settings.tryFallback) { | ||||
|       int t = 60; | ||||
|       printError("disabling binary cache '%s' for %s seconds", getUri(), t); | ||||
|       LOG(WARNING) << "disabling binary cache '" << getUri() << "' for " << t | ||||
|                    << " seconds"; | ||||
|       state->enabled = false; | ||||
|       state->disabledUntil = | ||||
|           std::chrono::steady_clock::now() + std::chrono::seconds(t); | ||||
|  | @ -57,7 +59,7 @@ class HttpBinaryCacheStore : public BinaryCacheStore { | |||
|     if (state->enabled) return; | ||||
|     if (std::chrono::steady_clock::now() > state->disabledUntil) { | ||||
|       state->enabled = true; | ||||
|       debug("re-enabling binary cache '%s'", getUri()); | ||||
|       DLOG(INFO) << "re-enabling binary cache '" << getUri() << "'"; | ||||
|       return; | ||||
|     } | ||||
|     throw SubstituterDisabled("substituter '%s' is disabled", getUri()); | ||||
|  |  | |||
|  | @ -1,5 +1,6 @@ | |||
| #include "archive.hh" | ||||
| #include "derivations.hh" | ||||
| #include "glog/logging.h" | ||||
| #include "pool.hh" | ||||
| #include "remote-store.hh" | ||||
| #include "serve-protocol.hh" | ||||
|  | @ -90,7 +91,8 @@ struct LegacySSHStore : public Store { | |||
|     try { | ||||
|       auto conn(connections->get()); | ||||
| 
 | ||||
|       debug("querying remote host '%s' for info on '%s'", host, path); | ||||
|       DLOG(INFO) << "querying remote host '" << host << "' for info on '" | ||||
|                  << path << "'"; | ||||
| 
 | ||||
|       conn->to << cmdQueryPathInfos << PathSet{path}; | ||||
|       conn->to.flush(); | ||||
|  | @ -125,7 +127,8 @@ struct LegacySSHStore : public Store { | |||
|   void addToStore(const ValidPathInfo& info, Source& source, RepairFlag repair, | ||||
|                   CheckSigsFlag checkSigs, | ||||
|                   std::shared_ptr<FSAccessor> accessor) override { | ||||
|     debug("adding path '%s' to remote host '%s'", info.path, host); | ||||
|     DLOG(INFO) << "adding path '" << info.path << "' to remote host '" << host | ||||
|                << "'"; | ||||
| 
 | ||||
|     auto conn(connections->get()); | ||||
| 
 | ||||
|  |  | |||
third_party/nix/src/libstore/local-store.cc (vendored): 87 changes
|  | @ -1,6 +1,7 @@ | |||
| #include "local-store.hh" | ||||
| #include <errno.h> | ||||
| #include <fcntl.h> | ||||
| #include <glog/logging.h> | ||||
| #include <grp.h> | ||||
| #include <stdio.h> | ||||
| #include <sys/select.h> | ||||
|  | @ -81,11 +82,10 @@ LocalStore::LocalStore(const Params& params) | |||
|     mode_t perm = 01775; | ||||
| 
 | ||||
|     struct group* gr = getgrnam(settings.buildUsersGroup.get().c_str()); | ||||
|     if (!gr) | ||||
|       printError(format("warning: the group '%1%' specified in " | ||||
|                         "'build-users-group' does not exist") % | ||||
|                  settings.buildUsersGroup); | ||||
|     else { | ||||
|     if (!gr) { | ||||
|       LOG(ERROR) << "warning: the group '" << settings.buildUsersGroup | ||||
|                  << "' specified in 'build-users-group' does not exist"; | ||||
|     } else { | ||||
|       struct stat st; | ||||
|       if (stat(realStoreDir.c_str(), &st)) | ||||
|         throw SysError(format("getting attributes of path '%1%'") % | ||||
|  | @ -147,7 +147,7 @@ LocalStore::LocalStore(const Params& params) | |||
|   globalLock = openLockFile(globalLockPath.c_str(), true); | ||||
| 
 | ||||
|   if (!lockFile(globalLock.get(), ltRead, false)) { | ||||
|     printError("waiting for the big Nix store lock..."); | ||||
|     LOG(INFO) << "waiting for the big Nix store lock..."; | ||||
|     lockFile(globalLock.get(), ltRead, true); | ||||
|   } | ||||
| 
 | ||||
|  | @ -180,7 +180,7 @@ LocalStore::LocalStore(const Params& params) | |||
|           "please upgrade Nix to version 1.11 first."); | ||||
| 
 | ||||
|     if (!lockFile(globalLock.get(), ltWrite, false)) { | ||||
|       printError("waiting for exclusive access to the Nix store..."); | ||||
|       LOG(INFO) << "waiting for exclusive access to the Nix store..."; | ||||
|       lockFile(globalLock.get(), ltWrite, true); | ||||
|     } | ||||
| 
 | ||||
|  | @ -272,7 +272,7 @@ LocalStore::~LocalStore() { | |||
|   } | ||||
| 
 | ||||
|   if (future.valid()) { | ||||
|     printError("waiting for auto-GC to finish on exit..."); | ||||
|     LOG(INFO) << "waiting for auto-GC to finish on exit..."; | ||||
|     future.get(); | ||||
|   } | ||||
| 
 | ||||
|  | @ -850,8 +850,8 @@ void LocalStore::querySubstitutablePathInfos(const PathSet& paths, | |||
|     if (sub->storeDir != storeDir) continue; | ||||
|     for (auto& path : paths) { | ||||
|       if (infos.count(path)) continue; | ||||
|       debug(format("checking substituter '%s' for path '%s'") % sub->getUri() % | ||||
|             path); | ||||
|       DLOG(INFO) << "checking substituter '" << sub->getUri() << "' for path '" | ||||
|                  << path << "'"; | ||||
|       try { | ||||
|         auto info = sub->queryPathInfo(path); | ||||
|         auto narInfo = std::dynamic_pointer_cast<const NarInfo>( | ||||
|  | @ -862,10 +862,11 @@ void LocalStore::querySubstitutablePathInfos(const PathSet& paths, | |||
|       } catch (InvalidPath&) { | ||||
|       } catch (SubstituterDisabled&) { | ||||
|       } catch (Error& e) { | ||||
|         if (settings.tryFallback) | ||||
|           printError(e.what()); | ||||
|         else | ||||
|         if (settings.tryFallback) { | ||||
|           LOG(ERROR) << e.what(); | ||||
|         } else { | ||||
|           throw; | ||||
|         } | ||||
|       } | ||||
|     } | ||||
|   } | ||||
|  | @ -931,7 +932,7 @@ void LocalStore::registerValidPaths(const ValidPathInfos& infos) { | |||
| /* Invalidate a path.  The caller is responsible for checking that
 | ||||
|    there are no referrers. */ | ||||
| void LocalStore::invalidatePath(State& state, const Path& path) { | ||||
|   debug(format("invalidating path '%1%'") % path); | ||||
|   LOG(INFO) << "invalidating path '" << path << "'"; | ||||
| 
 | ||||
|   state.stmtInvalidatePath.use()(path).exec(); | ||||
| 
 | ||||
|  | @ -954,12 +955,15 @@ const PublicKeys& LocalStore::getPublicKeys() { | |||
| void LocalStore::addToStore(const ValidPathInfo& info, Source& source, | ||||
|                             RepairFlag repair, CheckSigsFlag checkSigs, | ||||
|                             std::shared_ptr<FSAccessor> accessor) { | ||||
|   if (!info.narHash) | ||||
|   if (!info.narHash) { | ||||
|     throw Error("cannot add path '%s' because it lacks a hash", info.path); | ||||
|   } | ||||
| 
 | ||||
|   if (requireSigs && checkSigs && !info.checkSignatures(*this, getPublicKeys())) | ||||
|   if (requireSigs && checkSigs && | ||||
|       !info.checkSignatures(*this, getPublicKeys())) { | ||||
|     throw Error("cannot add path '%s' because it lacks a valid signature", | ||||
|                 info.path); | ||||
|   } | ||||
| 
 | ||||
|   addTempRoot(info.path); | ||||
| 
 | ||||
|  | @ -1168,7 +1172,7 @@ void LocalStore::invalidatePathChecked(const Path& path) { | |||
| } | ||||
| 
 | ||||
| bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { | ||||
|   printError(format("reading the Nix store...")); | ||||
|   LOG(INFO) << "reading the Nix store..."; | ||||
| 
 | ||||
|   bool errors = false; | ||||
| 
 | ||||
|  | @ -1180,7 +1184,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { | |||
|   for (auto& i : readDirectory(realStoreDir)) store.insert(i.name); | ||||
| 
 | ||||
|   /* Check whether all valid paths actually exist. */ | ||||
|   printInfo("checking path existence..."); | ||||
|   LOG(INFO) << "checking path existence..."; | ||||
| 
 | ||||
|   PathSet validPaths2 = queryAllValidPaths(), validPaths, done; | ||||
| 
 | ||||
|  | @ -1191,7 +1195,7 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { | |||
| 
 | ||||
|   /* Optionally, check the content hashes (slow). */ | ||||
|   if (checkContents) { | ||||
|     printInfo("checking hashes..."); | ||||
|     LOG(INFO) << "checking hashes..."; | ||||
| 
 | ||||
|     Hash nullHash(htSHA256); | ||||
| 
 | ||||
|  | @ -1201,31 +1205,32 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { | |||
|             std::shared_ptr<const ValidPathInfo>(queryPathInfo(i))); | ||||
| 
 | ||||
|         /* Check the content hash (optionally - slow). */ | ||||
|         printMsg(lvlTalkative, format("checking contents of '%1%'") % i); | ||||
|         DLOG(INFO) << "checking contents of '" << i << "'"; | ||||
|         HashResult current = hashPath(info->narHash.type, toRealPath(i)); | ||||
| 
 | ||||
|         if (info->narHash != nullHash && info->narHash != current.first) { | ||||
|           printError(format("path '%1%' was modified! " | ||||
|                             "expected hash '%2%', got '%3%'") % | ||||
|                      i % info->narHash.to_string() % current.first.to_string()); | ||||
|           if (repair) | ||||
|           LOG(ERROR) << "path '" << i << "' was modified! expected hash '" | ||||
|                      << info->narHash.to_string() << "', got '" | ||||
|                      << current.first.to_string() << "'"; | ||||
|           if (repair) { | ||||
|             repairPath(i); | ||||
|           else | ||||
|           } else { | ||||
|             errors = true; | ||||
|           } | ||||
|         } else { | ||||
|           bool update = false; | ||||
| 
 | ||||
|           /* Fill in missing hashes. */ | ||||
|           if (info->narHash == nullHash) { | ||||
|             printError(format("fixing missing hash on '%1%'") % i); | ||||
|             LOG(WARNING) << "fixing missing hash on '" << i << "'"; | ||||
|             info->narHash = current.first; | ||||
|             update = true; | ||||
|           } | ||||
| 
 | ||||
|           /* Fill in missing narSize fields (from old stores). */ | ||||
|           if (info->narSize == 0) { | ||||
|             printError(format("updating size field on '%1%' to %2%") % i % | ||||
|                        current.second); | ||||
|             LOG(ERROR) << "updating size field on '" << i << "' to " | ||||
|                        << current.second; | ||||
|             info->narSize = current.second; | ||||
|             update = true; | ||||
|           } | ||||
|  | @ -1239,10 +1244,11 @@ bool LocalStore::verifyStore(bool checkContents, RepairFlag repair) { | |||
|       } catch (Error& e) { | ||||
|         /* It's possible that the path got GC'ed, so ignore
 | ||||
|            errors on invalid paths. */ | ||||
|         if (isValidPath(i)) | ||||
|           printError(format("error: %1%") % e.msg()); | ||||
|         else | ||||
|           printError(format("warning: %1%") % e.msg()); | ||||
|         if (isValidPath(i)) { | ||||
|           LOG(ERROR) << e.msg(); | ||||
|         } else { | ||||
|           LOG(WARNING) << e.msg(); | ||||
|         } | ||||
|         errors = true; | ||||
|       } | ||||
|     } | ||||
|  | @ -1256,11 +1262,13 @@ void LocalStore::verifyPath(const Path& path, const PathSet& store, | |||
|                             RepairFlag repair, bool& errors) { | ||||
|   checkInterrupt(); | ||||
| 
 | ||||
|   if (done.find(path) != done.end()) return; | ||||
|   if (done.find(path) != done.end()) { | ||||
|     return; | ||||
|   } | ||||
|   done.insert(path); | ||||
| 
 | ||||
|   if (!isStorePath(path)) { | ||||
|     printError(format("path '%1%' is not in the Nix store") % path); | ||||
|     LOG(ERROR) << "path '" << path << "' is not in the Nix store"; | ||||
|     auto state(_state.lock()); | ||||
|     invalidatePath(*state, path); | ||||
|     return; | ||||
|  | @ -1279,18 +1287,17 @@ void LocalStore::verifyPath(const Path& path, const PathSet& store, | |||
|       } | ||||
| 
 | ||||
|     if (canInvalidate) { | ||||
|       printError(format("path '%1%' disappeared, removing from database...") % | ||||
|                  path); | ||||
|       LOG(WARNING) << "path '" << path | ||||
|                    << "' disappeared, removing from database..."; | ||||
|       auto state(_state.lock()); | ||||
|       invalidatePath(*state, path); | ||||
|     } else { | ||||
|       printError( | ||||
|           format("path '%1%' disappeared, but it still has valid referrers!") % | ||||
|           path); | ||||
|       LOG(ERROR) << "path '" << path | ||||
|                  << "' disappeared, but it still has valid referrers!"; | ||||
|       if (repair) try { | ||||
|           repairPath(path); | ||||
|         } catch (Error& e) { | ||||
|           printError(format("warning: %1%") % e.msg()); | ||||
|           LOG(WARNING) << e.msg(); | ||||
|           errors = true; | ||||
|         } | ||||
|       else | ||||
|  |  | |||
third_party/nix/src/libstore/local-store.hh (vendored): 2 changes
|  | @ -268,7 +268,7 @@ class LocalStore : public LocalFSStore { | |||
|   InodeHash loadInodeHash(); | ||||
|   Strings readDirectoryIgnoringInodes(const Path& path, | ||||
|                                       const InodeHash& inodeHash); | ||||
|   void optimisePath_(Activity* act, OptimiseStats& stats, const Path& path, | ||||
|   void optimisePath_(OptimiseStats& stats, const Path& path, | ||||
|                      InodeHash& inodeHash); | ||||
| 
 | ||||
|   // Internal versions that are not wrapped in retry_sqlite.
 | ||||
|  |  | |||
third_party/nix/src/libstore/machines.cc (vendored): 11 changes
|  | @ -1,4 +1,5 @@ | |||
| #include "machines.hh" | ||||
| #include <glog/logging.h> | ||||
| #include <algorithm> | ||||
| #include "globals.hh" | ||||
| #include "util.hh" | ||||
|  | @ -53,15 +54,19 @@ void parseMachines(const std::string& s, Machines& machines) { | |||
|       try { | ||||
|         parseMachines(readFile(file), machines); | ||||
|       } catch (const SysError& e) { | ||||
|         if (e.errNo != ENOENT) throw; | ||||
|         debug("cannot find machines file '%s'", file); | ||||
|         if (e.errNo != ENOENT) { | ||||
|           throw; | ||||
|         } | ||||
|         DLOG(INFO) << "cannot find machines file: " << file; | ||||
|       } | ||||
|       continue; | ||||
|     } | ||||
| 
 | ||||
|     auto tokens = tokenizeString<std::vector<string>>(line); | ||||
|     auto sz = tokens.size(); | ||||
|     if (sz < 1) throw FormatError("bad machine specification '%s'", line); | ||||
|     if (sz < 1) { | ||||
|       throw FormatError("bad machine specification '%s'", line); | ||||
|     } | ||||
| 
 | ||||
|     auto isSet = [&](size_t n) { | ||||
|       return tokens.size() > n && tokens[n] != "" && tokens[n] != "-"; | ||||
|  |  | |||
third_party/nix/src/libstore/meson.build (vendored): 1 change
|  | @ -79,6 +79,7 @@ libstore_data = files( | |||
| #============================================================================ | ||||
| 
 | ||||
| libstore_dep_list = [ | ||||
|     glog_dep, | ||||
|     libbz2_dep, | ||||
|     libcurl_dep, | ||||
|     libdl_dep, | ||||
|  |  | |||
third_party/nix/src/libstore/misc.cc (vendored): 4 changes
|  | @ -1,3 +1,4 @@ | |||
| #include <glog/logging.h> | ||||
| #include "derivations.hh" | ||||
| #include "globals.hh" | ||||
| #include "local-store.hh" | ||||
|  | @ -99,8 +100,7 @@ void Store::queryMissing(const PathSet& targets, PathSet& willBuild_, | |||
|                          PathSet& willSubstitute_, PathSet& unknown_, | ||||
|                          unsigned long long& downloadSize_, | ||||
|                          unsigned long long& narSize_) { | ||||
|   Activity act(*logger, lvlDebug, actUnknown, | ||||
|                "querying info about missing paths"); | ||||
|   LOG(INFO) << "querying info about missing paths"; | ||||
| 
 | ||||
|   downloadSize_ = narSize_ = 0; | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,4 +1,5 @@ | |||
| #include "nar-info-disk-cache.hh" | ||||
| #include <glog/logging.h> | ||||
| #include <sqlite3.h> | ||||
| #include "globals.hh" | ||||
| #include "sqlite.hh" | ||||
|  | @ -126,8 +127,8 @@ class NarInfoDiskCacheImpl : public NarInfoDiskCache { | |||
|                 now - settings.ttlPositiveNarInfoCache) | ||||
|             .exec(); | ||||
| 
 | ||||
|         debug("deleted %d entries from the NAR info disk cache", | ||||
|               sqlite3_changes(state->db)); | ||||
|         DLOG(INFO) << "deleted " << sqlite3_changes(state->db) | ||||
|                    << " entries from the NAR info disk cache"; | ||||
| 
 | ||||
|         SQLiteStmt( | ||||
|             state->db, | ||||
|  |  | |||
third_party/nix/src/libstore/optimise-store.cc (vendored): 82 changes
|  | @ -7,6 +7,7 @@ | |||
| #include <cstring> | ||||
| #include <regex> | ||||
| #include "globals.hh" | ||||
| #include "glog/logging.h" | ||||
| #include "local-store.hh" | ||||
| #include "util.hh" | ||||
| 
 | ||||
|  | @ -34,11 +35,13 @@ struct MakeReadOnly { | |||
| }; | ||||
| 
 | ||||
| LocalStore::InodeHash LocalStore::loadInodeHash() { | ||||
|   debug("loading hash inodes in memory"); | ||||
|   DLOG(INFO) << "loading hash inodes in memory"; | ||||
|   InodeHash inodeHash; | ||||
| 
 | ||||
|   AutoCloseDir dir(opendir(linksDir.c_str())); | ||||
|   if (!dir) throw SysError(format("opening directory '%1%'") % linksDir); | ||||
|   if (!dir) { | ||||
|     throw SysError(format("opening directory '%1%'") % linksDir); | ||||
|   } | ||||
| 
 | ||||
|   struct dirent* dirent; | ||||
|   while (errno = 0, dirent = readdir(dir.get())) { /* sic */ | ||||
|  | @ -46,9 +49,11 @@ LocalStore::InodeHash LocalStore::loadInodeHash() { | |||
|     // We don't care if we hit non-hash files, anything goes
 | ||||
|     inodeHash.insert(dirent->d_ino); | ||||
|   } | ||||
|   if (errno) throw SysError(format("reading directory '%1%'") % linksDir); | ||||
|   if (errno) { | ||||
|     throw SysError(format("reading directory '%1%'") % linksDir); | ||||
|   } | ||||
| 
 | ||||
|   printMsg(lvlTalkative, format("loaded %1% hash inodes") % inodeHash.size()); | ||||
|   DLOG(INFO) << "loaded " << inodeHash.size() << " hash inodes"; | ||||
| 
 | ||||
|   return inodeHash; | ||||
| } | ||||
|  | @ -58,14 +63,16 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path& path, | |||
|   Strings names; | ||||
| 
 | ||||
|   AutoCloseDir dir(opendir(path.c_str())); | ||||
|   if (!dir) throw SysError(format("opening directory '%1%'") % path); | ||||
|   if (!dir) { | ||||
|     throw SysError(format("opening directory '%1%'") % path); | ||||
|   } | ||||
| 
 | ||||
|   struct dirent* dirent; | ||||
|   while (errno = 0, dirent = readdir(dir.get())) { /* sic */ | ||||
|     checkInterrupt(); | ||||
| 
 | ||||
|     if (inodeHash.count(dirent->d_ino)) { | ||||
|       debug(format("'%1%' is already linked") % dirent->d_name); | ||||
|       DLOG(WARNING) << dirent->d_name << " is already linked"; | ||||
|       continue; | ||||
|     } | ||||
| 
 | ||||
|  | @ -73,13 +80,15 @@ Strings LocalStore::readDirectoryIgnoringInodes(const Path& path, | |||
|     if (name == "." || name == "..") continue; | ||||
|     names.push_back(name); | ||||
|   } | ||||
|   if (errno) throw SysError(format("reading directory '%1%'") % path); | ||||
|   if (errno) { | ||||
|     throw SysError(format("reading directory '%1%'") % path); | ||||
|   } | ||||
| 
 | ||||
|   return names; | ||||
| } | ||||
| 
 | ||||
| void LocalStore::optimisePath_(Activity* act, OptimiseStats& stats, | ||||
|                                const Path& path, InodeHash& inodeHash) { | ||||
| void LocalStore::optimisePath_(OptimiseStats& stats, const Path& path, | ||||
|                                InodeHash& inodeHash) { | ||||
|   checkInterrupt(); | ||||
| 
 | ||||
|   struct stat st; | ||||
|  | @ -100,7 +109,7 @@ void LocalStore::optimisePath_(Activity* act, OptimiseStats& stats, | |||
| 
 | ||||
|   if (S_ISDIR(st.st_mode)) { | ||||
|     Strings names = readDirectoryIgnoringInodes(path, inodeHash); | ||||
|     for (auto& i : names) optimisePath_(act, stats, path + "/" + i, inodeHash); | ||||
|     for (auto& i : names) optimisePath_(stats, path + "/" + i, inodeHash); | ||||
|     return; | ||||
|   } | ||||
| 
 | ||||
|  | @ -117,14 +126,14 @@ void LocalStore::optimisePath_(Activity* act, OptimiseStats& stats, | |||
|      NixOS (example: $fontconfig/var/cache being modified).  Skip | ||||
|      those files.  FIXME: check the modification time. */ | ||||
|   if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) { | ||||
|     printError(format("skipping suspicious writable file '%1%'") % path); | ||||
|     LOG(WARNING) << "skipping suspicious writable file '" << path << "'"; | ||||
|     return; | ||||
|   } | ||||
| 
 | ||||
|   /* This can still happen on top-level files. */ | ||||
|   if (st.st_nlink > 1 && inodeHash.count(st.st_ino)) { | ||||
|     debug(format("'%1%' is already linked, with %2% other file(s)") % path % | ||||
|           (st.st_nlink - 2)); | ||||
|     DLOG(INFO) << path << " is already linked, with " << (st.st_nlink - 2) | ||||
|                << " other file(s)"; | ||||
|     return; | ||||
|   } | ||||
| 
 | ||||
|  | @ -138,7 +147,7 @@ void LocalStore::optimisePath_(Activity* act, OptimiseStats& stats, | |||
|      contents of the symlink (i.e. the result of readlink()), not | ||||
|      the contents of the target (which may not even exist). */ | ||||
|   Hash hash = hashPath(htSHA256, path).first; | ||||
|   debug(format("'%1%' has hash '%2%'") % path % hash.to_string()); | ||||
|   LOG(INFO) << path << " has hash " << hash.to_string(); | ||||
| 
 | ||||
|   /* Check if this is a known hash. */ | ||||
|   Path linkPath = linksDir + "/" + hash.to_string(Base32, false); | ||||
|  | @ -162,8 +171,9 @@ retry: | |||
|            full.  When that happens, it's fine to ignore it: we | ||||
|            just effectively disable deduplication of this | ||||
|            file.  */ | ||||
|         printInfo("cannot link '%s' to '%s': %s", linkPath, path, | ||||
|                   strerror(errno)); | ||||
|         LOG(WARNING) << "cannot link '" << linkPath << " to " << path << ": " | ||||
|                      << strerror(errno); | ||||
| 
 | ||||
|         return; | ||||
| 
 | ||||
|       default: | ||||
|  | @ -178,17 +188,17 @@ retry: | |||
|     throw SysError(format("getting attributes of path '%1%'") % linkPath); | ||||
| 
 | ||||
|   if (st.st_ino == stLink.st_ino) { | ||||
|     debug(format("'%1%' is already linked to '%2%'") % path % linkPath); | ||||
|     DLOG(INFO) << path << " is already linked to " << linkPath; | ||||
|     return; | ||||
|   } | ||||
| 
 | ||||
|   if (st.st_size != stLink.st_size) { | ||||
|     printError(format("removing corrupted link '%1%'") % linkPath); | ||||
|     LOG(WARNING) << "removing corrupted link '" << linkPath << "'"; | ||||
|     unlink(linkPath.c_str()); | ||||
|     goto retry; | ||||
|   } | ||||
| 
 | ||||
|   printMsg(lvlTalkative, format("linking '%1%' to '%2%'") % path % linkPath); | ||||
|   DLOG(INFO) << "linking '" << path << "' to '" << linkPath << "'"; | ||||
| 
 | ||||
|   /* Make the containing directory writable, but only if it's not
 | ||||
|      the store itself (we don't want or need to mess with its | ||||
|  | @ -209,8 +219,9 @@ retry: | |||
|       /* Too many links to the same file (>= 32000 on most file
 | ||||
|          systems).  This is likely to happen with empty files. | ||||
|          Just shrug and ignore. */ | ||||
|       if (st.st_size) | ||||
|         printInfo(format("'%1%' has maximum number of links") % linkPath); | ||||
|       if (st.st_size) { | ||||
|         LOG(WARNING) << linkPath << " has maximum number of links"; | ||||
|       } | ||||
|       return; | ||||
|     } | ||||
|     throw SysError("cannot link '%1%' to '%2%'", tempLink, linkPath); | ||||
|  | @ -218,14 +229,16 @@ retry: | |||
| 
 | ||||
|   /* Atomically replace the old file with the new hard link. */ | ||||
|   if (rename(tempLink.c_str(), path.c_str()) == -1) { | ||||
|     if (unlink(tempLink.c_str()) == -1) | ||||
|       printError(format("unable to unlink '%1%'") % tempLink); | ||||
|     if (unlink(tempLink.c_str()) == -1) { | ||||
|       LOG(ERROR) << "unable to unlink '" << tempLink << "'"; | ||||
|     } | ||||
|     if (errno == EMLINK) { | ||||
|       /* Some filesystems generate too many links on the rename,
 | ||||
|          rather than on the original link.  (Probably it | ||||
|          temporarily increases the st_nlink field before | ||||
|          decreasing it again.) */ | ||||
|       debug("'%s' has reached maximum number of links", linkPath); | ||||
|       DLOG(WARNING) << "'" << linkPath | ||||
|                     << "' has reached maximum number of links"; | ||||
|       return; | ||||
|     } | ||||
|     throw SysError(format("cannot rename '%1%' to '%2%'") % tempLink % path); | ||||
|  | @ -234,30 +247,22 @@ retry: | |||
|   stats.filesLinked++; | ||||
|   stats.bytesFreed += st.st_size; | ||||
|   stats.blocksFreed += st.st_blocks; | ||||
| 
 | ||||
|   if (act) act->result(resFileLinked, st.st_size, st.st_blocks); | ||||
| } | ||||
| 
 | ||||
| void LocalStore::optimiseStore(OptimiseStats& stats) { | ||||
|   Activity act(*logger, actOptimiseStore); | ||||
| 
 | ||||
|   PathSet paths = queryAllValidPaths(); | ||||
|   InodeHash inodeHash = loadInodeHash(); | ||||
| 
 | ||||
|   act.progress(0, paths.size()); | ||||
| 
 | ||||
|   uint64_t done = 0; | ||||
| 
 | ||||
|   for (auto& i : paths) { | ||||
|     addTempRoot(i); | ||||
|     if (!isValidPath(i)) continue; /* path was GC'ed, probably */ | ||||
|     { | ||||
|       Activity act(*logger, lvlTalkative, actUnknown, | ||||
|                    fmt("optimising path '%s'", i)); | ||||
|       optimisePath_(&act, stats, realStoreDir + "/" + baseNameOf(i), inodeHash); | ||||
|       LOG(INFO) << "optimising path '" << i << "'"; | ||||
|       optimisePath_(stats, realStoreDir + "/" + baseNameOf(i), inodeHash); | ||||
|     } | ||||
|     done++; | ||||
|     act.progress(done, paths.size()); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
|  | @ -270,16 +275,17 @@ void LocalStore::optimiseStore() { | |||
| 
 | ||||
|   optimiseStore(stats); | ||||
| 
 | ||||
|   printInfo(format("%1% freed by hard-linking %2% files") % | ||||
|             showBytes(stats.bytesFreed) % stats.filesLinked); | ||||
|   LOG(INFO) << showBytes(stats.bytesFreed) << " freed by hard-linking " | ||||
|             << stats.filesLinked << " files"; | ||||
| } | ||||
| 
 | ||||
| void LocalStore::optimisePath(const Path& path) { | ||||
|   OptimiseStats stats; | ||||
|   InodeHash inodeHash; | ||||
| 
 | ||||
|   if (settings.autoOptimiseStore) | ||||
|     optimisePath_(nullptr, stats, path, inodeHash); | ||||
|   if (settings.autoOptimiseStore) { | ||||
|     optimisePath_(stats, path, inodeHash); | ||||
|   } | ||||
| } | ||||
| 
 | ||||
| }  // namespace nix
 | ||||
|  |  | |||
third_party/nix/src/libstore/pathlocks.cc (vendored): 19 changes
|  | @ -1,5 +1,6 @@ | |||
| #include "pathlocks.hh" | ||||
| #include <fcntl.h> | ||||
| #include <glog/logging.h> | ||||
| #include <sys/file.h> | ||||
| #include <sys/stat.h> | ||||
| #include <sys/types.h> | ||||
|  | @ -82,7 +83,7 @@ bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg, | |||
|     checkInterrupt(); | ||||
|     Path lockPath = path + ".lock"; | ||||
| 
 | ||||
|     debug(format("locking path '%1%'") % path); | ||||
|     DLOG(INFO) << "locking path '" << path << "'"; | ||||
| 
 | ||||
|     AutoCloseFD fd; | ||||
| 
 | ||||
|  | @ -93,7 +94,9 @@ bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg, | |||
|       /* Acquire an exclusive lock. */ | ||||
|       if (!lockFile(fd.get(), ltWrite, false)) { | ||||
|         if (wait) { | ||||
|           if (waitMsg != "") printError(waitMsg); | ||||
|           if (waitMsg != "") { | ||||
|             LOG(WARNING) << waitMsg; | ||||
|           } | ||||
|           lockFile(fd.get(), ltWrite, true); | ||||
|         } else { | ||||
|           /* Failed to lock this path; release all other
 | ||||
|  | @ -103,7 +106,7 @@ bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg, | |||
|         } | ||||
|       } | ||||
| 
 | ||||
|       debug(format("lock acquired on '%1%'") % lockPath); | ||||
|       DLOG(INFO) << "lock acquired on '" << lockPath << "'"; | ||||
| 
 | ||||
|       /* Check that the lock file hasn't become stale (i.e.,
 | ||||
|          hasn't been unlinked). */ | ||||
|  | @ -115,7 +118,7 @@ bool PathLocks::lockPaths(const PathSet& paths, const string& waitMsg, | |||
|            a lock on a deleted file.  This means that other | ||||
|            processes may create and acquire a lock on | ||||
|            `lockPath', and proceed.  So we must retry. */ | ||||
|         debug(format("open lock file '%1%' has become stale") % lockPath); | ||||
|         DLOG(INFO) << "open lock file '" << lockPath << "' has become stale"; | ||||
|       else | ||||
|         break; | ||||
|     } | ||||
|  | @ -139,11 +142,11 @@ void PathLocks::unlock() { | |||
|   for (auto& i : fds) { | ||||
|     if (deletePaths) deleteLockFile(i.second, i.first); | ||||
| 
 | ||||
|     if (close(i.first) == -1) | ||||
|       printError(format("error (ignored): cannot close lock file on '%1%'") % | ||||
|                  i.second); | ||||
|     if (close(i.first) == -1) { | ||||
|       LOG(WARNING) << "cannot close lock file on '" << i.second << "'"; | ||||
|     } | ||||
| 
 | ||||
|     debug(format("lock released on '%1%'") % i.second); | ||||
|     DLOG(INFO) << "lock released on '" << i.second << "'"; | ||||
|   } | ||||
| 
 | ||||
|   fds.clear(); | ||||
|  |  | |||
third_party/nix/src/libstore/profiles.cc (vendored): 9 changes
|  | @ -1,5 +1,6 @@ | |||
| #include "profiles.hh" | ||||
| #include <errno.h> | ||||
| #include <glog/logging.h> | ||||
| #include <stdio.h> | ||||
| #include <sys/stat.h> | ||||
| #include <sys/types.h> | ||||
|  | @ -110,10 +111,10 @@ void deleteGeneration(const Path& profile, unsigned int gen) { | |||
| 
 | ||||
| static void deleteGeneration2(const Path& profile, unsigned int gen, | ||||
|                               bool dryRun) { | ||||
|   if (dryRun) | ||||
|     printInfo(format("would remove generation %1%") % gen); | ||||
|   else { | ||||
|     printInfo(format("removing generation %1%") % gen); | ||||
|   if (dryRun) { | ||||
|     LOG(INFO) << "would remove generation " << gen; | ||||
|   } else { | ||||
|     LOG(INFO) << "removing generation " << gen; | ||||
|     deleteGeneration(profile, gen); | ||||
|   } | ||||
| } | ||||
|  |  | |||
third_party/nix/src/libstore/references.cc (vendored): 3 changes
|  | @ -1,4 +1,5 @@ | |||
| #include "references.hh" | ||||
| #include <glog/logging.h> | ||||
| #include <cstdlib> | ||||
| #include <map> | ||||
| #include "archive.hh" | ||||
|  | @ -32,7 +33,7 @@ static void search(const unsigned char* s, size_t len, StringSet& hashes, | |||
|     if (!match) continue; | ||||
|     string ref((const char*)s + i, refLength); | ||||
|     if (hashes.find(ref) != hashes.end()) { | ||||
|       debug(format("found reference to '%1%' at offset '%2%'") % ref % i); | ||||
|       DLOG(INFO) << "found reference to '" << ref << "' at offset " << i; | ||||
|       seen.insert(ref); | ||||
|       hashes.erase(ref); | ||||
|     } | ||||
|  |  | |||
third_party/nix/src/libstore/remote-store.cc (vendored): 97 changes
|  | @ -1,6 +1,7 @@ | |||
| #include "remote-store.hh" | ||||
| #include <errno.h> | ||||
| #include <fcntl.h> | ||||
| #include <glog/logging.h> | ||||
| #include <sys/socket.h> | ||||
| #include <sys/stat.h> | ||||
| #include <sys/types.h> | ||||
|  | @ -13,6 +14,7 @@ | |||
| #include "finally.hh" | ||||
| #include "globals.hh" | ||||
| #include "pool.hh" | ||||
| #include "prefork-compat.hh" | ||||
| #include "serialise.hh" | ||||
| #include "util.hh" | ||||
| #include "worker-protocol.hh" | ||||
|  | @ -150,10 +152,14 @@ void RemoteStore::initConnection(Connection& conn) { | |||
| } | ||||
| 
 | ||||
| void RemoteStore::setOptions(Connection& conn) { | ||||
|   conn.to << wopSetOptions << settings.keepFailed << settings.keepGoing | ||||
|           << settings.tryFallback << verbosity << settings.maxBuildJobs | ||||
|           << settings.maxSilentTime << true | ||||
|           << (settings.verboseBuild ? lvlError : lvlVomit) | ||||
|   conn.to << wopSetOptions << settings.keepFailed | ||||
|           << settings.keepGoing | ||||
|           // TODO(tazjin): Remove the verbosity stuff here.
 | ||||
|           << settings.tryFallback << compat::kInfo << settings.maxBuildJobs | ||||
|           << settings.maxSilentTime | ||||
|           << true | ||||
|           // TODO(tazjin): what behaviour does this toggle remotely?
 | ||||
|           << (settings.verboseBuild ? compat::kError : compat::kVomit) | ||||
|           << 0  // obsolete log type
 | ||||
|           << 0  /* obsolete print build trace */ | ||||
|           << settings.buildCores << settings.useSubstitutes; | ||||
|  | @ -194,7 +200,8 @@ struct ConnectionHandle { | |||
|   ~ConnectionHandle() { | ||||
|     if (!daemonException && std::uncaught_exceptions()) { | ||||
|       handle.markBad(); | ||||
|       debug("closing daemon connection because of an exception"); | ||||
|       // TODO(tazjin): are these types of things supposed to be DEBUG?
 | ||||
|       DLOG(INFO) << "closing daemon connection because of an exception"; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|  | @ -625,19 +632,29 @@ RemoteStore::Connection::~Connection() { | |||
|   } | ||||
| } | ||||
| 
 | ||||
| static Logger::Fields readFields(Source& from) { | ||||
|   Logger::Fields fields; | ||||
| // TODO(tazjin): these logger fields used to be passed to the JSON
 | ||||
| // logger but I don't care about them, whatever sends them should
 | ||||
| // also be fixed.
 | ||||
| static void ignoreFields(Source& from) { | ||||
|   size_t size = readInt(from); | ||||
| 
 | ||||
|   // This ignores the fields simply by reading the data into nowhere.
 | ||||
|   for (size_t n = 0; n < size; n++) { | ||||
|     auto type = (decltype(Logger::Field::type))readInt(from); | ||||
|     if (type == Logger::Field::tInt) | ||||
|       fields.push_back(readNum<uint64_t>(from)); | ||||
|     else if (type == Logger::Field::tString) | ||||
|       fields.push_back(readString(from)); | ||||
|     else | ||||
|       throw Error("got unsupported field type %x from Nix daemon", (int)type); | ||||
|     auto type_tag = readInt(from); | ||||
| 
 | ||||
|     switch (type_tag) { | ||||
|       case 0:  // previously: 0 ~ Logger::Field::tInt
 | ||||
|         readNum<uint64_t>(from); | ||||
|         break; | ||||
| 
 | ||||
|       case 1:  // previously: 1 ~ Logger::Field::tString
 | ||||
|         readString(from); | ||||
|         break; | ||||
| 
 | ||||
|       default: | ||||
|         throw Error("got unsupported field type %x from Nix daemon", type_tag); | ||||
|     } | ||||
|   } | ||||
|   return fields; | ||||
| } | ||||
| 
 | ||||
| std::exception_ptr RemoteStore::Connection::processStderr(Sink* sink, | ||||
|  | @ -667,36 +684,52 @@ std::exception_ptr RemoteStore::Connection::processStderr(Sink* sink, | |||
|       return std::make_exception_ptr(Error(status, error)); | ||||
|     } | ||||
| 
 | ||||
|     else if (msg == STDERR_NEXT) | ||||
|       printError(chomp(readString(from))); | ||||
|     else if (msg == STDERR_NEXT) { | ||||
|       LOG(ERROR) << chomp(readString(from)); | ||||
|     } | ||||
| 
 | ||||
|     else if (msg == STDERR_START_ACTIVITY) { | ||||
|       auto act = readNum<ActivityId>(from); | ||||
|       auto lvl = (Verbosity)readInt(from); | ||||
|       auto type = (ActivityType)readInt(from); | ||||
|       auto s = readString(from); | ||||
|       auto fields = readFields(from); | ||||
|       auto parent = readNum<ActivityId>(from); | ||||
|       logger->startActivity(act, lvl, type, s, fields, parent); | ||||
|       // Various fields need to be ignored in this case, as the
 | ||||
|       // activity stuff is being removed.
 | ||||
|       readNum<uint64_t>(from);  // used to be ActivityId
 | ||||
|       const auto verbosity = static_cast<compat::Verbosity>(readInt(from)); | ||||
|       readInt(from);  // activity type
 | ||||
|       const auto msg = readString(from); | ||||
|       ignoreFields(from); | ||||
|       readNum<uint64_t>(from);  // ActivityId of "parent"
 | ||||
| 
 | ||||
|       switch (verbosity) { | ||||
|         case compat::kError: | ||||
|           LOG(ERROR) << msg; | ||||
|           break; | ||||
|         case compat::kWarn: | ||||
|           LOG(WARNING) << msg; | ||||
|           break; | ||||
|         case compat::kInfo: | ||||
|           LOG(INFO) << msg; | ||||
|           break; | ||||
|         default: | ||||
|           DLOG(INFO) << msg; | ||||
|       } | ||||
|     } | ||||
| 
 | ||||
|     else if (msg == STDERR_STOP_ACTIVITY) { | ||||
|       auto act = readNum<ActivityId>(from); | ||||
|       logger->stopActivity(act); | ||||
|       readNum<uint64_t>(from);  // used to be ActivityId
 | ||||
|     } | ||||
| 
 | ||||
|     else if (msg == STDERR_RESULT) { | ||||
|       auto act = readNum<ActivityId>(from); | ||||
|       auto type = (ResultType)readInt(from); | ||||
|       auto fields = readFields(from); | ||||
|       logger->result(act, type, fields); | ||||
|       readNum<uint64_t>(from);  // ActivityId
 | ||||
|       readInt(from);            // ResultType
 | ||||
|       ignoreFields(from); | ||||
|     } | ||||
| 
 | ||||
|     else if (msg == STDERR_LAST) | ||||
|     else if (msg == STDERR_LAST) { | ||||
|       break; | ||||
|     } | ||||
| 
 | ||||
|     else | ||||
|     else { | ||||
|       throw Error("got unknown message type %x from Nix daemon", msg); | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   return nullptr; | ||||
|  |  | |||
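Editor's note: the STDERR_START_ACTIVITY branch above inlines a verbosity-to-severity mapping. If the same mapping is needed elsewhere, it could be factored into a helper along these lines; this is a sketch that reuses the hypothetical compat::Verbosity enum sketched earlier, not anything defined in the commit.

    #include <glog/logging.h>
    #include <string>

    // Maps a legacy daemon verbosity value onto a glog call, mirroring the
    // switch in processStderr. compat::Verbosity is the assumed shim enum.
    void logAtCompatLevel(compat::Verbosity level, const std::string& message) {
      switch (level) {
        case compat::kError:
          LOG(ERROR) << message;
          break;
        case compat::kWarn:
          LOG(WARNING) << message;
          break;
        case compat::kInfo:
          LOG(INFO) << message;
          break;
        default:
          // Anything chattier than "info" only surfaces in debug builds.
          DLOG(INFO) << message;
      }
    }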
							
								
								
									
3  third_party/nix/src/libstore/sqlite.cc (vendored)
|  | @ -1,4 +1,5 @@ | |||
| #include "sqlite.hh" | ||||
| #include <glog/logging.h> | ||||
| #include <sqlite3.h> | ||||
| #include <atomic> | ||||
| #include "util.hh" | ||||
|  | @ -157,7 +158,7 @@ void handleSQLiteBusy(const SQLiteBusy& e) { | |||
| 
 | ||||
|   if (now > lastWarned + 10) { | ||||
|     lastWarned = now; | ||||
|     printError("warning: %s", e.what()); | ||||
|     LOG(ERROR) << e.what(); | ||||
|   } | ||||
| 
 | ||||
|   /* Sleep for a while since retrying the transaction right away
 | ||||
|  |  | |||
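Editor's note: the retained lastWarned timer in handleSQLiteBusy rate-limits the message by wall-clock time. glog also ships occurrence-based rate limiting (LOG_EVERY_N, LOG_FIRST_N), which is not a drop-in replacement for a ten-second window but serves a similar purpose in busy-retry loops. A sketch, not part of the commit:

    #include <glog/logging.h>

    void onSqliteBusy(const char* what) {
      // Logs the 1st, 101st, 201st ... call; google::COUNTER is supplied by
      // the macro and holds how many times this statement was reached.
      LOG_EVERY_N(WARNING, 100) << "SQLite is busy (occurrence "
                                << google::COUNTER << "): " << what;
    }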
							
								
								
									
11  third_party/nix/src/libstore/ssh.cc (vendored)
|  | @ -54,8 +54,13 @@ std::unique_ptr<SSHMaster::Connection> SSHMaster::startCommand( | |||
|         } else { | ||||
|           args = {"ssh", host.c_str(), "-x", "-a"}; | ||||
|           addCommonSSHOpts(args); | ||||
|           if (socketPath != "") args.insert(args.end(), {"-S", socketPath}); | ||||
|           if (verbosity >= lvlChatty) args.push_back("-v"); | ||||
|           if (socketPath != "") { | ||||
|             args.insert(args.end(), {"-S", socketPath}); | ||||
|           } | ||||
|           // TODO(tazjin): Abseil verbosity flag
 | ||||
|           /*if (verbosity >= lvlChatty) {
 | ||||
|               args.push_back("-v"); | ||||
|               }*/ | ||||
|         } | ||||
| 
 | ||||
|         args.push_back(command); | ||||
|  | @ -107,7 +112,7 @@ Path SSHMaster::startMaster() { | |||
|                         "-S",  state->socketPath, | ||||
|                         "-o",  "LocalCommand=echo started", | ||||
|                         "-o",  "PermitLocalCommand=yes"}; | ||||
|         if (verbosity >= lvlChatty) args.push_back("-v"); | ||||
|         // if (verbosity >= lvlChatty) args.push_back("-v");
 | ||||
|         addCommonSSHOpts(args); | ||||
|         execvp(args.begin()->c_str(), stringsToCharPtrs(args).data()); | ||||
| 
 | ||||
|  |  | |||
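Editor's note: both ssh.cc changes comment out the verbosity >= lvlChatty checks pending a replacement flag (see the TODO). One way to restore the behaviour with machinery glog already provides is its --v flag via VLOG_IS_ON; the threshold below is an assumption, since there is no fixed mapping from lvlChatty to a --v level.

    #include <glog/logging.h>
    #include <string>
    #include <vector>

    void maybeAddSshVerboseFlag(std::vector<std::string>& args) {
      // True when the process runs with --v=2 or higher; 2 is an arbitrary
      // stand-in for the old lvlChatty threshold.
      if (VLOG_IS_ON(2)) {
        args.push_back("-v");
      }
    }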
							
								
								
									
43  third_party/nix/src/libstore/store-api.cc (vendored)
|  | @ -1,4 +1,5 @@ | |||
| #include "store-api.hh" | ||||
| #include <glog/logging.h> | ||||
| #include <future> | ||||
| #include "crypto.hh" | ||||
| #include "derivations.hh" | ||||
|  | @ -526,15 +527,16 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, | |||
|   auto srcUri = srcStore->getUri(); | ||||
|   auto dstUri = dstStore->getUri(); | ||||
| 
 | ||||
|   Activity act(*logger, lvlInfo, actCopyPath, | ||||
|                srcUri == "local" || srcUri == "daemon" | ||||
|                    ? fmt("copying path '%s' to '%s'", storePath, dstUri) | ||||
|                    : dstUri == "local" || dstUri == "daemon" | ||||
|                          ? fmt("copying path '%s' from '%s'", storePath, srcUri) | ||||
|                          : fmt("copying path '%s' from '%s' to '%s'", storePath, | ||||
|                                srcUri, dstUri), | ||||
|                {storePath, srcUri, dstUri}); | ||||
|   PushActivity pact(act.id); | ||||
|   if (srcUri == "local" || srcUri == "daemon") { | ||||
|     LOG(INFO) << "copying path '" << storePath << "' to '" << dstUri << "'"; | ||||
|   } else { | ||||
|     if (dstUri == "local" || dstUri == "daemon") { | ||||
|       LOG(INFO) << "copying path '" << storePath << "' from '" << srcUri << "'"; | ||||
|     } else { | ||||
|       LOG(INFO) << "copying path '" << storePath << "' from '" << srcUri | ||||
|                 << "' to '" << dstUri << "'"; | ||||
|     } | ||||
|   } | ||||
| 
 | ||||
|   auto info = srcStore->queryPathInfo(storePath); | ||||
| 
 | ||||
|  | @ -565,7 +567,6 @@ void copyStorePath(ref<Store> srcStore, ref<Store> dstStore, | |||
|         LambdaSink wrapperSink([&](const unsigned char* data, size_t len) { | ||||
|           sink(data, len); | ||||
|           total += len; | ||||
|           act.progress(total, info->narSize); | ||||
|         }); | ||||
|         srcStore->narFromPath({storePath}, wrapperSink); | ||||
|       }, | ||||
|  | @ -588,18 +589,13 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, | |||
| 
 | ||||
|   if (missing.empty()) return; | ||||
| 
 | ||||
|   Activity act(*logger, lvlInfo, actCopyPaths, | ||||
|                fmt("copying %d paths", missing.size())); | ||||
|   LOG(INFO) << "copying " << missing.size() << " paths"; | ||||
| 
 | ||||
|   std::atomic<size_t> nrDone{0}; | ||||
|   std::atomic<size_t> nrFailed{0}; | ||||
|   std::atomic<uint64_t> bytesExpected{0}; | ||||
|   std::atomic<uint64_t> nrRunning{0}; | ||||
| 
 | ||||
|   auto showProgress = [&]() { | ||||
|     act.progress(nrDone, missing.size(), nrRunning, nrFailed); | ||||
|   }; | ||||
| 
 | ||||
|   ThreadPool pool; | ||||
| 
 | ||||
|   processGraph<Path>( | ||||
|  | @ -608,14 +604,12 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, | |||
|       [&](const Path& storePath) { | ||||
|         if (dstStore->isValidPath(storePath)) { | ||||
|           nrDone++; | ||||
|           showProgress(); | ||||
|           return PathSet(); | ||||
|         } | ||||
| 
 | ||||
|         auto info = srcStore->queryPathInfo(storePath); | ||||
| 
 | ||||
|         bytesExpected += info->narSize; | ||||
|         act.setExpected(actCopyPath, bytesExpected); | ||||
| 
 | ||||
|         return info->references; | ||||
|       }, | ||||
|  | @ -625,21 +619,17 @@ void copyPaths(ref<Store> srcStore, ref<Store> dstStore, | |||
| 
 | ||||
|         if (!dstStore->isValidPath(storePath)) { | ||||
|           MaintainCount<decltype(nrRunning)> mc(nrRunning); | ||||
|           showProgress(); | ||||
|           try { | ||||
|             copyStorePath(srcStore, dstStore, storePath, repair, checkSigs); | ||||
|           } catch (Error& e) { | ||||
|             nrFailed++; | ||||
|             if (!settings.keepGoing) throw e; | ||||
|             logger->log(lvlError, | ||||
|                         format("could not copy %s: %s") % storePath % e.what()); | ||||
|             showProgress(); | ||||
|             LOG(ERROR) << "could not copy " << storePath << ": " << e.what(); | ||||
|             return; | ||||
|           } | ||||
|         } | ||||
| 
 | ||||
|         nrDone++; | ||||
|         showProgress(); | ||||
|       }); | ||||
| } | ||||
| 
 | ||||
|  | @ -702,9 +692,8 @@ void ValidPathInfo::sign(const SecretKey& secretKey) { | |||
| 
 | ||||
| bool ValidPathInfo::isContentAddressed(const Store& store) const { | ||||
|   auto warn = [&]() { | ||||
|     printError( | ||||
|         format("warning: path '%s' claims to be content-addressed but isn't") % | ||||
|         path); | ||||
|     LOG(ERROR) << "warning: path '" << path | ||||
|                << "' claims to be content-addressed but isn't"; | ||||
|   }; | ||||
| 
 | ||||
|   if (hasPrefix(ca, "text:")) { | ||||
|  | @ -873,7 +862,7 @@ std::list<ref<Store>> getDefaultSubstituters() { | |||
|       try { | ||||
|         stores.push_back(openStore(uri)); | ||||
|       } catch (Error& e) { | ||||
|         printError("warning: %s", e.what()); | ||||
|         LOG(WARNING) << e.what(); | ||||
|       } | ||||
|     }; | ||||
| 
 | ||||
|  |  | |||
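Editor's note: copyPaths loses its Activity-based progress reporting but keeps the atomic counters. A lightweight substitute is to log progress periodically from those counters; the sketch below borrows the counter names from the hunk above and is not part of the commit.

    #include <glog/logging.h>
    #include <atomic>
    #include <cstddef>

    void reportCopyProgress(const std::atomic<std::size_t>& nrDone,
                            const std::atomic<std::size_t>& nrFailed,
                            std::size_t total) {
      // Emits a line on the 1st, 51st, 101st ... call instead of a live
      // progress bar; cheap enough to call after every finished path.
      LOG_EVERY_N(INFO, 50) << "copied " << nrDone.load() << " of " << total
                            << " paths (" << nrFailed.load() << " failed)";
    }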