style: Switch to nixfmt from nixpkgs-fmt
Most of the ecosystem has moved to nixfmt, and many people have already configured their editors to autoformat with it.

Closes: https://git.snix.dev/snix/snix/issues/62
Change-Id: Icf39e7836c91fc2ae49fbe22a40a639105bfb0bd
Reviewed-on: https://cl.snix.dev/c/snix/+/30671
Reviewed-by: Florian Klink <flokli@flokli.de>
Tested-by: besadii
Autosubmit: Ilan Joselevich <personal@ilanjoselevich.com>
parent 3443e6bd08
commit 91d02d8c84

136 changed files with 39952 additions and 11007 deletions
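Almost everything in the diff below is mechanical and follows from the formatting rules that differ between the two tools: nixpkgs-fmt keeps lambda argument sets on leading-comma lines, while nixfmt (the style standardised via the Nix formatting RFC work) puts one trailing-comma argument per line; long `inherit` lists and function applications that exceed the line width are likewise broken onto one item per line. A minimal before/after sketch of the most common change (a hypothetical file, not taken from this commit; `depot.nix.someFunction` is an invented name for illustration):

    # nixpkgs-fmt style (before)
    { depot
    , pkgs
    , ...
    }:
    depot.nix.someFunction pkgs

    # nixfmt style (after)
    {
      depot,
      pkgs,
      ...
    }:
    depot.nix.someFunction pkgs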
@@ -1,6 +1,11 @@
 # See README.md
-{ depot ? import ../. { }, ... }:
+{
+  depot ? import ../. { },
+  ...
+}:
 
-depot.third_party.nixpkgs.extend (_: _: {
-  tvl = depot;
-})
+depot.third_party.nixpkgs.extend (
+  _: _: {
+    tvl = depot;
+  }
+)
@@ -1,6 +1,7 @@
-{ depot
-, pkgs
-, ...
+{
+  depot,
+  pkgs,
+  ...
 }:
 
 let
@@ -13,27 +14,33 @@ let
   '';
   # clickhouse has a very odd AWS config concept.
   # Configure it to be a bit more sane.
-  clickhouseLocalFixedAWS = pkgs.runCommand "clickhouse-local-fixed"
-    {
-      nativeBuildInputs = [ pkgs.makeWrapper ];
-    } ''
-    mkdir -p $out/bin
-    makeWrapper ${pkgs.clickhouse}/bin/clickhouse-local $out/bin/clickhouse-local \
-      --append-flags "-C ${clickhouseConfigAWS}"
-  '';
+  clickhouseLocalFixedAWS =
+    pkgs.runCommand "clickhouse-local-fixed"
+      {
+        nativeBuildInputs = [ pkgs.makeWrapper ];
+      }
+      ''
+        mkdir -p $out/bin
+        makeWrapper ${pkgs.clickhouse}/bin/clickhouse-local $out/bin/clickhouse-local \
+          --append-flags "-C ${clickhouseConfigAWS}"
+      '';
 
 in
 depot.nix.readTree.drvTargets {
   inherit clickhouseLocalFixedAWS;
 
-  parse-bucket-logs = pkgs.runCommand "archivist-parse-bucket-logs"
-    {
-      nativeBuildInputs = [ pkgs.makeWrapper ];
-    } ''
-    mkdir -p $out/bin
-    makeWrapper ${(pkgs.writers.writeRust "parse-bucket-logs-unwrapped" {} ./parse_bucket_logs.rs)} $out/bin/archivist-parse-bucket-logs \
-      --prefix PATH : ${pkgs.lib.makeBinPath [ clickhouseLocalFixedAWS ]}
-  '';
+  parse-bucket-logs =
+    pkgs.runCommand "archivist-parse-bucket-logs"
+      {
+        nativeBuildInputs = [ pkgs.makeWrapper ];
+      }
+      ''
+        mkdir -p $out/bin
+        makeWrapper ${
+          (pkgs.writers.writeRust "parse-bucket-logs-unwrapped" { } ./parse_bucket_logs.rs)
+        } $out/bin/archivist-parse-bucket-logs \
+          --prefix PATH : ${pkgs.lib.makeBinPath [ clickhouseLocalFixedAWS ]}
+      '';
 
   # A shell, by default pointing us to the archivist SSO profile / account by default.
   shell = pkgs.mkShell {
File diff suppressed because it is too large
@@ -1,4 +1,9 @@
-{ pkgs, depot, lib, ... }:
+{
+  pkgs,
+  depot,
+  lib,
+  ...
+}:
 
 (pkgs.callPackage ./Cargo.nix {
   defaultCrateOverrides = (depot.snix.utils.defaultCrateOverridesForPkgs pkgs) // {
@@ -10,6 +15,7 @@
       nativeBuildInputs = [ pkgs.protobuf ];
     };
   };
-}).rootCrate.build.overrideAttrs {
-  meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
-}
+}).rootCrate.build.overrideAttrs
+  {
+    meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
+  }
File diff suppressed because it is too large
@@ -6,6 +6,7 @@
       src = depot.snix.utils.filterRustCrateSrc { root = prev.src.origSrc; };
     };
   };
-}).rootCrate.build.overrideAttrs {
-  meta.ci.extraSteps.crate2nix = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
-}
+}).rootCrate.build.overrideAttrs
+  {
+    meta.ci.extraSteps.crate2nix = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
+  }
File diff suppressed because it is too large
@@ -6,6 +6,7 @@
       src = depot.snix.utils.filterRustCrateSrc { root = prev.src.origSrc; };
     };
   };
-}).rootCrate.build.overrideAttrs {
-  meta.ci.extraSteps.crate2nix = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
-}
+}).rootCrate.build.overrideAttrs
+  {
+    meta.ci.extraSteps.crate2nix = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
+  }
File diff suppressed because it is too large
@@ -1,4 +1,9 @@
-{ pkgs, lib, depot, ... }:
+{
+  pkgs,
+  lib,
+  depot,
+  ...
+}:
 let
   pkgsCross = pkgs.pkgsCross.wasm32-unknown-none;
 in
@@ -8,25 +13,26 @@ in
       src = depot.snix.utils.filterRustCrateSrc { root = prev.src.origSrc; };
     };
   };
-}).rootCrate.build.overrideAttrs (oldAttrs: {
-  installPhase = ''
-    ${lib.getExe pkgs.wasm-bindgen-cli} \
-      --target web \
-      --out-dir $out \
-      --out-name ${oldAttrs.crateName} \
-      --no-typescript \
-      target/lib/${oldAttrs.crateName}-${oldAttrs.metadata}.wasm
+}).rootCrate.build.overrideAttrs
+  (oldAttrs: {
+    installPhase = ''
+      ${lib.getExe pkgs.wasm-bindgen-cli} \
+        --target web \
+        --out-dir $out \
+        --out-name ${oldAttrs.crateName} \
+        --no-typescript \
+        target/lib/${oldAttrs.crateName}-${oldAttrs.metadata}.wasm
 
-    mv src/*.{html,css} $out
-  '';
+      mv src/*.{html,css} $out
+    '';
 
-  passthru.serve = pkgs.writeShellScriptBin "snixbolt-serve" ''
-    ${lib.getExe pkgs.simple-http-server} \
-      --index \
-      --nocache \
-      "$@" \
-      ${depot.contrib.snixbolt}
-  '';
+    passthru.serve = pkgs.writeShellScriptBin "snixbolt-serve" ''
+      ${lib.getExe pkgs.simple-http-server} \
+        --index \
+        --nocache \
+        "$@" \
+        ${depot.contrib.snixbolt}
+    '';
 
-  meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
-})
+    meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
+  })
File diff suppressed because it is too large
@@ -6,6 +6,7 @@
       src = depot.snix.utils.filterRustCrateSrc { root = prev.src.origSrc; };
     };
   };
-}).rootCrate.build.overrideAttrs {
-  meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
-}
+}).rootCrate.build.overrideAttrs
+  {
+    meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
+  }
File diff suppressed because it is too large
@@ -6,6 +6,7 @@
       src = depot.snix.utils.filterRustCrateSrc { root = prev.src.origSrc; };
     };
   };
-}).rootCrate.build.overrideAttrs {
-  meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
-}
+}).rootCrate.build.overrideAttrs
+  {
+    meta.ci.extraSteps.crate2nix-check = depot.snix.utils.mkCrate2nixCheck ./Cargo.nix;
+  }
default.nix (135 changed lines)
@@ -2,28 +2,31 @@
 # (see //nix/readTree for details) and constructing a matching attribute set
 # tree.
 
-{ nixpkgsBisectPath ? null
-, parentTargetMap ? null
-, nixpkgsConfig ? { }
-, localSystem ? builtins.currentSystem
-, crossSystem ? null
-, ...
+{
+  nixpkgsBisectPath ? null,
+  parentTargetMap ? null,
+  nixpkgsConfig ? { },
+  localSystem ? builtins.currentSystem,
+  crossSystem ? null,
+  ...
 }@args:
 
 let
   readTree = import ./nix/readTree { };
 
-  readDepot = depotArgs: readTree {
-    args = depotArgs;
-    path = ./.;
-    scopedArgs = {
-      # FIXME(Lix): this cannot work in Lix itself.
-      # __findFile = _: _: throw "Do not import from NIX_PATH in the depot!";
-      builtins = builtins // {
-        currentSystem = throw "Use localSystem from the readTree args instead of builtins.currentSystem!";
+  readDepot =
+    depotArgs:
+    readTree {
+      args = depotArgs;
+      path = ./.;
+      scopedArgs = {
+        # FIXME(Lix): this cannot work in Lix itself.
+        # __findFile = _: _: throw "Do not import from NIX_PATH in the depot!";
+        builtins = builtins // {
+          currentSystem = throw "Use localSystem from the readTree args instead of builtins.currentSystem!";
+        };
       };
     };
-  };
 
   # To determine build targets, we walk through the depot tree and
   # fetch attributes that were imported by readTree and are buildable.
@@ -34,54 +37,66 @@ let
   eligible = node: (node ? outPath) && !(node.meta.ci.skip or (node.meta.broken or false));
 
 in
-readTree.fix (self: (readDepot {
-  inherit localSystem crossSystem;
-  depot = self;
+readTree.fix (
+  self:
+  (readDepot {
+    inherit localSystem crossSystem;
+    depot = self;
 
-  # Pass third_party as 'pkgs' (for compatibility with external
-  # imports for certain subdirectories)
-  pkgs = self.third_party.nixpkgs;
+    # Pass third_party as 'pkgs' (for compatibility with external
+    # imports for certain subdirectories)
+    pkgs = self.third_party.nixpkgs;
 
-  # Expose lib attribute to packages.
-  lib = self.third_party.nixpkgs.lib;
+    # Expose lib attribute to packages.
+    lib = self.third_party.nixpkgs.lib;
 
-  # Pass arguments passed to the entire depot through, for packages
-  # that would like to add functionality based on this.
-  #
-  # Note that it is intended for exceptional circumstance, such as
-  # debugging by bisecting nixpkgs.
-  externalArgs = args;
-}) // {
-  # Make the path to the depot available for things that might need it
-  # (e.g. NixOS module inclusions)
-  path = self.third_party.nixpkgs.lib.cleanSourceWith {
-    name = "depot";
-    src = ./.;
-    filter = self.third_party.nixpkgs.lib.cleanSourceFilter;
-  };
+    # Pass arguments passed to the entire depot through, for packages
+    # that would like to add functionality based on this.
+    #
+    # Note that it is intended for exceptional circumstance, such as
+    # debugging by bisecting nixpkgs.
+    externalArgs = args;
+  })
+  // {
+    # Make the path to the depot available for things that might need it
+    # (e.g. NixOS module inclusions)
+    path = self.third_party.nixpkgs.lib.cleanSourceWith {
+      name = "depot";
+      src = ./.;
+      filter = self.third_party.nixpkgs.lib.cleanSourceFilter;
+    };
 
-  # Additionally targets can be excluded from CI by adding them to the
-  # list below.
-  ci.excluded = [
-  ];
+    # Additionally targets can be excluded from CI by adding them to the
+    # list below.
+    ci.excluded = [
+    ];
 
-  # List of all buildable targets, for CI purposes.
-  #
-  # Note: To prevent infinite recursion, this *must* be a nested
-  # attribute set (which does not have a __readTree attribute).
-  ci.targets = readTree.gather
-    (t: (eligible t) && (!builtins.elem t self.ci.excluded))
-    (self // {
-      # remove the pipelines themselves from the set over which to
-      # generate pipelines because that also leads to infinite
-      # recursion.
-      ops = self.ops // { pipelines = null; };
-    });
+    # List of all buildable targets, for CI purposes.
+    #
+    # Note: To prevent infinite recursion, this *must* be a nested
+    # attribute set (which does not have a __readTree attribute).
+    ci.targets = readTree.gather (t: (eligible t) && (!builtins.elem t self.ci.excluded)) (
+      self
+      // {
+        # remove the pipelines themselves from the set over which to
+        # generate pipelines because that also leads to infinite
+        # recursion.
+        ops = self.ops // {
+          pipelines = null;
+        };
+      }
+    );
 
-  # Derivation that gcroots all depot targets.
-  ci.gcroot = with self.third_party.nixpkgs; writeText "depot-gcroot"
-    (builtins.concatStringsSep "\n"
-      (lib.flatten
-        (map (p: map (o: p.${o}) p.outputs or [ ]) # list all outputs of each drv
-          self.ci.targets)));
-})
+    # Derivation that gcroots all depot targets.
+    ci.gcroot =
+      with self.third_party.nixpkgs;
+      writeText "depot-gcroot" (
+        builtins.concatStringsSep "\n" (
+          lib.flatten (
+            map (p: map (o: p.${o}) p.outputs or [ ]) # list all outputs of each drv
+              self.ci.targets
+          )
+        )
+      );
+  }
+)
@@ -1,10 +1,12 @@
 { pkgs, ... }:
 let
-  mkWebroot = title: imgsrc: pkgs.runCommand "webroot" { } ''
-    mkdir -p $out
-    title="${title}" substituteAll ${./index.html} $out/index.html
-    cp ${imgsrc} $out/solves-this.png
-  '';
+  mkWebroot =
+    title: imgsrc:
+    pkgs.runCommand "webroot" { } ''
+      mkdir -p $out
+      title="${title}" substituteAll ${./index.html} $out/index.html
+      cp ${imgsrc} $out/solves-this.png
+    '';
 
 in
 {
@@ -1,9 +1,14 @@
 # Check protobuf breaking. Lints already happen in individual targets.
 #
-{ depot, pkgs, lib, ... }:
+{
+  depot,
+  pkgs,
+  lib,
+  ...
+}:
 
 let
-  inherit (depot.nix) bufCheck;# self reference
+  inherit (depot.nix) bufCheck; # self reference
 
   script = pkgs.writeShellScriptBin "ci-buf-check" ''
     export PATH="$PATH:${pkgs.lib.makeBinPath [ pkgs.buf ]}"
@@ -1,7 +1,6 @@
 { makeSetupHook }:
 
-makeSetupHook
-{
+makeSetupHook {
   name = "rules_java_bazel_hook";
   substitutions = {
     local_java = ./local_java;
@@ -1,13 +1,14 @@
-{ stdenvNoCC
-, lib
-, makeSetupHook
-, fetchFromGitHub
-, coreutils
-, gnugrep
-, nodejs
-, yarn
-, git
-, cacert
+{
+  stdenvNoCC,
+  lib,
+  makeSetupHook,
+  fetchFromGitHub,
+  coreutils,
+  gnugrep,
+  nodejs,
+  yarn,
+  git,
+  cacert,
 }:
 let
   rulesNodeJS = stdenvNoCC.mkDerivation rec {
@@ -30,7 +31,12 @@ let
         --replace-quiet '#!/usr/bin/env bash' '#!${stdenvNoCC.shell}' \
         --replace-quiet '#!/bin/bash' '#!${stdenvNoCC.shell}'
     done
-    sed -i '/^#!/a export PATH=${lib.makeBinPath [ coreutils gnugrep ]}:$PATH' internal/node/launcher.sh
+    sed -i '/^#!/a export PATH=${
+      lib.makeBinPath [
+        coreutils
+        gnugrep
+      ]
+    }:$PATH' internal/node/launcher.sh
   '';
 
   installPhase = ''
@@ -38,8 +44,7 @@ let
     '';
   };
 in
-makeSetupHook
-{
+makeSetupHook {
   name = "bazelbuild-rules_nodejs-5-hook";
   propagatedBuildInputs = [
     nodejs
@@ -48,7 +53,12 @@ makeSetupHook
     cacert
   ];
   substitutions = {
-    inherit nodejs yarn cacert rulesNodeJS;
+    inherit
+      nodejs
+      yarn
+      cacert
+      rulesNodeJS
+      ;
     local_node = ./local_node;
     local_yarn = ./local_yarn;
   };
@@ -1,17 +1,19 @@
-{ stdenv
-, lib
-, pkgs
-, coreutils
+{
+  stdenv,
+  lib,
+  pkgs,
+  coreutils,
 }:
 
-{ name ? "${baseAttrs.pname}-${baseAttrs.version}"
-, bazelTargets
-, bazel ? pkgs.bazel
-, depsHash
-, extraCacheInstall ? ""
-, extraBuildSetup ? ""
-, extraBuildInstall ? ""
-, ...
+{
+  name ? "${baseAttrs.pname}-${baseAttrs.version}",
+  bazelTargets,
+  bazel ? pkgs.bazel,
+  depsHash,
+  extraCacheInstall ? "",
+  extraBuildSetup ? "",
+  extraBuildInstall ? "",
+  ...
 }@baseAttrs:
 
 let
@@ -24,20 +26,23 @@ let
   ];
   attrs = cleanAttrs baseAttrs;
 
-  base = stdenv.mkDerivation (attrs // {
-    nativeBuildInputs = (attrs.nativeBuildInputs or [ ]) ++ [
-      bazel
-    ];
+  base = stdenv.mkDerivation (
+    attrs
+    // {
+      nativeBuildInputs = (attrs.nativeBuildInputs or [ ]) ++ [
+        bazel
+      ];
 
-    preUnpack = ''
-      if [[ ! -d $HOME ]]; then
-        export HOME=$NIX_BUILD_TOP/home
-        mkdir -p $HOME
-      fi
-    '';
+      preUnpack = ''
+        if [[ ! -d $HOME ]]; then
+          export HOME=$NIX_BUILD_TOP/home
+          mkdir -p $HOME
+        fi
+      '';
 
-    bazelTargetNames = builtins.attrNames bazelTargets;
-  });
+      bazelTargetNames = builtins.attrNames bazelTargets;
+    }
+  );
 
   cache = base.overrideAttrs (base: {
     name = "${name}-deps";
@@ -89,18 +94,23 @@ let
     installPhase = ''
       runHook preInstall
 
-      ${builtins.concatStringsSep "\n" (lib.mapAttrsToList (target: outPath: lib.optionalString (outPath != null) ''
-        TARGET_OUTPUTS="$(bazel cquery --repository_cache=$cache/repository-cache $bazelFlags "''${bazelFlagsArray[@]}" --output=files "${target}")"
-        if [[ "$(echo "$TARGET_OUTPUTS" | wc -l)" -gt 1 ]]; then
-          echo "Installing ${target}'s outputs ($TARGET_OUTPUTS) into ${outPath} as a directory"
-          mkdir -p "${outPath}"
-          cp $TARGET_OUTPUTS "${outPath}"
-        else
-          echo "Installing ${target}'s output ($TARGET_OUTPUTS) to ${outPath}"
-          mkdir -p "${dirOf outPath}"
-          cp "$TARGET_OUTPUTS" "${outPath}"
-        fi
-      '') bazelTargets)}
+      ${builtins.concatStringsSep "\n" (
+        lib.mapAttrsToList (
+          target: outPath:
+          lib.optionalString (outPath != null) ''
+            TARGET_OUTPUTS="$(bazel cquery --repository_cache=$cache/repository-cache $bazelFlags "''${bazelFlagsArray[@]}" --output=files "${target}")"
+            if [[ "$(echo "$TARGET_OUTPUTS" | wc -l)" -gt 1 ]]; then
+              echo "Installing ${target}'s outputs ($TARGET_OUTPUTS) into ${outPath} as a directory"
+              mkdir -p "${outPath}"
+              cp $TARGET_OUTPUTS "${outPath}"
+            else
+              echo "Installing ${target}'s output ($TARGET_OUTPUTS) to ${outPath}"
+              mkdir -p "${dirOf outPath}"
+              cp "$TARGET_OUTPUTS" "${outPath}"
+            fi
+          ''
+        ) bazelTargets
+      )}
       ${extraBuildInstall}
 
       runHook postInstall
@@ -1,6 +1,7 @@
 { pkgs, ... }:
 
-(pkgs.callPackage ./buildBazelPackageNG.nix { }) // {
+(pkgs.callPackage ./buildBazelPackageNG.nix { })
+// {
   bazelRulesJavaHook = pkgs.callPackage ./bazelRulesJavaHook { };
   bazelRulesNodeJS5Hook = pkgs.callPackage ./bazelRulesNodeJS5Hook { };
 }
@@ -22,7 +22,8 @@ let
     listToAttrs
     mapAttrs
     toJSON
-    unsafeDiscardStringContext;
+    unsafeDiscardStringContext
+    ;
 
   inherit (pkgs) lib runCommand writeText;
   inherit (depot.nix.readTree) mkLabel;
@@ -33,24 +34,27 @@ rec {
   # Create a unique key for the buildkite pipeline based on the given derivation
   # or drvPath. A consequence of using such keys is that every derivation may
   # only be exposed as a single, unique step in the pipeline.
-  keyForDrv = drvOrPath:
+  keyForDrv =
+    drvOrPath:
     let
       drvPath =
-        if lib.isDerivation drvOrPath then drvOrPath.drvPath
-        else if lib.isString drvOrPath then drvOrPath
-        else builtins.throw "keyForDrv: expected string or derivation";
+        if lib.isDerivation drvOrPath then
+          drvOrPath.drvPath
+        else if lib.isString drvOrPath then
+          drvOrPath
+        else
+          builtins.throw "keyForDrv: expected string or derivation";
 
       # Only use the drv hash to prevent escaping problems. Buildkite also has a
       # limit of 100 characters on keys.
     in
-    "drv-" + (builtins.substring 0 32
-      (builtins.baseNameOf (unsafeDiscardStringContext drvPath))
-    );
+    "drv-" + (builtins.substring 0 32 (builtins.baseNameOf (unsafeDiscardStringContext drvPath)));
 
   # Given an arbitrary attribute path generate a Nix expression which obtains
   # this from the root of depot (assumed to be ./.). Attributes may be any
   # Nix strings suitable as attribute names, not just Nix literal-safe strings.
-  mkBuildExpr = attrPath:
+  mkBuildExpr =
+    attrPath:
     let
       descend = expr: attr: "builtins.getAttr \"${attr}\" (${expr})";
     in
@@ -58,38 +62,49 @@ rec {
 
   # Determine whether to skip a target if it has not diverged from the
   # HEAD branch.
-  shouldSkip = { parentTargetMap ? { }, label, drvPath }:
-    if (hasAttr label parentTargetMap) && parentTargetMap."${label}".drvPath == drvPath
-    then "Target has not changed."
-    else false;
+  shouldSkip =
+    {
+      parentTargetMap ? { },
+      label,
+      drvPath,
+    }:
+    if (hasAttr label parentTargetMap) && parentTargetMap."${label}".drvPath == drvPath then
+      "Target has not changed."
+    else
+      false;
 
   # Create build command for an attribute path pointing to a derivation.
-  mkBuildCommand = { attrPath, drvPath, outLink ? "result" }: concatStringsSep " " [
-    # If the nix build fails, the Nix command's exit status should be used.
-    "set -o pipefail;"
+  mkBuildCommand =
+    {
+      attrPath,
+      drvPath,
+      outLink ? "result",
+    }:
+    concatStringsSep " " [
+      # If the nix build fails, the Nix command's exit status should be used.
+      "set -o pipefail;"
 
-    # First try to realise the drvPath of the target so we don't evaluate twice.
-    # Nix has no concept of depending on a derivation file without depending on
-    # at least one of its `outPath`s, so we need to discard the string context
-    # if we don't want to build everything during pipeline construction.
-    #
-    # To make this more uniform with how nix-build(1) works, we call realpath(1)
-    # on nix-store(1)'s output since it has the habit of printing the path of the
-    # out link, not the store path.
-    "(nix-store --realise '${drvPath}' --add-root '${outLink}' --indirect | xargs -r realpath)"
+      # First try to realise the drvPath of the target so we don't evaluate twice.
+      # Nix has no concept of depending on a derivation file without depending on
+      # at least one of its `outPath`s, so we need to discard the string context
+      # if we don't want to build everything during pipeline construction.
+      #
+      # To make this more uniform with how nix-build(1) works, we call realpath(1)
+      # on nix-store(1)'s output since it has the habit of printing the path of the
+      # out link, not the store path.
+      "(nix-store --realise '${drvPath}' --add-root '${outLink}' --indirect | xargs -r realpath)"
 
-    # Since we don't gcroot the derivation files, they may be deleted by the
-    # garbage collector. In that case we can reevaluate and build the attribute
-    # using nix-build.
-    "|| (test ! -f '${drvPath}' && nix-build -E '${mkBuildExpr attrPath}' --show-trace --out-link '${outLink}')"
-  ];
+      # Since we don't gcroot the derivation files, they may be deleted by the
+      # garbage collector. In that case we can reevaluate and build the attribute
+      # using nix-build.
+      "|| (test ! -f '${drvPath}' && nix-build -E '${mkBuildExpr attrPath}' --show-trace --out-link '${outLink}')"
+    ];
 
   # Attribute path of a target relative to the depot root. Needs to take into
   # account whether the target is a physical target (which corresponds to a path
   # in the filesystem) or the subtarget of a physical target.
-  targetAttrPath = target:
-    target.__readTree
-    ++ lib.optionals (target ? __subtarget) [ target.__subtarget ];
+  targetAttrPath =
+    target: target.__readTree ++ lib.optionals (target ? __subtarget) [ target.__subtarget ];
 
   # Given a derivation (identified by drvPath) that is part of the list of
   # targets passed to mkPipeline, determine all derivations that it depends on
@@ -97,11 +112,18 @@ rec {
   # that build them. This is used to populate `depends_on` in `mkStep`.
   #
   # See //nix/dependency-analyzer for documentation on the structure of `targetDepMap`.
-  getTargetPipelineDeps = targetDepMap: drvPath:
-    builtins.map keyForDrv (targetDepMap.${drvPath}.knownDeps or [ ]);
+  getTargetPipelineDeps =
+    targetDepMap: drvPath: builtins.map keyForDrv (targetDepMap.${drvPath}.knownDeps or [ ]);
 
   # Create a pipeline step from a single target.
-  mkStep = { headBranch, parentTargetMap, targetDepMap, target, cancelOnBuildFailing }:
+  mkStep =
+    {
+      headBranch,
+      parentTargetMap,
+      targetDepMap,
+      target,
+      cancelOnBuildFailing,
+    }:
     let
       label = mkLabel target;
       drvPath = unsafeDiscardStringContext target.drvPath;
@@ -120,24 +142,34 @@ rec {
       # Add a dependency on the initial static pipeline step which
       # always runs. This allows build steps uploaded in batches to
      # start running before all batches have been uploaded.
-      depends_on = [ ":init:" ]
+      depends_on = [
+        ":init:"
+      ]
       ++ getTargetPipelineDeps targetDepMap drvPath
       ++ lib.optionals (target ? meta.ci.buildkiteExtraDeps) target.meta.ci.buildkiteExtraDeps;
-    } // lib.optionalAttrs (target ? meta.timeout) {
+    }
+    // lib.optionalAttrs (target ? meta.timeout) {
       timeout_in_minutes = target.meta.timeout / 60;
       # Additional arguments to set on the step.
       # Keep in mind these *overwrite* existing step args, not extend. Use with caution.
-    } // lib.optionalAttrs (target ? meta.ci.buildkiteExtraStepArgs) target.meta.ci.buildkiteExtraStepArgs;
+    }
+    // lib.optionalAttrs (
+      target ? meta.ci.buildkiteExtraStepArgs
+    ) target.meta.ci.buildkiteExtraStepArgs;
 
   # Helper function to inelegantly divide a list into chunks of at
   # most n elements.
   #
   # This works by assigning each element a chunk ID based on its
   # index, and then grouping all elements by their chunk ID.
-  chunksOf = n: list:
+  chunksOf =
+    n: list:
     let
       chunkId = idx: toString (idx / n + 1);
-      assigned = lib.imap1 (idx: value: { inherit value; chunk = chunkId idx; }) list;
+      assigned = lib.imap1 (idx: value: {
+        inherit value;
+        chunk = chunkId idx;
+      }) list;
       unchunk = mapAttrs (_: elements: map (e: e.value) elements);
     in
     unchunk (lib.groupBy (e: e.chunk) assigned);
@@ -156,36 +188,35 @@ rec {
   # are uploaded sequentially. This is because of a limitation in the
   # Buildkite backend which struggles to process more than a specific
   # number of chunks at once.
-  pipelineChunks = name: steps:
-    attrValues (mapAttrs (makePipelineChunk name) (chunksOf 192 steps));
+  pipelineChunks = name: steps: attrValues (mapAttrs (makePipelineChunk name) (chunksOf 192 steps));
 
   # Create a pipeline structure for the given targets.
   mkPipeline =
     {
       # HEAD branch of the repository on which release steps, GC
       # anchoring and other "mainline only" steps should run.
-      headBranch
-    , # List of derivations as read by readTree (in most cases just the
+      headBranch,
+      # List of derivations as read by readTree (in most cases just the
       # output of readTree.gather) that should be built in Buildkite.
       #
       # These are scheduled as the first build steps and run as fast as
       # possible, in order, without any concurrency restrictions.
-      drvTargets
-    , # Derivation map of a parent commit. Only targets which no longer
+      drvTargets,
+      # Derivation map of a parent commit. Only targets which no longer
       # correspond to the content of this map will be built. Passing an
       # empty map will always build all targets.
-      parentTargetMap ? { }
-    , # A list of plain Buildkite step structures to run alongside the
+      parentTargetMap ? { },
+      # A list of plain Buildkite step structures to run alongside the
       # build for all drvTargets, but before proceeding with any
      # post-build actions such as status reporting.
      #
      # Can be used for things like code formatting checks.
-      additionalSteps ? [ ]
-    , # A list of plain Buildkite step structures to run after all
+      additionalSteps ? [ ],
+      # A list of plain Buildkite step structures to run after all
       # previous steps succeeded.
       #
       # Can be used for status reporting steps and the like.
-      postBuildSteps ? [ ]
+      postBuildSteps ? [ ],
       # The list of phases known by the current Buildkite
       # pipeline. Dynamic pipeline chunks for each phase are uploaded
       # to Buildkite on execution of static part of the
@@ -199,7 +230,10 @@ rec {
       # - "build" - main phase for building all Nix targets
       # - "release" - pushing artifacts to external repositories
       # - "deploy" - updating external deployment configurations
-    , phases ? [ "build" "release" ]
+      phases ? [
+        "build"
+        "release"
+      ],
       # Build phases that are active for this invocation (i.e. their
       # steps should be generated).
       #
@@ -208,13 +242,13 @@ rec {
       # eval contexts.
       #
       # TODO(tazjin): Fail/warn if unknown phase is requested.
-    , activePhases ? phases
+      activePhases ? phases,
       # Setting this attribute to true cancels dynamic pipeline steps
       # as soon as the build is marked as failing.
       #
       # To enable this feature one should enable "Fail Fast" setting
       # at Buildkite pipeline or on organization level.
-    , cancelOnBuildFailing ? false
+      cancelOnBuildFailing ? false,
     }:
     let
       # List of phases to include.
@@ -232,20 +266,25 @@ rec {
       # the previous pipeline (per parentTargetMap). Unchanged targets will
       # be skipped (assumed already built), so it's useless to emit deps
       # on their steps.
-      changedDrvTargets = builtins.filter
-        (target:
-          parentTargetMap.${mkLabel target}.drvPath or null != target.drvPath
-        )
-        drvTargets;
+      changedDrvTargets = builtins.filter (
+        target: parentTargetMap.${mkLabel target}.drvPath or null != target.drvPath
+      ) drvTargets;
     in
     dependency-analyzer (dependency-analyzer.drvsToPaths changedDrvTargets);
 
   # Convert a target into all of its steps, separated by build
   # phase (as phases end up in different chunks).
-  targetToSteps = target:
+  targetToSteps =
+    target:
     let
       mkStepArgs = {
-        inherit headBranch parentTargetMap targetDepMap target cancelOnBuildFailing;
+        inherit
+          headBranch
+          parentTargetMap
+          targetDepMap
+          target
+          cancelOnBuildFailing
+          ;
       };
       step = mkStep mkStepArgs;
 
@@ -257,19 +296,21 @@ rec {
       overridable = f: mkStep (mkStepArgs // { target = (f target); });
 
       # Split extra steps by phase.
-      splitExtraSteps = lib.groupBy ({ phase, ... }: phase)
-        (attrValues (mapAttrs (normaliseExtraStep phases overridable)
-          (target.meta.ci.extraSteps or { })));
+      splitExtraSteps = lib.groupBy ({ phase, ... }: phase) (
+        attrValues (mapAttrs (normaliseExtraStep phases overridable) (target.meta.ci.extraSteps or { }))
+      );
 
-      extraSteps = mapAttrs
-        (_: steps:
-          map (mkExtraStep (targetAttrPath target) buildEnabled) steps)
-        splitExtraSteps;
+      extraSteps = mapAttrs (
+        _: steps: map (mkExtraStep (targetAttrPath target) buildEnabled) steps
+      ) splitExtraSteps;
     in
-    if !buildEnabled then extraSteps
-    else extraSteps // {
-      build = [ step ] ++ (extraSteps.build or [ ]);
-    };
+    if !buildEnabled then
+      extraSteps
+    else
+      extraSteps
+      // {
+        build = [ step ] ++ (extraSteps.build or [ ]);
+      };
 
   # Combine all target steps into step lists per phase.
   #
@@ -279,44 +320,47 @@ rec {
         release = postBuildSteps;
       };
 
-      phasesWithSteps = lib.zipAttrsWithNames enabledPhases (_: concatLists)
-        ((map targetToSteps drvTargets) ++ [ globalSteps ]);
+      phasesWithSteps = lib.zipAttrsWithNames enabledPhases (_: concatLists) (
+        (map targetToSteps drvTargets) ++ [ globalSteps ]
+      );
 
       # Generate pipeline chunks for each phase.
-      chunks = foldl'
-        (acc: phase:
-          let phaseSteps = phasesWithSteps.${phase} or [ ]; in
-          if phaseSteps == [ ]
-          then acc
-          else acc ++ (pipelineChunks phase phaseSteps))
-        [ ]
-        enabledPhases;
+      chunks = foldl' (
+        acc: phase:
+        let
+          phaseSteps = phasesWithSteps.${phase} or [ ];
+        in
+        if phaseSteps == [ ] then acc else acc ++ (pipelineChunks phase phaseSteps)
+      ) [ ] enabledPhases;
 
     in
     runCommand "buildkite-pipeline" { } ''
       mkdir $out
       echo "Generated ${toString (length chunks)} pipeline chunks"
-      ${
-        lib.concatMapStringsSep "\n"
-          (chunk: "cp ${chunk.path} $out/${chunk.filename}") chunks
-      }
+      ${lib.concatMapStringsSep "\n" (chunk: "cp ${chunk.path} $out/${chunk.filename}") chunks}
     '';
 
   # Create a drvmap structure for the given targets, containing the
   # mapping of all target paths to their derivations. The mapping can
   # be persisted for future use.
-  mkDrvmap = drvTargets: writeText "drvmap.json" (toJSON (listToAttrs (map
-    (target: {
-      name = mkLabel target;
-      value = {
-        drvPath = unsafeDiscardStringContext target.drvPath;
+  mkDrvmap =
+    drvTargets:
+    writeText "drvmap.json" (
+      toJSON (
+        listToAttrs (
+          map (target: {
+            name = mkLabel target;
+            value = {
+              drvPath = unsafeDiscardStringContext target.drvPath;
 
-        # Include the attrPath in the output to reconstruct the drv
-        # without parsing the human-readable label.
-        attrPath = targetAttrPath target;
-      };
-    })
-    drvTargets)));
+              # Include the attrPath in the output to reconstruct the drv
+              # without parsing the human-readable label.
+              attrPath = targetAttrPath target;
+            };
+          }) drvTargets
+        )
+      )
+    );
 
   # Implementation of extra step logic.
   #
@@ -356,40 +400,49 @@ rec {
 
   # Create a gated step in a step group, independent from any other
   # steps.
-  mkGatedStep = { step, label, parent, prompt }: {
-    inherit (step) depends_on;
-    group = label;
-    skip = parent.skip or false;
+  mkGatedStep =
+    {
+      step,
+      label,
+      parent,
+      prompt,
+    }:
+    {
+      inherit (step) depends_on;
+      group = label;
+      skip = parent.skip or false;
 
-    steps = [
-      {
-        inherit prompt;
-        branches = step.branches or [ ];
-        block = ":radio_button: Run ${label}? (from ${parent.env.READTREE_TARGET})";
-      }
+      steps = [
+        {
+          inherit prompt;
+          branches = step.branches or [ ];
+          block = ":radio_button: Run ${label}? (from ${parent.env.READTREE_TARGET})";
+        }
 
-      # The explicit depends_on of the wrapped step must be removed,
-      # otherwise its dependency relationship with the gate step will
-      # break.
-      (builtins.removeAttrs step [ "depends_on" ])
-    ];
-  };
+        # The explicit depends_on of the wrapped step must be removed,
+        # otherwise its dependency relationship with the gate step will
+        # break.
+        (builtins.removeAttrs step [ "depends_on" ])
+      ];
+    };
 
   # Validate and normalise extra step configuration before actually
   # generating build steps, in order to use user-provided metadata
   # during the pipeline generation.
-  normaliseExtraStep = phases: overridableParent: key:
-    { command
-    , label ? key
-    , needsOutput ? false
-    , parentOverride ? (x: x)
-    , branches ? null
-    , alwaysRun ? false
-    , prompt ? false
-    , softFail ? false
-    , phase ? "build"
-    , skip ? false
-    , agents ? null
+  normaliseExtraStep =
+    phases: overridableParent: key:
+    {
+      command,
+      label ? key,
+      needsOutput ? false,
+      parentOverride ? (x: x),
+      branches ? null,
+      alwaysRun ? false,
+      prompt ? false,
+      softFail ? false,
+      phase ? "build",
+      skip ? false,
+      agents ? null,
     }:
     let
      parent = overridableParent parentOverride;
@@ -401,8 +454,7 @@ rec {
           Phase '${phase}' is not valid.
 
           Known phases: ${concatStringsSep ", " phases}
-        ''
-        phase;
+        '' phase;
     in
     {
       inherit
@@ -416,7 +468,8 @@ rec {
         parentLabel
         softFail
         skip
-        agents;
+        agents
+        ;
 
       phase = validPhase;
 
@@ -426,13 +479,13 @@ rec {
         The 'prompt' feature can not be used by steps in the "build"
         phase, because CI builds should not be gated on manual human
         approvals.
-      ''
-      prompt;
+      '' prompt;
     };
 
   # Create the Buildkite configuration for an extra step, optionally
   # wrapping it in a gate group.
-  mkExtraStep = parentAttrPath: buildEnabled: cfg:
+  mkExtraStep =
+    parentAttrPath: buildEnabled: cfg:
     let
       # ATTN: needs to match an entry in .gitignore so that the tree won't get dirty
      commandScriptLink = "nix-buildkite-extra-step-command-script";
@@ -453,15 +506,11 @@ rec {
       in
       if cfg.alwaysRun then false else skip';
 
-      depends_on = lib.optional
-        (buildEnabled && !cfg.alwaysRun && !cfg.needsOutput)
-        cfg.parent.key;
+      depends_on = lib.optional (buildEnabled && !cfg.alwaysRun && !cfg.needsOutput) cfg.parent.key;
 
       command = ''
         set -ueo pipefail
-        ${lib.optionalString cfg.needsOutput
-          "echo '~~~ Preparing build output of ${cfg.parentLabel}'"
-        }
+        ${lib.optionalString cfg.needsOutput "echo '~~~ Preparing build output of ${cfg.parentLabel}'"}
         ${lib.optionalString cfg.needsOutput cfg.parent.command}
         echo '--- Building extra step script'
         command_script="$(${
@@ -469,9 +518,13 @@ rec {
           assert builtins.length cfg.command.outputs == 1;
           mkBuildCommand {
             # script is exposed at <parent>.meta.ci.extraSteps.<key>.command
-            attrPath =
-              parentAttrPath
-              ++ [ "meta" "ci" "extraSteps" cfg.key "command" ];
+            attrPath = parentAttrPath ++ [
+              "meta"
+              "ci"
+              "extraSteps"
+              cfg.key
+              "command"
+            ];
             drvPath = unsafeDiscardStringContext cfg.command.drvPath;
             # make sure it doesn't conflict with result (from needsOutput)
             outLink = commandScriptLink;
@@ -483,17 +536,17 @@ rec {
       '';
 
       soft_fail = cfg.softFail;
-    } // (lib.optionalAttrs (cfg.agents != null) { inherit (cfg) agents; })
+    }
+    // (lib.optionalAttrs (cfg.agents != null) { inherit (cfg) agents; })
     // (lib.optionalAttrs (cfg.branches != null) {
       branches = lib.concatStringsSep " " cfg.branches;
     });
     in
-    if (isString cfg.prompt)
-    then
-      mkGatedStep
-      {
-        inherit step;
-        inherit (cfg) label parent prompt;
-      }
-    else step;
+    if (isString cfg.prompt) then
+      mkGatedStep {
+        inherit step;
+        inherit (cfg) label parent prompt;
+      }
+    else
+      step;
 }
@@ -1,4 +1,9 @@
-{ lib, depot, pkgs, ... }:
+{
+  lib,
+  depot,
+  pkgs,
+  ...
+}:
 
 let
   inherit (builtins) unsafeDiscardStringContext appendContext;
@@ -18,23 +23,22 @@ let
   directDrvDeps =
     let
      getDeps =
-        if lib.versionAtLeast builtins.nixVersion "2.6"
-        then
-          # Since https://github.com/NixOS/nix/pull/1643, Nix apparently »preserves
-          # string context« through a readFile invocation. This has the side effect
-          # that it becomes possible to query the actual references a store path has.
-          # Not a 100% sure this is intended, but _very_ convenient for us here.
-          drvPath:
-          builtins.attrNames (builtins.getContext (builtins.readFile drvPath))
+        if lib.versionAtLeast builtins.nixVersion "2.6" then
+          # Since https://github.com/NixOS/nix/pull/1643, Nix apparently »preserves
+          # string context« through a readFile invocation. This has the side effect
+          # that it becomes possible to query the actual references a store path has.
+          # Not a 100% sure this is intended, but _very_ convenient for us here.
+          drvPath: builtins.attrNames (builtins.getContext (builtins.readFile drvPath))
         else
-          # For Nix < 2.6 we have to rely on HACK, namely grepping for quoted
-          # store path references in the file. In the future this should be
-          # replaced by a proper derivation parser.
-          drvPath: builtins.concatLists (
+          # For Nix < 2.6 we have to rely on HACK, namely grepping for quoted
+          # store path references in the file. In the future this should be
+          # replaced by a proper derivation parser.
+          drvPath:
+          builtins.concatLists (
             builtins.filter builtins.isList (
-              builtins.split
-                "\"(${lib.escapeRegex builtins.storeDir}/[[:alnum:]+._?=-]+.drv)\""
-                (builtins.readFile drvPath)
+              builtins.split "\"(${lib.escapeRegex builtins.storeDir}/[[:alnum:]+._?=-]+.drv)\"" (
+                builtins.readFile drvPath
+              )
             )
           );
     in
@@ -42,15 +46,12 @@ let
     # if the passed path is not a derivation we can't necessarily get its
     # dependencies, since it may not be representable as a Nix string due to
     # NUL bytes, e.g. compressed patch files imported into the Nix store.
-    if builtins.match "^.+\\.drv$" drvPath == null
-    then [ ]
-    else getDeps drvPath;
+    if builtins.match "^.+\\.drv$" drvPath == null then [ ] else getDeps drvPath;
 
   # Maps a list of derivation to the list of corresponding `drvPath`s.
   #
   # Type: [drv] -> [str]
-  drvsToPaths = drvs:
-    builtins.map (drv: builtins.unsafeDiscardOutputDependency drv.drvPath) drvs;
+  drvsToPaths = drvs: builtins.map (drv: builtins.unsafeDiscardOutputDependency drv.drvPath) drvs;
 
   #
   # Calculate map of direct derivation dependencies
@@ -62,7 +63,8 @@ let
   # generating the map from
   #
   # Type: bool -> string -> set
-  drvEntry = known: drvPath:
+  drvEntry =
+    known: drvPath:
     let
       # key may not refer to a store path, …
       key = unsafeDiscardStringContext drvPath;
@@ -85,7 +87,8 @@ let
   # attribute to `true` if it is in the list of input derivation paths.
   #
   # Type: [str] -> set
-  plainDrvDepMap = drvPaths:
+  plainDrvDepMap =
+    drvPaths:
     builtins.listToAttrs (
       builtins.genericClosure {
         startSet = builtins.map (drvEntry true) drvPaths;
@@ -121,13 +124,15 @@ let
   # `fmap (builtins.getAttr "knownDeps") (getAttr drvPath)` will always succeed.
   #
   # Type: str -> stateMonad drvDepMap null
-  insertKnownDeps = drvPathWithContext:
+  insertKnownDeps =
+    drvPathWithContext:
     let
       # We no longer need to read from the store, so context is irrelevant, but
       # we need to check for attr names which requires the absence of context.
       drvPath = unsafeDiscardStringContext drvPathWithContext;
     in
-    bind get (initDepMap:
+    bind get (
+      initDepMap:
       # Get the dependency map's state before we've done anything to obtain the
       # entry we'll be manipulating later as well as its dependencies.
       let
@@ -135,57 +140,48 @@ let
 
        # We don't need to recurse if our direct dependencies either have their
        # knownDeps list already populated or are known dependencies themselves.
-        depsPrecalculated =
-          builtins.partition
-            (dep:
-              initDepMap.${dep}.known
-              || initDepMap.${dep} ? knownDeps
-            )
-            entryPoint.deps;
+        depsPrecalculated = builtins.partition (
+          dep: initDepMap.${dep}.known || initDepMap.${dep} ? knownDeps
+        ) entryPoint.deps;
 
        # If a direct dependency is known, it goes right to our known dependency
        # list. If it is unknown, we can copy its knownDeps list into our own.
-        initiallyKnownDeps =
-          builtins.concatLists (
-            builtins.map
-              (dep:
-                if initDepMap.${dep}.known
-                then [ dep ]
-                else initDepMap.${dep}.knownDeps
-              )
-              depsPrecalculated.right
-          );
+        initiallyKnownDeps = builtins.concatLists (
+          builtins.map (
+            dep: if initDepMap.${dep}.known then [ dep ] else initDepMap.${dep}.knownDeps
+          ) depsPrecalculated.right
+        );
       in
 
       # If the information was already calculated before, we can exit right away
-      if entryPoint ? knownDeps
-      then pure null
+      if entryPoint ? knownDeps then
+        pure null
       else
         after
          # For all unknown direct dependencies which don't have a `knownDeps`
          # list, we call ourselves recursively to populate it. Since this is
          # done sequentially in the state monad, we avoid recalculating the
          # list for the same derivation multiple times.
-          (for_
-            depsPrecalculated.wrong
-            insertKnownDeps)
+          (for_ depsPrecalculated.wrong insertKnownDeps)
          # After this we can obtain the updated dependency map which will have
          # a `knownDeps` list for all our direct dependencies and update the
          # entry for the input `drvPath`.
-          (bind
-            get
-            (populatedDepMap:
-              (setAttr drvPath (entryPoint // {
-                knownDeps =
-                  lib.unique (
-                    initiallyKnownDeps
-                    ++ builtins.concatLists (
-                      builtins.map
-                        (dep: populatedDepMap.${dep}.knownDeps)
-                        depsPrecalculated.wrong
-                    )
-                  );
-              }))))
+          (
+            bind get (
+              populatedDepMap:
+              (setAttr drvPath (
+                entryPoint
+                // {
+                  knownDeps = lib.unique (
+                    initiallyKnownDeps
+                    ++ builtins.concatLists (
+                      builtins.map (dep: populatedDepMap.${dep}.knownDeps) depsPrecalculated.wrong
+                    )
+                  );
+                }
+              ))
+            )
+          )
     );
@@ -204,14 +200,8 @@ let
   # */
   # ];
   # }
-  knownDrvDepMap = knownDrvPaths:
-    run
-      (plainDrvDepMap knownDrvPaths)
-      (after
-        (for_
-          knownDrvPaths
-          insertKnownDeps)
-        get);
+  knownDrvDepMap =
+    knownDrvPaths: run (plainDrvDepMap knownDrvPaths) (after (for_ knownDrvPaths insertKnownDeps) get);
 
   #
   # Other things based on knownDrvDepMap
@@ -221,39 +211,39 @@ let
   # name, so multiple entries can be collapsed if they have the same name.
   #
   # Type: [drv] -> drv
-  knownDependencyGraph = name: drvs:
+  knownDependencyGraph =
+    name: drvs:
     let
-      justName = drvPath:
-        builtins.substring
-          (builtins.stringLength builtins.storeDir + 1 + 32 + 1)
-          (builtins.stringLength drvPath)
-          (unsafeDiscardStringContext drvPath);
+      justName =
+        drvPath:
+        builtins.substring (
+          builtins.stringLength builtins.storeDir + 1 + 32 + 1
+        ) (builtins.stringLength drvPath) (unsafeDiscardStringContext drvPath);
 
       gv = pkgs.writeText "${name}-dependency-analysis.gv" ''
        digraph depot {
        ${
-          (lib.concatStringsSep "\n"
-            (lib.mapAttrsToList (name: value:
-              if !value.known then ""
-              else lib.concatMapStringsSep "\n"
-                (knownDep: " \"${justName name}\" -> \"${justName knownDep}\"")
-                value.knownDeps
-            )
-            (depot.nix.dependency-analyzer (
-              drvsToPaths drvs
-            ))))
+          (lib.concatStringsSep "\n" (
+            lib.mapAttrsToList (
+              name: value:
+              if !value.known then
+                ""
+              else
+                lib.concatMapStringsSep "\n" (
+                  knownDep: " \"${justName name}\" -> \"${justName knownDep}\""
+                ) value.knownDeps
+            ) (depot.nix.dependency-analyzer (drvsToPaths drvs))
+          ))
        }
        }
      '';
    in
 
-    pkgs.runCommand "${name}-dependency-analysis.svg"
-      {
-        nativeBuildInputs = [
-          pkgs.buildPackages.graphviz
-        ];
-      }
-      "dot -Tsvg < ${gv} > $out";
+    pkgs.runCommand "${name}-dependency-analysis.svg" {
+      nativeBuildInputs = [
+        pkgs.buildPackages.graphviz
+      ];
+    } "dot -Tsvg < ${gv} > $out";
 in
 
 {
@@ -4,8 +4,8 @@ let
   # e.g.
   # a"b\c -> "a\"b\\c"
   # a\"bc -> "a\\\"bc"
-  escapeExeclineArg = arg:
-    ''"${builtins.replaceStrings [ ''"'' ''\'' ] [ ''\"'' ''\\'' ] (toString arg)}"'';
+  escapeExeclineArg =
+    arg: ''"${builtins.replaceStrings [ ''"'' ''\'' ] [ ''\"'' ''\\'' ] (toString arg)}"'';
 
   # Escapes an execline (list of execline strings) to be passed to execlineb
   # Give it a nested list of strings. Nested lists are interpolated as execline
@@ -15,15 +15,24 @@ let
   # Example:
   # escapeExecline [ "if" [ "somecommand" ] "true" ]
   # == ''"if" { "somecommand" } "true"''
-  escapeExecline = execlineList: lib.concatStringsSep " "
-    (
+  escapeExecline =
+    execlineList:
+    lib.concatStringsSep " " (
       let
-        go = arg:
-          if builtins.isString arg then [ (escapeExeclineArg arg) ]
-          else if builtins.isPath arg then [ (escapeExeclineArg "${arg}") ]
-          else if lib.isDerivation arg then [ (escapeExeclineArg arg) ]
-          else if builtins.isList arg then [ "{" ] ++ builtins.concatMap go arg ++ [ "}" ]
-          else abort "escapeExecline can only hande nested lists of strings, was ${lib.generators.toPretty {} arg}";
+        go =
+          arg:
+          if builtins.isString arg then
+            [ (escapeExeclineArg arg) ]
+          else if builtins.isPath arg then
+            [ (escapeExeclineArg "${arg}") ]
+          else if lib.isDerivation arg then
+            [ (escapeExeclineArg arg) ]
+          else if builtins.isList arg then
+            [ "{" ] ++ builtins.concatMap go arg ++ [ "}" ]
+          else
+            abort "escapeExecline can only hande nested lists of strings, was ${
+              lib.generators.toPretty { } arg
+            }";
       in
       builtins.concatMap go execlineList
     );
@@ -17,75 +17,82 @@ let
 
   # Create the case statement for a command invocations, optionally
   # overriding the `TARGET_TOOL` variable.
-  invoke = name: { attr, cmd ? null }: ''
-    ${name})
-      attr="${attr}"
-      ${if cmd != null then "TARGET_TOOL=\"${cmd}\"\n;;" else ";;"}
-  '';
+  invoke =
+    name:
+    {
+      attr,
+      cmd ? null,
+    }:
+    ''
+      ${name})
+        attr="${attr}"
+        ${if cmd != null then "TARGET_TOOL=\"${cmd}\"\n;;" else ";;"}
+    '';
 
   # Create command to symlink to the dispatch script for each tool.
   link = name: "ln -s $target $out/bin/${name}";
 
   invocations = tools: concatStringsSep "\n" (attrValues (mapAttrs invoke tools));
 in
-fix (self:
+fix (
+  self:
 
-# Attribute set of tools that should be lazily-added to the $PATH.
-#
-# The name of each attribute is used as the command name (on $PATH).
-# It must contain the keys 'attr' (containing the Nix attribute path
-# to the tool's derivation from the top-level), and may optionally
-# contain the key 'cmd' to override the name of the binary inside the
-# derivation.
-tools:
+  # Attribute set of tools that should be lazily-added to the $PATH.
+  #
+  # The name of each attribute is used as the command name (on $PATH).
+  # It must contain the keys 'attr' (containing the Nix attribute path
+  # to the tool's derivation from the top-level), and may optionally
+  # contain the key 'cmd' to override the name of the binary inside the
+  # derivation.
+  tools:
 
-pkgs.runCommandNoCC "lazy-dispatch"
-{
-  passthru.overrideDeps = newTools: self (tools // newTools);
-  passthru.tools = tools;
+  pkgs.runCommandNoCC "lazy-dispatch"
+    {
+      passthru.overrideDeps = newTools: self (tools // newTools);
+      passthru.tools = tools;
 
-  text = ''
-    #!${pkgs.runtimeShell}
-    set -ue
+      text = ''
+        #!${pkgs.runtimeShell}
+        set -ue
 
-    if ! type git>/dev/null || ! type nix-build>/dev/null; then
-      echo "The 'git' and 'nix-build' commands must be available." >&2
-      exit 127
-    fi
+        if ! type git>/dev/null || ! type nix-build>/dev/null; then
+          echo "The 'git' and 'nix-build' commands must be available." >&2
+          exit 127
+        fi
 
-    readonly REPO_ROOT=$(git rev-parse --show-toplevel)
-    TARGET_TOOL=$(basename "$0")
+        readonly REPO_ROOT=$(git rev-parse --show-toplevel)
+        TARGET_TOOL=$(basename "$0")
 
-    case "''${TARGET_TOOL}" in
-    ${invocations tools}
-    *)
-      echo "''${TARGET_TOOL} is currently not installed in this repository." >&2
-      exit 127
-      ;;
-    esac
+        case "''${TARGET_TOOL}" in
+        ${invocations tools}
+        *)
+          echo "''${TARGET_TOOL} is currently not installed in this repository." >&2
+          exit 127
+          ;;
+        esac
 
-    result=$(nix-build --no-out-link --attr "''${attr}" "''${REPO_ROOT}")
-    PATH="''${result}/bin:$PATH"
-    exec "''${TARGET_TOOL}" "''${@}"
-  '';
+        result=$(nix-build --no-out-link --attr "''${attr}" "''${REPO_ROOT}")
+        PATH="''${result}/bin:$PATH"
+        exec "''${TARGET_TOOL}" "''${@}"
+      '';
 
-  # Access this to get a compatible nix-shell
-  passthru.devShell = pkgs.mkShellNoCC {
-    name = "${self.name}-shell";
-    packages = [ self ];
-  };
-}
-''
-  # Write the dispatch code
-  target=$out/bin/__dispatch
-  mkdir -p "$(dirname "$target")"
-  echo "$text" > $target
-  chmod +x $target
+      # Access this to get a compatible nix-shell
+      passthru.devShell = pkgs.mkShellNoCC {
+        name = "${self.name}-shell";
+        packages = [ self ];
+      };
+    }
+    ''
+      # Write the dispatch code
+      target=$out/bin/__dispatch
+      mkdir -p "$(dirname "$target")"
+      echo "$text" > $target
+      chmod +x $target
 
-  # Add symlinks from all the tools to the dispatch
-  ${concatStringsSep "\n" (map link (attrNames tools))}
+      # Add symlinks from all the tools to the dispatch
+      ${concatStringsSep "\n" (map link (attrNames tools))}
 
-  # Check that it's working-ish
-  ${pkgs.stdenv.shellDryRun} $target
-''
+      # Check that it's working-ish
+      ${pkgs.stdenv.shellDryRun} $target
+    ''
+)
@@ -32,25 +32,30 @@ let
     map
     match
     readDir
-    substring;
+    substring
+    ;
 
-  argsWithPath = args: parts:
-    let meta.locatedAt = parts;
-    in meta // (if isAttrs args then args else args meta);
+  argsWithPath =
+    args: parts:
+    let
+      meta.locatedAt = parts;
+    in
+    meta // (if isAttrs args then args else args meta);
 
-  readDirVisible = path:
+  readDirVisible =
+    path:
     let
       children = readDir path;
       # skip hidden files, except for those that contain special instructions to readTree
      isVisible = f: f == ".skip-subtree" || f == ".skip-tree" || (substring 0 1 f) != ".";
      names = filter isVisible (attrNames children);
    in
-    listToAttrs (map
-      (name: {
+    listToAttrs (
+      map (name: {
        inherit name;
        value = children.${name};
-      })
-      names);
+      }) names
+    );
 
   # Create a mark containing the location of this attribute and
   # a list of all child attribute names added by readTree.
@ -60,39 +65,48 @@
};

# Create a label from a target's tree location.
mkLabel = target:
let label = concatStringsSep "/" target.__readTree;
in if target ? __subtarget
then "${label}:${target.__subtarget}"
else label;
mkLabel =
target:
let
label = concatStringsSep "/" target.__readTree;
in
if target ? __subtarget then "${label}:${target.__subtarget}" else label;

# Merge two attribute sets, but place attributes in `passthru` via
# `overrideAttrs` for derivation targets that support it.
merge = a: b:
if a ? overrideAttrs
then
a.overrideAttrs
(prev: {
passthru = (prev.passthru or { }) // b;
})
else a // b;
merge =
a: b:
if a ? overrideAttrs then
a.overrideAttrs (prev: {
passthru = (prev.passthru or { }) // b;
})
else
a // b;

# Import a file and enforce our calling convention
importFile = args: scopedArgs: path: parts: filter:
importFile =
args: scopedArgs: path: parts: filter:
let
importedFile =
if scopedArgs != { } && builtins ? scopedImport # For snix
then builtins.scopedImport scopedArgs path
else import path;
if
scopedArgs != { } && builtins ? scopedImport # For snix
then
builtins.scopedImport scopedArgs path
else
import path;
pathType = builtins.typeOf importedFile;
in
if pathType != "lambda"
then throw "readTree: trying to import ${toString path}, but it’s a ${pathType}, you need to make it a function like { depot, pkgs, ... }"
else importedFile (filter parts (argsWithPath args parts));
if pathType != "lambda" then
throw "readTree: trying to import ${toString path}, but it’s a ${pathType}, you need to make it a function like { depot, pkgs, ... }"
else
importedFile (filter parts (argsWithPath args parts));

nixFileName = file:
let res = match "(.*)\\.nix" file;
in if res == null then null else head res;
nixFileName =
file:
let
res = match "(.*)\\.nix" file;
in
if res == null then null else head res;

# Internal implementation of readTree, which handles things like the
# skipping of trees and subtrees.
@ -105,7 +119,15 @@
# The higher-level `readTree` method assembles the final attribute
# set out of these results at the top-level, and the internal
# `children` implementation unwraps and processes nested trees.
readTreeImpl = { args, initPath, rootDir, parts, argsFilter, scopedArgs }:
readTreeImpl =
{
args,
initPath,
rootDir,
parts,
argsFilter,
scopedArgs,
}:
let
dir = readDirVisible initPath;

@ -123,9 +145,10 @@
joinChild = c: initPath + ("/" + c);

self =
if rootDir
then { __readTree = [ ]; }
else importFile (args // { here = result; }) scopedArgs initPath parts argsFilter;
if rootDir then
{ __readTree = [ ]; }
else
importFile (args // { here = result; }) scopedArgs initPath parts argsFilter;

# Import subdirectories of the current one, unless any skip
# instructions exist.
@ -134,88 +157,93 @@
# should be ignored, but its content is not inspected by
# readTree
filterDir = f: dir."${f}" == "directory";
filteredChildren = map
(c: {
name = c;
value = readTreeImpl {
inherit argsFilter scopedArgs;
args = args;
initPath = (joinChild c);
rootDir = false;
parts = (parts ++ [ c ]);
};
})
(filter filterDir (attrNames dir));
filteredChildren = map (c: {
name = c;
value = readTreeImpl {
inherit argsFilter scopedArgs;
args = args;
initPath = (joinChild c);
rootDir = false;
parts = (parts ++ [ c ]);
};
}) (filter filterDir (attrNames dir));

# Remove skipped children from the final set, and unwrap the
# result set.
children =
if skipSubtree then [ ]
else map ({ name, value }: { inherit name; value = value.ok; }) (filter (child: child.value ? ok) filteredChildren);
if skipSubtree then
[ ]
else
map (
{ name, value }:
{
inherit name;
value = value.ok;
}
) (filter (child: child.value ? ok) filteredChildren);

# Import Nix files
nixFiles =
if skipSubtree then [ ]
else filter (f: f != null) (map nixFileName (attrNames dir));
nixChildren = map
(c:
let
p = joinChild (c + ".nix");
childParts = parts ++ [ c ];
imported = importFile (args // { here = result; }) scopedArgs p childParts argsFilter;
in
{
name = c;
value =
if isAttrs imported
then merge imported (marker childParts { })
else imported;
})
nixFiles;
nixFiles = if skipSubtree then [ ] else filter (f: f != null) (map nixFileName (attrNames dir));
nixChildren = map (
c:
let
p = joinChild (c + ".nix");
childParts = parts ++ [ c ];
imported = importFile (args // { here = result; }) scopedArgs p childParts argsFilter;
in
{
name = c;
value = if isAttrs imported then merge imported (marker childParts { }) else imported;
}
) nixFiles;

nodeValue = if dir ? "default.nix" then self else { };

allChildren = listToAttrs (
if dir ? "default.nix"
then children
else nixChildren ++ children
);
allChildren = listToAttrs (if dir ? "default.nix" then children else nixChildren ++ children);

result =
if isAttrs nodeValue
then merge nodeValue (allChildren // (marker parts allChildren))
else nodeValue;
if isAttrs nodeValue then
merge nodeValue (allChildren // (marker parts allChildren))
else
nodeValue;

in
if skipTree
then { skip = true; }
else {
ok = result;
};
if skipTree then
{ skip = true; }
else
{
ok = result;
};

# Top-level implementation of readTree itself.
readTree = args:
readTree =
args:
let
tree = readTreeImpl args;
in
if tree ? skip
then throw "Top-level folder has a .skip-tree marker and could not be read by readTree!"
else tree.ok;
if tree ? skip then
throw "Top-level folder has a .skip-tree marker and could not be read by readTree!"
else
tree.ok;

# Helper function to fetch subtargets from a target. This is a
# temporary helper to warn on the use of the `meta.targets`
# attribute, which is deprecated in favour of `meta.ci.targets`.
subtargets = node:
let targets = (node.meta.targets or [ ]) ++ (node.meta.ci.targets or [ ]);
in if node ? meta.targets then
subtargets =
node:
let
targets = (node.meta.targets or [ ]) ++ (node.meta.ci.targets or [ ]);
in
if node ? meta.targets then
builtins.trace ''
[1;31mWarning: The meta.targets attribute is deprecated.

Please move the subtargets of //${mkLabel node} to the
meta.ci.targets attribute.
[0m
''
targets else targets;
'' targets
else
targets;

# Function which can be used to find all readTree targets within an
# attribute set.
@ -231,23 +259,29 @@
#
# eligible: Function to determine whether the given derivation
# should be included in the build.
gather = eligible: node:
gather =
eligible: node:
if node ? __readTree then
# Include the node itself if it is eligible.
# Include the node itself if it is eligible.
(if eligible node then [ node ] else [ ])
# Include eligible children of the node
++ concatMap (gather eligible) (map (attr: node."${attr}") node.__readTreeChildren)
# Include specified sub-targets of the node
++ filter eligible (map
(k: (node."${k}" or { }) // {
# Keep the same tree location, but explicitly mark this
# node as a subtarget.
__readTree = node.__readTree;
__readTreeChildren = [ ];
__subtarget = k;
})
(subtargets node))
else [ ];
++ filter eligible (
map (
k:
(node."${k}" or { })
// {
# Keep the same tree location, but explicitly mark this
# node as a subtarget.
__readTree = node.__readTree;
__readTreeChildren = [ ];
__subtarget = k;
}
) (subtargets node)
)
else
[ ];

# Determine whether a given value is a derivation.
# Copied from nixpkgs/lib for cases where lib is not available yet.

@ -256,12 +290,14 @@ in
{
inherit gather mkLabel;

__functor = _:
{ path
, args
, filter ? (_parts: x: x)
, scopedArgs ? { }
, rootDir ? true
__functor =
_:
{
path,
args,
filter ? (_parts: x: x),
scopedArgs ? { },
rootDir ? true,
}:
readTree {
inherit args scopedArgs rootDir;
@ -285,43 +321,56 @@ in
# which should be able to access the restricted folder.
#
# reason: Textual explanation for the restriction (included in errors)
restrictFolder = { folder, exceptions ? [ ], reason }: parts: args:
if (elemAt parts 0) == folder || elem parts exceptions
then args
else args // {
depot = args.depot // {
"${folder}" = throw ''
Access to targets under //${folder} is not permitted from
other repository paths. Specific exceptions are configured
at the top-level.
restrictFolder =
{
folder,
exceptions ? [ ],
reason,
}:
parts: args:
if (elemAt parts 0) == folder || elem parts exceptions then
args
else
args
// {
depot = args.depot // {
"${folder}" = throw ''
Access to targets under //${folder} is not permitted from
other repository paths. Specific exceptions are configured
at the top-level.

${reason}
At location: ${builtins.concatStringsSep "." parts}
'';
${reason}
At location: ${builtins.concatStringsSep "." parts}
'';
};
};
};

# This definition of fix is identical to <nixpkgs>.lib.fix, but is
# provided here for cases where readTree is used before nixpkgs can
# be imported.
#
# It is often required to create the args attribute set.
fix = f: let x = f x; in x;
fix =
f:
let
x = f x;
in
x;

# Takes an attribute set and adds a meta.ci.targets attribute to it
# which contains all direct children of the attribute set which are
# derivations.
#
# Type: attrs -> attrs
drvTargets = attrs:
attrs // {
drvTargets =
attrs:
attrs
// {
# preserve .meta from original attrs
meta = (attrs.meta or { }) // {
# preserve .meta.ci (except .targets) from original attrs
ci = (attrs.meta.ci or { }) // {
targets = builtins.filter
(x: isDerivation attrs."${x}")
(builtins.attrNames attrs);
targets = builtins.filter (x: isDerivation attrs."${x}") (builtins.attrNames attrs);
};
};
};
@ -1,18 +1,26 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:

let
inherit (lib) partition optionalAttrs any;
inherit (builtins) tryEval;

it = msg: asserts:
it =
msg: asserts:
let
results = partition (a: a.ok) asserts;
in
{
_it = msg;
} // optionalAttrs (results.right != [ ]) {
}
// optionalAttrs (results.right != [ ]) {
passes = map (result: result.test) results.right;
} // optionalAttrs (results.wrong != [ ]) {
}
// optionalAttrs (results.wrong != [ ]) {
fails = map (result: result.test) results.wrong;
};

@ -21,16 +29,18 @@
ok = a == b;
};

assertThrows = test: value:
assertThrows =
test: value:
let
value' = tryEval value;
in
{
inherit test;
ok = ! value'.success;
ok = !value'.success;
};

runTestsuite = name: its:
runTestsuite =
name: its:
let
fails = any (it': it' ? fails) its;
in

@ -42,11 +52,14 @@
inherit its;
}
(
if fails then ''
jq '.its' < .attrs.json
'' else ''
jq '.its' < .attrs.json > $out
''
if fails then
''
jq '.its' < .attrs.json
''
else
''
jq '.its' < .attrs.json > $out
''
);

tree-ex = depot.nix.readTree {

@ -55,25 +68,14 @@
};

example = it "corresponds to the README example" [
(assertEq "third_party attrset"
(lib.isAttrs tree-ex.third_party
&& (! lib.isDerivation tree-ex.third_party))
true)
(assertEq "third_party attrset other attribute"
tree-ex.third_party.favouriteColour
"orange")
(assertEq "rustpkgs attrset aho-corasick"
tree-ex.third_party.rustpkgs.aho-corasick
"aho-corasick")
(assertEq "rustpkgs attrset serde"
tree-ex.third_party.rustpkgs.serde
"serde")
(assertEq "tools cheddear"
"cheddar"
tree-ex.tools.cheddar)
(assertEq "tools roquefort"
tree-ex.tools.roquefort
"roquefort")
(assertEq "third_party attrset" (
lib.isAttrs tree-ex.third_party && (!lib.isDerivation tree-ex.third_party)
) true)
(assertEq "third_party attrset other attribute" tree-ex.third_party.favouriteColour "orange")
(assertEq "rustpkgs attrset aho-corasick" tree-ex.third_party.rustpkgs.aho-corasick "aho-corasick")
(assertEq "rustpkgs attrset serde" tree-ex.third_party.rustpkgs.serde "serde")
(assertEq "tools cheddear" "cheddar" tree-ex.tools.cheddar)
(assertEq "tools roquefort" tree-ex.tools.roquefort "roquefort")
];

tree-tl = depot.nix.readTree {
@ -82,65 +84,64 @@
};

traversal-logic = it "corresponds to the traversal logic in the README" [
(assertEq "skip-tree/a is read"
tree-tl.skip-tree.a
"a is read normally")
(assertEq "skip-tree does not contain b"
(builtins.attrNames tree-tl.skip-tree)
[ "__readTree" "__readTreeChildren" "a" ])
(assertEq "skip-tree children list does not contain b"
tree-tl.skip-tree.__readTreeChildren
[ "a" ])
(assertEq "skip-tree/a is read" tree-tl.skip-tree.a "a is read normally")
(assertEq "skip-tree does not contain b" (builtins.attrNames tree-tl.skip-tree) [
"__readTree"
"__readTreeChildren"
"a"
])
(assertEq "skip-tree children list does not contain b" tree-tl.skip-tree.__readTreeChildren [ "a" ])

(assertEq "skip subtree default.nix is read"
tree-tl.skip-subtree.but
"the default.nix is still read")
(assertEq "skip subtree a/default.nix is skipped"
(tree-tl.skip-subtree ? a)
false)
(assertEq "skip subtree b/c.nix is skipped"
(tree-tl.skip-subtree ? b)
false)
(assertEq "skip subtree default.nix is read" tree-tl.skip-subtree.but
"the default.nix is still read"
)
(assertEq "skip subtree a/default.nix is skipped" (tree-tl.skip-subtree ? a) false)
(assertEq "skip subtree b/c.nix is skipped" (tree-tl.skip-subtree ? b) false)
(assertEq "skip subtree a/default.nix would be read without .skip-subtree"
(tree-tl.no-skip-subtree.a)
"am I subtree yet?")
(assertEq "skip subtree b/c.nix would be read without .skip-subtree"
(tree-tl.no-skip-subtree.b.c)
"cool")
"am I subtree yet?"
)
(assertEq "skip subtree b/c.nix would be read without .skip-subtree" (tree-tl.no-skip-subtree.b.c
) "cool")

(assertEq "default.nix attrset is merged with siblings"
tree-tl.default-nix.no
"siblings should be read")
(assertEq "default.nix means sibling isn’t read"
(tree-tl.default-nix ? sibling)
false)
(assertEq "default.nix attrset is merged with siblings" tree-tl.default-nix.no
"siblings should be read"
)
(assertEq "default.nix means sibling isn’t read" (tree-tl.default-nix ? sibling) false)
(assertEq "default.nix means subdirs are still read and merged into default.nix"
(tree-tl.default-nix.subdir.a)
"but I’m picked up")
"but I’m picked up"
)

(assertEq "default.nix can be not an attrset"
tree-tl.default-nix.no-merge
"I’m not merged with any children")
(assertEq "default.nix is not an attrset -> children are not merged"
(tree-tl.default-nix.no-merge ? subdir)
false)
(assertEq "default.nix can be not an attrset" tree-tl.default-nix.no-merge
"I’m not merged with any children"
)
(assertEq "default.nix is not an attrset -> children are not merged" (
tree-tl.default-nix.no-merge ? subdir
) false)

(assertEq "default.nix can contain a derivation"
(lib.isDerivation tree-tl.default-nix.can-be-drv)
true)
(assertEq "default.nix can contain a derivation" (lib.isDerivation tree-tl.default-nix.can-be-drv)
true
)
(assertEq "Even if default.nix is a derivation, children are traversed and merged"
tree-tl.default-nix.can-be-drv.subdir.a
"Picked up through the drv")
(assertEq "default.nix drv is not changed by readTree"
tree-tl.default-nix.can-be-drv
(import ./test-tree-traversal/default-nix/can-be-drv/default.nix { }))
"Picked up through the drv"
)
(assertEq "default.nix drv is not changed by readTree" tree-tl.default-nix.can-be-drv (
import ./test-tree-traversal/default-nix/can-be-drv/default.nix { }
))
(assertEq "`here` argument represents the attrset a given file is part of"
(builtins.removeAttrs tree-tl.here-arg [ "__readTree" "__readTreeChildren" "subdir" ])
(builtins.removeAttrs tree-tl.here-arg [
"__readTree"
"__readTreeChildren"
"subdir"
])
{
attr1 = "foo";
attr2 = "foo";
attr3 = "sibl1";
})
}
)
];

# these each call readTree themselves because the throws have to happen inside assertThrows

@ -149,7 +150,8 @@
(depot.nix.readTree {
path = ./test-wrong-not-a-function;
args = { };
}).not-a-function)
}).not-a-function
)
# can’t test for that, assertThrows can’t catch this error
# (assertThrows "this file is a function but doesn’t have dots"
# (depot.nix.readTree {} ./test-wrong-no-dots).no-dots-in-function)

@ -160,22 +162,36 @@
args = { };
};

assertMarkerByPath = path:
assertMarkerByPath =
path:
assertEq "${lib.concatStringsSep "." path} is marked correctly"
(lib.getAttrFromPath path read-markers).__readTree
path;

markers = it "marks nodes correctly" [
(assertMarkerByPath [ "directory-marked" ])
(assertMarkerByPath [ "directory-marked" "nested" ])
(assertMarkerByPath [ "file-children" "one" ])
(assertMarkerByPath [ "file-children" "two" ])
(assertEq "nix file children are marked correctly"
read-markers.file-children.__readTreeChildren [ "one" "two" ])
(assertEq "directory children are marked correctly"
read-markers.directory-marked.__readTreeChildren [ "nested" ])
(assertEq "absence of children is marked"
read-markers.directory-marked.nested.__readTreeChildren [ ])
(assertMarkerByPath [
"directory-marked"
"nested"
])
(assertMarkerByPath [
"file-children"
"one"
])
(assertMarkerByPath [
"file-children"
"two"
])
(assertEq "nix file children are marked correctly" read-markers.file-children.__readTreeChildren [
"one"
"two"
])
(assertEq "directory children are marked correctly" read-markers.directory-marked.__readTreeChildren
[ "nested" ]
)
(assertEq "absence of children is marked" read-markers.directory-marked.nested.__readTreeChildren
[ ]
)
];

in
@ -1,3 +1,4 @@
{ ... }:

{ }
{
}

@ -1,3 +1,4 @@
{ ... }:

{ }
{
}

@ -1,3 +1,4 @@
{ ... }:

{ }
{
}

@ -1,3 +1,4 @@
{ ... }:

{ }
{
}

@ -3,5 +3,8 @@ derivation {
name = "im-a-drv";
system = builtins.currentSystem;
builder = "/bin/sh";
args = [ "-c" ''echo "" > $out'' ];
args = [
"-c"
''echo "" > $out''
];
}

@ -1,4 +1,5 @@
{ here, ... }: {
{ here, ... }:
{
attr1 = "foo";
attr2 = here.attr1;

@ -1,2 +1 @@
{ here, ... }:
here.sibl1
{ here, ... }: here.sibl1

@ -1,3 +1,3 @@
{}:
{ }:

"This is a function, but readTree wants to pass a bunch of arguments, and not having dots means we depend on exactly which arguments."
@ -15,19 +15,19 @@

{
# root path to use as a reference point
root
, # list of paths below `root` that should be
root,
# list of paths below `root` that should be
# included in the resulting directory
#
# If path, need to refer to the actual file / directory to be included.
# If a string, it is treated as a string relative to the root.
paths
, # (optional) name to use for the derivation
paths,
# (optional) name to use for the derivation
#
# This should always be set when using roots that do not have
# controlled names, such as when passing the top-level of a git
# repository (e.g. `depot.path.origSrc`).
name ? builtins.baseNameOf root
name ? builtins.baseNameOf root,
}:

let

@ -36,12 +36,14 @@ let
# Count slashes in a path.
#
# Type: path -> int
depth = path: lib.pipe path [
toString
(builtins.split "/")
(builtins.filter builtins.isList)
builtins.length
];
depth =
path:
lib.pipe path [
toString
(builtins.split "/")
(builtins.filter builtins.isList)
builtins.length
];

# (Parent) directories will be created from deepest to shallowest
# which should mean no conflicts are caused unless both a child

@ -52,19 +54,24 @@ let
# Create a set which contains the source path to copy / symlink and
# it's destination, so the path below the destination root including
# a leading slash. Additionally some sanity checking is done.
makeSymlink = path:
makeSymlink =
path:
let
withLeading = p: if builtins.substring 0 1 p == "/" then p else "/" + p;
fullPath =
if builtins.isPath path then path
else if builtins.isString path then (root + withLeading path)
else builtins.throw "Unsupported path type ${builtins.typeOf path}";
if builtins.isPath path then
path
else if builtins.isString path then
(root + withLeading path)
else
builtins.throw "Unsupported path type ${builtins.typeOf path}";
strPath = toString fullPath;
contextPath = "${fullPath}";
belowRoot = builtins.substring rootLength (-1) strPath;
prefix = builtins.substring 0 rootLength strPath;
in
assert toString root == prefix; {
assert toString root == prefix;
{
src = contextPath;
dst = belowRoot;
};

@ -73,12 +80,13 @@ let
in

# TODO(sterni): teach readTree to also read symlinked directories,
# so we ln -sT instead of cp -aT.
# so we ln -sT instead of cp -aT.
pkgs.runCommand "sparse-${name}" { } (
lib.concatMapStrings
({ src, dst }: ''
lib.concatMapStrings (
{ src, dst }:
''
mkdir -p "$(dirname "$out${dst}")"
cp -aT --reflink=auto "${src}" "$out${dst}"
'')
symlinks
''
) symlinks
)
@ -10,26 +10,23 @@ rec {
#

# Type: stateMonad s a -> (a -> stateMonad s b) -> stateMonad s b
bind = action: f: state:
bind =
action: f: state:
let
afterAction = action state;
in
(f afterAction.value) afterAction.state;

# Type: stateMonad s a -> stateMonad s b -> stateMonad s b
after = action1: action2: state: action2 (action1 state).state;
after =
action1: action2: state:
action2 (action1 state).state;

# Type: stateMonad s (stateMonad s a) -> stateMonad s a
join = action: bind action (action': action');

# Type: [a] -> (a -> stateMonad s b) -> stateMonad s null
for_ = xs: f:
builtins.foldl'
(laterAction: x:
after (f x) laterAction
)
(pure null)
xs;
for_ = xs: f: builtins.foldl' (laterAction: x: after (f x) laterAction) (pure null) xs;

#
# Applicative

@ -52,10 +49,16 @@ rec {
#

# Type: (s -> s) -> stateMonad s null
modify = f: state: { value = null; state = f state; };
modify = f: state: {
value = null;
state = f state;
};

# Type: stateMonad s s
get = state: { value = state; inherit state; };
get = state: {
value = state;
inherit state;
};

# Type: s -> stateMonad s null
set = new: modify (_: new);

@ -64,9 +67,15 @@ rec {
getAttr = attr: fmap (state: state.${attr}) get;

# Type: str -> (any -> any) -> stateMonad s null
modifyAttr = attr: f: modify (state: state // {
${attr} = f state.${attr};
});
modifyAttr =
attr: f:
modify (
state:
state
// {
${attr} = f state.${attr};
}
);

# Type: str -> any -> stateMonad s null
setAttr = attr: value: modifyAttr attr (_: value);
@ -3,28 +3,33 @@ let
# Takes a tag, checks whether it is an attrset with one element,
# if so sets `isTag` to `true` and sets the name and value.
# If not, sets `isTag` to `false` and sets `errmsg`.
verifyTag = tag:
verifyTag =
tag:
let
cases = builtins.attrNames tag;
len = builtins.length cases;
in
if builtins.length cases == 1
then
let name = builtins.head cases; in {
if builtins.length cases == 1 then
let
name = builtins.head cases;
in
{
isTag = true;
name = name;
val = tag.${name};
errmsg = null;
}
else {
isTag = false;
errmsg =
("match: an instance of a sum is an attrset "
else
{
isTag = false;
errmsg = (
"match: an instance of a sum is an attrset "
+ "with exactly one element, yours had ${toString len}"
+ ", namely: ${lib.generators.toPretty {} cases}");
name = null;
val = null;
};
+ ", namely: ${lib.generators.toPretty { } cases}"
);
name = null;
val = null;
};

# Returns the tag name of a given tag attribute set.
# Throws if the tag is invalid.

@ -39,11 +44,15 @@ let
tagValue = tag: (assertIsTag tag).val;

# like `verifyTag`, but throws the error message if it is not a tag.
assertIsTag = tag:
let res = verifyTag tag; in
assertIsTag =
tag:
let
res = verifyTag tag;
in
assert res.isTag || throw res.errmsg;
{ inherit (res) name val; };

{
inherit (res) name val;
};

# Discriminator for values.
# Goes through a list of tagged predicates `{ <tag> = <pred>; }`

@ -64,22 +73,22 @@ let
# { negative = i: i < 0; }
# ] 1
# => { smol = 1; }
discrDef = defTag: fs: v:
discrDef =
defTag: fs: v:
let
res = lib.findFirst
(t: t.val v)
null
(map assertIsTag fs);
res = lib.findFirst (t: t.val v) null (map assertIsTag fs);
in
if res == null
then { ${defTag} = v; }
else { ${res.name} = v; };
if res == null then { ${defTag} = v; } else { ${res.name} = v; };

# Like `discrDef`, but fail if there is no match.
discr = fs: v:
let res = discrDef null fs v; in
assert lib.assertMsg (res != { })
"tag.discr: No predicate found that matches ${lib.generators.toPretty {} v}";
discr =
fs: v:
let
res = discrDef null fs v;
in
assert lib.assertMsg (
res != { }
) "tag.discr: No predicate found that matches ${lib.generators.toPretty { } v}";
res;

# The canonical pattern matching primitive.

@ -104,20 +113,27 @@ let
# match success matcher == 43
# && match failure matcher == 0;
#
match = sum: matcher:
let cases = builtins.attrNames sum;
in assert
let len = builtins.length cases; in
lib.assertMsg (len == 1)
("match: an instance of a sum is an attrset "
match =
sum: matcher:
let
cases = builtins.attrNames sum;
in
assert
let
len = builtins.length cases;
in
lib.assertMsg (len == 1) (
"match: an instance of a sum is an attrset "
+ "with exactly one element, yours had ${toString len}"
+ ", namely: ${lib.generators.toPretty {} cases}");
let case = builtins.head cases;
in assert
lib.assertMsg (matcher ? ${case})
("match: \"${case}\" is not a valid case of this sum, "
+ "the matcher accepts: ${lib.generators.toPretty {}
(builtins.attrNames matcher)}");
+ ", namely: ${lib.generators.toPretty { } cases}"
);
let
case = builtins.head cases;
in
assert lib.assertMsg (matcher ? ${case}) (
"match: \"${case}\" is not a valid case of this sum, "
+ "the matcher accepts: ${lib.generators.toPretty { } (builtins.attrNames matcher)}"
);
matcher.${case} sum.${case};

# A `match` with the arguments flipped.
@ -1,100 +1,99 @@
{ depot, lib, ... }:

let
/* Get the basename of a store path without
the leading hash.
/*
Get the basename of a store path without
the leading hash.

Type: (path | drv | string) -> string
Type: (path | drv | string) -> string

Example:
storePathName ./foo.c
=> "foo.c"
Example:
storePathName ./foo.c
=> "foo.c"

storePathName (writeText "foo.c" "int main() { return 0; }")
=> "foo.c"
storePathName (writeText "foo.c" "int main() { return 0; }")
=> "foo.c"

storePathName "${hello}/bin/hello"
=> "hello"
storePathName "${hello}/bin/hello"
=> "hello"
*/
storePathName = p:
if lib.isDerivation p
then p.name
else if builtins.isPath p
then builtins.baseNameOf p
else if builtins.isString p || (builtins.isAttrs p && (p ? outPath || p ? __toString))
then
storePathName =
p:
if lib.isDerivation p then
p.name
else if builtins.isPath p then
builtins.baseNameOf p
else if builtins.isString p || (builtins.isAttrs p && (p ? outPath || p ? __toString)) then
let
strPath = toString p;
# strip leading storeDir and trailing slashes
noStoreDir = lib.removeSuffix "/"
(lib.removePrefix "${builtins.storeDir}/" strPath);
noStoreDir = lib.removeSuffix "/" (lib.removePrefix "${builtins.storeDir}/" strPath);
# a basename of a child of a store path isn't really
# referring to a store path, so removing the string
# context is safe (e. g. "hello" for "${hello}/bin/hello").
basename = builtins.unsafeDiscardStringContext
(builtins.baseNameOf strPath);
basename = builtins.unsafeDiscardStringContext (builtins.baseNameOf strPath);
in
# If p is a direct child of storeDir, we need to remove
# the leading hash as well to make sure that:
# `storePathName drv == storePathName (toString drv)`.
if noStoreDir == basename
then builtins.substring 33 (-1) basename
else basename
else builtins.throw "Don't know how to get (base)name of "
+ lib.generators.toPretty { } p;
# the leading hash as well to make sure that:
# `storePathName drv == storePathName (toString drv)`.
if noStoreDir == basename then builtins.substring 33 (-1) basename else basename
else
builtins.throw "Don't know how to get (base)name of " + lib.generators.toPretty { } p;

/* Query the type of a path exposing the same information as would be by
`builtins.readDir`, but for a single, specific target path.
/*
Query the type of a path exposing the same information as would be by
`builtins.readDir`, but for a single, specific target path.

The information is returned as a tagged value, i. e. an attribute set with
exactly one attribute where the type of the path is encoded in the name
of the single attribute. The allowed tags and values are as follows:
The information is returned as a tagged value, i. e. an attribute set with
exactly one attribute where the type of the path is encoded in the name
of the single attribute. The allowed tags and values are as follows:

* `regular`: is a regular file, always `true` if returned
* `directory`: is a directory, always `true` if returned
* `missing`: path does not exist, always `true` if returned
* `symlink`: path is a symlink, always `true` if returned
* `regular`: is a regular file, always `true` if returned
* `directory`: is a directory, always `true` if returned
* `missing`: path does not exist, always `true` if returned
* `symlink`: path is a symlink, always `true` if returned

Type: path(-like) -> tag
Type: path(-like) -> tag

`tag` refers to the attribute set format of `//nix/tag`.
`tag` refers to the attribute set format of `//nix/tag`.

Example:
pathType ./foo.c
=> { regular = true; }
Example:
pathType ./foo.c
=> { regular = true; }

pathType /home/lukas
=> { directory = true; }
pathType /home/lukas
=> { directory = true; }

pathType ./result
=> { symlink = true; }
pathType ./result
=> { symlink = true; }

pathType ./link-to-file
=> { symlink = true; }
pathType ./link-to-file
=> { symlink = true; }

pathType /does/not/exist
=> { missing = true; }
pathType /does/not/exist
=> { missing = true; }

# Check if a path exists
!(pathType /file ? missing)
# Check if a path exists
!(pathType /file ? missing)

# Check if a path is a directory or a symlink to a directory
# A handy shorthand for this is provided as `realPathIsDirectory`.
pathType /path ? directory || (pathType /path).symlink or null == "directory"
# Check if a path is a directory or a symlink to a directory
# A handy shorthand for this is provided as `realPathIsDirectory`.
pathType /path ? directory || (pathType /path).symlink or null == "directory"

# Match on the result using //nix/tag
nix.tag.match (nix.utils.pathType ./result) {
symlink = _: "symlink";
directory = _: "directory";
regular = _: "regular";
missing = _: "path does not exist";
}
=> "symlink"
# Match on the result using //nix/tag
nix.tag.match (nix.utils.pathType ./result) {
symlink = _: "symlink";
directory = _: "directory";
regular = _: "regular";
missing = _: "path does not exist";
}
=> "symlink"

# Query path type
nix.tag.tagName (pathType /path)
# Query path type
nix.tag.tagName (pathType /path)
*/
pathType = path:
pathType =
path:
let
# baseNameOf is very annoyed if we proceed with string context.
# We need to call toString to prevent unsafeDiscardStringContext

@ -119,52 +118,56 @@ let
${thisPathType} = true;
};

pathType' = path:
pathType' =
path:
let
p = pathType path;
in
if p ? missing
then builtins.throw "${lib.generators.toPretty {} path} does not exist"
else p;
if p ? missing then builtins.throw "${lib.generators.toPretty { } path} does not exist" else p;

/* Check whether the given path is a directory.
Throws if the path in question doesn't exist.
/*
Check whether the given path is a directory.
Throws if the path in question doesn't exist.

Type: path(-like) -> bool
Type: path(-like) -> bool
*/
isDirectory = path: pathType' path ? directory;

/* Check whether the given path is a regular file.
Throws if the path in question doesn't exist.
/*
Check whether the given path is a regular file.
Throws if the path in question doesn't exist.

Type: path(-like) -> bool
Type: path(-like) -> bool
*/
isRegularFile = path: pathType' path ? regular;

/* Check whether the given path is a symbolic link.
Throws if the path in question doesn't exist.
/*
Check whether the given path is a symbolic link.
Throws if the path in question doesn't exist.

Type: path(-like) -> bool
Type: path(-like) -> bool
*/
isSymlink = path: pathType' path ? symlink;

/* Checks whether the given value is (or contains) a reference to a
path that will be retained in the store path resulting from a derivation.
So if isReferencablePath returns true, the given value may be used in a
way that allows accessing it at runtime of any Nix built program.
/*
Checks whether the given value is (or contains) a reference to a
path that will be retained in the store path resulting from a derivation.
So if isReferencablePath returns true, the given value may be used in a
way that allows accessing it at runtime of any Nix built program.

Returns true for:
Returns true for:

- Strings with context (if the string is/contains a single path is not verified!)
- Path values
- Derivations
- Strings with context (if the string is/contains a single path is not verified!)
- Path values
- Derivations

Note that the value still needs to used in a way that forces string context
(and thus reference tracking) to be created, e.g. in string interpolation.
Note that the value still needs to used in a way that forces string context
(and thus reference tracking) to be created, e.g. in string interpolation.

Type: any -> bool
Type: any -> bool
*/
isReferencablePath = value:
isReferencablePath =
value:
builtins.isPath value
|| lib.isDerivation value
|| (builtins.isString value && builtins.hasContext value);
@ -1,35 +1,55 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:
let
inherit (lib) fix pipe mapAttrsToList isAttrs concatLines isString isDerivation isPath;
inherit (lib)
fix
pipe
mapAttrsToList
isAttrs
concatLines
isString
isDerivation
isPath
;
inherit (depot.nix.utils) isReferencablePath;

esc = s: lib.escapeShellArg /* ensure paths import into store */ "${s}";
esc =
s:
lib.escapeShellArg # ensure paths import into store
"${s}";

writeTreeAtPath = path: tree:
writeTreeAtPath =
path: tree:
''
mkdir -p "$out/"${esc path}
''
+ pipe tree [
(mapAttrsToList (k: v:
(mapAttrsToList (
k: v:
if isReferencablePath v then
"cp -R --reflink=auto ${esc "${v}"} \"$out/\"${esc path}/${esc k}"
else if lib.isAttrs v then
writeTreeAtPath (path + "/" + k) v
else
throw "invalid type (expected path, derivation, string with context, or attrs)"))
throw "invalid type (expected path, derivation, string with context, or attrs)"
))
concatLines
];

/* Create a directory tree specified by a Nix attribute set structure.
/*
Create a directory tree specified by a Nix attribute set structure.

Each value in `tree` should either be a file, a directory, or another tree
attribute set. Those paths will be written to a directory tree
corresponding to the structure of the attribute set.
Each value in `tree` should either be a file, a directory, or another tree
attribute set. Those paths will be written to a directory tree
corresponding to the structure of the attribute set.

Type: string -> attrSet -> derivation
Type: string -> attrSet -> derivation
*/
writeTree = name: tree:
pkgs.runCommandLocal name { } (writeTreeAtPath "" tree);
writeTree = name: tree: pkgs.runCommandLocal name { } (writeTreeAtPath "" tree);
in

# __functor trick so readTree can add the tests attribute
@ -1,93 +1,102 @@
{ depot, pkgs, lib, ... }:
{
depot,
pkgs,
lib,
...
}:

let
inherit (pkgs) runCommand writeText writeTextFile;
inherit (depot.nix) writeTree;

checkTree = name: tree: expected:
checkTree =
name: tree: expected:
runCommand "writeTree-test-${name}"
{
nativeBuildInputs = [ pkgs.buildPackages.lr ];
passAsFile = [ "expected" ];
inherit expected;
} ''
actualPath="$NIX_BUILD_TOP/actual"
cd ${lib.escapeShellArg (writeTree name tree)}
lr . > "$actualPath"
diff -u "$expectedPath" "$actualPath" | tee "$out"
'';
}
''
actualPath="$NIX_BUILD_TOP/actual"
cd ${lib.escapeShellArg (writeTree name tree)}
lr . > "$actualPath"
diff -u "$expectedPath" "$actualPath" | tee "$out"
'';
in

depot.nix.readTree.drvTargets {
empty = checkTree "empty" { }
''
.
'';
empty = checkTree "empty" { } ''
.
'';

simple-paths = checkTree "simple"
{
writeTree = {
meta = {
"owners.txt" = ../OWNERS;
simple-paths =
checkTree "simple"
{
writeTree = {
meta = {
"owners.txt" = ../OWNERS;
};
"code.nix" = ../default.nix;
all-tests = ./.;
nested.dirs.eval-time = builtins.toFile "owothia" ''
hold me owo
'';
};
"code.nix" = ../default.nix;
all-tests = ./.;
nested.dirs.eval-time = builtins.toFile "owothia" ''
hold me owo
'';
};
}
''
.
./writeTree
./writeTree/all-tests
./writeTree/all-tests/default.nix
./writeTree/code.nix
./writeTree/meta
./writeTree/meta/owners.txt
./writeTree/nested
./writeTree/nested/dirs
./writeTree/nested/dirs/eval-time
'';

empty-dirs = checkTree "empty-dirs"
{
this.dir.is.empty = { };
so.is.this.one = { };
}
''
.
./so
./so/is
./so/is/this
./so/is/this/one
./this
./this/dir
./this/dir/is
./this/dir/is/empty
'';

drvs = checkTree "drvs"
{
file-drv = writeText "road.txt" ''
Any road followed precisely to its end leads precisely nowhere.
}
''
.
./writeTree
./writeTree/all-tests
./writeTree/all-tests/default.nix
./writeTree/code.nix
./writeTree/meta
./writeTree/meta/owners.txt
./writeTree/nested
./writeTree/nested/dirs
./writeTree/nested/dirs/eval-time
'';
dir-drv = writeTextFile {
name = "dir-of-text";
destination = "/text/in/more/dirs.txt";
text = ''
Climb the mountain just a little bit to test that it’s a mountain.
From the top of the mountain, you cannot see the mountain.

empty-dirs =
checkTree "empty-dirs"
{
this.dir.is.empty = { };
so.is.this.one = { };
}
''
.
./so
./so/is
./so/is/this
./so/is/this/one
./this
./this/dir
./this/dir/is
./this/dir/is/empty
'';

drvs =
checkTree "drvs"
{
file-drv = writeText "road.txt" ''
Any road followed precisely to its end leads precisely nowhere.
'';
};
}
''
.
./dir-drv
./dir-drv/text
./dir-drv/text/in
./dir-drv/text/in/more
./dir-drv/text/in/more/dirs.txt
./file-drv
'';
dir-drv = writeTextFile {
name = "dir-of-text";
destination = "/text/in/more/dirs.txt";
text = ''
Climb the mountain just a little bit to test that it’s a mountain.
From the top of the mountain, you cannot see the mountain.
'';
};
}
''
.
./dir-drv
./dir-drv/text
./dir-drv/text/in
./dir-drv/text/in/more
./dir-drv/text/in/more/dirs.txt
./file-drv
'';
}
@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:

depot.nix.readTree.drvTargets rec {
terraform = pkgs.terraform.withPlugins (p: [

@ -1,4 +1,5 @@
{ ... }: {
{ ... }:
{
node_exporter = ./json/node_exporter.json;
all = ./json;
}

@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:

depot.nix.readTree.drvTargets rec {
# Provide a Terraform wrapper with the right provider installed.

@ -1,4 +1,9 @@
{ depot, pkgs, lib, ... }:
{
depot,
pkgs,
lib,
...
}:
{ gerrit }:

let

@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:

depot.nix.readTree.drvTargets rec {
terraform = pkgs.terraform.withPlugins (p: [

@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:

depot.nix.readTree.drvTargets rec {
terraform = pkgs.terraform.withPlugins (p: [

@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }:
{
depot,
lib,
pkgs,
...
}:

depot.nix.readTree.drvTargets rec {
# Provide a Terraform wrapper with the right provider installed.
@ -21,10 +21,11 @@ in
path = [ depot.contrib.archivist.parse-bucket-logs ];
serviceConfig = {
Type = "oneshot";
ExecStart = (pkgs.writers.writePython3 "parse-bucket-logs-continuously"
{
ExecStart = (
pkgs.writers.writePython3 "parse-bucket-logs-continuously" {
libraries = [ pkgs.python3Packages.boto3 ];
} ./parse-bucket-logs-continuously.py);
} ./parse-bucket-logs-continuously.py
);
DynamicUser = "yes";
StateDirectory = "parse-bucket-logs";
};

@ -38,4 +39,3 @@ in

system.stateVersion = "23.05"; # Did you read the comment?
}
@ -1,27 +1,30 @@
{ lib, modulesPath, ... }:

{
imports =
[
(modulesPath + "/profiles/qemu-guest.nix")
];
imports = [
(modulesPath + "/profiles/qemu-guest.nix")
];

boot.initrd.availableKernelModules = [ "ahci" "xhci_pci" "virtio_pci" "sr_mod" "virtio_blk" ];
boot.initrd.availableKernelModules = [
"ahci"
"xhci_pci"
"virtio_pci"
"sr_mod"
"virtio_blk"
];
boot.initrd.kernelModules = [ ];
boot.kernelModules = [ "kvm-amd" ];
boot.extraModulePackages = [ ];

fileSystems."/" =
{
device = "/dev/disk/by-partlabel/root";
fsType = "xfs";
};
fileSystems."/" = {
device = "/dev/disk/by-partlabel/root";
fsType = "xfs";
};

fileSystems."/boot" =
{
device = "/dev/disk/by-partlabel/boot";
fsType = "vfat";
};
fileSystems."/boot" = {
device = "/dev/disk/by-partlabel/boot";
fsType = "vfat";
};

swapDevices = [ ];
@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }: # readTree options
{
depot,
lib,
pkgs,
...
}: # readTree options
{ config, ... }: # passed by module system
let
mod = name: depot.path.origSrc + ("/ops/modules/" + name);

@ -24,7 +29,11 @@ in
boot.specialFileSystems = lib.mkForce {
"/run/wrappers" = {
fsType = "tmpfs";
options = [ "nodev" "mode=755" "size=${config.security.wrapperDirSize}" ];
options = [
"nodev"
"mode=755"
"size=${config.security.wrapperDirSize}"
];
};
};

@ -56,10 +65,17 @@ in

interfaces.host0.ipv6 = {
addresses = [
{ address = "2001:bc8:38ee:100:7000::20"; prefixLength = 64; }
{
address = "2001:bc8:38ee:100:7000::20";
prefixLength = 64;
}
];
routes = [
{ address = "64:ff9b::"; via = "2001:bc8:38ee:100::100"; prefixLength = 96; }
{
address = "64:ff9b::";
via = "2001:bc8:38ee:100::100";
prefixLength = 96;
}
];
};

@ -112,7 +128,8 @@ in
];

time.timeZone = "UTC";
users.users.root.openssh.authorizedKeys.keys = depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;
users.users.root.openssh.authorizedKeys.keys =
depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;
users.groups.kvm = { };
users.users.root.extraGroups = [ "kvm" ];
@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }: # readTree options
{
depot,
lib,
pkgs,
...
}: # readTree options
{ config, ... }: # passed by module system

let

@ -111,7 +116,8 @@ in
createHome = true;
home = "/var/lib/git";
};
users.root.openssh.authorizedKeys.keys = depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;
users.root.openssh.authorizedKeys.keys =
depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;
};

boot.initrd.systemd.enable = true;

@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }: # readTree options
{
depot,
lib,
pkgs,
...
}: # readTree options
{ config, ... }: # passed by module system

let

@ -130,7 +135,8 @@ in
# Required for prometheus to be able to scrape stats
services.nginx.statusPage = true;

users.users.root.openssh.authorizedKeys.keys = depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;
users.users.root.openssh.authorizedKeys.keys =
depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;

boot.initrd.systemd.enable = true;
zramSwap.enable = true;

@ -1,4 +1,9 @@
{ depot, lib, pkgs, ... }: # readTree options
{
depot,
lib,
pkgs,
...
}: # readTree options
{ config, ... }: # passed by module system

let

@ -158,7 +163,8 @@ in
# Required for prometheus to be able to scrape stats
services.nginx.statusPage = true;

users.users.root.openssh.authorizedKeys.keys = depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;
users.users.root.openssh.authorizedKeys.keys =
depot.ops.users.edef ++ depot.ops.users.flokli ++ depot.ops.users.raito;

boot.initrd.systemd.enable = true;
zramSwap.enable = true;
@ -1,18 +1,24 @@
{ depot, pkgs, lib, ... }: # readTree options
{
depot,
pkgs,
lib,
...
}: # readTree options
{ config, ... }: # passed by module system

let
srvos =
import (builtins.fetchTarball {
srvos = import (
builtins.fetchTarball {
url = "https://github.com/nix-community/srvos/archive/8e7d3c690975ee6790926bdfd1258016c967d163.tar.gz";
sha256 = "sha256-l7epHqAcg8Qktu8vO2ZfjSH1wcai01XQOKQA9ADHIk4=";
});
disko =
(builtins.fetchTarball {
}
);
disko = (
builtins.fetchTarball {
url = "https://github.com/nix-community/disko/archive/84dd8eea9a06006d42b8af7cfd4fda4cf334db81.tar.gz";
sha256 = "13mfnjnjp21wms4mw35ar019775qgy3fnjc59zrpnqbkfmzyvv02";
});

}
);

in
{

@ -38,7 +44,6 @@ in
forceSSL = true;
};

security.acme.acceptTerms = true;
security.acme.defaults.email = "admin+acme@numtide.com";

@ -48,11 +53,9 @@ in

systemd.network.networks."10-uplink".networkConfig.Address = "2a01:4f9:3071:1091::2/64";

# Enable SSH and add some keys
services.openssh.enable = true;

users.users.root.openssh.authorizedKeys.keys =
depot.ops.users.edef
++ depot.ops.users.flokli

@ -174,7 +174,8 @@ in
};
};

systemd.services.grafana.serviceConfig.LoadCredential = "github_auth_client_secret:/etc/secrets/grafana_github_auth_client_secret";
systemd.services.grafana.serviceConfig.LoadCredential =
"github_auth_client_secret:/etc/secrets/grafana_github_auth_client_secret";
systemd.services.grafana.serviceConfig.RuntimeDirectory = "grafana";
systemd.services.grafana.serviceConfig.SupplementaryGroups = "nginx";
@ -1,9 +1,10 @@
|
|||
{ config
|
||||
, lib
|
||||
, utils
|
||||
, pkgs
|
||||
, depot
|
||||
, ...
|
||||
{
|
||||
config,
|
||||
lib,
|
||||
utils,
|
||||
pkgs,
|
||||
depot,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.services.nar-bridge;
|
||||
|
|
|
|||
|
|
@ -9,13 +9,15 @@
|
|||
virtualHosts.${config.machine.domain} = {
|
||||
locations."=/" = {
|
||||
tryFiles = "$uri $uri/index.html =404";
|
||||
root = pkgs.runCommand "index"
|
||||
{
|
||||
nativeBuildInputs = [ pkgs.markdown2html-converter ];
|
||||
} ''
|
||||
mkdir -p $out
|
||||
markdown2html-converter ${./README.md} -o $out/index.html
|
||||
'';
|
||||
root =
|
||||
pkgs.runCommand "index"
|
||||
{
|
||||
nativeBuildInputs = [ pkgs.markdown2html-converter ];
|
||||
}
|
||||
''
|
||||
mkdir -p $out
|
||||
markdown2html-converter ${./README.md} -o $out/index.html
|
||||
'';
|
||||
};
|
||||
locations."/" = {
|
||||
proxyPass = "http://unix:/run/nar-bridge.sock:/";
|
||||
|
|
|
|||
|
|
@ -3,10 +3,21 @@
|
|||
# Thanks to all the Lix core developers for this!
|
||||
# vim: et:ts=2:sw=2:
|
||||
#
|
||||
{ depot, pkgs, lib, config, ... }:
|
||||
{
|
||||
depot,
|
||||
pkgs,
|
||||
lib,
|
||||
config,
|
||||
...
|
||||
}:
|
||||
let
|
||||
cfg = config.services.depot.forgejo;
|
||||
inherit (lib) types mkEnableOption mkOption mkIf;
|
||||
inherit (lib)
|
||||
types
|
||||
mkEnableOption
|
||||
mkOption
|
||||
mkIf
|
||||
;
|
||||
emojo =
|
||||
let
|
||||
drgn = pkgs.fetchzip {
|
||||
|
|
@ -39,7 +50,15 @@ let
|
|||
'';
|
||||
};
|
||||
in
|
||||
pkgs.symlinkJoin { name = "emojo"; paths = [ drgn neocat neofox dragn ]; };
|
||||
pkgs.symlinkJoin {
|
||||
name = "emojo";
|
||||
paths = [
|
||||
drgn
|
||||
neocat
|
||||
neofox
|
||||
dragn
|
||||
];
|
||||
};
|
||||
in
|
||||
{
|
||||
options.services.depot.forgejo = {
|
||||
|
|
@ -77,13 +96,15 @@ in
|
|||
enable = true;
|
||||
|
||||
package = pkgs.forgejo.overrideAttrs (old: {
|
||||
patches = old.patches ++ (with depot.third_party.lix_forgejo.patches; [
|
||||
upstream_link
|
||||
signin_redirect
|
||||
api_dont_notify
|
||||
forgejo_is_now_gerrit_native
|
||||
forgejo_knows_about_gerrit
|
||||
]);
|
||||
patches =
|
||||
old.patches
|
||||
++ (with depot.third_party.lix_forgejo.patches; [
|
||||
upstream_link
|
||||
signin_redirect
|
||||
api_dont_notify
|
||||
forgejo_is_now_gerrit_native
|
||||
forgejo_knows_about_gerrit
|
||||
]);
|
||||
});
|
||||
|
||||
# General settings.
|
||||
|
|
@ -294,10 +315,14 @@ in
|
|||
services.mysql.enable = lib.mkForce true;
|
||||
services.mysql.package = lib.mkForce pkgs.mariadb;
|
||||
|
||||
systemd.tmpfiles.rules = let cfg = config.services.forgejo; in [
|
||||
"d '${cfg.customDir}/public/assets' 0750 ${cfg.user} ${cfg.group} - -"
|
||||
"d '${cfg.customDir}/public/assets/img' 0750 ${cfg.user} ${cfg.group} - -"
|
||||
"L+ '${cfg.customDir}/public/assets/img/emoji' - - - - ${emojo}"
|
||||
];
|
||||
systemd.tmpfiles.rules =
|
||||
let
|
||||
cfg = config.services.forgejo;
|
||||
in
|
||||
[
|
||||
"d '${cfg.customDir}/public/assets' 0750 ${cfg.user} ${cfg.group} - -"
|
||||
"d '${cfg.customDir}/public/assets/img' 0750 ${cfg.user} ${cfg.group} - -"
|
||||
"L+ '${cfg.customDir}/public/assets/img/emoji' - - - - ${emojo}"
|
||||
];
|
||||
};
|
||||
}
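Note: the forgejo hunk also shows how nixfmt lays out a long `inherit`: each inherited name goes on its own line and the closing semicolon stands alone. A sketch of the shape, assuming `lib` is in scope:

  inherit (lib)
    types
    mkEnableOption
    mkOption
    mkIf
    ;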
@ -1,24 +1,34 @@
# Configuration for the Gerrit autosubmit bot (//ops/gerrit-autosubmit)
{ depot, pkgs, config, lib, ... }:
{
depot,
pkgs,
config,
lib,
...
}:
let
cfg = config.services.depot.gerrit-autosubmit;
description = "gerrit-autosubmit - autosubmit bot for Gerrit";
mkStringOption = default: lib.mkOption {
inherit default;
type = lib.types.str;
};
mkStringOption =
default:
lib.mkOption {
inherit default;
type = lib.types.str;
};
in
{
options.services.depot.gerrit-autosubmit = {
enable = lib.mkEnableOption description;
gerritUrl = mkStringOption "https://cl.snix.dev";
secretsFile = with lib; mkOption {
description = "Path to a systemd EnvironmentFile containing secrets";
default = config.age.secretsDir + "/gerrit-autosubmit";
type = types.str;
};
secretsFile =
with lib;
mkOption {
description = "Path to a systemd EnvironmentFile containing secrets";
default = config.age.secretsDir + "/gerrit-autosubmit";
type = types.str;
};
};
config = lib.mkIf cfg.enable {
@ -1,4 +1,9 @@
{ config, depot, lib, ... }:
{
config,
depot,
lib,
...
}:
let
cfg = config.services.depot.gerrit-webhook-to-irccat;
@ -21,8 +26,9 @@ in
config = lib.mkIf cfg.enable {
systemd.services.gerrit-webhook-to-irccat = {
serviceConfig = {
ExecStart = "${depot.ops.gerrit-webhook-to-irccat}/bin/gerrit-webhook-to-irccat" +
" -irccat-url ${cfg.irccatUrl}";
ExecStart =
"${depot.ops.gerrit-webhook-to-irccat}/bin/gerrit-webhook-to-irccat"
+ " -irccat-url ${cfg.irccatUrl}";
Restart = "always";
RestartSec = 5;
User = "gerrit-webhook-to-irccat";
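Note: as the ExecStart hunk above shows, nixfmt places binary operators such as `+` and `++` at the start of the continuation line when an expression wraps, instead of leaving them dangling at the end. A sketch with hypothetical names (`pkg` and `value` are placeholders, not bindings from this commit):

  ExecStart =
    "${pkg}/bin/tool"
    + " -irccat-url ${value}";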
@ -1,11 +1,21 @@
{ config, lib, pkgs, modulesPath, ... }:
{
config,
lib,
pkgs,
modulesPath,
...
}:
let
cfg = config.infra.hardware.hetzner-cloud;
inherit (lib) types mkOption mkEnableOption mkIf;
inherit (lib)
types
mkOption
mkEnableOption
mkIf
;
in
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix") ];
imports = [ (modulesPath + "/profiles/qemu-guest.nix") ];
options.infra.hardware.hetzner-cloud = {
enable = mkEnableOption "the Hetzner Cloud hardware profile";
@ -40,7 +50,10 @@ in
}
];
dns = [ "2a01:4ff:ff00::add:1" "2a01:4ff:ff00::add:2" ];
dns = [
"2a01:4ff:ff00::add:1"
"2a01:4ff:ff00::add:2"
];
};
boot.loader.systemd-boot.enable = true;
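Note: the `dns` hunk illustrates the list rule visible throughout this change: nixfmt tends to expand multi-element lists to one element per line, while single-element lists (like the `imports` line above) stay inline. Sketch with placeholder addresses from the documentation range:

  dns = [
    "192.0.2.1"
    "192.0.2.2"
  ];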
@ -1,4 +1,9 @@
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.irccat;
@ -35,16 +40,18 @@ in
wants = [ "network.target" ];
serviceConfig = {
ExecStartPre = (pkgs.writeShellScript "merge-irccat-config" ''
if [ ! -f "$CREDENTIALS_DIRECTORY/secrets" ]; then
echo "irccat secrets file is missing"
exit 1
fi
ExecStartPre = (
pkgs.writeShellScript "merge-irccat-config" ''
if [ ! -f "$CREDENTIALS_DIRECTORY/secrets" ]; then
echo "irccat secrets file is missing"
exit 1
fi
# jq's * is the recursive merge operator
${pkgs.jq}/bin/jq -s '.[0] * .[1]' ${configJson} "$CREDENTIALS_DIRECTORY/secrets" \
> /var/lib/irccat/irccat.json
'');
# jq's * is the recursive merge operator
${pkgs.jq}/bin/jq -s '.[0] * .[1]' ${configJson} "$CREDENTIALS_DIRECTORY/secrets" \
> /var/lib/irccat/irccat.json
''
);
ExecStart = "${pkgs.irccat}/bin/irccat";
DynamicUser = true;
@ -57,4 +64,3 @@ in
};
};
}
@ -4,12 +4,18 @@
{
programs.ssh.knownHosts = {
public01 = {
hostNames = [ "public01.infra.snix.dev" "git.snix.dev" ];
hostNames = [
"public01.infra.snix.dev"
"git.snix.dev"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICzB7bqXWcv+sVokySvj1d74zRlVLSNqBw7/OY3c7QYd";
};
gerrit01 = {
hostNames = [ "gerrit01.infra.snix.dev" "cl.snix.dev" ];
hostNames = [
"gerrit01.infra.snix.dev"
"cl.snix.dev"
];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN+RCLAExaM5EC70UsCPMtDT1Cfa80Ux/vex95fLk9S4";
};
@ -1,5 +1,11 @@
# Gerrit configuration for the snix monorepo
{ depot, pkgs, config, lib, ... }:
{
depot,
pkgs,
config,
lib,
...
}:
let
cfg = config.services.gerrit;
@ -7,10 +13,12 @@ let
gerritPackage = depot.third_party.nix-gerrit.gerrit_3_12;
gerritPlugins = depot.third_party.nix-gerrit.plugins_3_12;
besadiiWithConfig = name: pkgs.writeShellScript "besadii-gerrit01" ''
export BESADII_CONFIG=/run/agenix/gerrit-besadii-config
exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
'';
besadiiWithConfig =
name:
pkgs.writeShellScript "besadii-gerrit01" ''
export BESADII_CONFIG=/run/agenix/gerrit-besadii-config
exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
'';
gerritHooks = pkgs.runCommand "gerrit-hooks" { } ''
mkdir -p $out
@ -1,4 +1,5 @@
{ config, depot, ... }: {
{ config, depot, ... }:
{
imports = [
depot.third_party.alertmanager-irc-relay.module
];
@ -10,7 +11,10 @@
irc_port = 6697;
irc_nickname = "silentfox";
irc_channels = [
{ name = "#snix"; password = "$CHANNEL_PASSWORD"; }
{
name = "#snix";
password = "$CHANNEL_PASSWORD";
}
];
};
environmentFiles = [
@ -1,15 +1,25 @@
{ depot
, config
, lib
, ...
{
depot,
config,
lib,
...
}:
let
cfg = config.infra.monitoring.alloy;
inherit (lib) mkEnableOption mkOption mkIf types mapAttrs' nameValuePair;
inherit (lib)
mkEnableOption
mkOption
mkIf
types
mapAttrs'
nameValuePair
;
in
{
options.infra.monitoring.alloy = {
enable = (mkEnableOption "Grafana Alloy") // { default = true; };
enable = (mkEnableOption "Grafana Alloy") // {
default = true;
};
exporters = mkOption {
description = ''
@ -19,12 +29,17 @@ in
internally, which ends up exported as `job` label
on all metrics of that exporter.
'';
type = types.attrsOf (types.submodule ({ config, name, ... }: {
options.port = mkOption {
description = "Exporter port";
type = types.int;
};
}));
type = types.attrsOf (
types.submodule (
{ config, name, ... }:
{
options.port = mkOption {
description = "Exporter port";
type = types.int;
};
}
)
);
default = { };
};
};
@ -70,8 +85,10 @@ in
}
}
'';
} // (mapAttrs'
(name: v: nameValuePair "alloy/scrape_${name}.alloy" {
}
// (mapAttrs' (
name: v:
nameValuePair "alloy/scrape_${name}.alloy" {
text = ''
prometheus.scrape "${name}" {
targets = [
@ -80,8 +97,8 @@ in
forward_to = [prometheus.remote_write.mimir.receiver]
}
'';
})
cfg.exporters);
}
) cfg.exporters);
systemd.services.alloy.serviceConfig = {
LoadCredential = [
@ -1,7 +1,8 @@
{ depot
, config
, lib
, ...
{
depot,
config,
lib,
...
}:
let
cfg = config.services.depot.grafana;
@ -1,6 +1,7 @@
{ config
, lib
, ...
{
config,
lib,
...
}:
let
cfg = config.services.depot.loki;
@ -38,7 +39,10 @@ in
ring = {
kvstore.store = "memberlist";
# TODO: Such a ugly hack.
instance_interface_names = [ "enp1s0" "lo" ];
instance_interface_names = [
"enp1s0"
"lo"
];
};
replication_factor = 1;
};
@ -1,7 +1,8 @@
{ config
, lib
, pkgs
, ...
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.depot.prometheus;
@ -9,15 +10,17 @@ let
mimirPort = config.services.mimir.configuration.server.http_listen_port;
alerts = pkgs.runCommand "mimir-alerts-checked"
{
src = ./alerts;
nativeBuildInputs = with pkgs; [ prometheus.cli ];
} ''
promtool check rules $src/*
mkdir $out
cp -R $src $out/anonymous/
'';
alerts =
pkgs.runCommand "mimir-alerts-checked"
{
src = ./alerts;
nativeBuildInputs = with pkgs; [ prometheus.cli ];
}
''
promtool check rules $src/*
mkdir $out
cp -R $src $out/anonymous/
'';
in
{
options.services.depot.prometheus.enable = mkEnableOption "Prometheus scraper";
@ -42,13 +45,34 @@ in
};
# TODO: Such a ugly hack.
distributor.ring.instance_interface_names = [ "enp1s0" "lo" ];
ingester.ring.instance_interface_names = [ "enp1s0" "lo" ];
frontend.instance_interface_names = [ "enp1s0" "lo" ];
query_scheduler.ring.instance_interface_names = [ "enp1s0" "lo" ];
ruler.ring.instance_interface_names = [ "enp1s0" "lo" ];
compactor.sharding_ring.instance_interface_names = [ "enp1s0" "lo" ];
store_gateway.sharding_ring.instance_interface_names = [ "enp1s0" "lo" ];
distributor.ring.instance_interface_names = [
"enp1s0"
"lo"
];
ingester.ring.instance_interface_names = [
"enp1s0"
"lo"
];
frontend.instance_interface_names = [
"enp1s0"
"lo"
];
query_scheduler.ring.instance_interface_names = [
"enp1s0"
"lo"
];
ruler.ring.instance_interface_names = [
"enp1s0"
"lo"
];
compactor.sharding_ring.instance_interface_names = [
"enp1s0"
"lo"
];
store_gateway.sharding_ring.instance_interface_names = [
"enp1s0"
"lo"
];
memberlist = {
advertise_addr = "127.0.0.1";
@ -91,11 +115,13 @@ in
receivers = [
{
name = "irc";
webhook_configs = [{
# Mimir can't expand environment variables in external config files,
# so work around it.
url_file = "/run/credentials/mimir.service/webhook-url";
}];
webhook_configs = [
{
# Mimir can't expand environment variables in external config files,
# so work around it.
url_file = "/run/credentials/mimir.service/webhook-url";
}
];
}
];
};
@ -1,6 +1,7 @@
{ config
, lib
, ...
{
config,
lib,
...
}:
let
cfg = config.services.depot.tempo;
@ -48,11 +49,14 @@ in
};
};
systemd.services.tempo.serviceConfig.EnvironmentFile = [ config.age.secrets.tempo-environment.path ];
systemd.services.tempo.serviceConfig.EnvironmentFile = [
config.age.secrets.tempo-environment.path
];
services.nginx = {
upstreams.tempo = {
servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" = { };
servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" =
{ };
extraConfig = "keepalive 16;";
};
@ -5,14 +5,21 @@
# - restic's cache lives in /var/backup/restic/cache
# - repository password lives in `config.age.secrets.restic-repository-password.path`
# - object storage credentials in `config.age.secrets.restic-bucket-credentials.path`
{ config, lib, pkgs, ... }:
{
config,
lib,
pkgs,
...
}:
let
cfg = config.services.depot.restic;
mkStringOption = default: lib.mkOption {
inherit default;
type = lib.types.str;
};
mkStringOption =
default:
lib.mkOption {
inherit default;
type = lib.types.str;
};
in
{
options.services.depot.restic = {
@ -23,16 +30,20 @@ in
repository = mkStringOption config.networking.hostName;
interval = mkStringOption "hourly";
paths = with lib; mkOption {
description = "Directories that should be backed up";
type = types.listOf types.str;
};
paths =
with lib;
mkOption {
description = "Directories that should be backed up";
type = types.listOf types.str;
};
exclude = with lib; mkOption {
description = "Files that should be excluded from backups";
type = types.listOf types.str;
default = [ ];
};
exclude =
with lib;
mkOption {
description = "Files that should be excluded from backups";
type = types.listOf types.str;
default = [ ];
};
};
config = lib.mkIf cfg.enable {
@ -51,8 +62,7 @@ in
RESTIC_PASSWORD_FILE = config.age.secrets.restic-repository-password.path;
RESTIC_CACHE_DIR = "/var/backup/restic/cache";
RESTIC_EXCLUDE_FILE =
builtins.toFile "exclude-files" (lib.concatStringsSep "\n" cfg.exclude);
RESTIC_EXCLUDE_FILE = builtins.toFile "exclude-files" (lib.concatStringsSep "\n" cfg.exclude);
};
};
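Note: the restic options hunks show nixfmt's layout for `with`-scoped bindings: `with lib;` moves to its own line and the `mkOption` call is indented under the binding instead of trailing it. A sketch of the shape only, taken from the hunk above:

  paths =
    with lib;
    mkOption {
      description = "Directories that should be backed up";
      type = types.listOf types.str;
    };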
@ -1,9 +1,10 @@
# Configuration for the snix buildkite agents.
{ config
, depot
, pkgs
, lib
, ...
{
config,
depot,
pkgs,
lib,
...
}:
let
@ -50,61 +51,56 @@ in
config = lib.mkIf cfg.enable {
# Run the Buildkite agents using the default upstream module.
services.buildkite-agents = builtins.listToAttrs (
map
(n: rec {
name = "${hostname}-${toString n}";
value =
{
inherit name;
enable = true;
tokenPath = config.age.secretsDir + "/buildkite-agent-token";
privateSshKeyPath = config.age.secretsDir + "/buildkite-private-key";
hooks.post-command = "${buildkiteHooks}/bin/post-command";
tags.queue = "default";
hooks.environment = ''
export PATH=$PATH:/run/wrappers/bin
'';
map (n: rec {
name = "${hostname}-${toString n}";
value = {
inherit name;
enable = true;
tokenPath = config.age.secretsDir + "/buildkite-agent-token";
privateSshKeyPath = config.age.secretsDir + "/buildkite-private-key";
hooks.post-command = "${buildkiteHooks}/bin/post-command";
tags.queue = "default";
hooks.environment = ''
export PATH=$PATH:/run/wrappers/bin
'';
tags = {
# all agents support small jobs
small = "true";
inherit hostname;
large = if n <= cfg.largeSlots then "true" else "false";
};
tags = {
# all agents support small jobs
small = "true";
inherit hostname;
large = if n <= cfg.largeSlots then "true" else "false";
};
runtimePackages = with pkgs; [
bash
coreutils
credentialHelper
curl
git
gnutar
gzip
jq
nix
];
};
})
agents
runtimePackages = with pkgs; [
bash
coreutils
credentialHelper
curl
git
gnutar
gzip
jq
nix
];
};
}) agents
);
# Set up a group for all Buildkite agent users
users = {
groups.buildkite-agents = { };
users = builtins.listToAttrs (
map
(n: rec {
name = "buildkite-agent-${hostname}-${toString n}";
value = {
isSystemUser = true;
group = lib.mkForce "buildkite-agents";
extraGroups = [
name
"docker"
];
};
})
agents
map (n: rec {
name = "buildkite-agent-${hostname}-${toString n}";
value = {
isSystemUser = true;
group = lib.mkForce "buildkite-agents";
extraGroups = [
name
"docker"
];
};
}) agents
);
};
};
@ -2,7 +2,10 @@
{
config = {
networking.firewall.allowedTCPPorts = [ 80 443 ];
networking.firewall.allowedTCPPorts = [
80
443
];
security.acme = {
acceptTerms = true;
@ -6,7 +6,8 @@
services.nginx = {
upstreams.tempo = {
servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" = { };
servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" =
{ };
};
virtualHosts."tempo.snix.dev" = {
@ -1,42 +1,56 @@
# Helper functions for instantiating depot-compatible NixOS machines.
{ depot, lib, pkgs, ... }@args:
{
depot,
lib,
pkgs,
...
}@args:
let inherit (lib) findFirst;
in rec {
let
inherit (lib) findFirst;
in
rec {
# This provides our standard set of arguments to all NixOS modules.
baseModule = { ... }: {
nix.nixPath =
let
# Due to nixpkgsBisectPath, pkgs.path is not always in the nix store
nixpkgsStorePath =
if lib.hasPrefix builtins.storeDir (toString pkgs.path)
then builtins.storePath pkgs.path # nixpkgs is already in the store
else pkgs.path; # we need to dump nixpkgs to the store either way
in
[
("nixos=" + nixpkgsStorePath)
("nixpkgs=" + nixpkgsStorePath)
];
};
nixosFor = configuration: (depot.third_party.nixos {
configuration = { ... }: {
imports = [
baseModule
configuration
];
baseModule =
{ ... }:
{
nix.nixPath =
let
# Due to nixpkgsBisectPath, pkgs.path is not always in the nix store
nixpkgsStorePath =
if lib.hasPrefix builtins.storeDir (toString pkgs.path) then
builtins.storePath pkgs.path # nixpkgs is already in the store
else
pkgs.path; # we need to dump nixpkgs to the store either way
in
[
("nixos=" + nixpkgsStorePath)
("nixpkgs=" + nixpkgsStorePath)
];
};
specialArgs = {
inherit (args) depot;
};
});
nixosFor =
configuration:
(depot.third_party.nixos {
configuration =
{ ... }:
{
imports = [
baseModule
configuration
];
};
findSystem = hostname:
(findFirst
(system: system.config.networking.hostName == hostname)
(throw "${hostname} is not a known NixOS host")
(map nixosFor depot.ops.machines.all-systems));
specialArgs = {
inherit (args) depot;
};
});
findSystem =
hostname:
(findFirst (
system: system.config.networking.hostName == hostname
) (throw "${hostname} is not a known NixOS host") (map nixosFor depot.ops.machines.all-systems));
# Systems that should be built in CI
archivistEC2System = nixosFor depot.ops.machines.archivist-ec2;
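Note: the nixpkgsStorePath hunk demonstrates the new conditional layout: `then` stays at the end of the condition line and both branches are indented, instead of `if`/`then`/`else` each starting a line. A sketch restating the hunk's shape:

  nixpkgsStorePath =
    if lib.hasPrefix builtins.storeDir (toString pkgs.path) then
      builtins.storePath pkgs.path # nixpkgs is already in the store
    else
      pkgs.path; # we need to dump nixpkgs to the store either way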
@ -1,6 +1,11 @@
# This file configures the primary build pipeline used for the
# top-level list of depot targets.
{ depot, pkgs, externalArgs, ... }:
{
depot,
pkgs,
externalArgs,
...
}:
let
pipeline = depot.nix.buildkite.mkPipeline {
@ -8,9 +13,10 @@ let
drvTargets = depot.ci.targets;
parentTargetMap =
if (externalArgs ? parentTargetMap)
then builtins.fromJSON (builtins.readFile externalArgs.parentTargetMap)
else { };
if (externalArgs ? parentTargetMap) then
builtins.fromJSON (builtins.readFile externalArgs.parentTargetMap)
else
{ };
postBuildSteps = [
# After successful builds, create a gcroot for builds on canon.
@ -1,3 +1,5 @@
args:
let mkSecrets = import ./mkSecrets.nix args; in
let
mkSecrets = import ./mkSecrets.nix args;
in
mkSecrets ./. (import ./secrets.nix) // { inherit mkSecrets; }
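Note: one-line `let ... in` expressions are always broken up, as above; nixfmt gives `let`, each binding, and `in` their own lines. The resulting shape, restated as a sketch:

  let
    mkSecrets = import ./mkSecrets.nix args;
  in
  mkSecrets ./. (import ./secrets.nix) // { inherit mkSecrets; }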
@ -12,10 +12,12 @@ let
agePubkey = types.typedef "age pubkey" (s: isString s && hasPrefix "age" s);
agenixSecret = types.struct "agenixSecret" {
publicKeys = types.listOf (types.union [
sshPubkey
agePubkey
]);
publicKeys = types.listOf (
types.union [
sshPubkey
agePubkey
]
);
};
in
@ -20,13 +20,21 @@ let
superadmins = raito ++ edef ++ flokli;
allDefault.publicKeys = superadmins ++ [ gerrit01 public01 build01 meta01 ];
allDefault.publicKeys = superadmins ++ [
gerrit01
public01
build01
meta01
];
terraform.publicKeys = superadmins;
gerrit01Default.publicKeys = superadmins ++ [ gerrit01 ];
public01Default.publicKeys = superadmins ++ [ public01 ];
build01Default.publicKeys = superadmins ++ [ build01 ];
meta01Default.publicKeys = superadmins ++ [ meta01 ];
ciDefault.publicKeys = superadmins ++ [ gerrit01 build01 ];
ciDefault.publicKeys = superadmins ++ [
gerrit01
build01
];
in
{
"grafana-agent-password.age" = allDefault;
@ -1,4 +1,5 @@
{ ... }: {
{ ... }:
{
flokli = [
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPTVTXOutUZZjXLB0lUSgeKcSY/8mxKkC0ingGK1whD2 flokli"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE6a15p9HLSrawsMTd2UQGAiM7r7VdyrfSRyzwRYTgWT flokli@m2air"
10721 snix/Cargo.nix
File diff suppressed because it is too large
@ -16,22 +16,25 @@ rec {
# A kernel with virtiofs support baked in
# TODO: make a smaller kernel, we don't need a gazillion filesystems and
# device drivers in it.
kernel = pkgs.buildLinux ({ } // {
inherit (pkgs.linuxPackages_latest.kernel) src version modDirVersion;
autoModules = false;
kernelPreferBuiltin = true;
ignoreConfigErrors = true;
kernelPatches = [ ];
structuredExtraConfig = with pkgs.lib.kernel; {
FUSE_FS = option yes;
DAX_DRIVER = option yes;
DAX = option yes;
FS_DAX = option yes;
VIRTIO_FS = option yes;
VIRTIO = option yes;
ZONE_DEVICE = option yes;
};
});
kernel = pkgs.buildLinux (
{ }
// {
inherit (pkgs.linuxPackages_latest.kernel) src version modDirVersion;
autoModules = false;
kernelPreferBuiltin = true;
ignoreConfigErrors = true;
kernelPatches = [ ];
structuredExtraConfig = with pkgs.lib.kernel; {
FUSE_FS = option yes;
DAX_DRIVER = option yes;
DAX = option yes;
FS_DAX = option yes;
VIRTIO_FS = option yes;
VIRTIO = option yes;
ZONE_DEVICE = option yes;
};
}
);
# A build framework for minimal initrds
uroot = pkgs.buildGoModule rec {
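Note: the kernel hunk above shows how nixfmt reflows a call whose argument is a merged attribute set: the opening parenthesis ends the call line, the `//` operator leads its own line, and the closing `);` stands alone. A reduced sketch of that shape:

  kernel = pkgs.buildLinux (
    { }
    // {
      autoModules = false;
    }
  );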
@ -1,4 +1,9 @@
{ depot, pkgs, lib, ... }:
{
depot,
pkgs,
lib,
...
}:
let
# Seed a snix-store with the specified path, then start a VM with the
@ -6,46 +11,48 @@ let
# Allows customizing the cmdline, which can be used to list files,
# or specify what init should be booted.
mkBootTest =
{ blobServiceAddr ? "memory://"
, directoryServiceAddr ? "memory://"
, pathInfoServiceAddr ? "memory://"
{
blobServiceAddr ? "memory://",
directoryServiceAddr ? "memory://",
pathInfoServiceAddr ? "memory://",
# The path to import.
, path
path,
# Whether the path should be imported as a closure.
# If false, importPathName must be specified.
, isClosure ? false
isClosure ? false,
# Whether to use nar-bridge to upload, rather than snix-store copy.
# using nar-bridge currently is "slower", as the `pkgs.mkBinaryCache` build
# takes quite some time.
, useNarBridge ? false
useNarBridge ? false,
, importPathName ? null
importPathName ? null,
# Commands to run before starting the snix-daemon. Useful to provide
# auxillary mock services.
, preStart ? ""
preStart ? "",
# The cmdline to pass to the VM.
# Defaults to snix.find, which lists all files in the store.
, vmCmdline ? "snix.find"
vmCmdline ? "snix.find",
# The string we expect to find in the VM output.
# Defaults the value of `path` (the store path we upload).
, assertVMOutput ? path
assertVMOutput ? path,
}:
assert isClosure -> importPathName == null;
assert (!isClosure) -> importPathName != null;
assert isClosure -> importPathName == null;
assert (!isClosure) -> importPathName != null;
pkgs.stdenv.mkDerivation ({
pkgs.stdenv.mkDerivation (
{
name = "run-vm";
nativeBuildInputs = [
depot.snix.store
depot.snix.boot.runVM
] ++ lib.optionals (isClosure && useNarBridge) [
]
++ lib.optionals (isClosure && useNarBridge) [
depot.snix.nar-bridge
pkgs.curl
pkgs.rush-parallel
@ -76,17 +83,20 @@ let
export BLOB_SERVICE_ADDR=grpc+unix://$PWD/snix-store.sock
export DIRECTORY_SERVICE_ADDR=grpc+unix://$PWD/snix-store.sock
export PATH_INFO_SERVICE_ADDR=grpc+unix://$PWD/snix-store.sock
'' + lib.optionalString (!isClosure) ''
''
+ lib.optionalString (!isClosure) ''
echo "Importing ${path} into snix-store with name ${importPathName}…"
cp -R ${path} ${importPathName}
outpath=$(snix-store import ${importPathName})
echo "imported to $outpath"
'' + lib.optionalString (isClosure && !useNarBridge) ''
''
+ lib.optionalString (isClosure && !useNarBridge) ''
echo "Copying closure ${path}…"
# This picks up the `closure` key in `$NIX_ATTRS_JSON_FILE` automatically.
snix-store --otlp=false copy
'' + lib.optionalString (isClosure && useNarBridge) ''
''
+ lib.optionalString (isClosure && useNarBridge) ''
echo "Starting nar-bridge…"
nar-bridge \
--otlp=false \
@ -122,7 +132,8 @@ let
# In the future, we might want to make this behaviour configurable,
# and disable checking here, to keep the logic simple.
ls -d $to_upload/*.narinfo | rush 'curl -s -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/$(basename {}) < {}'
'' + ''
''
+ ''
# Invoke a VM using snix as the backing store, ensure the outpath appears in its listing.
echo "Starting VM…"
@ -138,113 +149,130 @@ let
meta.ci.buildkiteExtraStepArgs = {
retry.automatic = true;
};
} // lib.optionalAttrs (isClosure && !useNarBridge) {
}
// lib.optionalAttrs (isClosure && !useNarBridge) {
__structuredAttrs = true;
exportReferencesGraph.closure = [ path ];
});
}
);
testSystem = (pkgs.nixos {
# Set some options necessary to evaluate.
boot.loader.systemd-boot.enable = true;
# TODO: figure out how to disable this without causing eval to fail
fileSystems."/" = {
device = "/dev/root";
fsType = "tmpfs";
};
testSystem =
(pkgs.nixos {
# Set some options necessary to evaluate.
boot.loader.systemd-boot.enable = true;
# TODO: figure out how to disable this without causing eval to fail
fileSystems."/" = {
device = "/dev/root";
fsType = "tmpfs";
};
services.getty.helpLine = "Onwards and upwards.";
systemd.services.do-shutdown = {
after = [ "getty.target" ];
description = "Shut down again";
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "oneshot";
script = "/run/current-system/sw/bin/systemctl poweroff --when=+10s";
};
services.getty.helpLine = "Onwards and upwards.";
systemd.services.do-shutdown = {
after = [ "getty.target" ];
description = "Shut down again";
wantedBy = [ "multi-user.target" ];
serviceConfig.Type = "oneshot";
script = "/run/current-system/sw/bin/systemctl poweroff --when=+10s";
};
# Don't warn about stateVersion.
system.stateVersion = "24.05";
# Don't warn about stateVersion.
system.stateVersion = "24.05";
# Speed-up evaluation and building.
documentation.enable = lib.mkForce false;
}).config.system.build.toplevel;
# Speed-up evaluation and building.
documentation.enable = lib.mkForce false;
}).config.system.build.toplevel;
in
depot.nix.readTree.drvTargets {
docs-memory = (mkBootTest {
path = ../../docs;
importPathName = "docs";
});
docs-persistent = (mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
directoryServiceAddr = "redb:///build/directories.redb";
pathInfoServiceAddr = "redb:///build/pathinfo.redb";
path = ../../docs;
importPathName = "docs";
});
docs-memory = (
mkBootTest {
path = ../../docs;
importPathName = "docs";
}
);
docs-persistent = (
mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
directoryServiceAddr = "redb:///build/directories.redb";
pathInfoServiceAddr = "redb:///build/pathinfo.redb";
path = ../../docs;
importPathName = "docs";
}
);
closure-snix = (mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
path = depot.snix.store;
isClosure = true;
});
closure-snix = (
mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
path = depot.snix.store;
isClosure = true;
}
);
closure-nixos = (mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
pathInfoServiceAddr = "redb:///build/pathinfo.redb";
directoryServiceAddr = "redb:///build/directories.redb";
path = testSystem;
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
});
closure-nixos = (
mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
pathInfoServiceAddr = "redb:///build/pathinfo.redb";
directoryServiceAddr = "redb:///build/directories.redb";
path = testSystem;
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
}
);
closure-nixos-bigtable = (mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
directoryServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=directories&family_name=cf1";
pathInfoServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=pathinfos&family_name=cf1";
path = testSystem;
useNarBridge = true;
preStart = ''
${pkgs.cbtemulator}/bin/cbtemulator -address $PWD/cbtemulator.sock &
timeout 22 sh -c 'until [ -e $PWD/cbtemulator.sock ]; do sleep 1; done'
closure-nixos-bigtable = (
mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
directoryServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=directories&family_name=cf1";
pathInfoServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=pathinfos&family_name=cf1";
path = testSystem;
useNarBridge = true;
preStart = ''
${pkgs.cbtemulator}/bin/cbtemulator -address $PWD/cbtemulator.sock &
timeout 22 sh -c 'until [ -e $PWD/cbtemulator.sock ]; do sleep 1; done'
export BIGTABLE_EMULATOR_HOST=unix://$PWD/cbtemulator.sock
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable directories
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily directories cf1
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable pathinfos
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily pathinfos cf1
'';
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
});
export BIGTABLE_EMULATOR_HOST=unix://$PWD/cbtemulator.sock
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable directories
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily directories cf1
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable pathinfos
${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily pathinfos cf1
'';
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
}
);
closure-nixos-s3 = (mkBootTest {
blobServiceAddr = "objectstore+s3://mybucket/blobs?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&aws_endpoint_url=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
# we cannot use s3 here yet without any caching layer, as we don't allow "deeper" access to directories (non-root nodes)
# directoryServiceAddr = "objectstore+s3://mybucket/directories?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&endpoint=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
directoryServiceAddr = "memory://";
pathInfoServiceAddr = "memory://";
path = testSystem;
useNarBridge = true;
preStart = ''
MINIO_ACCESS_KEY=myaccesskey MINIO_SECRET_KEY=supersecret MINIO_ADDRESS=127.0.0.1:9000 ${pkgs.minio}/bin/minio server $(mktemp -d) &
timeout 22 sh -c 'until ${pkgs.netcat}/bin/nc -z $0 $1; do sleep 1; done' localhost 9000
mc_config_dir=$(mktemp -d)
${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir alias set 'myminio' 'http://127.0.0.1:9000' 'myaccesskey' 'supersecret'
${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir mb myminio/mybucket
'';
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
});
closure-nixos-s3 = (
mkBootTest {
blobServiceAddr = "objectstore+s3://mybucket/blobs?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&aws_endpoint_url=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
# we cannot use s3 here yet without any caching layer, as we don't allow "deeper" access to directories (non-root nodes)
# directoryServiceAddr = "objectstore+s3://mybucket/directories?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&endpoint=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
directoryServiceAddr = "memory://";
pathInfoServiceAddr = "memory://";
path = testSystem;
useNarBridge = true;
preStart = ''
MINIO_ACCESS_KEY=myaccesskey MINIO_SECRET_KEY=supersecret MINIO_ADDRESS=127.0.0.1:9000 ${pkgs.minio}/bin/minio server $(mktemp -d) &
timeout 22 sh -c 'until ${pkgs.netcat}/bin/nc -z $0 $1; do sleep 1; done' localhost 9000
mc_config_dir=$(mktemp -d)
${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir alias set 'myminio' 'http://127.0.0.1:9000' 'myaccesskey' 'supersecret'
${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir mb myminio/mybucket
'';
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
}
);
closure-nixos-nar-bridge = (mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
path = testSystem;
useNarBridge = true;
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
});
closure-nixos-nar-bridge = (
mkBootTest {
blobServiceAddr = "objectstore+file:///build/blobs";
path = testSystem;
useNarBridge = true;
isClosure = true;
vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
assertVMOutput = "Onwards and upwards.";
}
);
}
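Note: the run-vm derivation above applies the same parenthesization to `mkDerivation` when attributes are merged conditionally. A sketch of the shape only, with `useStructuredAttrs` as a placeholder condition that is not a binding from this commit:

  pkgs.stdenv.mkDerivation (
    {
      name = "run-vm";
    }
    // lib.optionalAttrs useStructuredAttrs {
      __structuredAttrs = true;
    }
  )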
@ -9,25 +9,26 @@ in
name = "build-go";
src = depot.third_party.gitignoreSource ./.;
vendorHash = "sha256:1j652an8ir1ybyj21znaipsir7mbs3v972mw27ppsjz9dgh2crx6";
}).overrideAttrs (_: {
meta.ci.extraSteps = {
check = {
label = ":water_buffalo: ensure generated protobuf files match";
needsOutput = true;
command = pkgs.writeShellScript "pb-go-check" ''
${regenerate}
if [[ -n "$(git status --porcelain -unormal)" ]]; then
echo "-----------------------------"
echo ".pb.go files need to be updated, mg run //snix/build-go/regenerate"
echo "-----------------------------"
git status -unormal
exit 1
fi
'';
alwaysRun = true;
}).overrideAttrs
(_: {
meta.ci.extraSteps = {
check = {
label = ":water_buffalo: ensure generated protobuf files match";
needsOutput = true;
command = pkgs.writeShellScript "pb-go-check" ''
${regenerate}
if [[ -n "$(git status --porcelain -unormal)" ]]; then
echo "-----------------------------"
echo ".pb.go files need to be updated, mg run //snix/build-go/regenerate"
echo "-----------------------------"
git status -unormal
exit 1
fi
'';
alwaysRun = true;
};
};
};
# https://git.snix.dev/snix/snix/issues/60
meta.ci.skip = true;
passthru.regenerate = regenerate;
})
# https://git.snix.dev/snix/snix/issues/60
meta.ci.skip = true;
passthru.regenerate = regenerate;
})
@ -2,10 +2,15 @@
(depot.snix.crates.workspaceMembers.snix-build.build.override {
runTests = true;
}).overrideAttrs (old: rec {
meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (lib.attrNames passthru);
passthru = old.passthru // (depot.snix.utils.mkFeaturePowerset {
inherit (old) crateName;
features = [ "tonic-reflection" ];
});
})
}).overrideAttrs
(old: rec {
meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (
lib.attrNames passthru
);
passthru =
old.passthru
// (depot.snix.utils.mkFeaturePowerset {
inherit (old) crateName;
features = [ "tonic-reflection" ];
});
})
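Note: when the receiver of `.overrideAttrs` is itself a multi-line parenthesized call, nixfmt now drops the override function onto the following line, as in the two hunks above. A sketch with a hypothetical derivation `drv` (not a binding from this commit):

  (drv.override {
    runTests = true;
  }).overrideAttrs
    (old: {
      passthru = old.passthru // { };
    })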
@ -1,4 +1,9 @@
{ depot, pkgs, lib, ... }:
{
depot,
pkgs,
lib,
...
}:
let
protos = lib.sourceByRegex depot.path.origSrc [
"buf.yaml"
@ -9,23 +9,24 @@ in
name = "castore-go";
src = depot.third_party.gitignoreSource ./.;
vendorHash = "sha256:03wwzk7irlb05y0zjfmpp5c2dxhcpnmfc169g05sn6d3ni07aly8";
}).overrideAttrs (_: {
meta.ci.extraSteps = {
check = {
label = ":water_buffalo: ensure generated protobuf files match";
needsOutput = true;
command = pkgs.writeShellScript "pb-go-check" ''
${regenerate}
if [[ -n "$(git status --porcelain -unormal)" ]]; then
echo "-----------------------------"
echo ".pb.go files need to be updated, mg run //snix/castore-go/regenerate"
echo "-----------------------------"
git status -unormal
exit 1
fi
'';
alwaysRun = true;
}).overrideAttrs
(_: {
meta.ci.extraSteps = {
check = {
label = ":water_buffalo: ensure generated protobuf files match";
needsOutput = true;
command = pkgs.writeShellScript "pb-go-check" ''
${regenerate}
if [[ -n "$(git status --porcelain -unormal)" ]]; then
echo "-----------------------------"
echo ".pb.go files need to be updated, mg run //snix/castore-go/regenerate"
echo "-----------------------------"
git status -unormal
exit 1
fi
'';
alwaysRun = true;
};
};
};
passthru.regenerate = regenerate;
})
passthru.regenerate = regenerate;
})
@ -1,28 +1,51 @@
{ depot, pkgs, lib, ... }:
{
depot,
pkgs,
lib,
...
}:
(depot.snix.crates.workspaceMembers.snix-castore.build.override {
runTests = true;
testPreRun = ''
export SSL_CERT_FILE=/dev/null
'';
}).overrideAttrs (old: rec {
meta.ci.targets = [ "integration-tests" ] ++ lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (lib.attrNames passthru);
passthru = (depot.snix.utils.mkFeaturePowerset {
inherit (old) crateName;
features = ([ "cloud" "fuse" "tonic-reflection" "xp-composition-url-refs" ]
# virtiofs feature currently fails to build on Darwin
++ lib.optional pkgs.stdenv.isLinux "virtiofs");
override.testPreRun = ''
export SSL_CERT_FILE=/dev/null
'';
}) // {
integration-tests = depot.snix.crates.workspaceMembers.${old.crateName}.build.override (old: {
runTests = true;
testPreRun = ''
export SSL_CERT_FILE=/dev/null
export PATH="$PATH:${pkgs.lib.makeBinPath [ pkgs.cbtemulator pkgs.google-cloud-bigtable-tool ]}"
'';
features = old.features ++ [ "integration" ];
});
};
})
}).overrideAttrs
(old: rec {
meta.ci.targets = [
"integration-tests"
]
++ lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (lib.attrNames passthru);
passthru =
(depot.snix.utils.mkFeaturePowerset {
inherit (old) crateName;
features = (
[
"cloud"
"fuse"
"tonic-reflection"
"xp-composition-url-refs"
]
# virtiofs feature currently fails to build on Darwin
++ lib.optional pkgs.stdenv.isLinux "virtiofs"
);
override.testPreRun = ''
export SSL_CERT_FILE=/dev/null
'';
})
// {
integration-tests = depot.snix.crates.workspaceMembers.${old.crateName}.build.override (old: {
runTests = true;
testPreRun = ''
export SSL_CERT_FILE=/dev/null
export PATH="$PATH:${
pkgs.lib.makeBinPath [
pkgs.cbtemulator
pkgs.google-cloud-bigtable-tool
]
}"
'';
features = old.features ++ [ "integration" ];
});
};
})
@ -1,4 +1,9 @@
{ depot, pkgs, lib, ... }:
{
depot,
pkgs,
lib,
...
}:
let
protos = lib.sourceByRegex depot.path.origSrc [
"buf.yaml"
|
||||
|
|
|
|||
|
|
@ -1,111 +1,176 @@
|
|||
{ depot, pkgs, lib, ... }:
|
||||
{
|
||||
depot,
|
||||
pkgs,
|
||||
lib,
|
||||
...
|
||||
}:
|
||||
|
||||
(depot.snix.crates.workspaceMembers.snix-cli.build.override {
|
||||
runTests = true;
|
||||
testPreRun = ''
|
||||
export SSL_CERT_FILE=/dev/null
|
||||
'';
|
||||
}).overrideAttrs (finalAttrs: previousAttrs:
|
||||
}).overrideAttrs
|
||||
(
|
||||
finalAttrs: previousAttrs:
|
||||
|
||||
let
|
||||
snix-cli = finalAttrs.finalPackage;
|
||||
let
|
||||
snix-cli = finalAttrs.finalPackage;
|
||||
|
||||
benchmark-gnutime-format-string =
|
||||
description:
|
||||
"Benchmark: " +
|
||||
(builtins.toJSON {
|
||||
"${description}" = {
|
||||
kbytes = "%M";
|
||||
system = "%S";
|
||||
user = "%U";
|
||||
benchmark-gnutime-format-string =
|
||||
description:
|
||||
"Benchmark: "
|
||||
+ (builtins.toJSON {
|
||||
"${description}" = {
|
||||
kbytes = "%M";
|
||||
system = "%S";
|
||||
user = "%U";
|
||||
};
|
||||
});
|
||||
|
||||
# You can run the benchmark with a simple `nix run`, like:
|
||||
#
|
||||
# nix-build -A snix.cli.meta.ci.extraSteps.benchmark-nixpkgs-cross-hello-outpath
|
||||
#
|
||||
# TODO(amjoseph): store these results someplace more durable, like git trailers
|
||||
#
|
||||
mkExprBenchmark =
|
||||
{ expr, description }:
|
||||
let
|
||||
name = "snix-cli-benchmark-${description}";
|
||||
in
|
||||
(pkgs.runCommand name { } ''
|
||||
export SSL_CERT_FILE=/dev/null
|
||||
${lib.escapeShellArgs [
|
||||
"${pkgs.time}/bin/time"
|
||||
"--format"
|
||||
"${benchmark-gnutime-format-string description}"
|
||||
"${snix-cli}/bin/snix"
|
||||
"--no-warnings"
|
||||
"-E"
|
||||
expr
|
||||
]}
|
||||
touch $out
|
||||
'');
|
||||
|
||||
mkNixpkgsBenchmark =
|
||||
attrpath:
|
||||
mkExprBenchmark {
|
||||
description = builtins.replaceStrings [ ".drv" ] [ "-drv" ] attrpath;
|
||||
expr = "(import ${pkgs.path} {}).${attrpath}";
|
||||
};
|
||||
|
||||
# Constructs a Derivation invoking snix-cli inside a build, ensures the
|
||||
# calculated snix output path matches what's passed in externally.
|
||||
mkNixpkgsEvalTest =
|
||||
{
|
||||
attrPath ? null, # An attribute that must already be accessible from `pkgs`. Should evaluate to a store path.
|
||||
expr ? null, # A Nix expression that should evaluate to a store path.
|
||||
expectedPath, # The expected store path that should match one of the above.
|
||||
}:
|
||||
assert lib.assertMsg (attrPath != null || expr != null) "Either 'attrPath' or 'expr' must be set.";
|
||||
let
|
||||
name = "snix-eval-test-${
|
||||
builtins.replaceStrings [ ".drv" ] [ "-drv" ] (if expr != null then "custom-expr" else attrPath)
|
||||
}";
|
||||
in
|
||||
(pkgs.runCommand name { } ''
|
||||
export SSL_CERT_FILE=/dev/null
|
||||
SNIX_OUTPUT=$(${snix-cli}/bin/snix --no-warnings -E '${
|
||||
if expr != null then expr else "(import ${pkgs.path} {}).${attrPath}"
|
||||
}')
|
||||
EXPECTED='${
|
||||
# the verbatim expected Snix output:
|
||||
"=> \"${builtins.unsafeDiscardStringContext expectedPath}\" :: string"
|
||||
}'
|
||||
|
||||
echo "Snix output: ''${SNIX_OUTPUT}"
|
||||
if [ "$SNIX_OUTPUT" != "$EXPECTED" ]; then
|
||||
echo "Correct would have been ''${EXPECTED}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Output was correct."
|
||||
touch $out
|
||||
'');
|
||||
|
||||
benchmarks = {
|
||||
benchmark-hello = (mkNixpkgsBenchmark "hello.outPath");
|
||||
benchmark-cross-hello = (mkNixpkgsBenchmark "pkgsCross.aarch64-multiplatform.hello.outPath");
|
||||
benchmark-firefox = (mkNixpkgsBenchmark "firefox.outPath");
|
||||
benchmark-cross-firefox = (mkNixpkgsBenchmark "pkgsCross.aarch64-multiplatform.firefox.outPath");
|
||||
# Example used for benchmarking LightSpan::Delayed in commit bf286a54bc2ac5eeb78c3d5c5ae66e9af24d74d4
|
||||
benchmark-nixpkgs-attrnames = (
|
||||
mkExprBenchmark {
|
||||
expr = "builtins.length (builtins.attrNames (import ${pkgs.path} {}))";
|
||||
description = "nixpkgs-attrnames";
|
||||
}
|
||||
);
|
||||
};
|
||||
});
|
||||
|
||||
# You can run the benchmark with a simple `nix run`, like:
|
||||
#
|
||||
# nix-build -A snix.cli.meta.ci.extraSteps.benchmark-nixpkgs-cross-hello-outpath
|
||||
#
|
||||
# TODO(amjoseph): store these results someplace more durable, like git trailers
#
mkExprBenchmark = { expr, description }:
  let name = "snix-cli-benchmark-${description}"; in
  (pkgs.runCommand name { } ''
    export SSL_CERT_FILE=/dev/null
    ${lib.escapeShellArgs [
      "${pkgs.time}/bin/time"
      "--format" "${benchmark-gnutime-format-string description}"
      "${snix-cli}/bin/snix"
      "--no-warnings"
      "-E" expr
    ]}
    touch $out
  '');
evalTests = {
  eval-nixpkgs-stdenv-drvpath = (
    mkNixpkgsEvalTest {
      attrPath = "stdenv.drvPath";
      expectedPath = pkgs.stdenv.drvPath;
    }
  );
  eval-nixpkgs-stdenv-outpath = (
    mkNixpkgsEvalTest {
      attrPath = "stdenv.outPath";
      expectedPath = pkgs.stdenv.outPath;
    }
  );
  eval-nixpkgs-hello-outpath = (
    mkNixpkgsEvalTest {
      attrPath = "hello.outPath";
      expectedPath = pkgs.hello.outPath;
    }
  );
  eval-nixpkgs-firefox-outpath = (
    mkNixpkgsEvalTest {
      attrPath = "firefox.outPath";
      expectedPath = pkgs.firefox.outPath;
    }
  );
  eval-nixpkgs-firefox-drvpath = (
    mkNixpkgsEvalTest {
      attrPath = "firefox.drvPath";
      expectedPath = pkgs.firefox.drvPath;
    }
  );
  eval-nixpkgs-cross-stdenv-outpath = (
    mkNixpkgsEvalTest {
      attrPath = "pkgsCross.aarch64-multiplatform.stdenv.outPath";
      expectedPath = pkgs.pkgsCross.aarch64-multiplatform.stdenv.outPath;
    }
  );
  eval-nixpkgs-cross-hello-outpath = (
    mkNixpkgsEvalTest {
      attrPath = "pkgsCross.aarch64-multiplatform.hello.outPath";
      expectedPath = pkgs.pkgsCross.aarch64-multiplatform.hello.outPath;
    }
  );
  eval-nixpkgs-nixos-graphical-installer-drvpath = (
    mkNixpkgsEvalTest {
      expr = "(import ${pkgs.path}/nixos/release.nix { }).iso_graphical.${pkgs.system}.drvPath";
      expectedPath = (import "${pkgs.path}/nixos/release.nix" { }).iso_graphical.${pkgs.system}.drvPath;
    }
  );
  eval-nixpkgs-nixos-graphical-installer-outpath = (
    mkNixpkgsEvalTest {
      expr = "(import ${pkgs.path}/nixos/release.nix { }).iso_graphical.${pkgs.system}.outPath";
      expectedPath = (import "${pkgs.path}/nixos/release.nix" { }).iso_graphical.${pkgs.system}.outPath;
    }
  );
};
in
{
  meta = {
    ci.targets = (builtins.attrNames benchmarks) ++ (builtins.attrNames evalTests);
  };

mkNixpkgsBenchmark = attrpath:
  mkExprBenchmark {
    description = builtins.replaceStrings [ ".drv" ] [ "-drv" ] attrpath;
    expr = "(import ${pkgs.path} {}).${attrpath}";
  };

# Constructs a Derivation invoking snix-cli inside a build, ensures the
# calculated snix output path matches what's passed in externally.
mkNixpkgsEvalTest =
  { attrPath ? null # An attribute that must already be accessible from `pkgs`. Should evaluate to a store path.
  , expr ? null # A Nix expression that should evaluate to a store path.
  , expectedPath # The expected store path that should match one of the above.
  }:
  assert lib.assertMsg (attrPath != null || expr != null) "Either 'attrPath' or 'expr' must be set.";
  let
    name = "snix-eval-test-${builtins.replaceStrings [".drv"] ["-drv"] (if expr != null then "custom-expr" else attrPath)}";
  in
  (pkgs.runCommand name { } ''
    export SSL_CERT_FILE=/dev/null
    SNIX_OUTPUT=$(${snix-cli}/bin/snix --no-warnings -E '${if expr != null then expr else "(import ${pkgs.path} {}).${attrPath}"}')
    EXPECTED='${/* the verbatim expected Snix output: */ "=> \"${builtins.unsafeDiscardStringContext expectedPath}\" :: string"}'

    echo "Snix output: ''${SNIX_OUTPUT}"
    if [ "$SNIX_OUTPUT" != "$EXPECTED" ]; then
      echo "Correct would have been ''${EXPECTED}"
      exit 1
    fi

    echo "Output was correct."
    touch $out
  '');


benchmarks = {
  benchmark-hello = (mkNixpkgsBenchmark "hello.outPath");
  benchmark-cross-hello = (mkNixpkgsBenchmark "pkgsCross.aarch64-multiplatform.hello.outPath");
  benchmark-firefox = (mkNixpkgsBenchmark "firefox.outPath");
  benchmark-cross-firefox = (mkNixpkgsBenchmark "pkgsCross.aarch64-multiplatform.firefox.outPath");
  # Example used for benchmarking LightSpan::Delayed in commit bf286a54bc2ac5eeb78c3d5c5ae66e9af24d74d4
  benchmark-nixpkgs-attrnames = (mkExprBenchmark { expr = "builtins.length (builtins.attrNames (import ${pkgs.path} {}))"; description = "nixpkgs-attrnames"; });
};

evalTests = {
  eval-nixpkgs-stdenv-drvpath = (mkNixpkgsEvalTest { attrPath = "stdenv.drvPath"; expectedPath = pkgs.stdenv.drvPath; });
  eval-nixpkgs-stdenv-outpath = (mkNixpkgsEvalTest { attrPath = "stdenv.outPath"; expectedPath = pkgs.stdenv.outPath; });
  eval-nixpkgs-hello-outpath = (mkNixpkgsEvalTest { attrPath = "hello.outPath"; expectedPath = pkgs.hello.outPath; });
  eval-nixpkgs-firefox-outpath = (mkNixpkgsEvalTest { attrPath = "firefox.outPath"; expectedPath = pkgs.firefox.outPath; });
  eval-nixpkgs-firefox-drvpath = (mkNixpkgsEvalTest { attrPath = "firefox.drvPath"; expectedPath = pkgs.firefox.drvPath; });
  eval-nixpkgs-cross-stdenv-outpath = (mkNixpkgsEvalTest { attrPath = "pkgsCross.aarch64-multiplatform.stdenv.outPath"; expectedPath = pkgs.pkgsCross.aarch64-multiplatform.stdenv.outPath; });
  eval-nixpkgs-cross-hello-outpath = (mkNixpkgsEvalTest { attrPath = "pkgsCross.aarch64-multiplatform.hello.outPath"; expectedPath = pkgs.pkgsCross.aarch64-multiplatform.hello.outPath; });
  eval-nixpkgs-nixos-graphical-installer-drvpath = (mkNixpkgsEvalTest {
    expr = "(import ${pkgs.path}/nixos/release.nix { }).iso_graphical.${pkgs.system}.drvPath";
    expectedPath = (import "${pkgs.path}/nixos/release.nix" { }).iso_graphical.${pkgs.system}.drvPath;
  });
  eval-nixpkgs-nixos-graphical-installer-outpath = (mkNixpkgsEvalTest {
    expr = "(import ${pkgs.path}/nixos/release.nix { }).iso_graphical.${pkgs.system}.outPath";
    expectedPath = (import "${pkgs.path}/nixos/release.nix" { }).iso_graphical.${pkgs.system}.outPath;
  });
};
in
{
  meta = {
    ci.targets = (builtins.attrNames benchmarks) ++ (builtins.attrNames evalTests);
  };

  # Expose benchmarks and evalTests as standard CI targets.
  passthru = previousAttrs.passthru // benchmarks // evalTests;
})
# Expose benchmarks and evalTests as standard CI targets.
passthru = previousAttrs.passthru // benchmarks // evalTests;
}
)
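A note on how `mkNixpkgsEvalTest` above does its comparison: snix prints evaluation results in REPL style, so the test script builds the literal expected string `=> "<store path>" :: string` from `expectedPath`, presumably discarding its string context so the expected path does not become a build input of the test derivation itself. A hypothetical extra entry, mirroring the existing ones (not part of this diff), would look like:

  my-eval-test = mkNixpkgsEvalTest {
    attrPath = "hello.outPath";        # evaluated as (import pkgs.path { }).hello.outPath
    expectedPath = pkgs.hello.outPath; # what snix must print for the test to pass
  };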
@@ -1 +1 @@
{}: import ./six.nix { }
{ }: import ./six.nix { }
@@ -1 +1,8 @@
{}: { six = builtins.foldl' (x: y: x + y) 0 [ 1 2 3 ]; }
{ }:
{
  six = builtins.foldl' (x: y: x + y) 0 [
    1
    2
    3
  ];
}
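(Aside: `builtins.foldl'` is a strict left fold, so the expression above reduces to ((0 + 1) + 2) + 3 = 6, hence the attribute name `six`.)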
@@ -1,5 +1,11 @@
# Nix helpers for projects under //snix
{ pkgs, lib, depot, here, ... }:
{
  pkgs,
  lib,
  depot,
  here,
  ...
}:

let
  # Load the crate2nix crate tree.

@@ -13,13 +19,18 @@ let
  # Extract the hashes from `crates` / Cargo.nix, we already get them from cargo2nix.
  # This returns an attribute set containing "${crateName}-${version}" as key,
  # and the outputHash as value.
  outputHashes = builtins.listToAttrs
    (map
      (k:
        (lib.nameValuePair "${crates.internal.crates.${k}.crateName}-${crates.internal.crates.${k}.version}" crates.internal.crates.${k}.src.outputHash)
      ) [
      "wu-manber"
    ]);
  outputHashes = builtins.listToAttrs (
    map
      (
        k:
        (lib.nameValuePair "${crates.internal.crates.${k}.crateName}-${
          crates.internal.crates.${k}.version
        }" crates.internal.crates.${k}.src.outputHash)
      )
      [
        "wu-manber"
      ]
  );
};

# The cleaned sources.

@@ -36,32 +47,42 @@ let
  ];
};

mkCargoBuild = args: pkgs.stdenv.mkDerivation ({
  inherit cargoDeps src;
  PROTO_ROOT = protos;
  SNIX_BUILD_SANDBOX_SHELL = "/homeless-shelter";
mkCargoBuild =
  args:
  pkgs.stdenv.mkDerivation (
    {
      inherit cargoDeps src;
      PROTO_ROOT = protos;
      SNIX_BUILD_SANDBOX_SHELL = "/homeless-shelter";

      nativeBuildInputs = with pkgs; [
        cargo
        pkg-config
        protobuf
        rustc
        rustPlatform.cargoSetupHook
      ] ++ (args.nativeBuildInputs or [ ]);
    } // (pkgs.lib.removeAttrs args [ "nativeBuildInputs" ]));
      nativeBuildInputs =
        with pkgs;
        [
          cargo
          pkg-config
          protobuf
          rustc
          rustPlatform.cargoSetupHook
        ]
        ++ (args.nativeBuildInputs or [ ]);
    }
    // (pkgs.lib.removeAttrs args [ "nativeBuildInputs" ])
  );
in
{
  inherit crates protos mkCargoBuild;

  # Provide the snix logo in both .webp and .png format.
  logo = pkgs.runCommand "logo"
    {
      nativeBuildInputs = [ pkgs.imagemagick ];
    } ''
    mkdir -p $out
    cp ${./logo.webp} $out/logo.webp
    convert $out/logo.webp $out/logo.png
  '';
  logo =
    pkgs.runCommand "logo"
      {
        nativeBuildInputs = [ pkgs.imagemagick ];
      }
      ''
        mkdir -p $out
        cp ${./logo.webp} $out/logo.webp
        convert $out/logo.webp $out/logo.png
      '';

  # Provide a shell for the combined dependencies of all snix Rust
  # projects. Note that as this is manually maintained it may be

@@ -73,7 +94,12 @@ in
  shell = (import ./shell.nix { inherit pkgs; });

  # Shell, but with tools necessary to run the integration tests
  shell-integration = (import ./shell.nix { inherit pkgs; withIntegration = true; });
  shell-integration = (
    import ./shell.nix {
      inherit pkgs;
      withIntegration = true;
    }
  );

  # Build the Rust documentation for publishing on snix.dev/rustdoc.
  rust-docs = mkCargoBuild {

@@ -81,7 +107,8 @@ in

    buildInputs = [
      pkgs.fuse
    ] ++ lib.optional pkgs.stdenv.isDarwin pkgs.libiconv;
    ]
    ++ lib.optional pkgs.stdenv.isDarwin pkgs.libiconv;

    buildPhase = ''
      RUSTDOCFLAGS="-D rustdoc::broken-intra-doc-links" cargo doc --document-private-items
@@ -1,5 +1,10 @@
# TODO: find a way to build the benchmarks via crate2nix
{ depot, pkgs, lib, ... }:
{
  depot,
  pkgs,
  lib,
  ...
}:

(depot.snix.crates.workspaceMembers.snix-eval.build.override {
  runTests = true;

@@ -7,11 +12,16 @@
  # Make C++ Nix available, to compare eval results against.
  # This needs Nix 2.3, as nix_oracle.rs fails with pkgs.nix
  testInputs = [ pkgs.nix_2_3 ];
}).overrideAttrs (old: rec {
  meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (lib.attrNames passthru);
  passthru = old.passthru // (depot.snix.utils.mkFeaturePowerset {
    inherit (old) crateName;
    features = [ "nix_tests" ];
    override.testInputs = [ pkgs.nix ];
  });
})
}).overrideAttrs
  (old: rec {
    meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (
      lib.attrNames passthru
    );
    passthru =
      old.passthru
      // (depot.snix.utils.mkFeaturePowerset {
        inherit (old) crateName;
        features = [ "nix_tests" ];
        override.testInputs = [ pkgs.nix ];
      });
  })
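The `meta.ci.targets` filter above only makes sense if `mkFeaturePowerset` returns one derivation per feature combination, keyed by name. The helper itself is not part of this diff, so as a labelled assumption, the resulting `passthru` plausibly gains attributes shaped like:

  # Assumed shape for features = [ "nix_tests" ]; names inferred from the filter, not shown in this diff:
  # { "no-features" = <drv>; "with-features-nix_tests" = <drv>; }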
@@ -5,13 +5,18 @@
  testPreRun = ''
    export SSL_CERT_FILE=/dev/null
  '';
}).overrideAttrs (old: rec {
  meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (lib.attrNames passthru);
  passthru = old.passthru // (depot.snix.utils.mkFeaturePowerset {
    inherit (old) crateName;
    features = [ "nix_tests" ];
    override.testPreRun = ''
      export SSL_CERT_FILE=/dev/null
    '';
  });
})
}).overrideAttrs
  (old: rec {
    meta.ci.targets = lib.filter (x: lib.hasPrefix "with-features" x || x == "no-features") (
      lib.attrNames passthru
    );
    passthru =
      old.passthru
      // (depot.snix.utils.mkFeaturePowerset {
        inherit (old) crateName;
        features = [ "nix_tests" ];
        override.testPreRun = ''
          export SSL_CERT_FILE=/dev/null
        '';
      });
  })
@@ -4,31 +4,37 @@
#
# TODO: rewrite in native Rust code

/* This is the implementation of the ‘derivation’ builtin function.
   It's actually a wrapper around the ‘derivationStrict’ primop. */
/*
  This is the implementation of the ‘derivation’ builtin function.
  It's actually a wrapper around the ‘derivationStrict’ primop.
*/

drvAttrs @ { outputs ? [ "out" ], ... }:
drvAttrs@{
  outputs ? [ "out" ],
  ...
}:

let

  strict = derivationStrict drvAttrs;

  commonAttrs = drvAttrs // (builtins.listToAttrs outputsList) //
    {
  commonAttrs =
    drvAttrs
    // (builtins.listToAttrs outputsList)
    // {
      all = map (x: x.value) outputsList;
      inherit drvAttrs;
    };

  outputToAttrListElement = outputName:
    {
      name = outputName;
      value = commonAttrs // {
        outPath = builtins.getAttr outputName strict;
        drvPath = strict.drvPath;
        type = "derivation";
        inherit outputName;
      };
  outputToAttrListElement = outputName: {
    name = outputName;
    value = commonAttrs // {
      outPath = builtins.getAttr outputName strict;
      drvPath = strict.drvPath;
      type = "derivation";
      inherit outputName;
    };
  };

  outputsList = map outputToAttrListElement outputs;
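To make the wrapper concrete: each output becomes an attribute whose value is `commonAttrs` plus that output's store path, so the result of a call behaves like a derivation value. A minimal sketch (builder and system are placeholders, not from this diff):

  let
    d = derivation {
      name = "demo";
      system = "x86_64-linux"; # placeholder
      builder = "/bin/sh";     # placeholder
      args = [ "-c" "echo ok > $out" ];
    };
  in
  {
    inherit (d) drvPath outPath;    # filled in from derivationStrict
    isDrv = d.type == "derivation"; # true; this tag marks the value as a derivation
  }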
@@ -5,21 +5,42 @@
#
# Source: https://github.com/NixOS/nix/blob/2.3.16/corepkgs/fetchurl.nix

{ system ? "" # obsolete
, url
, hash ? "" # an SRI hash
{
  system ? "", # obsolete
  url,
  hash ? "", # an SRI hash

  # Legacy hash specification
, md5 ? ""
, sha1 ? ""
, sha256 ? ""
, sha512 ? ""
, outputHash ? if hash != "" then hash else if sha512 != "" then sha512 else if sha1 != "" then sha1 else if md5 != "" then md5 else sha256
, outputHashAlgo ? if hash != "" then "" else if sha512 != "" then "sha512" else if sha1 != "" then "sha1" else if md5 != "" then "md5" else "sha256"
  md5 ? "",
  sha1 ? "",
  sha256 ? "",
  sha512 ? "",
  outputHash ?
    if hash != "" then
      hash
    else if sha512 != "" then
      sha512
    else if sha1 != "" then
      sha1
    else if md5 != "" then
      md5
    else
      sha256,
  outputHashAlgo ?
    if hash != "" then
      ""
    else if sha512 != "" then
      "sha512"
    else if sha1 != "" then
      "sha1"
    else if md5 != "" then
      "md5"
    else
      "sha256",

, executable ? false
, unpack ? false
, name ? baseNameOf (toString url)
  executable ? false,
  unpack ? false,
  name ? baseNameOf (toString url),
}:

derivation {

@@ -29,7 +50,12 @@ derivation {
  inherit outputHashAlgo outputHash;
  outputHashMode = if unpack || executable then "recursive" else "flat";

  inherit name url executable unpack;
  inherit
    name
    url
    executable
    unpack
    ;

  system = "builtin";
@@ -3,7 +3,10 @@ let
    name = "fail";
    builder = "/bin/false";
    system = "x86_64-linux";
    outputs = [ "out" "foo" ];
    outputs = [
      "out"
      "foo"
    ];
  };

  path = "${./eval-okay-context-introspection.nix}";

@@ -13,7 +16,10 @@ let
      path = true;
    };
    "${builtins.unsafeDiscardStringContext drv.drvPath}" = {
      outputs = [ "foo" "out" ];
      outputs = [
        "foo"
        "out"
      ];
      allOutputs = true;
    };
  };

@@ -21,15 +27,12 @@ let
  combo-path = "${path}${drv.outPath}${drv.foo.outPath}${drv.drvPath}";
  legit-context = builtins.getContext combo-path;

  reconstructed-path = builtins.appendContext
    (builtins.unsafeDiscardStringContext combo-path)
    desired-context;
  reconstructed-path = builtins.appendContext (builtins.unsafeDiscardStringContext combo-path) desired-context;

  # Eta rule for strings with context.
  etaRule = str:
    str == builtins.appendContext
      (builtins.unsafeDiscardStringContext str)
      (builtins.getContext str);
  etaRule =
    str:
    str == builtins.appendContext (builtins.unsafeDiscardStringContext str) (builtins.getContext str);

in
[
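The "eta rule" tested above says a context-carrying string survives being taken apart and reassembled: strip the context, read the original context back with `getContext`, re-attach it with `appendContext`, and the result must equal the original string. In sketch form, using a path string like the ones in this test:

  let s = "${./eval-okay-context-introspection.nix}";
  in s == builtins.appendContext (builtins.unsafeDiscardStringContext s) (builtins.getContext s)
  # => true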
@@ -1,6 +1,7 @@
let s = "foo ${builtins.substring 33 100 (baseNameOf "${./eval-okay-context.nix}")} bar";
let
  s = "foo ${builtins.substring 33 100 (baseNameOf "${./eval-okay-context.nix}")} bar";
in
if s != "foo eval-okay-context.nix bar"
then abort "context not discarded"
else builtins.unsafeDiscardStringContext s

if s != "foo eval-okay-context.nix bar" then
  abort "context not discarded"
else
  builtins.unsafeDiscardStringContext s
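(Why `substring 33 100`? `baseNameOf` on a store path yields `<hash>-eval-okay-context.nix`, and the base32 hash prefix is 32 characters plus the `-` separator, so skipping 33 characters leaves just the file name; 100 is simply a generous upper bound on the remaining length.)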
Some files were not shown because too many files have changed in this diff.