feat(tvix/boot/tests): reenable

These tests got disabled in 5f17df8548 without much investigation.

After some more digging, I found the root cause:
173de320197ba97f2daf46b7d229055cd3732df4 in nixpkgs changed
`mkBinaryCache` to name the NAR files it produces by their FileHash
rather than their NarHash.
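
For illustration, a .narinfo in such a binary cache now points at a NAR
file named after the FileHash of the compressed archive, roughly like
this (all values below are placeholders, not taken from a real cache):

    StorePath: /nix/store/<hash>-example
    URL: nar/<FileHash>.nar.xz
    Compression: xz
    FileHash: sha256:<hash of the compressed .nar.xz>
    NarHash: sha256:<hash of the uncompressed NAR>

Previously the NAR file name was derived from the NarHash, which is the
name nar-bridge expects uploads under.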

While this is more correct and consistent with how Nix itself produces
binary caches, it unfortunately means our uploads now get rejected.
We didn't notice this, because curl (and rush) were not configured to
bail out on errors - which is now the case again.

Deal with the change by decompressing each NAR twice: once to recompute
the NarHash, and a second time to feed the data to curl.
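
Spelled out as a plain loop, the rush invocation below now does roughly
the following per NAR (a sketch; $to_upload and the nar-bridge socket
are set up earlier in the test's buildCommand):

    for f in "$to_upload"/nar/*.nar.xz; do
      # First decompression: compute the NarHash of the uncompressed NAR.
      nar_hash=$(xz -d < "$f" | nix-hash --base32 --type sha256 --flat /dev/stdin)
      # Second decompression: stream the NAR to nar-bridge, addressed by
      # that NarHash. --fail-with-body makes curl exit non-zero on HTTP
      # errors, so failed uploads abort the test instead of passing silently.
      xz -d < "$f" | curl -s --fail-with-body -T - \
        --unix-socket "$PWD/nar-bridge.sock" \
        "http://localhost:9000/nar/${nar_hash}.nar"
    done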

Change-Id: Ic1deb470801250e7967083ac78cd46be39f3caa3
Reviewed-on: https://cl.tvl.fyi/c/depot/+/13122
Reviewed-by: Ilan Joselevich <personal@ilanjoselevich.com>
Tested-by: BuildkiteCI
Autosubmit: flokli <flokli@flokli.de>
Author: Florian Klink (committed by clbot)
Date:   2025-02-17 12:47:20 +07:00
Commit: 1acf37c054
Parent: 1115bffe47

@@ -50,8 +50,10 @@ let
       pkgs.curl
       pkgs.rush-parallel
       pkgs.xz.bin
+      pkgs.nix
     ];
     buildCommand = ''
+      set -eou pipefail
       touch $out
       # Ensure we can construct http clients.
       export SSL_CERT_FILE=/dev/null
@@ -103,7 +105,7 @@ let
       # nar-bridge doesn't care about the path we upload *to*, but a
       # subsequent .narinfo upload need to refer to its contents (by narhash).
       echo -e "Uploading NARs "
-      ls -d $to_upload/nar/*.nar.xz | rush 'xz -d < {} | curl -s -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/nar/$(basename {} | cut -d "." -f 1).nar'
+      ls -d $to_upload/nar/*.nar.xz | rush -n1 'nar_hash=$(xz -d < {} | nix-hash --base32 --type sha256 --flat /dev/stdin);xz -d < {} | curl -s --fail-with-body -T - --unix-socket $PWD/nar-bridge.sock http://localhost:9000/nar/''${nar_hash}.nar'
       echo "Done."
 
       # Upload all NARInfo files.
@@ -158,90 +160,83 @@ let
     documentation.enable = lib.mkForce false;
   }).config.system.build.toplevel;
 
-  allTests = depot.nix.readTree.drvTargets {
-    docs-memory = (mkBootTest {
-      path = ../../docs;
-      importPathName = "docs";
-    });
-
-    docs-persistent = (mkBootTest {
-      blobServiceAddr = "objectstore+file:///build/blobs";
-      directoryServiceAddr = "redb:///build/directories.redb";
-      pathInfoServiceAddr = "redb:///build/pathinfo.redb";
-      path = ../../docs;
-      importPathName = "docs";
-    });
-
-    closure-tvix = (mkBootTest {
-      blobServiceAddr = "objectstore+file:///build/blobs";
-      path = depot.tvix.store;
-      isClosure = true;
-    });
-
-    closure-nixos = (mkBootTest {
-      blobServiceAddr = "objectstore+file:///build/blobs";
-      pathInfoServiceAddr = "redb:///build/pathinfo.redb";
-      directoryServiceAddr = "redb:///build/directories.redb";
-      path = testSystem;
-      isClosure = true;
-      vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
-      assertVMOutput = "Onwards and upwards.";
-    });
-
-    closure-nixos-bigtable = (mkBootTest {
-      blobServiceAddr = "objectstore+file:///build/blobs";
-      directoryServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=directories&family_name=cf1";
-      pathInfoServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=pathinfos&family_name=cf1";
-      path = testSystem;
-      useNarBridge = true;
-      preStart = ''
-        ${pkgs.cbtemulator}/bin/cbtemulator -address $PWD/cbtemulator.sock &
-        timeout 22 sh -c 'until [ -e $PWD/cbtemulator.sock ]; do sleep 1; done'
-        export BIGTABLE_EMULATOR_HOST=unix://$PWD/cbtemulator.sock
-        ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable directories
-        ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily directories cf1
-        ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable pathinfos
-        ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily pathinfos cf1
-      '';
-      isClosure = true;
-      vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
-      assertVMOutput = "Onwards and upwards.";
-    });
-
-    closure-nixos-s3 = (mkBootTest {
-      blobServiceAddr = "objectstore+s3://mybucket/blobs?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&aws_endpoint_url=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
-      # we cannot use s3 here yet without any caching layer, as we don't allow "deeper" access to directories (non-root nodes)
-      # directoryServiceAddr = "objectstore+s3://mybucket/directories?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&endpoint=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
-      directoryServiceAddr = "memory://";
-      pathInfoServiceAddr = "memory://";
-      path = testSystem;
-      useNarBridge = true;
-      preStart = ''
-        MINIO_ACCESS_KEY=myaccesskey MINIO_SECRET_KEY=supersecret MINIO_ADDRESS=127.0.0.1:9000 ${pkgs.minio}/bin/minio server $(mktemp -d) &
-        timeout 22 sh -c 'until ${pkgs.netcat}/bin/nc -z $0 $1; do sleep 1; done' localhost 9000
-        mc_config_dir=$(mktemp -d)
-        ${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir alias set 'myminio' 'http://127.0.0.1:9000' 'myaccesskey' 'supersecret'
-        ${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir mb myminio/mybucket
-      '';
-      isClosure = true;
-      vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
-      assertVMOutput = "Onwards and upwards.";
-    });
-
-    closure-nixos-nar-bridge = (mkBootTest {
-      blobServiceAddr = "objectstore+file:///build/blobs";
-      path = testSystem;
-      useNarBridge = true;
-      isClosure = true;
-      vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
-      assertVMOutput = "Onwards and upwards.";
-    });
-  };
 in
-# remove broken tests from set
-builtins.removeAttrs allTests [
-  # these fail in CI as of 2025-02-09, printing HTTP errors
-  "closure-nixos-bigtable"
-  "closure-nixos-nar-bridge"
-  "closure-nixos-s3"
-]
+depot.nix.readTree.drvTargets {
+  docs-memory = (mkBootTest {
+    path = ../../docs;
+    importPathName = "docs";
+  });
+
+  docs-persistent = (mkBootTest {
+    blobServiceAddr = "objectstore+file:///build/blobs";
+    directoryServiceAddr = "redb:///build/directories.redb";
+    pathInfoServiceAddr = "redb:///build/pathinfo.redb";
+    path = ../../docs;
+    importPathName = "docs";
+  });
+
+  closure-tvix = (mkBootTest {
+    blobServiceAddr = "objectstore+file:///build/blobs";
+    path = depot.tvix.store;
+    isClosure = true;
+  });
+
+  closure-nixos = (mkBootTest {
+    blobServiceAddr = "objectstore+file:///build/blobs";
+    pathInfoServiceAddr = "redb:///build/pathinfo.redb";
+    directoryServiceAddr = "redb:///build/directories.redb";
+    path = testSystem;
+    isClosure = true;
+    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
+    assertVMOutput = "Onwards and upwards.";
+  });
+
+  closure-nixos-bigtable = (mkBootTest {
+    blobServiceAddr = "objectstore+file:///build/blobs";
+    directoryServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=directories&family_name=cf1";
+    pathInfoServiceAddr = "bigtable://instance-1?project_id=project-1&table_name=pathinfos&family_name=cf1";
+    path = testSystem;
+    useNarBridge = true;
+    preStart = ''
+      ${pkgs.cbtemulator}/bin/cbtemulator -address $PWD/cbtemulator.sock &
+      timeout 22 sh -c 'until [ -e $PWD/cbtemulator.sock ]; do sleep 1; done'
+      export BIGTABLE_EMULATOR_HOST=unix://$PWD/cbtemulator.sock
+      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable directories
+      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily directories cf1
+      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createtable pathinfos
+      ${pkgs.google-cloud-bigtable-tool}/bin/cbt -instance instance-1 -project project-1 createfamily pathinfos cf1
+    '';
+    isClosure = true;
+    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
+    assertVMOutput = "Onwards and upwards.";
+  });
+
+  closure-nixos-s3 = (mkBootTest {
+    blobServiceAddr = "objectstore+s3://mybucket/blobs?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&aws_endpoint_url=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
+    # we cannot use s3 here yet without any caching layer, as we don't allow "deeper" access to directories (non-root nodes)
+    # directoryServiceAddr = "objectstore+s3://mybucket/directories?aws_access_key_id=myaccesskey&aws_secret_access_key=supersecret&endpoint=http%3A%2F%2Flocalhost%3A9000&aws_allow_http=1";
+    directoryServiceAddr = "memory://";
+    pathInfoServiceAddr = "memory://";
+    path = testSystem;
+    useNarBridge = true;
+    preStart = ''
+      MINIO_ACCESS_KEY=myaccesskey MINIO_SECRET_KEY=supersecret MINIO_ADDRESS=127.0.0.1:9000 ${pkgs.minio}/bin/minio server $(mktemp -d) &
+      timeout 22 sh -c 'until ${pkgs.netcat}/bin/nc -z $0 $1; do sleep 1; done' localhost 9000
+      mc_config_dir=$(mktemp -d)
+      ${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir alias set 'myminio' 'http://127.0.0.1:9000' 'myaccesskey' 'supersecret'
+      ${pkgs.minio-client}/bin/mc --config-dir $mc_config_dir mb myminio/mybucket
+    '';
+    isClosure = true;
+    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
+    assertVMOutput = "Onwards and upwards.";
+  });
+
+  closure-nixos-nar-bridge = (mkBootTest {
+    blobServiceAddr = "objectstore+file:///build/blobs";
+    path = testSystem;
+    useNarBridge = true;
+    isClosure = true;
+    vmCmdline = "init=${testSystem}/init panic=-1"; # reboot immediately on panic
+    assertVMOutput = "Onwards and upwards.";
+  });
+}