chore(*): drop everything that is not required for Tvix

Co-Authored-By: edef <edef@edef.eu>
Co-Authored-By: Ryan Lahfa <raito@lix.systems>
Change-Id: I9817214c3122e49d694c5e41818622a08d9dfe45
Florian Klink 2025-01-05 17:12:30 +01:00
parent bd91cac1f3
commit df4500ea2b
2905 changed files with 34 additions and 493328 deletions


@ -1,17 +1,15 @@
# Shell derivation to invoke //nix/lazy-deps with the dependencies
# that should be lazily made available in depot.
{ pkgs, depot, ... }:
{ depot, ... }:
depot.nix.lazy-deps {
age-keygen.attr = "third_party.nixpkgs.age";
age.attr = "third_party.nixpkgs.age";
depotfmt.attr = "tools.depotfmt";
fetch-depot-inbox.attr = "tools.fetch-depot-inbox";
git-r.attr = "tools.git-r";
git-review.attr = "third_party.nixpkgs.git-review";
gerrit-update.attr = "tools.gerrit-update";
gerrit.attr = "tools.gerrit-cli";
hash-password.attr = "tools.hash-password";
josh-filter.attr = "third_party.nixpkgs.josh";
mg.attr = "tools.magrathea";
nint.attr = "nix.nint";
@ -24,11 +22,6 @@ depot.nix.lazy-deps {
cmd = "terraform";
};
tf-glesys = {
attr = "ops.glesys.terraform";
cmd = "terraform";
};
tf-keycloak = {
attr = "ops.keycloak.terraform";
cmd = "terraform";


@ -1,23 +0,0 @@
{ depot, pkgs, ... }:
let
em = depot.tools.eaglemode;
in
em.mkCommand {
name = "9 B";
hotkey = "Ctrl+E";
icon = "${./plan9.tga}";
description = ''
Plumb target to Sam or Acme
'';
code = ''
ErrorIfNotSingleTarget();
my @tgt=GetTgt();
my $dir=$tgt[0];
ExecOrError('${pkgs.plan9port}/bin/9', 'B', $dir);
'';
}


@ -1,26 +0,0 @@
{ depot, pkgs, ... }:
let
em = depot.tools.eaglemode;
icon = em.mkTGA "emacs" "${pkgs.emacs}/share/icons/hicolor/128x128/apps/emacs.png";
in
em.mkCommand {
name = "Emacsclient";
hotkey = "Ctrl+E";
icon = "${icon}";
description = ''
Open target in Emacsclient.
Emacs server must be running already for this to have any effect.
'';
code = ''
ErrorIfNotSingleTarget();
my @tgt=GetTgt();
my $dir=$tgt[0];
ExecOrError('${pkgs.emacs}/bin/emacsclient', '-n', $dir);
'';
}

Binary file not shown.


@ -1,146 +0,0 @@
# Helper functions for extending Eagle Mode with useful stuff.
#
# Eagle Mode's customisation usually expects people to copy the entire
# configuration into their user folder, which we can automate fairly easily
# using Nix, letting users choose whether to keep upstream config or not.
{ depot, lib, pkgs, ... }:
let
mkDesc = d: lib.concatMapStringsSep "\n"
(x: "# Descr =${x}")
(builtins.filter (s: s != "") (lib.splitString "\n" d));
configWrapper = pkgs.runCommand "eaglemode-config-wrapper" { } ''
cp ${./wrapper.go} wrapper.go
export HOME=$PWD
${pkgs.go}/bin/go build wrapper.go
install -Dm755 wrapper $out/bin/wrapper
'';
in
rec {
# mkCommand creates an Eagle Mode command for the file browser.
#
# Commands are basically little Perl scripts with a command standard library
# available. They receive the user's selected target from Eagle Mode.
mkCommand = lib.makeOverridable (
{
# Name of the command.
name
, # User-facing description, displayed in Eagle Mode UI. Can be multi-line.
description
, # Verbatim Perl code of the command. Command library is already available.
code
, # Caption for the UI button (defaults to name).
caption ? name
, icon ? "terminal.tga"
, # TODO: what's a good default?
hotkey ? ""
, order ? 1.0
}: pkgs.writeTextDir "emFileMan/Commands/${name}.pl" (''
#!${pkgs.perl}/bin/perl
#[[BEGIN PROPERTIES]]
# Type = Command
# Interpreter = perl
# DefaultFor = directory
# Caption = ${caption}
# Order = ${toString order}
# Icon = ${icon}
''
+ (lib.optionalString (description != "") "${mkDesc description}\n")
+ (lib.optionalString (hotkey != "") "# Hotkey = ${hotkey}\n")
+ ''
#[[END PROPERTIES]]
use strict;
use warnings;
BEGIN { require "$ENV{'EM_DIR'}/res/emFileMan/scripts/cmd-util.pl"; }
${if builtins.isString code
then code
else (if builtins.isPath code
then builtins.readFile code
else throw "code must be a string (literal code) or path to file")}
'')
);
# mkTGA converts the given image to a TGA image.
mkTGA = name: path: pkgs.runCommand "${name}.tga" { } ''
${pkgs.imagemagick}/bin/convert ${path} $out
'';
buildPlugin = lib.makeOverridable (
{ name
, src
, version
, eaglemode ? pkgs.eaglemode
, target ? name
, extraNativeBuildInputs ? [ ]
, extraBuildInputs ? [ ]
}:
pkgs.stdenv.mkDerivation {
pname = "eaglemode-plugin-${name}";
inherit src version;
# inherit (eaglemode.drvAttrs) dontPatchELF;
nativeBuildInputs = eaglemode.drvAttrs.nativeBuildInputs ++ extraNativeBuildInputs;
buildInputs = eaglemode.drvAttrs.buildInputs ++ extraBuildInputs ++ [ eaglemode ];
buildPhase = ''
runHook preBuild
# merge eaglemode & plugin folders
cp -r ${pkgs.srcOnly eaglemode} merged-src && chmod -R u+rw merged-src
cp -r $src/* merged-src && chmod -R u+rw merged-src
cd merged-src
export NIX_LDFLAGS="$NIX_LDFLAGS -lXxf86vm -lXext -lXinerama"
perl make.pl build projects=${target} continue=no
runHook postBuild
'';
installPhase = ''
runHook preInstall
mkdir -p $out/lib
cp -r lib/lib${target}.so $out/lib
if [ -d "$src/etc" ]; then
cp -r $src/etc/* $out
fi
runHook postInstall
'';
}
);
# etcDir creates a directory layout suitable for use in the EM_USER_CONFIG_DIR
# environment variable.
#
# Note that Eagle Mode requires the value of that variable to be mutable at
# runtime (it is the same place where it persists all of its user-controlled
# state), so the results of this function can not be used directly.
etcDir =
{ eaglemode ? pkgs.eaglemode
, extraPaths ? [ ]
}: pkgs.runCommand "eaglemode-config" { } ''
mkdir $out
${
lib.concatMapStringsSep "\n" (s: "cp -rT ${s} $out/\nchmod -R u+rw $out/\n") ([ "${eaglemode}/etc"] ++ extraPaths)
}
'';
# withConfig creates an Eagle Mode wrapper that runs it with the given
# configuration.
withConfig = { eaglemode ? pkgs.eaglemode, config }: pkgs.writeShellScriptBin "eaglemode" ''
${configWrapper}/bin/wrapper --em-config "${config}"
if [ -d "${config}/lib" ]; then
export LD_LIBRARY_PATH="${config}/lib:$LD_LIBRARY_PATH"
exec ${eaglemode}/bin/eaglemode "$@"
fi
exec ${eaglemode}/bin/eaglemode "$@"
'';
}


@ -1,10 +0,0 @@
{ depot, pkgs, ... }:
depot.tools.eaglemode.buildPlugin {
name = "avif";
version = "canon";
src = ./.;
target = "PlAvif";
extraBuildInputs = [ pkgs.libavif ];
extraNativeBuildInputs = [ pkgs.pkg-config ];
}


@ -1,6 +0,0 @@
#%rec:emFpPlugin%#
FileTypes = { ".avif" }
Priority = 1.0
Library = "PlAvif"
Function = "PlAvifFpPluginFunc"


@ -1,64 +0,0 @@
package PlAvif;
use strict;
use warnings;
sub GetDependencies
{
return ('emCore');
}
sub IsEssential
{
return 0;
}
sub GetFileHandlingRules
{
return ();
}
sub GetExtraBuildOptions
{
return ();
}
sub Build
{
shift;
my %options=@_;
my @libAvifFlags=();
if ($options{'avif-inc-dir'} eq '' && $options{'avif-lib-dir'} eq '') {
@libAvifFlags=split("\n",readpipe(
"perl \"".$options{'utils'}."/PkgConfig.pl\" libavif"
));
}
if (!@libAvifFlags) {
if ($options{'avif-inc-dir'} ne '') {
push(@libAvifFlags, "--inc-search-dir", $options{'avif-inc-dir'});
}
if ($options{'avif-lib-dir'} ne '') {
push(@libAvifFlags, "--lib-search-dir", $options{'avif-lib-dir'});
}
push(@libAvifFlags, "--link", "avif");
}
system(
@{$options{'unicc_call'}},
"--math",
"--rtti",
"--exceptions",
"--bin-dir" , "bin",
"--lib-dir" , "lib",
"--obj-dir" , "obj",
"--inc-search-dir", "include",
@libAvifFlags,
"--link" , "emCore",
"--type" , "dynlib",
"--name" , "PlAvif",
"src/PlAvif.cpp"
)==0 or return 0;
return 1;
}


@ -1,190 +0,0 @@
#include <emCore/emFpPlugin.h>
#include <emCore/emImageFile.h>
#include "avif/avif.h"
class PlAvifImageFileModel : public emImageFileModel
{
public:
static emRef<PlAvifImageFileModel> Acquire(
emContext & context, const emString & name, bool common=true
);
protected:
PlAvifImageFileModel(emContext & context, const emString & name);
virtual ~PlAvifImageFileModel();
virtual void TryStartLoading();
virtual bool TryContinueLoading();
virtual void QuitLoading();
virtual void TryStartSaving();
virtual bool TryContinueSaving();
virtual void QuitSaving();
virtual emUInt64 CalcMemoryNeed();
virtual double CalcFileProgress();
private:
struct LoadingState;
LoadingState * L = NULL;
};
struct PlAvifImageFileModel::LoadingState {
avifRGBImage rgb;
avifDecoder * decoder;
};
emRef<PlAvifImageFileModel> PlAvifImageFileModel::Acquire(
emContext & context, const emString & name, bool common
)
{
EM_IMPL_ACQUIRE(PlAvifImageFileModel, context, name, common)
}
PlAvifImageFileModel::PlAvifImageFileModel(
emContext & context, const emString & name
)
: emImageFileModel(context, name)
{
}
PlAvifImageFileModel::~PlAvifImageFileModel()
{
PlAvifImageFileModel::QuitLoading();
PlAvifImageFileModel::QuitSaving();
}
void PlAvifImageFileModel::TryStartLoading()
{
avifResult result;
L = new LoadingState;
memset(L, 0, sizeof(LoadingState));
L->decoder = avifDecoderCreate();
if (L->decoder == NULL) {
throw emException("failed to create AVIF decoder");
}
result = avifDecoderSetIOFile(L->decoder, GetFilePath());
if (result != AVIF_RESULT_OK) {
throw emException("%s", avifResultToString(result));
}
result = avifDecoderParse(L->decoder);
if (result != AVIF_RESULT_OK) {
throw emException("%s", avifResultToString(result));
}
FileFormatInfo = emString::Format(
"AVIF %s %ubpc",
avifPixelFormatToString(L->decoder->image->yuvFormat),
L->decoder->image->depth
);
Signal(ChangeSignal);
}
bool PlAvifImageFileModel::TryContinueLoading()
{
avifResult result;
if (!Image.GetHeight()) {
Image.Setup(
L->decoder->image->width,
L->decoder->image->height,
L->decoder->alphaPresent ? 4 : 3
);
}
result = avifDecoderNextImage(L->decoder);
if (result != AVIF_RESULT_OK) {
throw emException("%s", avifResultToString(result));
}
avifRGBImageSetDefaults(&L->rgb, L->decoder->image);
L->rgb.format = L->decoder->alphaPresent ?
AVIF_RGB_FORMAT_RGBA : AVIF_RGB_FORMAT_RGB;
L->rgb.pixels = Image.GetWritableMap();
L->rgb.width = Image.GetWidth();
L->rgb.height = Image.GetHeight();
L->rgb.depth = 8;
L->rgb.rowBytes = Image.GetWidth() * Image.GetChannelCount();
result = avifImageYUVToRGB(L->decoder->image, &L->rgb);
if (result != AVIF_RESULT_OK) {
throw emException("%s", avifResultToString(result));
}
Signal(ChangeSignal);
return true;
}
void PlAvifImageFileModel::QuitLoading()
{
if (L) {
if (L->decoder) avifDecoderDestroy(L->decoder);
delete L;
L = NULL;
}
}
void PlAvifImageFileModel::TryStartSaving()
{
throw emException("PlAvifImageFileModel: Saving not implemented.");
}
bool PlAvifImageFileModel::TryContinueSaving()
{
return false;
}
void PlAvifImageFileModel::QuitSaving()
{
}
emUInt64 PlAvifImageFileModel::CalcMemoryNeed()
{
return
(emUInt64)
L->decoder->image->width *
L->decoder->image->height *
(L->decoder->alphaPresent ? 4 : 3);
}
double PlAvifImageFileModel::CalcFileProgress()
{
return 0.0;
}
extern "C" {
emPanel * PlAvifFpPluginFunc(
emPanel::ParentArg parent, const emString & name,
const emString & path, emFpPlugin * plugin,
emString * errorBuf
)
{
if (plugin->Properties.GetCount()) {
*errorBuf="PlAvifFpPlugin: No properties allowed.";
return NULL;
}
return new emImageFilePanel(
parent, name,
PlAvifImageFileModel::Acquire(
parent.GetRootContext(), path
)
);
}
}


@ -1,17 +0,0 @@
{ depot, pkgs, ... }:
let
em = depot.tools.eaglemode;
emSrc = with pkgs; srcOnly eaglemode;
in
em.buildPlugin {
name = "example";
version = "canon";
src = pkgs.runCommand "em-plugin-example-src" { } ''
set -ux
cp -r ${emSrc}/doc/examples/CppApiExamples/PluginExample $out
'';
target = "PlEx";
}


@ -1,12 +0,0 @@
{ depot, pkgs, ... }:
let
em = depot.tools.eaglemode;
emSrc = pkgs.srcOnly pkgs.eaglemode;
in
em.buildPlugin {
name = "qoi";
version = "canon";
src = ./.;
target = "PlQoi";
}


@ -1,6 +0,0 @@
#%rec:emFpPlugin%#
FileTypes = { ".qoi" }
Priority = 1.0
Library = "PlQoi"
Function = "PlQoiFpPluginFunc"


@ -1,47 +0,0 @@
package PlQoi;
use strict;
use warnings;
sub GetDependencies
{
return ('emCore');
}
sub IsEssential
{
return 0;
}
sub GetFileHandlingRules
{
return ();
}
sub GetExtraBuildOptions
{
return ();
}
sub Build
{
shift;
my %options=@_;
system(
@{$options{'unicc_call'}},
"--math",
"--rtti",
"--exceptions",
"--bin-dir" , "bin",
"--lib-dir" , "lib",
"--obj-dir" , "obj",
"--inc-search-dir", "include",
"--link" , "emCore",
"--type" , "dynlib",
"--name" , "PlQoi",
"src/PlQoi.cpp"
)==0 or return 0;
return 1;
}


@ -1,273 +0,0 @@
#include <emCore/emFpPlugin.h>
#include <emCore/emImageFile.h>
/*
QOI Utilities
Copyright (c) 2021, Dominic Szablewski - https://phoboslab.org
SPDX-License-Identifier: MIT
*/
#define QOI_OP_INDEX 0x00 /* 00xxxxxx */
#define QOI_OP_DIFF 0x40 /* 01xxxxxx */
#define QOI_OP_LUMA 0x80 /* 10xxxxxx */
#define QOI_OP_RUN 0xc0 /* 11xxxxxx */
#define QOI_OP_RGB 0xfe /* 11111110 */
#define QOI_OP_RGBA 0xff /* 11111111 */
#define QOI_MASK_2 0xc0 /* 11000000 */
#define QOI_COLOR_HASH(C) (C.GetRed()*3 + C.GetGreen()*5 + C.GetBlue()*7 + C.GetAlpha()*11)
#define QOI_MAGIC \
(((unsigned int)'q') << 24 | ((unsigned int)'o') << 16 | \
((unsigned int)'i') << 8 | ((unsigned int)'f'))
#define QOI_HEADER_SIZE 14
static unsigned int qoi_read_32(const unsigned char *bytes, int *p) {
unsigned int a = bytes[(*p)++];
unsigned int b = bytes[(*p)++];
unsigned int c = bytes[(*p)++];
unsigned int d = bytes[(*p)++];
return a << 24 | b << 16 | c << 8 | d;
}
class PlQoiImageFileModel : public emImageFileModel
{
public:
static emRef<PlQoiImageFileModel> Acquire(
emContext & context, const emString & name, bool common=true
);
protected:
PlQoiImageFileModel(emContext & context, const emString & name);
virtual ~PlQoiImageFileModel();
virtual void TryStartLoading();
virtual bool TryContinueLoading();
virtual void QuitLoading();
virtual void TryStartSaving();
virtual bool TryContinueSaving();
virtual void QuitSaving();
virtual emUInt64 CalcMemoryNeed();
virtual double CalcFileProgress();
private:
struct LoadingState;
LoadingState * L = NULL;
};
struct PlQoiImageFileModel::LoadingState {
FILE * file;
unsigned int width, height, channels;
size_t file_len;
};
emRef<PlQoiImageFileModel> PlQoiImageFileModel::Acquire(
emContext & context, const emString & name, bool common
)
{
EM_IMPL_ACQUIRE(PlQoiImageFileModel, context, name, common)
}
PlQoiImageFileModel::PlQoiImageFileModel(
emContext & context, const emString & name
)
: emImageFileModel(context, name)
{
}
PlQoiImageFileModel::~PlQoiImageFileModel()
{
PlQoiImageFileModel::QuitLoading();
PlQoiImageFileModel::QuitSaving();
}
void PlQoiImageFileModel::TryStartLoading()
{
unsigned char header[QOI_HEADER_SIZE];
unsigned int header_magic, colorspace;
int pos = 0;
L = new LoadingState;
memset(L, 0, sizeof(LoadingState));
L->file = fopen(GetFilePath(),"rb");
if (!L->file) throw emException("%s",emGetErrorText(errno).Get());
if (fread(header, 1, sizeof(header), L->file) != sizeof(header)) {
if (ferror(L->file)) {
throw emException("%s",emGetErrorText(errno).Get());
}
else {
throw emException("QOI header not found");
}
}
header_magic = qoi_read_32(header, &pos);
L->width = qoi_read_32(header, &pos);
L->height = qoi_read_32(header, &pos);
L->channels = header[pos++];
colorspace = header[pos++];
if (
L->width == 0 || L->height == 0 ||
L->channels < 3 || L->channels > 4 ||
colorspace > 1 ||
header_magic != QOI_MAGIC
) {
throw emException("QOI header not valid");
}
fseek(L->file, 0, SEEK_END);
L->file_len = ftell(L->file);
if (L->file_len <= QOI_HEADER_SIZE || fseek(L->file, 0, SEEK_SET) != 0) {
throw emException("QOI data incomplete");
}
FileFormatInfo = "QOI ";
FileFormatInfo += (
colorspace ? "all channels linear" : "sRGB with linear alpha"
);
Signal(ChangeSignal);
}
bool PlQoiImageFileModel::TryContinueLoading()
{
emArray<unsigned char> data;
emColor index[64];
emColor px { 0, 0, 0, 255 };
int pos = QOI_HEADER_SIZE;
int run = 0;
if (!Image.GetHeight()) {
Image.Setup(L->width, L->height, L->channels);
}
data.SetCount(L->file_len);
if (fread(data.GetWritable(), 1, L->file_len, L->file) < L->file_len) {
if (ferror(L->file)) {
throw emException("%s",emGetErrorText(errno).Get());
}
else {
throw emException("QOI data incomplete");
}
}
memset(index, 0, sizeof(index));
for (int px_y = 0; px_y < L->height; px_y++) {
for (int px_x = 0; px_x < L->width; px_x++) {
if (run > 0) {
run--;
} else if (pos < data.GetCount()) {
int b1 = data.Get(pos++);
if (b1 == QOI_OP_RGB) {
px.SetRed( data.Get(pos++));
px.SetGreen( data.Get(pos++));
px.SetBlue( data.Get(pos++));
} else if (b1 == QOI_OP_RGBA) {
px.SetRed( data.Get(pos++));
px.SetGreen( data.Get(pos++));
px.SetBlue( data.Get(pos++));
px.SetAlpha( data.Get(pos++));
} else if ((b1 & QOI_MASK_2) == QOI_OP_INDEX) {
px = index[b1];
} else if ((b1 & QOI_MASK_2) == QOI_OP_DIFF) {
px.SetRed(
px.GetRed() + ((b1 >> 4) & 0x03) - 2);
px.SetGreen(
px.GetGreen() + ((b1 >> 2) & 0x03) - 2);
px.SetBlue(
px.GetBlue() + ( b1 & 0x03) - 2);
} else if ((b1 & QOI_MASK_2) == QOI_OP_LUMA) {
int b2 = data.Get(pos++);
int vg = (b1 & 0x3f) - 32;
px.SetRed(
px.GetRed() + vg - 8 + ((b2 >> 4) & 0x0f));
px.SetGreen(
px.GetGreen() + vg);
px.SetBlue(
px.GetBlue() + vg - 8 + (b2 & 0x0f));
} else if ((b1 & QOI_MASK_2) == QOI_OP_RUN) {
run = (b1 & 0x3f);
}
index[QOI_COLOR_HASH(px) % 64] = px;
}
Image.SetPixel(px_x, px_y, px);
}
}
Signal(ChangeSignal);
return true;
}
void PlQoiImageFileModel::QuitLoading()
{
if (L) {
if (L->file) fclose(L->file);
delete L;
L = NULL;
}
}
void PlQoiImageFileModel::TryStartSaving()
{
throw emException("PlQoiImageFileModel: Saving not implemented.");
}
bool PlQoiImageFileModel::TryContinueSaving()
{
return false;
}
void PlQoiImageFileModel::QuitSaving()
{
}
emUInt64 PlQoiImageFileModel::CalcMemoryNeed()
{
return
(emUInt64)L->width * L->height * L->channels + L->file_len;
}
double PlQoiImageFileModel::CalcFileProgress()
{
return 0.0;
}
extern "C" {
emPanel * PlQoiFpPluginFunc(
emPanel::ParentArg parent, const emString & name,
const emString & path, emFpPlugin * plugin,
emString * errorBuf
)
{
if (plugin->Properties.GetCount()) {
*errorBuf="PlQoiFpPlugin: No properties allowed.";
return NULL;
}
return new emImageFilePanel(
parent, name,
PlQoiImageFileModel::Acquire(
parent.GetRootContext(), path
)
);
}
}


@ -1,18 +0,0 @@
{ depot, pkgs, ... }:
let
em = depot.tools.eaglemode;
emSrc = with pkgs; srcOnly eaglemode;
in
(em.buildPlugin {
name = "yatracker";
version = "canon";
src = ./.;
target = "PlYaTracker";
}).overrideAttrs (_: {
postInstall = ''
mkdir -p $out/icons
${pkgs.imagemagick}/bin/convert $src/logo.webp $out/icons/yandex-tracker.tga
'';
})


@ -1,6 +0,0 @@
#%rec:emFpPlugin%#
FileTypes = { ".YaTracker" }
Priority = 1.0
Library = "PlYaTracker"
Function = "PlYaTrackerPluginFunc"

Binary file not shown.



@ -1,47 +0,0 @@
package PlYaTracker;
use strict;
use warnings;
sub GetDependencies
{
return ('emCore');
}
sub IsEssential
{
return 0;
}
sub GetFileHandlingRules
{
return ();
}
sub GetExtraBuildOptions
{
return ();
}
sub Build
{
shift;
my %options=@_;
system(
@{$options{'unicc_call'}},
"--math",
"--rtti",
"--exceptions",
"--bin-dir" , "bin",
"--lib-dir" , "lib",
"--obj-dir" , "obj",
"--inc-search-dir", "include",
"--link" , "emCore",
"--type" , "dynlib",
"--name" , "PlYaTracker",
"src/PlYaTracker/PlYaTracker.cpp"
)==0 or return 0;
return 1;
}


@ -1,58 +0,0 @@
#include <emCore/emFilePanel.h>
#include <emCore/emFpPlugin.h>
#include <emCore/emRecFileModel.h>
#include <emCore/emToolkit.h>
class PlYaTrackerConfig final : public emRecFileModel, public emStructRec {
public:
static emRef<PlYaTrackerConfig> Acquire(emContext& context,
const emString& name,
bool common = true);
virtual const char* GetFormatName() const;
emStringRec URL;
emStringRec Token;
protected:
PlYaTrackerConfig(emContext& context, const emString& name);
};
emRef<PlYaTrackerConfig> PlYaTrackerConfig::Acquire(emContext& context,
const emString& name,
bool common) {
EM_IMPL_ACQUIRE(PlYaTrackerConfig, context, name, common)
}
const char* PlYaTrackerConfig::GetFormatName() const { return "PlYaTracker"; }
PlYaTrackerConfig::PlYaTrackerConfig(emContext& context, const emString& name)
: emRecFileModel(context, name),
emStructRec(),
URL(this, "URL"),
Token(this, "Token") {
PostConstruct(*this);
}
class PlYaTrackerFilePanel : public emFilePanel {
public:
PlYaTrackerFilePanel(ParentArg parent, const emString& name,
emRef<PlYaTrackerConfig> config);
private:
emRef<PlYaTrackerConfig> Config;
};
PlYaTrackerFilePanel::PlYaTrackerFilePanel(ParentArg parent,
const emString& name,
emRef<PlYaTrackerConfig> config)
: emFilePanel(parent, name, config), Config(config) {}
extern "C" {
emPanel* PlYaTrackerPluginFunc(emPanel::ParentArg parent, const emString& name,
const emString& path, emFpPlugin* plugin,
emString* errorBuf) {
return new PlYaTrackerFilePanel(
parent, name, PlYaTrackerConfig::Acquire(parent.GetRootContext(), path));
}
}


@ -1,156 +0,0 @@
// Eagle Mode configuration wrapper that recreates the required directory
// structure for Eagle Mode based on the output of depot.tools.eaglemode.etcDir
//
// This will replace *all* symlinks in the Eagle Mode configuration directory,
// but it will not touch actual files. Missing folders will be created.
package main
import (
"flag"
"fmt"
"io/fs"
"log"
"os"
"os/user"
"path"
"path/filepath"
"strings"
)
func configDir() (string, error) {
v := os.Getenv("EM_USER_CONFIG_DIR")
if v != "" {
return v, nil
}
usr, err := user.Current()
if err != nil {
return "", fmt.Errorf("failed to get current user: %w", err)
}
return path.Join(usr.HomeDir, ".eaglemode"), nil
}
// cleanupConfig removes *all* existing symlinks in the configuration which do
// not point into the right Nix store path.
func cleanupConfig(conf string, dir string) (map[string]bool, error) {
// In case of first launch, we might have to create the directory.
_ = os.MkdirAll(dir, 0755)
c := 0
currentFiles := map[string]bool{}
walker := func(p string, d fs.DirEntry, e error) error {
if e != nil {
return fmt.Errorf("could not walk %s in config directory: %w", p, e)
}
if d.Type()&fs.ModeSymlink != 0 {
target, err := os.Readlink(p)
if err != nil {
return fmt.Errorf("could not read link for %s: %w", p, err)
}
if !strings.HasPrefix(target, conf) {
err = os.Remove(p)
c++
if err != nil {
return fmt.Errorf("could not remove stale link %q: %w", p, err)
}
log.Printf("removed stale symlink %q", p)
} else {
currentFiles[p] = false
}
}
if d.Type().IsRegular() {
currentFiles[p] = true
}
return nil
}
err := filepath.WalkDir(dir, walker)
if err != nil {
return nil, err
}
if c > 0 {
log.Printf("removed %v stale symlinks", c)
}
return currentFiles, nil
}
// linkConfig traverses the given Eagle Mode configuration and links everything
// to the expected location in the user's configuration directory.
//
// If the user placed actual files in the configuration directory at paths that
// would be overwritten, they will not be touched.
func linkConfig(conf string, dir string, existing map[string]bool) error {
walker := func(p string, d fs.DirEntry, e error) error {
if e != nil {
return fmt.Errorf("could not walk %s in config directory: %w", p, e)
}
target := path.Join(dir, strings.TrimPrefix(p, conf))
if d.Type().IsDir() {
err := os.MkdirAll(target, 0755)
if err != nil {
return fmt.Errorf("could not create directory %q: %w", target, err)
}
return nil
}
if shadow, exists := existing[target]; exists {
if shadow {
log.Printf("WARN: file %q already exists and shadows a file from configuration", target)
}
return nil
}
err := os.Symlink(p, target)
if err != nil {
return fmt.Errorf("failed to link %q: %w", target, err)
}
return nil
}
return filepath.WalkDir(conf, walker)
}
func main() {
emConfig := flag.String("em-config", "", "path to em-config dir")
flag.Parse()
log.Println("verifying current Eagle Mode configuration")
if *emConfig == "" {
log.Fatalf("Eagle Mode configuration must be given")
}
if !strings.HasPrefix(*emConfig, "/nix/store/") {
log.Fatalf("Eagle Mode configuration must be in Nix store")
}
dir, err := configDir()
if err != nil {
log.Fatalf("could not determine Eagle Mode config dir: %v", err)
}
currentFiles, err := cleanupConfig(*emConfig, dir)
if err != nil {
log.Fatalf("failed to remove stale symlinks: %v", err)
}
err = linkConfig(*emConfig, dir, currentFiles)
if err != nil {
log.Fatalf("failed to link new configuration: %v", err)
}
log.Println("Eagle Mode configuration updated")
}


@ -1,6 +0,0 @@
# Users with approval powers for code that requires FSF copyright
# assignment. Users added here should have FSF paperwork on file, and
# should - if changes to a covered project are made - verify that the
# committers also have done the paperwork.
tazjin


@ -1,38 +0,0 @@
# Builder for depot-internal Emacs packages. Packages built using this
# builder are added into the Emacs packages fixpoint under
# `emacsPackages.tvlPackages`, which in turn makes it possible to use
# them with special Emacs features like native compilation.
#
# Arguments passed to the builder are the same as
# emacsPackages.trivialBuild, except:
#
# * packageRequires is not used
#
# * externalRequires takes a selection function for packages from
# emacsPackages
#
# * internalRequires takes other depot packages
{ pkgs, ... }:
buildArgs:
pkgs.callPackage
({ emacsPackages }:
let
# Select external dependencies from the emacsPackages set
externalDeps = (buildArgs.externalRequires or (_: [ ])) emacsPackages;
# Override emacsPackages for depot-internal packages
internalDeps = map (p: p.override { inherit emacsPackages; })
(buildArgs.internalRequires or [ ]);
trivialBuildArgs = builtins.removeAttrs buildArgs [
"externalRequires"
"internalRequires"
] // {
packageRequires = externalDeps ++ internalDeps;
};
in
emacsPackages.trivialBuild trivialBuildArgs)
{ }


@ -1,60 +0,0 @@
;;; defzone.el --- Generate zone files from Elisp -*- lexical-binding: t; -*-
(require 'dash)
(require 'dash-functional)
(require 's)
(defun record-to-record (zone record &optional subdomain)
"Evaluate a record definition and turn it into a zone file
record in ZONE, optionally prefixed with SUBDOMAIN."
(cl-labels ((plist->alist (plist)
(when plist
(cons
(cons (car plist) (cadr plist))
(plist->alist (cddr plist))))))
(let ((name (if subdomain (s-join "." (list subdomain zone)) zone)))
(pcase record
;; SOA RDATA (RFC 1035; 3.3.13)
((and `(SOA . (,ttl . ,keys))
(let (map (:mname mname) (:rname rname) (:serial serial)
(:refresh refresh) (:retry retry) (:expire expire)
(:minimum min))
(plist->alist keys)))
(if-let ((missing (-filter #'null (list mname rname serial
refresh retry expire min))))
(error "Missing fields in SOA record: %s" missing)
(format "%s %s IN SOA %s %s %s %s %s %s %s"
name ttl mname rname serial refresh retry expire min)))
(`(NS . (,ttl . ,targets))
(->> targets
(-map (lambda (target) (format "%s %s IN NS %s" name ttl target)))
(s-join "\n")))
(`(MX . (,ttl . ,pairs))
(->> pairs
(-map (-lambda ((preference . exchange))
(format "%s %s IN MX %s %s" name ttl preference exchange)))
(s-join "\n")))
(`(TXT ,ttl ,text) (format "%s %s IN TXT %s" name ttl (prin1-to-string text)))
(`(A . (,ttl . ,ips))
(->> ips
(-map (lambda (ip) (format "%s %s IN A %s" name ttl ip)))
(s-join "\n")))
(`(CNAME ,ttl ,target) (format "%s %s IN CNAME %s" name ttl target))
((and `(,sub . ,records)
(guard (stringp sub)))
(s-join "\n" (-map (lambda (r) (record-to-record zone r sub)) records)))
(_ (error "Invalid record definition: %s" record))))))
(defmacro defzone (fqdn &rest records)
"Generate zone file for the zone at FQDN from a simple DSL."
(declare (indent defun))
`(s-join "\n" (-map (lambda (r) (record-to-record ,fqdn r)) (quote ,records))))


@ -1,45 +0,0 @@
;;; example.el - usage example for defzone macro
(defzone "tazj.in."
(SOA 21600
:mname "ns-cloud-a1.googledomains.com."
:rname "cloud-dns-hostmaster.google.com."
:serial 123
:refresh 21600
:retry 3600
:expire 1209600
:minimum 300)
(NS 21600
"ns-cloud-a1.googledomains.com."
"ns-cloud-a2.googledomains.com."
"ns-cloud-a3.googledomains.com."
"ns-cloud-a4.googledomains.com.")
(MX 300
(1 . "aspmx.l.google.com.")
(5 . "alt1.aspmx.l.google.com.")
(5 . "alt2.aspmx.l.google.com.")
(10 . "alt3.aspmx.l.google.com.")
(10 . "alt4.aspmx.l.google.com."))
(TXT 3600 "google-site-verification=d3_MI1OwD6q2OT42Vvh0I9w2u3Q5KFBu-PieNUE1Fig")
(A 300 "34.98.120.189")
;; Nested record sets are indicated by a list that starts with a
;; string (this is just joined, so you can nest multiple levels at
;; once)
("blog"
;; Blog "storage engine" is in a separate DNS zone
(NS 21600
"ns-cloud-c1.googledomains.com."
"ns-cloud-c2.googledomains.com."
"ns-cloud-c3.googledomains.com."
"ns-cloud-c4.googledomains.com."))
("git"
(A 300 "34.98.120.189")
(TXT 300 "<3 edef"))
("files" (CNAME 300 "c.storage.googleapis.com.")))


@ -1,7 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage {
pname = "dottime";
version = "1.0";
src = ./dottime.el;
}


@ -1,81 +0,0 @@
;;; dottime.el --- use dottime in the modeline
;;
;; Copyright (C) 2019 Google Inc.
;;
;; Author: Vincent Ambo <tazjin@google.com>
;; Version: 1.0
;; Package-Requires: (cl-lib)
;;
;;; Commentary:
;;
;; This package changes the display of time in the modeline to use
;; dottime (see https://dotti.me/) instead of the standard time
;; display.
;;
;; Modeline dottime display is enabled by calling
;; `dottime-display-mode' and dottime can be used in Lisp code via
;; `dottime-format'.
(require 'cl-lib)
(require 'time)
(defun dottime--format-string (&optional offset prefix)
"Creates the dottime format string for `format-time-string'
based on the local timezone."
(let* ((offset-sec (or offset (car (current-time-zone))))
(offset-hours (/ offset-sec 60 60))
(base (concat prefix "%m-%dT%H·%M")))
(if (/= offset-hours 0)
(concat base (format "%0+3d" offset-hours))
base)))
(defun dottime--display-time-update-advice (orig)
"Function used as advice to `display-time-update' with a
rebound definition of `format-time-string' that renders all
timestamps as dottime."
(cl-letf* ((format-orig (symbol-function 'format-time-string))
((symbol-function 'format-time-string)
(lambda (&rest _)
(funcall format-orig (dottime--format-string) nil t))))
(funcall orig)))
(defun dottime-format (&optional time offset prefix)
"Format the given TIME in dottime at OFFSET. If TIME is nil,
the current time will be used. PREFIX is prefixed to the format
string verbatim.
OFFSET can be an integer representing an offset in seconds, or
the argument can be elided in which case the system time zone
is used."
(format-time-string (dottime--format-string offset prefix) time t))
(defun dottime-display-mode (arg)
"Enable time display as dottime. Disables dottime if called
with prefix 0 or nil."
(interactive "p")
(if (or (eq arg 0) (eq arg nil))
(advice-remove 'display-time-update #'dottime--display-time-update-advice)
(advice-add 'display-time-update :around #'dottime--display-time-update-advice))
(display-time-update)
;; Amend the time display in telega.el to use dottime.
;;
;; This will never display offsets in the chat window, as those are
;; always visible in the modeline anyways.
(when (featurep 'telega)
(defun telega-ins--dottime-advice (orig timestamp)
(let* ((dtime (decode-time timestamp t))
(current-ts (time-to-seconds (current-time)))
(ctime (decode-time current-ts))
(today00 (telega--time-at00 current-ts ctime)))
(if (> timestamp today00)
(telega-ins (format "%02d·%02d" (nth 2 dtime) (nth 1 dtime)))
(funcall orig timestamp))))
(advice-add 'telega-ins--date :around #'telega-ins--dottime-advice)))
(provide 'dottime)
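
For reference, a minimal `init.el` sketch (not part of the depot) showing how the package is typically enabled; only standard Emacs functions are assumed:

```lisp
;; Illustrative sketch: render the standard modeline clock as dottime.
(require 'dottime)
(display-time-mode 1)     ;; enable the built-in modeline clock
(dottime-display-mode 1)  ;; re-render it through the advice defined above

;; dottime-format can also be called directly, e.g. at UTC+3 with a prefix:
(dottime-format (current-time) (* 3 60 60) "now: ")
```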


@ -1,7 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage rec {
pname = "niri";
version = "1.0";
src = ./niri.el;
}


@ -1,181 +0,0 @@
;;; niri.el --- seamless niri/emacs integration. -*- lexical-binding: t; -*-
;;
;; Copyright (C) 2024 The TVL Contributors
;;
;; Author: Vincent Ambo <tazjin@tvl.su>
;; Version: 1.0
;; Package-Requires: ((emacs "27.1"))
;;
;;; Commentary:
;;
;; After having used EXWM for many years (7 or so?) it's become second nature
;; that there is no difference between windows and Emacs buffers. This means
;; that from any Emacs buffer (or, in the case of EXWM, from any X window) it's
;; possible to switch to any of the others.
;;
;; This implements similar logic for Emacs running in Niri, consisting of two
;; sides of the integration:
;;
;; # In Emacs
;;
;; Inside of Emacs, when switching buffers, populate the buffer-switching menu
;; additionally with all open Niri windows. Selecting a Niri window moves the
;; screen to that window.
;;
;; # Outside of Emacs
;;
;; Provides an interface for the same core functionality that can be used from
;; shell scripts, and bound to selectors like dmenu or rofi.
;;
;; # Switching to Emacs buffers
;;
;; Some special logic exists for handling the case of switching to an Emacs
;; buffer. There are several conditions that we can be in, that each have a
;; predictable result:
;;
;; In a non-Emacs window, selecting an Emacs buffer will either switch to an
;; Emacs frame already displaying this buffer, or launch a new frame for it.
;;
;; Inside of Emacs, if *another* frame is already displaying the buffer, switch
;; to it. Otherwise the behaviour is the same as standard buffer switching.
(require 'seq)
(require 'map)
(defun niri-list-windows ()
"List all currently open Niri windows."
(json-parse-string
(shell-command-to-string "niri msg -j windows")
:false-object nil))
(defun niri--window-is-emacs (window)
(equal (map-elt window "app_id") "emacs"))
(defun niri--list-selectables ()
"Lists all currently selectable things in a format that can work
with completing-read. Selectable means all open Niri
windows (except Emacs windows) and all Emacs buffers.
Emacs windows are returned separately, as they are required for
frame navigation."
(let* (;; all niri windows, with emacs/non-emacs windows split up
(all-windows (niri-list-windows))
(windows (seq-filter (lambda (w) (not (niri--window-is-emacs w)))
all-windows))
(emacs-windows (seq-filter #'niri--window-is-emacs all-windows))
;; all non-hidden buffers
(buffers (seq-filter (lambda (b) (not (string-prefix-p " " (buffer-name b))))
(buffer-list)))
(selectables (make-hash-table :test 'equal :size (+ (length windows)
(length buffers)))))
(seq-do (lambda (window)
(map-put! selectables (map-elt window "title")
(cons :niri window)))
windows)
(seq-do (lambda (buf)
(map-put! selectables (buffer-name buf)
(cons :emacs buf)))
buffers)
(cons selectables emacs-windows)))
(defun niri--focus-window (window)
(shell-command (format "niri msg action focus-window --id %d"
(map-elt window "id"))))
(defun niri--target-action-internal (target)
"Focus the given TARGET (a Niri window or Emacs buffer). This is
used when called from inside of Emacs. It will NOT correctly
switch Niri windows when called from outside of Emacs."
(pcase (car target)
(:emacs (pop-to-buffer (cdr target) '((display-buffer-reuse-window
display-buffer-same-window)
(reusable-frames . 0))))
(:niri (niri--focus-window (cdr target)))))
(defun niri-go-anywhere ()
"Interactively select and switch to an open Niri window, or an
Emacs buffer."
(interactive)
(let* ((selectables (car (niri--list-selectables)))
;; Annotate buffers that display remote files. I frequently
;; want to see it, because I might have identically named
;; files open locally and remotely at the same time, and it
;; helps with differentiating them.
(completion-extra-properties
'(:annotation-function
(lambda (name)
(let ((elt (map-elt selectables name)))
(pcase (car elt)
(:emacs
(if-let* ((file (buffer-file-name (cdr elt)))
(remote (file-remote-p file)))
(format " [%s]" remote)))
(:niri (format " [%s]" (map-elt (cdr elt) "app_id"))))))))
(target-key (completing-read "Switch to: " (map-keys selectables)))
(target (map-elt selectables target-key)))
(if target
(niri--target-action-internal target)
(switch-to-buffer target-key nil t))))
(defun niri--target-action-external (target frames)
"Focus the given TARGET (a Niri window or Emacs buffer). This
always behaves correctly, but does more work than the -internal
variant. It should only be called when invoking the switcher from
outside of Emacs (i.e. through `emacsclient').
FRAMES is the exact list of Emacs frames that existed at the time
the switcher was invoked."
(pcase (car target)
(:niri (niri--focus-window (cdr target)))
;; When switching to an Emacs buffer from outside of Emacs, we run into the
;; additional complication that Wayland does not allow arbitrary
;; applications to change the focused window. Calling e.g.
;; `select-frame-set-input-focus' has no effect on Wayland when not called
;; from within a focused Emacs frame.
;;
;; However, due to concurrency, frames may change between the moment when we
;; start the switcher (and potentially wait for user input), and when the
;; final selection happens.
;;
;; To get around this we try to match the target Emacs frame (if present) to
;; a Niri window, switch to it optimistically, and *then* execute the final
;; buffer switching command.
(:emacs
(if-let ((window (get-buffer-window (cdr target) t))
(frame (window-frame window))
(frame-name (frame-parameter frame 'name))
(niri-window (seq-find (lambda (w)
(equal (map-elt w "title") frame-name))
frames)))
;; Target frame found and could be matched to a Niri window: Go there!
(progn (select-window window) ;; ensure the right window in the frame has focus
(niri--focus-window niri-window)
(message "Switched to existing window for \"%s\"" (buffer-name (cdr target))))
;; Target frame not found; is Emacs the focused program?
(if (seq-find (lambda (w) (map-elt w "is_focused")) frames)
(switch-to-buffer (cdr target))
;; if not, just make a new frame
(display-buffer (cdr target) '(display-buffer-pop-up-frame)))))))
(defun niri-go-anywhere-external ()
"Use a dmenu-compatible launcher like `fuzzel' to achieve the same
effect as `niri-go-anywhere', but from outside of Emacs through
Emacsclient."
(interactive) ;; TODO no?
(let* ((all (niri--list-selectables))
(selectables (car all))
(target (with-temp-buffer
(dolist (key (map-keys selectables))
(insert key "\n"))
(call-process-region nil nil "fuzzel" t t nil "-d")
(string-trim (buffer-string)))))
(when-let ((selectable (map-elt selectables target)))
(niri--target-action-external selectable (cdr all)))))
(provide 'niri)
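
An illustrative wiring sketch, assuming Emacs runs under niri with the server enabled; the key binding and the external invocation are assumptions, not depot configuration:

```lisp
;; Inside Emacs: use the combined Niri-window/buffer switcher for switching.
(require 'niri)
(global-set-key (kbd "C-x b") #'niri-go-anywhere)

;; Outside Emacs: a niri keybinding could invoke the fuzzel-based variant
;; through emacsclient, roughly:
;;   emacsclient -e "(niri-go-anywhere-external)"
```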


@ -1,8 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage {
pname = "nix-util";
version = "1.0";
src = ./nix-util.el;
externalRequires = epkgs: [ epkgs.s ];
}


@ -1,69 +0,0 @@
;;; nix-util.el --- Utilities for dealing with Nix code. -*- lexical-binding: t; -*-
;;
;; Copyright (C) 2019 Google Inc.
;; Copyright (C) 2022 The TVL Authors
;;
;; Author: Vincent Ambo <tazjin@google.com>
;; Version: 1.0
;; Package-Requires: (json map s)
;;
;;; Commentary:
;;
;; This package adds some functionality that I find useful when
;; working in Nix buffers or programs installed from Nix.
(require 'json)
(require 'map)
(require 's)
(defun nix/prefetch-github (owner repo) ; TODO(tazjin): support different branches
"Fetch the master branch of a GitHub repository and insert the
call to `fetchFromGitHub' at point."
(interactive "sOwner: \nsRepository: ")
(let* (;; Keep these vars around for output insertion
(point (point))
(buffer (current-buffer))
(name (concat "github-fetcher/" owner "/" repo))
(outbuf (format "*%s*" name))
(errbuf (get-buffer-create "*github-fetcher/errors*"))
(cleanup (lambda ()
(kill-buffer outbuf)
(kill-buffer errbuf)
(with-current-buffer buffer
(read-only-mode -1))))
(prefetch-handler
(lambda (_process event)
(unwind-protect
(pcase event
("finished\n"
(let* ((json-string (with-current-buffer outbuf
(buffer-string)))
(result (json-read-from-string json-string)))
(with-current-buffer buffer
(goto-char point)
(map-let (("rev" rev) ("sha256" sha256)) result
(read-only-mode -1)
(insert (format "fetchFromGitHub {
owner = \"%s\";
repo = \"%s\";
rev = \"%s\";
sha256 = \"%s\";
};" owner repo rev sha256))
(indent-region point (point))))))
(_ (with-current-buffer errbuf
(error "Failed to prefetch %s/%s: %s"
owner repo (buffer-string)))))
(funcall cleanup)))))
;; Fetching happens asynchronously, but we'd like to make sure the
;; point stays in place while that happens.
(read-only-mode)
(make-process :name name
:buffer outbuf
:command `("nix-prefetch-github" ,owner ,repo)
:stderr errbuf
:sentinel prefetch-handler)))
(provide 'nix-util)
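
A short usage sketch; it assumes the `nix-prefetch-github` binary is on PATH, and the repository named here is only an example:

```lisp
;; Interactively: M-x nix/prefetch-github, then enter owner and repository.
;; From Lisp, inserting a pinned fetchFromGitHub expression at point:
(require 'nix-util)
(nix/prefetch-github "NixOS" "nixpkgs")
```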


@ -1 +0,0 @@
tazjin


@ -1,17 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage rec {
pname = "notable";
version = "1.0";
src = ./notable.el;
externalRequires = epkgs: with epkgs; [
f
ht
s
];
internalRequires = [
depot.tools.emacs-pkgs.dottime
];
}


@ -1,251 +0,0 @@
;;; notable.el --- a simple note-taking app -*- lexical-binding: t; -*-
;;
;; Copyright (C) 2020 The TVL Contributors
;;
;; Author: Vincent Ambo <mail@tazj.in>
;; Version: 1.0
;; Package-Requires: (cl-lib dash f rx s subr-x)
;;
;;; Commentary:
;;
;; This package provides a simple note-taking application which can be
;; invoked from anywhere in Emacs, with several interactive
;; note-taking functions included.
;;
;; As is tradition for my software, the idea here is to reduce
;; friction which I see even with tools like `org-capture', because
;; `org-mode' does a ton of things I don't care about.
;;
;; Notable stores its notes in simple JSON files in the folder
;; specified by `notable-note-dir'.
(require 'cl-lib)
(require 'dottime)
(require 'f)
(require 'ht)
(require 'rx)
(require 's)
(require 'subr-x)
;; User-facing customisation options
(defgroup notable nil
"Simple note-taking application."
:group 'applications)
;; TODO(tazjin): Use whatever the XDG state dir thing is for these by
;; default.
(defcustom notable-note-dir (expand-file-name "~/.notable/")
"File path to the directory containing notable's notes."
:type 'string
:group 'notable)
;; Package internal definitions
(cl-defstruct (notable--note (:constructor notable--make-note))
"Structure containing the fields of a single notable note."
time ;; UNIX timestamp at which the note was taken
content ;; Textual content of the note
)
(defvar notable--note-lock (make-mutex "notable-notes")
"Exclusive lock for note operations with shared state.")
(defvar notable--note-regexp
(rx "note-"
(group (one-or-more (any num)))
".json")
"Regular expression to match note file names.")
(defvar notable--next-note
(let ((next 0))
(dolist (file (f-entries notable-note-dir))
(when-let* ((match (string-match notable--note-regexp file))
(id (string-to-number
(match-string 1 file)))
(larger (> id next)))
(setq next id)))
(+ 1 next))
"Next ID to use for notes. Initial value is determined based on
the existing notes files.")
(defun notable--serialize-note (note)
"Serialise NOTE into JSON format."
(check-type note notable--note)
(json-serialize (ht ("time" (notable--note-time note))
("content" (notable--note-content note)))))
(defun notable--deserialize-note (json)
"Deserialise JSON into a notable note."
(check-type json string)
(let ((parsed (json-parse-string json)))
(unless (and (ht-contains? parsed "time")
(ht-contains-p parsed "content"))
(error "Missing required keys in note structure!"))
(notable--make-note :time (ht-get parsed "time")
:content (ht-get parsed "content"))))
(defun notable--next-id ()
"Return the next note ID and increment the counter."
(with-mutex notable--note-lock
(let ((id notable--next-note))
(setq notable--next-note (+ 1 id))
id)))
(defun notable--note-path (id)
(check-type id integer)
(f-join notable-note-dir (format "note-%d.json" id)))
(defun notable--archive-path (id)
(check-type id integer)
(f-join notable-note-dir (format "archive-%d.json" id)))
(defun notable--add-note (content)
"Add a note with CONTENT to the note store."
(let* ((id (notable--next-id))
(note (notable--make-note :time (time-convert nil 'integer)
:content content))
(path (notable--note-path id)))
(when (f-exists? path) (error "Note file '%s' already exists!" path))
(f-write-text (notable--serialize-note note) 'utf-8 path)
(message "Saved note %d" id)))
(defun notable--archive-note (id)
"Archive the note with ID."
(check-type id integer)
(unless (f-exists? (notable--note-path id))
(error "There is no note with ID %d." id))
(when (f-exists? (notable--archive-path id))
(error "Oh no, a note with ID %d has already been archived!" id))
(f-move (notable--note-path id) (notable--archive-path id))
(message "Archived note with ID %d." id))
(defun notable--list-note-ids ()
"List all note IDs (not contents) from `notable-note-dir'"
(cl-loop for file in (f-entries notable-note-dir)
with res = nil
if (string-match notable--note-regexp file)
do (push (string-to-number (match-string 1 file)) res)
finally return res))
(defun notable--get-note (id)
(let ((path (notable--note-path id)))
(unless (f-exists? path)
(error "No note with ID %s in note storage!" id))
(notable--deserialize-note (f-read-text path 'utf-8))))
;; Note view buffer implementation
(defvar-local notable--buffer-note nil "The note ID displayed by this buffer.")
(define-derived-mode notable-note-mode fundamental-mode "notable-note"
"Major mode displaying a single Notable note."
(set (make-local-variable 'scroll-preserve-screen-position) t)
(setq truncate-lines t)
(setq buffer-read-only t)
(setq buffer-undo-list t))
(setq notable-note-mode-map
(let ((map (make-sparse-keymap)))
(define-key map "q" 'kill-current-buffer)
map))
(defun notable--show-note (id)
"Display a single note in a separate buffer."
(check-type id integer)
(let ((note (notable--get-note id))
(buffer (get-buffer-create (format "*notable: %d*" id)))
(inhibit-read-only t))
(with-current-buffer buffer
(notable-note-mode)
(erase-buffer)
(setq notable--buffer-note id)
(setq header-line-format
(format "Note from %s"
(dottime-format
(seconds-to-time (notable--note-time note))))))
(switch-to-buffer buffer)
(goto-char (point-min))
(insert (notable--note-content note))))
(defun notable--show-note-at-point ()
(interactive)
(notable--show-note (get-text-property (point) 'notable-note-id)))
(defun notable--archive-note-at-point ()
(interactive)
(notable--archive-note (get-text-property (point) 'notable-note-id)))
;; Note list buffer implementation
(define-derived-mode notable-list-mode fundamental-mode "notable"
"Major mode displaying the Notable note list."
;; TODO(tazjin): `imenu' functions?
(set (make-local-variable 'scroll-preserve-screen-position) t)
(setq truncate-lines t)
(setq buffer-read-only t)
(setq buffer-undo-list t)
(hl-line-mode t))
(setq notable-list-mode-map
(let ((map (make-sparse-keymap)))
(define-key map "a" 'notable--archive-note-at-point)
(define-key map "q" 'kill-current-buffer)
(define-key map "g" 'notable-list-notes)
(define-key map (kbd "RET") 'notable--show-note-at-point)
map))
(defun notable--render-note (id note)
(check-type id integer)
(check-type note notable--note)
(let* ((start (point))
(date (dottime-format (seconds-to-time
(notable--note-time note))))
(first-line (truncate-string-to-width
(car (s-lines (notable--note-content note)))
;; Length of the window, minus the date prefix:
(- (window-width) (+ 2 (length date)))
nil nil 1)))
(insert (propertize (s-concat date " " first-line)
'notable-note-id id))
(insert "\n")))
(defun notable--render-notes (notes)
"Retrieve each note in NOTES by ID and insert its contents into
the list buffer.
For larger notes only the first line is displayed."
(dolist (id notes)
(notable--render-note id (notable--get-note id))))
;; User-facing functions
(defun notable-take-note (content)
"Interactively prompt the user for a note that should be stored
in Notable."
(interactive "sEnter note: ")
(check-type content string)
(notable--add-note content))
(defun notable-list-notes ()
"Open a buffer listing all active notes."
(interactive)
(let ((buffer (get-buffer-create "*notable*"))
(notes (notable--list-note-ids))
(inhibit-read-only t))
(with-current-buffer buffer
(notable-list-mode)
(erase-buffer)
(setq header-line-format "Notable notes"))
(switch-to-buffer buffer)
(goto-char (point-min))
(notable--render-notes notes)))
(provide 'notable)
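
A configuration sketch; the note directory and key bindings below are illustrative choices, not depot defaults:

```lisp
;; The note directory must exist before the package is loaded, because the
;; next note ID is computed from the files already present in it.
(setq notable-note-dir (expand-file-name "~/notes/notable/"))
(make-directory notable-note-dir t)
(require 'notable)
(global-set-key (kbd "C-c n") #'notable-take-note)
(global-set-key (kbd "C-c N") #'notable-list-notes)
```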


@ -1 +0,0 @@
tazjin


@ -1,76 +0,0 @@
<!-- SPDX-License-Identifier: MIT -->
passively
=========
Passively is an Emacs Lisp library for passively learning new
information in an Emacs instance.
Passively works by displaying a random piece of information to be
learned in the Emacs echo area whenever Emacs is idle for a set amount
of time.
It was designed to aid in language acquisition by passively displaying
new vocabulary to learn.
Passively is configured with a corpus of information (a hash table
mapping string keys to string values) and maintains a set of terms
that the user already learned in a file on disk.
## Configuration & usage
Configure passively like this:
```lisp
;; Configure the terms to learn. Each term should have a key and a
;; string value which is displayed.
(setq passively-learn-terms
(ht ("забыть" "забыть - to forget")
("действительно" "действительно - indeed, really")))
;; Configure a file in which passively should store its state
;; (defaults to $user-emacs-directory/passively.el)
(setq passively-store-state "/persist/tazjin/passively.el")
;; Configure after how many seconds of idle time passively should
;; display a new piece of information.
;; (defaults to 4 seconds)
(setq passively-show-after-idle-for 5)
;; Once this configuration has been set up, start passively:
(passively-enable)
;; Or, if it annoys you, disable it again:
(passively-disable)
```
These variables are registered with `customize` and may be customised
through its interface.
### Known terms
Passively exposes the interactive function
`passively-mark-last-as-known` which marks the previously displayed
term as known. This means that it will not be included in the random
selection anymore.
### Last term
Passively stores the key of the last known term in
`passively-last-displayed`.
## Installation
Inside of the TVL depot, you can install passively from
`pkgs.emacsPackages.tvlPackages.passively`. Outside of the depot, you
can clone passively like this:
git clone https://code.tvl.fyi/depot.git:/tools/emacs-pkgs/passively.git
Passively depends on `ht.el`.
Feel free to contribute patches by emailing them to `depot@tvl.su`.
## Use-cases
I'm using passively to learn Russian vocabulary. Once I've cleaned up
my configuration for that, my Russian term list will be linked here.


@ -1,8 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage {
pname = "passively";
version = "1.0";
src = ./passively.el;
externalRequires = (epkgs: with epkgs; [ ht ]);
}


@ -1,121 +0,0 @@
;;; passively.el --- Passively learn new information -*- lexical-binding: t; -*-
;;
;; SPDX-License-Identifier: MIT
;; Copyright (C) 2020 The TVL Contributors
;;
;; Author: Vincent Ambo <tazjin@tvl.su>
;; Version: 1.0
;; Package-Requires: (ht seq)
;; URL: https://code.tvl.fyi/about/tools/emacs-pkgs/passively/
;;
;; This file is not part of GNU Emacs.
(require 'ht)
(require 'seq)
;; Customisation options
(defgroup passively nil
"Customisation options for passively"
:group 'applications)
(defcustom passively-learn-terms nil
"Terms that passively should randomly display to the user. The
format of this variable is a hash table with a string key that
uniquely identifies the term, and a string value that is
displayed to the user.
For example, a possible value could be:
(ht (\"забыть\" \"забыть - to forget\")
(\"действительно\" \"действительно - indeed, really\")))
"
;; TODO(tazjin): No hash-table type in customization.el?
:type '(sexp)
:group 'passively)
(defcustom passively-store-state (format "%spassively.el" user-emacs-directory)
"File in which passively should store its state (e.g. known terms)"
:type '(file)
:group 'passively)
(defcustom passively-show-after-idle-for 4
"Number of seconds after Emacs goes idle that passively should
wait before displaying a term."
:type '(integer)
:group 'passively)
;; Implementation of state persistence
(defvar passively-last-displayed nil
"Key of the last displayed passively term.")
(defvar passively--known-terms (make-hash-table :test 'equal)
"Set of terms that are already known.")
(defun passively--persist-known-terms ()
"Persist the set of known passively terms to disk."
(with-temp-file passively-store-state
(insert (prin1-to-string (ht-keys passively--known-terms)))))
(defun passively--load-known-terms ()
"Load the set of known passively terms from disk."
(with-temp-buffer
(insert-file-contents passively-store-state)
(let ((keys (read (current-buffer))))
(setq passively--known-terms (make-hash-table :test 'equal))
(seq-do
(lambda (key) (ht-set passively--known-terms key t))
keys)))
(message "passively: loaded %d known words"
(seq-length (ht-keys passively--known-terms))))
(defun passively-mark-last-as-known ()
"Mark the last term that passively displayed as known. It will
not be displayed again."
(interactive)
(ht-set passively--known-terms passively-last-displayed t)
(passively--persist-known-terms)
(message "passively: Marked '%s' as known" passively-last-displayed))
;; Implementation of main display logic
(defvar passively--display-timer nil
"idle-timer used for displaying terms by passively")
(defun passively--random-term (timeout)
;; This is stupid, calculate set intersections instead.
(if (< 1000 timeout)
(error "It seems you already know all the terms?")
(seq-random-elt (ht-keys passively-learn-terms))))
(defun passively--display-random-term ()
(let* ((timeout 1)
(term (passively--random-term timeout)))
(while (ht-contains? passively--known-terms term)
(setq timeout (+ 1 timeout))
(setq term (passively--random-term timeout)))
(setq passively-last-displayed term)
(message (ht-get passively-learn-terms term))))
(defun passively-enable ()
"Enable automatic display of terms via passively."
(interactive)
(if passively--display-timer
(error "passively: Already running!")
(passively--load-known-terms)
(setq passively--display-timer
(run-with-idle-timer passively-show-after-idle-for t
#'passively--display-random-term))
(message "passively: Now running after %s seconds of idle time"
passively-show-after-idle-for)))
(defun passively-disable ()
"Turn off automatic display of terms via passively."
(interactive)
(unless passively--display-timer
(error "passively: Not running!"))
(cancel-timer passively--display-timer)
(setq passively--display-timer nil)
(message "passively: Now disabled"))
(provide 'passively)


@ -1,8 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage {
pname = "term-switcher";
version = "1.0";
src = ./term-switcher.el;
externalRequires = epkgs: with epkgs; [ dash ivy s vterm ];
}


@ -1,63 +0,0 @@
;;; term-switcher.el --- Easily switch between open vterms
;;
;; Copyright (C) 2019-2020 Google Inc.
;; Copyright (C) 2021-2023 The TVL Authors
;;
;; Author: Vincent Ambo <tazjin@tvl.su>
;; Version: 1.1
;; Package-Requires: (ivy s vterm)
;;
;;; Commentary:
;;
;; This package adds a function that lets users quickly switch between
;; different open vterms via ivy.
(require 'ivy)
(require 's)
(require 'seq)
(require 'vterm)
(defgroup term-switcher nil
"Customization options `term-switcher'.")
(defcustom term-switcher-buffer-prefix "vterm<"
"String prefix for vterm terminal buffers. For example, if you
set your titles to match `vterm<...>' a useful prefix might be
`vterm<'."
:type '(string)
:group 'term-switcher)
(defun ts/create-vterm ()
"Launch vterm, but don't open semi-broken vterms over TRAMP."
(if (file-remote-p default-directory)
(let ((default-directory "~"))
(vterm))
(vterm)))
(defun ts/open-or-create-vterm (buffer)
"Switch to the terminal in BUFFER, or create a new one if buffer is nil."
(if buffer
(switch-to-buffer buffer)
(ts/create-vterm)))
(defun ts/is-vterm-buffer (buffer)
"Determine whether BUFFER runs a vterm."
(equal 'vterm-mode (buffer-local-value 'major-mode buffer)))
(defun ts/switch-to-terminal ()
"Switch to an existing vterm buffer or create a new one."
(interactive)
(let ((terms (seq-map (lambda (b) (cons (buffer-name b) b))
(seq-filter #'ts/is-vterm-buffer (buffer-list)))))
(if terms
(ivy-read "Switch to vterm: "
(cons "New vterm" (seq-map #'car terms))
:caller 'ts/switch-to-terminal
:preselect (s-concat "^" term-switcher-buffer-prefix)
:require-match t
:action (lambda (match)
(ts/open-or-create-vterm (cdr (assoc match terms)))))
(ts/create-vterm))))
(provide 'term-switcher)
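
A usage sketch; the key binding is an example, not depot configuration:

```lisp
;; Jump to an existing vterm (or create one) with a single key.
(require 'term-switcher)
(global-set-key (kbd "<f9>") #'ts/switch-to-terminal)
```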


@ -1,2 +0,0 @@
set noparent
file:/tools/emacs-pkgs/FSF_OWNERS


@ -1,7 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage {
pname = "treecrumbs";
version = "1.0";
src = ./treecrumbs.el;
}


@ -1,202 +0,0 @@
;; treecrumbs.el --- Fast, tree-sitter based breadcrumbs -*- lexical-binding: t; -*-
;;
;; Copyright (C) Free Software Foundation, Inc.
;; SPDX-License-Identifier: GPL-3.0-or-later
;;
;; Author: Vincent Ambo <tazjin@tvl.su>
;; Created: 2024-03-08
;; Version: 1.0
;; Keywords: convenience
;; Package-Requires: ((emacs "29.1"))
;; URL: https://code.tvl.fyi/tree/tools/emacs-pkgs/treecrumbs
;;
;; This file is not (yet) part of GNU Emacs.
;;; Commentary:
;; This package provides a tree-sitter based implementation of "breadcrumbs",
;; that is indicators displaying where in the semantic structure of a document
;; the point is currently located.
;;
;; Imagine a large YAML-document where the names of the parent keys are far out
;; of view: Treecrumbs can quickly display the hierarchy of keys (e.g. `foo < []
;; < baz') and help figure out where point is.
;;
;; Treecrumbs only works if a tree-sitter parser for the target language is
;; available in the buffer, and the language is supported in the
;; `treecrumbs-languages'. Adding a new language is not difficult, and patches
;; for this are welcome.
;;
;; To activate treecrumbs, enable `treecrumbs-mode'. This buffer-local minor mode
;; adds the crumbs to the buffer's `header-line-format'. Alternatively, users
;; can also use the `treecrumbs-line-segment' either in their own header-line,
;; tab-line or mode-line configuration.
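;;
;; A minimal usage sketch (assuming Emacs 29 with the built-in
;; `yaml-ts-mode' and a YAML tree-sitter grammar installed):
;;
;;   (add-hook 'yaml-ts-mode-hook #'treecrumbs-mode)
;;
;; or, for manual placement, add `treecrumbs-line-segment' to your own
;; `header-line-format' or `mode-line-format'.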
;;; Code:
(require 'seq)
(require 'treesit)
(defvar treecrumbs-languages nil
"Describes the tree-sitter language grammars supported by
treecrumbs, and how the breadcrumbs for their node types are
generated.
Alist of symbols representing tree-sitter languages (e.g. `yaml')
to another alist (the \"node type list\") describing how
different node types should be displayed in the crumbs.
See `define-treecrumbs-language' for more details on how to add a
language.")
(defmacro define-treecrumbs-language (lang &rest clauses)
"Defines a new language for use in treecrumbs. LANG should be a
symbol representing the language as understood by treesit (e.g.
`yaml').
Each of CLAUSES is a cons cell mapping the name of a tree
node (in string format) to one of either:
1. a static string, which will become the breadcrumb verbatim
2. a tree-sitter query (in S-expression syntax) which must capture
exactly one argument named `@key' that will become the
breadcrumb (e.g. the name of a function, the key in a map, ...)
Treecrumbs will only consider node types that are mentioned in
CLAUSES. All other nodes are ignored when constructing the
crumbs.
The defined languages are stored in `treecrumbs-languages'."
(declare (indent 1))
(let ((compiled
(seq-map (lambda (clause)
(if (stringp (cdr clause))
`(cons ,(car clause) ,(cdr clause))
`(cons ,(car clause)
(treesit-query-compile ',lang ',(cdr clause)))))
clauses)))
`(setf (alist-get ',lang treecrumbs-languages nil nil #'equal) (list ,@compiled))))
(define-treecrumbs-language yaml
;; In YAML documents, crumbs are generated from the keys of maps, and from
;; elements of arrays. "block"-nodes are standard YAML syntax, "flow"-nodes
;; are inline JSON-ish syntax.
("block_mapping_pair" . ((block_mapping_pair key: (_) @key)))
("block_sequence" . "[]")
;; TODO: Why can this query not match directly on (flow_pair)?
("flow_pair" . ((_) key: (_) @key))
("flow_sequence" . "[]"))
(define-treecrumbs-language json
;; In JSON documents, crumbs are generated from key names and array fields.
("pair" . ((pair key: (string (string_content) @key))))
("array" . "[]"))
(define-treecrumbs-language toml
;; TOML has sections, key names and arrays. Sections are the only
;; relevant difference to YAML. Nested keys are not parsed, and just
;; displayed as-is.
("table" . ((table (_) @key)) )
;; TODO: query cannot match on pair in inline_table, hence matching
;; directly on keys
("pair" . ([(dotted_key)
(quoted_key)
(bare_key)]))
("array" . "[]"))
(define-treecrumbs-language cpp
;; In C++ files, crumbs are generated from namespaces and
;; identifier declarations.
("namespace_definition" . ([(namespace_definition
name: (namespace_identifier) @key)
(namespace_definition
"namespace" @key
!name)]))
("function_definition" . ((function_definition
declarator:
(function_declarator
declarator: (_) @key))))
("class_specifier" . ((class_specifier
name: (type_identifier) @key)))
("struct_specifier" . ((struct_specifier
name: (type_identifier) @key)))
("field_declaration" . ((field_declaration
declarator: (_) @key)))
("init_declarator" . ((init_declarator
declarator: (_) @key))))
(defvar-local treecrumbs--current-crumbs nil
"Current crumbs to display in the header line. Only updated when
the node under point changes.")
(defun treecrumbs--crumbs-for (node)
"Construct the crumbs for the given NODE, if its language is
supported in `treecrumbs-languages'. This function's return value
is undefined; it directly updates the buffer-local
`treecrumbs--current-crumbs'."
(let ((lang (cdr (assoc (treesit-node-language node) treecrumbs-languages))))
(unless lang
(user-error "No supported treecrumbs language at point!"))
(setq-local treecrumbs--current-crumbs "")
(treesit-parent-while
node
(lambda (parent)
(when-let ((query (cdr (assoc (treesit-node-type parent) lang))))
(setq-local treecrumbs--current-crumbs
(concat treecrumbs--current-crumbs
(if (string-empty-p treecrumbs--current-crumbs) ""
" < ")
(if (stringp query)
query
(substring-no-properties
(treesit-node-text (cdar (treesit-query-capture parent query))))))))
t))))
(defvar-local treecrumbs--last-node nil
"Caches the node that was last seen at point.")
(defun treecrumbs-at-point ()
"Returns the treecrumbs at point as a string, if point is on a
node in a language supported in `treecrumbs-languages'.
The last known crumbs in a given buffer are cached, and only if
the node under point changes are they updated."
(let ((node (treesit-node-at (point))))
(when (or (not treecrumbs--current-crumbs)
(not (equal treecrumbs--last-node node)))
(setq-local treecrumbs--last-node node)
(treecrumbs--crumbs-for node)))
treecrumbs--current-crumbs)
(defvar treecrumbs-line-segment
'(:eval (treecrumbs-at-point))
"Treecrumbs segment for use in the header-line or mode-line.")
;;;###autoload
(define-minor-mode treecrumbs-mode
"Display header line hints about current position in structure."
:init-value nil
:lighter " Crumbs"
(if treecrumbs-mode
(if (treesit-parser-list)
(push treecrumbs-line-segment header-line-format)
(user-error "Treecrumbs mode works only in tree-sitter based buffers!"))
(setq header-line-format
(delq treecrumbs-line-segment header-line-format))))
(provide 'treecrumbs)
;;; treecrumbs.el ends here

View file

@ -1 +0,0 @@
aspen

View file

@ -1,8 +0,0 @@
{ depot, ... }:
depot.tools.emacs-pkgs.buildEmacsPackage {
pname = "tvl";
version = "1.0";
src = ./tvl.el;
externalRequires = (epkgs: with epkgs; [ magit s ]);
}

View file

@ -1,243 +0,0 @@
;;; tvl.el --- description -*- lexical-binding: t; -*-
;;
;; Copyright (C) 2020 Griffin Smith
;; Copyright (C) 2020-2023, 2025 The TVL Contributors
;;
;; Author: Griffin Smith <grfn@gws.fyi>
;; Version: 0.0.1
;; Package-Requires: (s magit)
;;
;; This file is not part of GNU Emacs.
;;
;;; Commentary:
;;
;; This file provides shared utilities for interacting with the TVL monorepo
;;
;;; Code:
(require 'magit)
(require 's)
(defgroup tvl nil
"Customisation options for TVL functionality.")
(defcustom tvl-gerrit-remote "origin"
"Name of the git remote for gerrit."
:type '(string)
:group 'tvl)
(defcustom tvl-depot-path "/depot"
"Location at which the TVL depot is checked out."
:type '(string)
:group 'tvl)
(defcustom tvl-target-branch "canon"
"Branch to use to target CLs."
:group 'tvl
:type '(string)
:safe (lambda (_) t))
(defun tvl--gerrit-ref (target-branch &optional flags)
(let ((flag-suffix (if flags (format "%%%s" (s-join "," flags))
"")))
(format "HEAD:refs/for/%s%s" target-branch flag-suffix)))
(transient-define-suffix magit-gerrit-push-for-review ()
"Push to Gerrit for review."
(interactive)
(magit-push-refspecs tvl-gerrit-remote
(tvl--gerrit-ref tvl-target-branch)
nil))
(transient-append-suffix
#'magit-push ["r"]
(list "R" "push to Gerrit for review" #'magit-gerrit-push-for-review))
(transient-define-suffix magit-gerrit-push-wip ()
"Push to Gerrit as a work-in-progress."
(interactive)
(magit-push-refspecs tvl-gerrit-remote
(tvl--gerrit-ref tvl-target-branch '("wip"))
nil))
(transient-append-suffix
#'magit-push ["r"]
(list "W" "push to Gerrit as a work-in-progress" #'magit-gerrit-push-wip))
(transient-define-suffix magit-gerrit-push-autosubmit ()
"Push to Gerrit with autosubmit enabled."
(interactive)
(magit-push-refspecs tvl-gerrit-remote
(tvl--gerrit-ref tvl-target-branch '("l=Autosubmit+1"))
nil))
(transient-append-suffix
#'magit-push ["r"]
(list "A" "push to Gerrit with autosubmit enabled" #'magit-gerrit-push-autosubmit))
(transient-define-suffix magit-gerrit-submit ()
"Push to Gerrit for review."
(interactive)
(magit-push-refspecs tvl-gerrit-remote
(tvl--gerrit-ref tvl-target-branch '("submit"))
nil))
(transient-append-suffix
#'magit-push ["r"]
(list "S" "push to Gerrit to submit" #'magit-gerrit-submit))
(transient-define-suffix magit-gerrit-rubberstamp ()
"Push, approve and autosubmit to Gerrit. CLs created via this
rubberstamp method will automatically be submitted after CI
passes. This is potentially dangerous; use with care."
(interactive)
(magit-push-refspecs tvl-gerrit-remote
(tvl--gerrit-ref tvl-target-branch
'("l=Code-Review+2"
"l=Autosubmit+1"
"publish-comments"))
nil))
(transient-append-suffix
#'magit-push ["r"]
(list "P" "push & rubberstamp to Gerrit" #'magit-gerrit-rubberstamp))
(transient-define-suffix magit-gerrit-push-private ()
"Push a private change to Gerrit."
(interactive)
(magit-push-refspecs tvl-gerrit-remote
(tvl--gerrit-ref tvl-target-branch
'("private"
"publish-comments"))
nil))
(transient-append-suffix
#'magit-push ["r"]
(list "Q" "push private change to Gerrit" #'magit-gerrit-push-private))
(defvar magit-cl-history nil)
(defun magit-read-cl (prompt remote)
(let* ((refs (prog2 (message "Determining available refs...")
(magit-remote-list-refs remote)
(message "Determining available refs...done")))
(change-refs (-filter
(apply-partially #'string-prefix-p "refs/changes/")
refs))
(cl-number-to-refs
(-group-by
(lambda (change-ref)
;; refs/changes/34/1234/1
;; ^ ^ ^ ^ ^
;; 1 2 3 4 5
;; ^-- this one
(cadddr
(split-string change-ref (rx "/"))))
change-refs))
(cl-numbers
(-map
(lambda (cl-to-refs)
(let ((latest-patchset-ref
(-max-by
(-on #'> (lambda (ref)
(string-to-number
(nth 4 (split-string ref (rx "/"))))))
(-remove
(apply-partially #'s-ends-with-p "meta")
(cdr cl-to-refs)))))
(propertize (car cl-to-refs) 'ref latest-patchset-ref)))
cl-number-to-refs)))
(get-text-property
0
'ref
(magit-completing-read
prompt cl-numbers nil t nil 'magit-cl-history))))
(transient-define-suffix magit-gerrit-checkout (remote cl-refspec)
"Prompt for a CL number and checkout the latest patchset of that CL with
detached HEAD"
(interactive
(let* ((remote tvl-gerrit-remote)
(cl (magit-read-cl "Checkout CL" remote)))
(list remote cl)))
(magit-fetch-refspec remote cl-refspec (magit-fetch-arguments))
;; That runs async, so wait for it to finish (this is how magit does it)
(while (and magit-this-process
(eq (process-status magit-this-process) 'run))
(sleep-for 0.005))
(magit--checkout "FETCH_HEAD" (magit-branch-arguments))
(message "HEAD detached at %s" cl-refspec))
(transient-append-suffix
#'magit-branch ["l"]
(list "g" "gerrit CL" #'magit-gerrit-checkout))
(transient-define-suffix magit-gerrit-cherry-pick (remote cl-refspec)
"Prompt for a CL number and cherry-pick the latest patchset of that CL"
(interactive
(let* ((remote tvl-gerrit-remote)
(cl (magit-read-cl "Cherry-pick CL" remote)))
(list remote cl)))
(magit-fetch-refspec remote cl-refspec (magit-fetch-arguments))
;; That runs async, so wait for it to finish (this is how magit does it)
(while (and magit-this-process
(eq (process-status magit-this-process) 'run))
(sleep-for 0.005))
(magit-cherry-copy (list "FETCH_HEAD"))
(message "HEAD detached at %s" cl-refspec))
(transient-append-suffix
#'magit-cherry-pick ["m"]
(list "g" "Gerrit CL" #'magit-gerrit-cherry-pick))
(defun tvl-depot-status ()
"Open the TVL monorepo in magit."
(interactive)
(magit-status-setup-buffer tvl-depot-path))
(eval-after-load 'sly
'(defun tvl-sly-from-depot (attribute only-deps)
"Start a Sly REPL configured with a Lisp matching a derivation
from the depot.
The derivation invokes nix.buildLisp.sbclWith and is built
asynchronously. The build output is included in the error
thrown on build failures."
;; TODO(sterni): this function assumes that we are using SBCL
;; - for determining the resulting wrapper's location
;; - for creating the dep-only wrapper
(interactive (list (read-string "Attribute: ")
(yes-or-no-p "Only include dependencies? ")))
(let* ((outbuf (get-buffer-create (format "*depot-out/%s*" attribute)))
(errbuf (get-buffer-create (format "*depot-errors/%s*" attribute)))
(attr-display (if only-deps (format "dependencies of %s" attribute) attribute))
(expression (if only-deps
(format "let d = import <depot> {}; in d.nix.buildLisp.sbcl.lispWith d.%s.lispDeps"
attribute)
(format "(import <depot> {}).%s.repl" attribute)))
(command (list "nix-build" "--no-out-link" "-I" (format "depot=%s" tvl-depot-path) "-E" expression)))
(message "Acquiring Lisp for <depot>.%s" attr-display)
(make-process :name (format "depot-nix-build/%s" attribute)
:buffer outbuf
:stderr errbuf
:command command
:sentinel
(lambda (_process event)
(unwind-protect
(pcase event
("finished\n"
(let* ((outpath (s-trim (with-current-buffer outbuf (buffer-string))))
(lisp-path (s-concat outpath "/bin/sbcl")))
(message "Acquired Lisp for <depot>.%s at %s" attr-display lisp-path)
(sly lisp-path)))
(_ (with-current-buffer errbuf
(error "Failed to build %s:\nTried building '%s':\n%s" attr-display expression (buffer-string)))))
(kill-buffer outbuf)
(kill-buffer errbuf)))))))
(provide 'tvl)
;;; tvl.el ends here

View file

@ -1,49 +0,0 @@
# Wrapper script that uses offlineimap to fetch the depot inbox from
# inbox.tvl.su.
#
# Run with the desired output directory as the only argument.
#
# Alternatively, users can browse the inbox on https://inbox.tvl.su
# and interact with public-inbox in any other supported way (IMAP,
# NNTP, git, etc.).
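#
# Usage sketch (the maildir path below is only an example):
#
#   mkdir -p ~/mail/depot-inbox
#   fetch-depot-inbox ~/mail/depot-inbox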
{ pkgs, depot, ... }:
let
config = pkgs.writeText "offlineimaprc" ''
[general]
accounts = depot
[Account depot]
localrepository = Local
remoterepository = Remote
[Repository Local]
type = Maildir
# localfolders set by CLI
[Repository Remote]
type = IMAP
ssl = yes
sslcacertfile = /etc/ssl/certs/ca-bundle.crt
remotehost = inbox.tvl.su
remoteuser = anonymous
remotepass = anonymous
'';
in
pkgs.writeShellScriptBin "fetch-depot-inbox" ''
readonly MAILDIR=''${1}
if [ -z "''${MAILDIR}" ]; then
echo "[inbox] must specify target maildir as the first argument!" >&2
exit 1
fi
if [ ! -d "''${MAILDIR}" ]; then
echo "[inbox] specified maildir must exist and be a directory!" >&2
exit 1
fi
echo "[inbox] Synchronising TVL depot inbox into ''${MAILDIR}"
${pkgs.offlineimap}/bin/offlineimap -c ${config} \
-k "Repository_Local:localfolders=''${MAILDIR}"
''

View file

@ -1,17 +0,0 @@
# Utility for invoking slappasswd with the correct options for
# creating an ARGON2 password hash.
#
# Users should generally use https://signup.tvl.fyi instead.
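#
# Usage sketch (the password is only an example, mirroring the check below):
#
#   hash-password -s example-password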
{ pkgs, ... }:
let
script = pkgs.writeShellScriptBin "hash-password" ''
${pkgs.openldap}/bin/slappasswd -o module-load=argon2 -h '{ARGON2}' "$@"
'';
in
script.overrideAttrs (old: {
doCheck = true;
checkPhase = ''
${pkgs.stdenv.shell} $out/bin/hash-password -s example-password > /dev/null
'';
})

View file

@ -1,12 +0,0 @@
result
result-*
.envrc
debug/
# Just to be sure, since we're occasionally handling test keys:
*.pem
*.p12
*.json
# Created by the integration test
var-cache-nixery

View file

@ -1 +0,0 @@
Imported subtree is not yet fully readTree-compatible.

View file

@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -1,156 +0,0 @@
<div align="center">
<img src="https://nixery.dev/nixery-logo.png">
</div>
-----------------
[![Build status](https://badge.buildkite.com/016bff4b8ae2704a3bbbb0a250784e6692007c582983b6dea7.svg?branch=refs/heads/canon)](https://buildkite.com/tvl/depot)
**Nixery** is a Docker-compatible container registry that is capable of
transparently building and serving container images using [Nix][].
Images are built on-demand based on the *image name*. Every package that the
user intends to include in the image is specified as a path component of the
image name.
The path components refer to top-level keys in `nixpkgs` and are used to build a
container image using a [layering strategy][] that optimises for caching popular
and/or large dependencies.
A public instance as well as additional documentation is available at
[nixery.dev][public].
You can watch the NixCon 2019 [talk about
Nixery](https://www.youtube.com/watch?v=pOI9H4oeXqA) for more information about
the project and its use-cases.
The canonical location of the Nixery source code is
[`//tools/nixery`][depot-link] in the [TVL](https://tvl.fyi)
monorepository. If cloning the entire repository is not desirable, the
Nixery subtree can be cloned like this:
git clone https://code.tvl.fyi/depot.git:/tools/nixery.git
The subtree is infrequently mirrored to `tazjin/nixery` on Github.
## Demo
Click the image to see an example in which an image containing an interactive
shell and GNU `hello` is downloaded.
[![asciicast](https://asciinema.org/a/262583.png)](https://asciinema.org/a/262583?autoplay=1)
To try it yourself, head to [nixery.dev][public]!
The special meta-package `shell` provides an image base with many core
components (such as `bash` and `coreutils`) that users commonly expect in
interactive images.
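As a further illustration of the naming scheme (a sketch that simply follows
the path-component rules above), an image containing `git` and `htop` on top
of the interactive shell base could be pulled like this:

    docker pull nixery.dev/shell/git/htop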
## Feature overview
* Serve container images on-demand using image names as content specifications
Specify package names as path components and Nixery will create images, using
the most efficient caching strategy it can to share data between different
images.
* Use private package sets from various sources
In addition to building images from the publicly available Nix/NixOS channels,
a private Nixery instance can be configured to serve images built from a
package set hosted in a custom git repository or filesystem path.
When using this feature with custom git repositories, Nixery will forward the
specified image tags as git references.
For example, if a company used a custom repository overlaying their packages
on the Nix package set, images could be built from a git tag `release-v2`:
`docker pull nixery.thecompany.website/custom-service:release-v2`
* Efficient serving of image layers from Google Cloud Storage
After building an image, Nixery stores all of its layers in a GCS bucket and
forwards requests to retrieve layers to the bucket. This enables efficient
serving of layers, as well as sharing of image layers between redundant
instances.
## Configuration
Nixery supports the following configuration options, provided via environment
variables:
* `PORT`: HTTP port on which Nixery should listen
* `NIXERY_CHANNEL`: The name of a Nix/NixOS channel to use for building
* `NIXERY_PKGS_REPO`: URL of a git repository containing a package set (uses
locally configured SSH/git credentials)
* `NIXERY_PKGS_PATH`: A local filesystem path containing a Nix package set to
use for building
* `NIXERY_STORAGE_BACKEND`: The type of backend storage to use; currently
supported values are `gcs` (Google Cloud Storage) and `filesystem`.
For each of these, additional backend configuration is necessary; see the
[storage section](#storage) for details.
* `NIX_TIMEOUT`: Number of seconds that any Nix builder is allowed to run
(defaults to 60)
* `NIX_POPULARITY_URL`: URL to a file containing popularity data for
the package set (see `popcount/`)
If the `GOOGLE_APPLICATION_CREDENTIALS` environment variable is set to a service
account key, Nixery will also use this key to create [signed URLs][] for layers
in the storage bucket. This makes it possible to serve layers from a bucket
without having to make them publicly available.
In case the `GOOGLE_APPLICATION_CREDENTIALS` environment variable is not set, a
redirect to storage.googleapis.com is issued, which means the underlying bucket
objects need to be publicly accessible.
### Storage
Nixery supports multiple different storage backends in which its build cache and
image layers are kept, and from which they are served.
Currently the available storage backends are Google Cloud Storage and the local
file system.
In the GCS case, images are served by redirecting clients to the storage bucket.
Layers stored on the filesystem are served straight from the local disk.
These extra configuration variables must be set to configure storage backends:
* `GCS_BUCKET`: Name of the Google Cloud Storage bucket to use (**required** for
`gcs`)
* `GOOGLE_APPLICATION_CREDENTIALS`: Path to a GCP service account JSON key
(**optional** for `gcs`)
* `STORAGE_PATH`: Path to a folder in which to store and from which to serve
data (**required** for `filesystem`)
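As a sketch of how these settings fit together (assuming the server binary is
invoked as `nixery`), a local instance backed by the filesystem might be
started roughly like this:

    export PORT=8080
    export NIXERY_CHANNEL=nixos-unstable
    export NIXERY_STORAGE_BACKEND=filesystem
    export STORAGE_PATH=/var/lib/nixery
    nixery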
### Background
The project started out inspired by the [buildLayeredImage][] blog post with the
intention of becoming a Kubernetes controller that can serve declarative image
specifications specified in CRDs as container images. The design for this was
outlined in [a public gist][gist].
## Roadmap
### Kubernetes integration
It should be trivial to deploy Nixery inside of a Kubernetes cluster with
correct caching behaviour, addressing and so on.
See [issue #4](https://github.com/tazjin/nixery/issues/4).
### Nix-native builder
The image building and layering functionality of Nixery will be extracted into a
separate Nix function, which will make it possible to build images directly in
Nix builds.
[Nix]: https://nixos.org/
[layering strategy]: https://tazj.in/blog/nixery-layers
[gist]: https://gist.github.com/tazjin/08f3d37073b3590aacac424303e6f745
[buildLayeredImage]: https://grahamc.com/blog/nix-and-layered-docker-images
[public]: https://nixery.dev
[depot-link]: https://code.tvl.fyi/tree/tools/nixery
[gcs]: https://cloud.google.com/storage/

View file

@ -1,104 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
package builder
// This file implements logic for walking through a directory and creating a
// tarball of it.
//
// The tarball is written straight to the supplied reader, which makes it
// possible to create an image layer from the specified store paths, hash it and
// upload it in one reading pass.
import (
"archive/tar"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"os"
"path/filepath"
"github.com/google/nixery/layers"
)
// Create a new compressed tarball from each of the paths in the list
// and write it to the supplied writer.
//
// The uncompressed tarball is hashed because image manifests must
// contain both the hashes of compressed and uncompressed layers.
func packStorePaths(l *layers.Layer, w io.Writer) (string, error) {
shasum := sha256.New()
gz := gzip.NewWriter(w)
multi := io.MultiWriter(shasum, gz)
t := tar.NewWriter(multi)
for _, path := range l.Contents {
err := filepath.Walk(path, tarStorePath(t))
if err != nil {
return "", err
}
}
if err := t.Close(); err != nil {
return "", err
}
if err := gz.Close(); err != nil {
return "", err
}
return fmt.Sprintf("sha256:%x", shasum.Sum([]byte{})), nil
}
func tarStorePath(w *tar.Writer) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// If the entry is not a symlink or regular file, skip it.
if info.Mode()&os.ModeSymlink == 0 && !info.Mode().IsRegular() {
return nil
}
// the symlink target is read if this entry is a symlink, as it
// is required when creating the file header
var link string
if info.Mode()&os.ModeSymlink != 0 {
link, err = os.Readlink(path)
if err != nil {
return err
}
}
header, err := tar.FileInfoHeader(info, link)
if err != nil {
return err
}
// The name retrieved from os.FileInfo only contains the file's
// basename, but the full path is required within the layer
// tarball.
header.Name = path
if err = w.WriteHeader(header); err != nil {
return err
}
// At this point, return if no file content needs to be written
if !info.Mode().IsRegular() {
return nil
}
f, err := os.Open(path)
if err != nil {
return err
}
// Close the file even if the copy below fails.
defer f.Close()
if _, err := io.Copy(w, f); err != nil {
return err
}
return nil
}
}

View file

@ -1,527 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Package builder implements the logic for assembling container
// images. It shells out to Nix to retrieve all required Nix-packages
// and assemble the symlink layer and then creates the required
// tarballs in-process.
package builder
import (
"bufio"
"bytes"
"compress/gzip"
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"sort"
"strings"
"github.com/google/nixery/config"
"github.com/google/nixery/layers"
"github.com/google/nixery/manifest"
"github.com/google/nixery/storage"
"github.com/im7mortal/kmutex"
log "github.com/sirupsen/logrus"
)
// The maximum number of layers in an image is 125. To allow for
// extensibility, the actual number of layers Nixery is "allowed" to
// use up is set at a lower point.
const LayerBudget int = 94
// State holds the runtime state that is carried around in Nixery and
// passed to builder functions.
type State struct {
Storage storage.Backend
Cache *LocalCache
Cfg config.Config
Pop layers.Popularity
UploadMutex *kmutex.Kmutex
}
// Architecture represents the possible CPU architectures for which
// container images can be built.
//
// The default architecture is amd64, but support for ARM platforms is
// available within nixpkgs and can be toggled via meta-packages.
type Architecture struct {
// Name of the system tuple to pass to Nix
nixSystem string
// Name of the architecture as used in the OCI manifests
imageArch string
}
var amd64 = Architecture{"x86_64-linux", "amd64"}
var arm64 = Architecture{"aarch64-linux", "arm64"}
// Image represents the information necessary for building a container image.
// This can be either a list of package names (corresponding to keys in the
// nixpkgs set) or a Nix expression that results in a *list* of derivations.
type Image struct {
Name string
Tag string
// Names of packages to include in the image. These must correspond
// directly to top-level names of Nix packages in the nixpkgs tree.
Packages []string
// Architecture for which to build the image. Nixery defaults
// this to amd64 if not specified via meta-packages.
Arch *Architecture
}
// BuildResult represents the data returned from the server to the
// HTTP handlers. Error information is propagated straight from Nix
// for errors inside of the build that should be fed back to the
// client (such as missing packages).
type BuildResult struct {
Error string `json:"error"`
Pkgs []string `json:"pkgs"`
Manifest json.RawMessage `json:"manifest"`
}
// ImageFromName parses an image name into the corresponding structure which can
// be used to invoke Nix.
//
// It will expand convenience names under the hood (see the `metaPackages`
// function below) and append packages that are always included (cacert, iana-etc).
//
// Once assembled the image structure uses a sorted representation of
// the name. This is to avoid unnecessarily cache-busting images if
// only the order of requested packages has changed.
func ImageFromName(name string, tag string) Image {
pkgs := strings.Split(name, "/")
arch, expanded := metaPackages(pkgs)
expanded = append(expanded, "cacert", "iana-etc")
sort.Strings(pkgs)
sort.Strings(expanded)
return Image{
Name: strings.Join(pkgs, "/"),
Tag: tag,
Packages: expanded,
Arch: arch,
}
}
// ImageResult represents the output of calling the Nix derivation
// responsible for preparing an image.
type ImageResult struct {
// These fields are populated in case of an error
Error string `json:"error"`
Pkgs []string `json:"pkgs"`
// These fields are populated in case of success
Graph layers.RuntimeGraph `json:"runtimeGraph"`
SymlinkLayer struct {
Size int `json:"size"`
TarHash string `json:"tarHash"`
Path string `json:"path"`
} `json:"symlinkLayer"`
}
// metaPackages expands package names defined by Nixery which either
// include sets of packages or trigger certain image-building
// behaviour.
//
// Meta-packages must be specified as the first packages in an image
// name.
//
// Currently defined meta-packages are:
//
// * `shell`: Includes bash, coreutils and other common command-line tools
// * `arm64`: Causes Nixery to build images for the ARM64 architecture
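//
// For illustration (not part of the original comment):
// metaPackages([]string{"shell", "git"}) yields the amd64 architecture and
// the package list ["git", "bashInteractive", "coreutils", "moreutils", "nano"].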
func metaPackages(packages []string) (*Architecture, []string) {
arch := &amd64
var metapkgs []string
lastMeta := 0
for idx, p := range packages {
if p == "shell" || p == "arm64" {
metapkgs = append(metapkgs, p)
lastMeta = idx + 1
} else {
break
}
}
// Chop off the meta-packages from the front of the package
// list
packages = packages[lastMeta:]
for _, p := range metapkgs {
switch p {
case "shell":
packages = append(packages, "bashInteractive", "coreutils", "moreutils", "nano")
case "arm64":
arch = &arm64
}
}
return arch, packages
}
// logNix logs each output line from Nix. It runs in a goroutine per
// output channel that should be live-logged.
func logNix(image, cmd string, r io.ReadCloser) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
log.WithFields(log.Fields{
"image": image,
"cmd": cmd,
}).Info("[nix] " + scanner.Text())
}
}
func callNix(program, image string, args []string) ([]byte, error) {
cmd := exec.Command(program, args...)
outpipe, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
errpipe, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
go logNix(image, program, errpipe)
if err = cmd.Start(); err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image,
"cmd": program,
}).Error("error invoking Nix")
return nil, err
}
log.WithFields(log.Fields{
"cmd": program,
"image": image,
}).Info("invoked Nix build")
stdout, _ := ioutil.ReadAll(outpipe)
if err = cmd.Wait(); err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image,
"cmd": program,
"stdout": stdout,
}).Info("failed to invoke Nix")
return nil, err
}
resultFile := strings.TrimSpace(string(stdout))
buildOutput, err := ioutil.ReadFile(resultFile)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image,
"file": resultFile,
}).Info("failed to read Nix result file")
return nil, err
}
return buildOutput, nil
}
// Call out to Nix and request metadata for the image to be built. All
// required store paths for the image will be realised, but layers
// will not yet be created from them.
//
// This function is only invoked if the manifest is not found in any
// cache.
func prepareImage(s *State, image *Image) (*ImageResult, error) {
packages, err := json.Marshal(image.Packages)
if err != nil {
return nil, err
}
srcType, srcArgs := s.Cfg.Pkgs.Render(image.Tag)
args := []string{
"--timeout", s.Cfg.Timeout,
"--argstr", "packages", string(packages),
"--argstr", "srcType", srcType,
"--argstr", "srcArgs", srcArgs,
"--argstr", "system", image.Arch.nixSystem,
}
output, err := callNix("nixery-prepare-image", image.Name, args)
if err != nil {
// granular error logging is performed in callNix already
return nil, err
}
log.WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
}).Info("finished image preparation via Nix")
var result ImageResult
err = json.Unmarshal(output, &result)
if err != nil {
return nil, err
}
return &result, nil
}
// Groups layers and checks whether they are present in the cache
// already, otherwise calls out to Nix to assemble layers.
//
// Newly built layers are uploaded to the bucket. Cache entries are
// added only after successful uploads, which guarantees that entries
// retrieved from the cache are present in the bucket.
func prepareLayers(ctx context.Context, s *State, image *Image, result *ImageResult) ([]manifest.Entry, error) {
grouped := layers.GroupLayers(&result.Graph, &s.Pop, LayerBudget)
var entries []manifest.Entry
// Splits the layers into those which are already present in
// the cache, and those that are missing.
//
// Missing layers are built and uploaded to the storage
// bucket.
for _, l := range grouped {
lh := l.Hash()
// While packing store paths, the SHA sum of
// the uncompressed layer is computed and
// written to `tarhash`.
//
// TODO(tazjin): Refactor this to make the
// flow of data cleaner.
lw := func(w io.Writer) (string, error) {
tarhash, err := packStorePaths(&l, w)
if err != nil {
return "", err
}
var pkgs []string
for _, p := range l.Contents {
pkgs = append(pkgs, layers.PackageFromPath(p))
}
log.WithFields(log.Fields{
"layer": lh,
"packages": pkgs,
"tarhash": tarhash,
}).Info("created image layer")
return tarhash, err
}
entry, err := uploadHashLayer(ctx, s, lh, l.MergeRating, lw)
if err != nil {
return nil, err
}
entries = append(entries, *entry)
}
// Symlink layer (built in the first Nix build) needs to be
// included here manually:
slkey := result.SymlinkLayer.TarHash
entry, err := uploadHashLayer(ctx, s, slkey, 0, func(w io.Writer) (string, error) {
f, err := os.Open(result.SymlinkLayer.Path)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
"layer": slkey,
}).Error("failed to open symlink layer")
return "", err
}
defer f.Close()
gz := gzip.NewWriter(w)
_, err = io.Copy(gz, f)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
"layer": slkey,
}).Error("failed to upload symlink layer")
return "", err
}
return "sha256:" + slkey, gz.Close()
})
if err != nil {
return nil, err
}
entries = append(entries, *entry)
return entries, nil
}
// layerWriter is the type for functions that can write a layer to the
// multiwriter used for uploading & hashing.
//
// This type exists to avoid duplication between the handling of
// symlink layers and store path layers.
type layerWriter func(w io.Writer) (string, error)
// byteCounter is a special io.Writer that counts all bytes written to
// it and does nothing else.
//
// This is required because the ad-hoc writing of tarballs leaves no
// single place to count the final tarball size otherwise.
type byteCounter struct {
count int64
}
func (b *byteCounter) Write(p []byte) (n int, err error) {
b.count += int64(len(p))
return len(p), nil
}
// Upload a layer tarball to the storage bucket, while hashing it at
// the same time. The supplied function is expected to provide the
// layer data to the writer.
//
// The initial upload is performed in a 'staging' folder, as the
// SHA256-hash is not yet available when the upload is initiated.
//
// After a successful upload, the file is moved to its final location
// in the bucket and the build cache is populated.
//
// The return value is the layer's SHA256 hash, which is used in the
// image manifest.
func uploadHashLayer(ctx context.Context, s *State, key string, mrating uint64, lw layerWriter) (*manifest.Entry, error) {
s.UploadMutex.Lock(key)
defer s.UploadMutex.Unlock(key)
if entry, cached := layerFromCache(ctx, s, key); cached {
return entry, nil
}
path := "staging/" + key
var tarhash string
sha256sum, size, err := s.Storage.Persist(ctx, path, manifest.LayerType, func(sw io.Writer) (string, int64, error) {
// Sets up a "multiwriter" that simultaneously runs both hash
// algorithms and uploads to the storage backend.
shasum := sha256.New()
counter := &byteCounter{}
multi := io.MultiWriter(sw, shasum, counter)
var err error
tarhash, err = lw(multi)
sha256sum := fmt.Sprintf("%x", shasum.Sum([]byte{}))
return sha256sum, counter.count, err
})
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Error("failed to create and store layer")
return nil, err
}
// Hashes are now known and the object is in the bucket; what
// remains is to move it to the correct location and cache it.
err = s.Storage.Move(ctx, "staging/"+key, "layers/"+sha256sum)
if err != nil {
log.WithError(err).WithField("layer", key).
Error("failed to move layer from staging")
return nil, err
}
log.WithFields(log.Fields{
"layer": key,
"sha256": sha256sum,
"size": size,
}).Info("created and persisted layer")
entry := manifest.Entry{
Digest: "sha256:" + sha256sum,
Size: size,
TarHash: tarhash,
MergeRating: mrating,
}
cacheLayer(ctx, s, key, entry)
return &entry, nil
}
func BuildImage(ctx context.Context, s *State, image *Image) (*BuildResult, error) {
key := s.Cfg.Pkgs.CacheKey(image.Packages, image.Tag)
if key != "" {
if m, c := manifestFromCache(ctx, s, key); c {
return &BuildResult{
Manifest: m,
}, nil
}
}
imageResult, err := prepareImage(s, image)
if err != nil {
return nil, err
}
if imageResult.Error != "" {
return &BuildResult{
Error: imageResult.Error,
Pkgs: imageResult.Pkgs,
}, nil
}
layers, err := prepareLayers(ctx, s, image, imageResult)
if err != nil {
return nil, err
}
// If the requested packages include a shell,
// set cmd accordingly.
cmd := ""
for _, pkg := range image.Packages {
if pkg == "bashInteractive" {
cmd = "bash"
}
}
m, c := manifest.Manifest(image.Arch.imageArch, layers, cmd)
lw := func(w io.Writer) (string, error) {
r := bytes.NewReader(c.Config)
_, err := io.Copy(w, r)
return "", err
}
if _, err = uploadHashLayer(ctx, s, c.SHA256, 0, lw); err != nil {
log.WithError(err).WithFields(log.Fields{
"image": image.Name,
"tag": image.Tag,
}).Error("failed to upload config")
return nil, err
}
if key != "" {
go cacheManifest(ctx, s, key, m)
}
result := BuildResult{
Manifest: m,
}
return &result, nil
}

View file

@ -1,112 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
package builder
import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"testing"
)
var ignoreArch = cmpopts.IgnoreFields(Image{}, "Arch")
func TestImageFromNameSimple(t *testing.T) {
image := ImageFromName("hello", "latest")
expected := Image{
Name: "hello",
Tag: "latest",
Packages: []string{
"cacert",
"hello",
"iana-etc",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"hello\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameMultiple(t *testing.T) {
image := ImageFromName("hello/git/htop", "latest")
expected := Image{
Name: "git/hello/htop",
Tag: "latest",
Packages: []string{
"cacert",
"git",
"hello",
"htop",
"iana-etc",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"hello/git/htop\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameShell(t *testing.T) {
image := ImageFromName("shell", "latest")
expected := Image{
Name: "shell",
Tag: "latest",
Packages: []string{
"bashInteractive",
"cacert",
"coreutils",
"iana-etc",
"moreutils",
"nano",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"shell\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameShellMultiple(t *testing.T) {
image := ImageFromName("shell/htop", "latest")
expected := Image{
Name: "htop/shell",
Tag: "latest",
Packages: []string{
"bashInteractive",
"cacert",
"coreutils",
"htop",
"iana-etc",
"moreutils",
"nano",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"shell/htop\", \"latest\") mismatch:\n%s", diff)
}
}
func TestImageFromNameShellArm64(t *testing.T) {
image := ImageFromName("shell/arm64", "latest")
expected := Image{
Name: "arm64/shell",
Tag: "latest",
Packages: []string{
"bashInteractive",
"cacert",
"coreutils",
"iana-etc",
"moreutils",
"nano",
},
}
if diff := cmp.Diff(expected, image, ignoreArch); diff != "" {
t.Fatalf("Image(\"shell/arm64\", \"latest\") mismatch:\n%s", diff)
}
if image.Arch.imageArch != "arm64" {
t.Fatal("Image(\"shell/arm64\"): Expected arch arm64")
}
}

View file

@ -1,225 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
package builder
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"os"
"sync"
"github.com/google/nixery/manifest"
log "github.com/sirupsen/logrus"
)
// LocalCache implements the structure used for local caching of
// manifests and layer uploads.
type LocalCache struct {
// Manifest cache
mmtx sync.RWMutex
mdir string
// Layer cache
lmtx sync.RWMutex
lcache map[string]manifest.Entry
}
// Creates an in-memory cache and ensures that the local file path for
// manifest caching exists.
func NewCache() (LocalCache, error) {
path := os.TempDir() + "/nixery"
err := os.MkdirAll(path, 0755)
if err != nil {
return LocalCache{}, err
}
return LocalCache{
mdir: path + "/",
lcache: make(map[string]manifest.Entry),
}, nil
}
// Retrieve a cached manifest if the build is cacheable and it exists.
func (c *LocalCache) manifestFromLocalCache(key string) (json.RawMessage, bool) {
c.mmtx.RLock()
defer c.mmtx.RUnlock()
f, err := os.Open(c.mdir + key)
if err != nil {
// This is a debug log statement because failure to
// read the manifest key is currently expected if it
// is not cached.
log.WithError(err).WithField("manifest", key).
Debug("failed to read manifest from local cache")
return nil, false
}
defer f.Close()
m, err := ioutil.ReadAll(f)
if err != nil {
log.WithError(err).WithField("manifest", key).
Error("failed to read manifest from local cache")
return nil, false
}
return json.RawMessage(m), true
}
// Adds the result of a manifest build to the local cache, if the
// manifest is considered cacheable.
//
// Manifests can be quite large and are cached on disk instead of in
// memory.
func (c *LocalCache) localCacheManifest(key string, m json.RawMessage) {
c.mmtx.Lock()
defer c.mmtx.Unlock()
err := ioutil.WriteFile(c.mdir+key, []byte(m), 0644)
if err != nil {
log.WithError(err).WithField("manifest", key).
Error("failed to locally cache manifest")
}
}
// Retrieve a layer build from the local cache.
func (c *LocalCache) layerFromLocalCache(key string) (*manifest.Entry, bool) {
c.lmtx.RLock()
e, ok := c.lcache[key]
c.lmtx.RUnlock()
return &e, ok
}
// Add a layer build result to the local cache.
func (c *LocalCache) localCacheLayer(key string, e manifest.Entry) {
c.lmtx.Lock()
c.lcache[key] = e
c.lmtx.Unlock()
}
// Retrieve a manifest from the cache(s). First the local cache is
// checked, then the storage backend.
func manifestFromCache(ctx context.Context, s *State, key string) (json.RawMessage, bool) {
if m, cached := s.Cache.manifestFromLocalCache(key); cached {
return m, true
}
r, err := s.Storage.Fetch(ctx, "manifests/"+key)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"manifest": key,
"backend": s.Storage.Name(),
}).Error("failed to fetch manifest from cache")
return nil, false
}
defer r.Close()
m, err := ioutil.ReadAll(r)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"manifest": key,
"backend": s.Storage.Name(),
}).Error("failed to read cached manifest from storage backend")
return nil, false
}
go s.Cache.localCacheManifest(key, m)
log.WithField("manifest", key).Info("retrieved manifest from GCS")
return json.RawMessage(m), true
}
// Add a manifest to the bucket & local caches
func cacheManifest(ctx context.Context, s *State, key string, m json.RawMessage) {
go s.Cache.localCacheManifest(key, m)
path := "manifests/" + key
_, size, err := s.Storage.Persist(ctx, path, manifest.ManifestType, func(w io.Writer) (string, int64, error) {
size, err := io.Copy(w, bytes.NewReader([]byte(m)))
return "", size, err
})
if err != nil {
log.WithError(err).WithFields(log.Fields{
"manifest": key,
"backend": s.Storage.Name(),
}).Error("failed to cache manifest to storage backend")
return
}
log.WithFields(log.Fields{
"manifest": key,
"size": size,
"backend": s.Storage.Name(),
}).Info("cached manifest to storage backend")
}
// Retrieve a layer build from the cache, first checking the local
// cache followed by the bucket cache.
func layerFromCache(ctx context.Context, s *State, key string) (*manifest.Entry, bool) {
if entry, cached := s.Cache.layerFromLocalCache(key); cached {
return entry, true
}
r, err := s.Storage.Fetch(ctx, "builds/"+key)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Debug("failed to retrieve cached layer from storage backend")
return nil, false
}
defer r.Close()
jb := bytes.NewBuffer([]byte{})
_, err = io.Copy(jb, r)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Error("failed to read cached layer from storage backend")
return nil, false
}
var entry manifest.Entry
err = json.Unmarshal(jb.Bytes(), &entry)
if err != nil {
log.WithError(err).WithField("layer", key).
Error("failed to unmarshal cached layer")
return nil, false
}
go s.Cache.localCacheLayer(key, entry)
return &entry, true
}
func cacheLayer(ctx context.Context, s *State, key string, entry manifest.Entry) {
s.Cache.localCacheLayer(key, entry)
j, _ := json.Marshal(&entry)
path := "builds/" + key
_, _, err := s.Storage.Persist(ctx, path, "", func(w io.Writer) (string, int64, error) {
size, err := io.Copy(w, bytes.NewReader(j))
return "", size, err
})
if err != nil {
log.WithError(err).WithFields(log.Fields{
"layer": key,
"backend": s.Storage.Name(),
}).Error("failed to cache layer")
}
}

View file

@ -1,283 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// The nixery server implements a container registry that transparently builds
// container images based on Nix derivations.
//
// The Nix derivation used for image creation is responsible for creating
// objects that are compatible with the registry API. The targeted registry
// protocol is currently Docker's.
//
// When an image is requested, the required contents are parsed out of the
// request and a Nix-build is initiated that eventually responds with the
// manifest as well as information linking each layer digest to a local
// filesystem path.
package main
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"regexp"
"github.com/google/nixery/builder"
"github.com/google/nixery/config"
"github.com/google/nixery/layers"
"github.com/google/nixery/logs"
mf "github.com/google/nixery/manifest"
"github.com/google/nixery/storage"
"github.com/im7mortal/kmutex"
log "github.com/sirupsen/logrus"
)
// manifestMediaType is the Content-Type used for the manifest itself. This
// corresponds to the "Image Manifest V2, Schema 2" described on this page:
//
// https://docs.docker.com/registry/spec/manifest-v2-2/
const manifestMediaType string = "application/vnd.docker.distribution.manifest.v2+json"
// This variable will be initialised during the build process and set
// to the hash of the entire Nixery source tree.
var version string = "devel"
// Regexes matching the V2 Registry API routes. This only includes the
// routes required for serving images, since pushing and other such
// functionality is not available.
var (
manifestRegex = regexp.MustCompile(`^/v2/([\w|\-|\.|\_|\/]+)/manifests/([\w|\-|\.|\_]+)$`)
blobRegex = regexp.MustCompile(`^/v2/([\w|\-|\.|\_|\/]+)/(blobs|manifests)/sha256:(\w+)$`)
)
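// For illustration (not part of the original source): a manifest request for
// the image "shell/git" at tag "latest" arrives as
// "/v2/shell/git/manifests/latest" and is matched by manifestRegex, while a
// layer fetch such as "/v2/shell/git/blobs/sha256:<digest>" is matched by
// blobRegex.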
// Downloads the popularity information for the package set from the
// URL specified in Nixery's configuration.
func downloadPopularity(url string) (layers.Popularity, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
return nil, fmt.Errorf("popularity download from '%s' returned status: %s\n", url, resp.Status)
}
j, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
var pop layers.Popularity
err = json.Unmarshal(j, &pop)
if err != nil {
return nil, err
}
return pop, nil
}
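// Sketch of the popularity decoding above. layers.Popularity is defined
// elsewhere in the tree; for illustration this decodes into a plain
// map[string]int, and the JSON document is inlined rather than fetched.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	body := []byte(`{"glibc": 1337, "openssl": 42}`)
	var pop map[string]int
	if err := json.Unmarshal(body, &pop); err != nil {
		panic(err)
	}
	fmt.Println(pop["glibc"]) // 1337
}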
// Error format corresponding to the registry protocol V2 specification. This
// allows feeding back errors to clients in a way that can be presented to
// users.
type registryError struct {
Code string `json:"code"`
Message string `json:"message"`
}
type registryErrors struct {
Errors []registryError `json:"errors"`
}
func writeError(w http.ResponseWriter, status int, code, message string) {
err := registryErrors{
Errors: []registryError{
{code, message},
},
}
json, _ := json.Marshal(err)
w.Header().Add("Content-Type", "application/json")
w.WriteHeader(status)
w.Write(json)
}
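// Standalone sketch of the error payload shape produced by writeError above;
// the code/message values are just examples of what a client would receive.
package main

import (
	"encoding/json"
	"fmt"
)

type registryError struct {
	Code    string `json:"code"`
	Message string `json:"message"`
}

type registryErrors struct {
	Errors []registryError `json:"errors"`
}

func main() {
	payload := registryErrors{
		Errors: []registryError{
			{"MANIFEST_UNKNOWN", "Could not find Nix packages: [htop]"},
		},
	}
	j, _ := json.Marshal(payload)
	fmt.Println(string(j))
	// {"errors":[{"code":"MANIFEST_UNKNOWN","message":"Could not find Nix packages: [htop]"}]}
}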
type registryHandler struct {
state *builder.State
}
// Serve a manifest by tag, building it via Nix and populating caches
// if necessary.
func (h *registryHandler) serveManifestTag(w http.ResponseWriter, r *http.Request, name string, tag string) {
log.WithFields(log.Fields{
"image": name,
"tag": tag,
}).Info("requesting image manifest")
image := builder.ImageFromName(name, tag)
buildResult, err := builder.BuildImage(r.Context(), h.state, &image)
if err != nil {
writeError(w, 500, "UNKNOWN", "image build failure")
log.WithError(err).WithFields(log.Fields{
"image": name,
"tag": tag,
}).Error("failed to build image manifest")
return
}
// Some error types have special handling, which is applied
// here.
if buildResult.Error == "not_found" {
s := fmt.Sprintf("Could not find Nix packages: %v", buildResult.Pkgs)
writeError(w, 404, "MANIFEST_UNKNOWN", s)
log.WithFields(log.Fields{
"image": name,
"tag": tag,
"packages": buildResult.Pkgs,
}).Warn("could not find Nix packages")
return
}
// This marshaling error is ignored because we know that this
// field represents valid JSON data.
manifest, _ := json.Marshal(buildResult.Manifest)
w.Header().Add("Content-Type", manifestMediaType)
// The manifest needs to be persisted to the blob storage (to become
// available for clients that fetch manifests by their hash, e.g.
// containerd) and served to the client.
//
// Since we have no stable key to address this manifest (it may be
// uncacheable, yet still addressable by blob) we need to separate
// out the hashing, uploading and serving phases. The latter is
// especially important as clients may start to fetch it by digest
// as soon as they see a response.
sha256sum := fmt.Sprintf("%x", sha256.Sum256(manifest))
path := "layers/" + sha256sum
ctx := context.TODO()
_, _, err = h.state.Storage.Persist(ctx, path, mf.ManifestType, func(sw io.Writer) (string, int64, error) {
// We already know the hash, so no additional hash needs to be
// constructed here.
written, err := sw.Write(manifest)
return sha256sum, int64(written), err
})
if err != nil {
writeError(w, 500, "MANIFEST_UPLOAD", "could not upload manifest to blob store")
log.WithError(err).WithFields(log.Fields{
"image": name,
"tag": tag,
}).Error("could not upload manifest")
return
}
w.Write(manifest)
}
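// Sketch of the content addressing used above: the manifest is persisted
// under layers/<sha256 of its bytes>, so clients can later fetch it by
// digest. The manifest bytes here are a stand-in, not a real image manifest.
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	manifest := []byte(`{"schemaVersion":2}`)
	sum := fmt.Sprintf("%x", sha256.Sum256(manifest))
	fmt.Println("layers/" + sum)
}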
// serveBlob serves a blob from storage by digest
func (h *registryHandler) serveBlob(w http.ResponseWriter, r *http.Request, blobType, digest string) {
storage := h.state.Storage
err := storage.Serve(digest, r, w)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"type": blobType,
"digest": digest,
"backend": storage.Name(),
}).Error("failed to serve blob from storage backend")
}
}
// ServeHTTP dispatches HTTP requests to the matching handlers.
func (h *registryHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// Acknowledge that we speak V2 with an empty response
if r.RequestURI == "/v2/" {
return
}
// Build & serve a manifest by tag
manifestMatches := manifestRegex.FindStringSubmatch(r.RequestURI)
if len(manifestMatches) == 3 {
h.serveManifestTag(w, r, manifestMatches[1], manifestMatches[2])
return
}
// Serve a blob by digest
layerMatches := blobRegex.FindStringSubmatch(r.RequestURI)
if len(layerMatches) == 4 {
h.serveBlob(w, r, layerMatches[2], layerMatches[3])
return
}
log.WithField("uri", r.RequestURI).Info("unsupported registry route")
w.WriteHeader(404)
}
func main() {
logs.Init(version)
cfg, err := config.FromEnv()
if err != nil {
log.WithError(err).Fatal("failed to load configuration")
}
var s storage.Backend
switch cfg.Backend {
case config.GCS:
s, err = storage.NewGCSBackend()
case config.FileSystem:
s, err = storage.NewFSBackend()
}
if err != nil {
log.WithError(err).Fatal("failed to initialise storage backend")
}
log.WithField("backend", s.Name()).Info("initialised storage backend")
cache, err := builder.NewCache()
if err != nil {
log.WithError(err).Fatal("failed to instantiate build cache")
}
var pop layers.Popularity
if cfg.PopUrl != "" {
pop, err = downloadPopularity(cfg.PopUrl)
if err != nil {
log.WithError(err).WithField("popURL", cfg.PopUrl).
Fatal("failed to fetch popularity information")
}
}
state := builder.State{
Cache: &cache,
Cfg: cfg,
Pop: pop,
Storage: s,
UploadMutex: kmutex.New(),
}
log.WithFields(log.Fields{
"version": version,
"port": cfg.Port,
}).Info("starting Nixery")
// All /v2/ requests belong to the registry handler.
http.Handle("/v2/", &registryHandler{
state: &state,
})
// All other roots are served by the static file server.
webDir := http.Dir(cfg.WebDir)
http.Handle("/", http.FileServer(webDir))
log.Fatal(http.ListenAndServe(":"+cfg.Port, nil))
}

View file

@@ -1,73 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Package config implements structures to store Nixery's configuration at
// runtime as well as the logic for instantiating this configuration from the
// environment.
package config
import (
"os"
log "github.com/sirupsen/logrus"
)
func getConfig(key, desc, def string) string {
value := os.Getenv(key)
if value == "" && def == "" {
log.WithFields(log.Fields{
"option": key,
"description": desc,
}).Fatal("missing required configuration envvar")
} else if value == "" {
return def
}
return value
}
// Backend represents the possible storage backend types
type Backend int
const (
GCS = iota
FileSystem
)
// Config holds the Nixery configuration options.
type Config struct {
Port string // Port on which to launch HTTP server
Pkgs PkgSource // Source for Nix package set
Timeout string // Timeout for a single Nix builder (seconds)
WebDir string // Directory with static web assets
PopUrl string // URL to the Nix package popularity count
Backend Backend // Storage backend to use for Nixery
}
func FromEnv() (Config, error) {
pkgs, err := pkgSourceFromEnv()
if err != nil {
return Config{}, err
}
var b Backend
switch os.Getenv("NIXERY_STORAGE_BACKEND") {
case "gcs":
b = GCS
case "filesystem":
b = FileSystem
default:
log.WithField("values", []string{
"gcs",
}).Fatal("NIXERY_STORAGE_BACKEND must be set to a supported value (gcs or filesystem)")
}
return Config{
Port: getConfig("PORT", "HTTP port", ""),
Pkgs: pkgs,
Timeout: getConfig("NIX_TIMEOUT", "Nix builder timeout", "60"),
WebDir: getConfig("WEB_DIR", "Static web file dir", ""),
PopUrl: os.Getenv("NIX_POPULARITY_URL"),
Backend: b,
}, nil
}
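// Standalone sketch of the getConfig behaviour above: required variables
// abort startup when unset, optional ones fall back to a default. The
// variable names mirror the ones read by FromEnv; this is not part of the
// config package itself.
package main

import (
	"fmt"
	"log"
	"os"
)

func getConfigDemo(key, desc, def string) string {
	value := os.Getenv(key)
	if value == "" && def == "" {
		log.Fatalf("missing required configuration envvar %s (%s)", key, desc)
	} else if value == "" {
		return def
	}
	return value
}

func main() {
	os.Setenv("PORT", "8080")
	fmt.Println(getConfigDemo("PORT", "HTTP port", ""))                    // 8080
	fmt.Println(getConfigDemo("NIX_TIMEOUT", "Nix builder timeout", "60")) // 60 (default)
}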

View file

@@ -1,148 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
package config
import (
"crypto/sha1"
"encoding/json"
"fmt"
"os"
"regexp"
"strings"
log "github.com/sirupsen/logrus"
)
// PkgSource represents the source from which the Nix package set used
// by Nixery is imported. Users configure the source by setting one of
// the supported environment variables.
type PkgSource interface {
// Convert the package source into the representation required
// for calling Nix.
Render(tag string) (string, string)
// Create a key by which builds for this source and image
// combination can be cached.
//
// The empty string means that this value is not cacheable due
// to the package source being a moving target (such as a
// channel).
CacheKey(pkgs []string, tag string) string
}
type GitSource struct {
repository string
}
// Regex to determine whether a git reference is a commit hash or
// something else (branch/tag).
//
// Used to check whether a git reference is cacheable, and to pass the
// correct git structure to Nix.
//
// Note: If a user creates a branch or tag with the name of a commit
// and references it intentionally, this heuristic will fail.
var commitRegex = regexp.MustCompile(`^[0-9a-f]{40}$`)
func (g *GitSource) Render(tag string) (string, string) {
args := map[string]string{
"url": g.repository,
}
// The 'git' source requires a tag to be present. If the user
// has not specified one, it is assumed that the default
// 'master' branch should be used.
if tag == "latest" || tag == "" {
tag = "master"
}
if commitRegex.MatchString(tag) {
args["rev"] = tag
} else {
args["ref"] = tag
}
j, _ := json.Marshal(args)
return "git", string(j)
}
func (g *GitSource) CacheKey(pkgs []string, tag string) string {
// Only full commit hashes can be used for caching, as
// everything else is potentially a moving target.
if !commitRegex.MatchString(tag) {
return ""
}
unhashed := strings.Join(pkgs, "") + tag
hashed := fmt.Sprintf("%x", sha1.Sum([]byte(unhashed)))
return hashed
}
type NixChannel struct {
channel string
}
func (n *NixChannel) Render(tag string) (string, string) {
return "nixpkgs", n.channel
}
func (n *NixChannel) CacheKey(pkgs []string, tag string) string {
// Since Nix channels are downloaded from the nixpkgs-channels
// Github, users can specify full commit hashes as the
// "channel", in which case builds are cacheable.
if !commitRegex.MatchString(n.channel) {
return ""
}
unhashed := strings.Join(pkgs, "") + n.channel
hashed := fmt.Sprintf("%x", sha1.Sum([]byte(unhashed)))
return hashed
}
type PkgsPath struct {
path string
}
func (p *PkgsPath) Render(tag string) (string, string) {
return "path", p.path
}
func (p *PkgsPath) CacheKey(pkgs []string, tag string) string {
// Path-based builds are not currently cacheable because we
// have no local hash of the package folder's state easily
// available.
return ""
}
// Retrieve a package source from the environment. If no source is
// specified, the Nix code will default to a recent NixOS channel.
func pkgSourceFromEnv() (PkgSource, error) {
if channel := os.Getenv("NIXERY_CHANNEL"); channel != "" {
log.WithField("channel", channel).Info("using Nix package set from Nix channel or commit")
return &NixChannel{
channel: channel,
}, nil
}
if git := os.Getenv("NIXERY_PKGS_REPO"); git != "" {
log.WithField("repo", git).Info("using Nix package set from git repository")
return &GitSource{
repository: git,
}, nil
}
if path := os.Getenv("NIXERY_PKGS_PATH"); path != "" {
log.WithField("path", path).Info("using Nix package set at local path")
return &PkgsPath{
path: path,
}, nil
}
return nil, fmt.Errorf("no valid package source has been specified")
}
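// Illustrative sketch of the tag handling in GitSource.Render above: a
// 40-character hex string is treated as a pinned revision ("rev"), anything
// else as a mutable reference ("ref"), and "latest" maps to "master". The
// repository URL is made up; this is not part of the config package.
package main

import (
	"encoding/json"
	"fmt"
	"regexp"
)

var commitDemoRegex = regexp.MustCompile(`^[0-9a-f]{40}$`)

func renderGitArgs(repo, tag string) string {
	args := map[string]string{"url": repo}
	if tag == "latest" || tag == "" {
		tag = "master"
	}
	if commitDemoRegex.MatchString(tag) {
		args["rev"] = tag
	} else {
		args["ref"] = tag
	}
	j, _ := json.Marshal(args)
	return string(j)
}

func main() {
	fmt.Println(renderGitArgs("https://example.com/nixpkgs.git", "latest"))
	fmt.Println(renderGitArgs("https://example.com/nixpkgs.git",
		"0000000000000000000000000000000000000000"))
}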

View file

@@ -1,129 +0,0 @@
# Copyright 2022 The TVL Contributors
# SPDX-License-Identifier: Apache-2.0
# This function header aims to provide compatibility between builds of
# Nixery taking place inside/outside of the TVL depot.
#
# In the future, Nixery will transition to using //nix/buildGo for its
# build system and this will need some major adaptations to support
# that.
{ depot ? { nix.readTree.drvTargets = x: x; }
, pkgs ? import <nixpkgs> { }
, preLaunch ? ""
, extraPackages ? [ ]
, maxLayers ? 20
, commitHash ? null
, ...
}@args:
with pkgs;
let
inherit (pkgs) buildGoModule lib;
# Avoid extracting this from git until we have a way to plumb
# through revision numbers.
nixery-commit-hash = "depot";
in
depot.nix.readTree.drvTargets rec {
# Implementation of the Nix image building logic
nixery-prepare-image = import ./prepare-image { inherit pkgs; };
# Include the Nixery website into the Nix store, unless it's being
# overridden to something else. Nixery will serve this as its front
# page when visited from a browser.
nixery-web = ./web;
nixery-popcount = callPackage ./popcount { };
# Build Nixery's Go code, resulting in the binaries used for various
# bits of functionality.
#
# The server binary is wrapped to ensure that required environment
# variables are set at runtime.
nixery = buildGoModule rec {
name = "nixery";
src = ./.;
doCheck = true;
# Needs to be updated after every modification of go.mod/go.sum
vendorHash = "sha256-io9NCeZmjCZPLmII3ajXIsBWbT40XiW8ncXOuUDabbo=";
ldflags = [
"-s"
"-w"
"-X"
"main.version=${nixery-commit-hash}"
];
nativeBuildInputs = [ makeWrapper ];
postInstall = ''
wrapProgram $out/bin/server \
--set-default WEB_DIR "${nixery-web}" \
--prefix PATH : ${nixery-prepare-image}/bin
'';
# Nixery is mirrored to Github at tazjin/nixery; this is
# automatically updated from CI for canon builds.
passthru.meta.ci.extraSteps.github = depot.tools.releases.filteredGitPush {
filter = ":/tools/nixery";
remote = "git@github.com:tazjin/nixery.git";
ref = "refs/heads/master";
};
};
# Wrapper script for the wrapper script (meta!) which configures
# the container environment appropriately.
#
# Most importantly, sandboxing is disabled to avoid privilege
# issues in containers.
nixery-launch-script = writeShellScriptBin "nixery" ''
set -e
export PATH=${coreutils}/bin:$PATH
export NIX_SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt
mkdir -p /tmp
# Create the build user/group required by Nix
echo 'nixbld:x:30000:nixbld' >> /etc/group
echo 'nixbld:x:30000:30000:nixbld:/tmp:/bin/bash' >> /etc/passwd
echo 'root:x:0:0:root:/root:/bin/bash' >> /etc/passwd
echo 'root:x:0:' >> /etc/group
# Disable sandboxing to avoid running into privilege issues
mkdir -p /etc/nix
echo 'sandbox = false' >> /etc/nix/nix.conf
# In some cases users building their own image might want to
# customise something on the inside (e.g. set up an environment
# for keys or whatever).
#
# This can be achieved by setting a 'preLaunch' script.
${preLaunch}
exec ${nixery}/bin/server
'';
# Container image containing Nixery and Nix itself. This image can
# be run on Kubernetes, published on AppEngine or whatever else is
# desired.
nixery-image = dockerTools.buildLayeredImage {
name = "nixery";
config.Cmd = [ "${nixery-launch-script}/bin/nixery" ];
inherit maxLayers;
contents = [
bashInteractive
cacert
coreutils
git
gnutar
gzip
iana-etc
nix
nixery-prepare-image
nixery-launch-script
openssh
zlib
] ++ extraPackages;
};
}

View file

@@ -1,14 +0,0 @@
module github.com/google/nixery
go 1.15
require (
cloud.google.com/go/storage v1.22.1
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/go-cmp v0.5.8
github.com/im7mortal/kmutex v1.0.1 // indirect
github.com/pkg/xattr v0.4.7
github.com/sirupsen/logrus v1.8.1
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401
gonum.org/v1/gonum v0.11.0
)

View file

@@ -1,708 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0 h1:XdQIN5mdPTSBVwSIVDuY5e8ZzVAccsHvD3qTEz4zIps=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.22.1 h1:F6IlQJZrZM++apn9V5/VfS3gbTUYg98PS3EMQAzqtfg=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0 h1:nRJtk3y8Fm770D42QV6T90ZnvFZyk7agSo3Q+Z9p3WI=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/im7mortal/kmutex v1.0.1 h1:zAACzjwD+OEknDqnLdvRa/BhzFM872EBwKijviGLc9Q=
github.com/im7mortal/kmutex v1.0.1/go.mod h1:f71c/Ugk/+58OHRAgvgzPP3QEiWGUjK13fd8ozfKWdo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/xattr v0.4.7 h1:XoA3KzmFvyPlH4RwX5eMcgtzcaGBaSvgt3IoFQfbrmQ=
github.com/pkg/xattr v0.4.7/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401 h1:zwrSfklXn0gxyLRX/aR+q6cgHbV/ItVyzbPlbA+dkAw=
golang.org/x/oauth2 v0.0.0-20220524215830-622c5d57e401/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f h1:8w7RhxzTVgUzw/AH/9mUV5q0vMgy40SQRursCcfmkCw=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
gonum.org/v1/gonum v0.11.0 h1:f1IJhK4Km5tBJmaiJXtk/PkL4cdVX6J+tGiM187uT5E=
gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0 h1:ExR2D+5TYIrMphWgs5JCgwRhEDlPDXXrLwHHMgPHTXE=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335 h1:2D0OT6tPVdrQTOnVe1VQjfJPTED6EZ7fdJ/f6Db6OsY=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View file

@ -1,354 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// This package reads an export reference graph (i.e. a graph representing the
// runtime dependencies of a set of derivations) created by Nix and groups it in
// a way that is likely to match the grouping for other derivation sets with
// overlapping dependencies.
//
// This is used to determine which derivations to include in which layers of a
// container image.
//
// # Inputs
//
// - a graph of Nix runtime dependencies, generated via exportReferencesGraph
// - popularity values of each package in the Nix package set (in the form of a
// direct reference count)
// - a maximum number of layers to allocate for the image (the "layer budget")
//
// # Algorithm
//
// It works by first creating a (directed) dependency tree:
//
// img (root node)
// │
// ├───> A ─────┐
// │            v
// ├───> B ───> E
// │            ^
// ├───> C ─────┘
// │     │
// │     v
// └───> D ───> F
//
//       │
//       └────> G
//
// Each node (i.e. package) is then visited to determine how important
// it is to separate this node into its own layer, specifically:
//
// 1. Is the node within a certain threshold percentile of absolute
// popularity within all of nixpkgs? (e.g. `glibc`, `openssl`)
//
// 2. Is the node's runtime closure above a threshold size? (e.g. 100MB)
//
// In either case, a bit is flipped for this node representing each
// condition and an edge to it is inserted directly from the image
// root, if it does not already exist.
//
// For the rest of the example we assume 'G' is above the threshold
// size and 'E' is popular.
//
// This tree is then transformed into a dominator tree:
//
// img
// │
// ├───> A
// ├───> B
// ├───> C
// ├───> E
// ├───> D ───> F
// └───> G
//
// Specifically this means that the paths to A, B, C, E, G, and D
// always pass through the root (i.e. are dominated by it), whilst F
// is dominated by D (all paths go through it).
//
// The top-level subtrees are considered as the initially selected
// layers.
//
// If the list of layers fits within the layer budget, it is returned.
//
// Otherwise, a merge rating is calculated for each layer. This is the
// product of the layer's total size and its root node's popularity.
//
// Layers are then merged in ascending order of merge ratings until
// they fit into the layer budget.
//
// # Threshold values
//
// Threshold values for the partitioning conditions mentioned above
// have not yet been determined, but we will make a good first guess
// based on gut feeling and proceed to measure their impact on cache
// hits/misses.
//
// # Example
//
// Using the logic described above as well as the example presented in
// the introduction, this program would create the following layer
// groupings (assuming no additional partitioning):
//
// Layer budget: 1
// Layers: { A, B, C, D, E, F, G }
//
// Layer budget: 2
// Layers: { G }, { A, B, C, D, E, F }
//
// Layer budget: 3
// Layers: { G }, { E }, { A, B, C, D, F }
//
// Layer budget: 4
// Layers: { G }, { E }, { D, F }, { A, B, C }
//
// ...
//
// Layer budget: 10
// Layers: { E }, { D, F }, { A }, { B }, { C }
package layers
import (
"crypto/sha1"
"fmt"
"regexp"
"sort"
"strings"
log "github.com/sirupsen/logrus"
"gonum.org/v1/gonum/graph/flow"
"gonum.org/v1/gonum/graph/simple"
)
// RuntimeGraph represents structured information from Nix about the runtime
// dependencies of a derivation.
//
// This is generated in Nix by using the exportReferencesGraph feature.
type RuntimeGraph struct {
References struct {
Graph []string `json:"graph"`
} `json:"exportReferencesGraph"`
Graph []struct {
Size uint64 `json:"closureSize"`
Path string `json:"path"`
Refs []string `json:"references"`
} `json:"graph"`
}
// Popularity data for each Nix package, calculated in advance.
//
// The value for a package is its direct reference count within the
// nixpkgs tree, as assembled by Nixery's popcount tool.
type Popularity = map[string]int
// Layer represents the data returned for each layer that Nix should
// build for the container image.
type Layer struct {
Contents []string `json:"contents"`
MergeRating uint64
}
// Hash the contents of a layer to create a deterministic identifier that can be
// used for caching.
func (l *Layer) Hash() string {
sum := sha1.Sum([]byte(strings.Join(l.Contents, ":")))
return fmt.Sprintf("%x", sum)
}
func (a Layer) merge(b Layer) Layer {
a.Contents = append(a.Contents, b.Contents...)
a.MergeRating += b.MergeRating
return a
}
// closure as pointed to by the graph nodes.
type closure struct {
GraphID int64
Path string
Size uint64
Refs []string
Popularity int
}
func (c *closure) ID() int64 {
return c.GraphID
}
var nixRegexp = regexp.MustCompile(`^/nix/store/[a-z0-9]+-`)
// PackageFromPath returns the name of a Nix package based on its
// output store path.
func PackageFromPath(path string) string {
return nixRegexp.ReplaceAllString(path, "")
}
// DOTID provides a human-readable package name. The name stems from
// the dot format used by GraphViz, into which the dependency graph
// can be rendered.
func (c *closure) DOTID() string {
return PackageFromPath(c.Path)
}
// bigOrPopular checks whether this closure should be considered for
// separation into its own layer, even if it would otherwise only
// appear in a subtree of the dominator tree.
func (c *closure) bigOrPopular() bool {
const sizeThreshold = 100 * 1000000 // 100MB
if c.Size > sizeThreshold {
return true
}
// Threshold value is picked arbitrarily right now. The reason
// for this is that some packages (such as `cacert`) have very
// few direct dependencies, but are required by pretty much
// everything.
if c.Popularity >= 100 {
return true
}
return false
}
func insertEdges(graph *simple.DirectedGraph, cmap *map[string]*closure, node *closure) {
// Big or popular nodes get a separate edge from the top to
// flag them for their own layer.
if node.bigOrPopular() && !graph.HasEdgeFromTo(0, node.ID()) {
edge := graph.NewEdge(graph.Node(0), node)
graph.SetEdge(edge)
}
for _, c := range node.Refs {
// Nix adds a self reference to each node, which
// should not be inserted.
if c != node.Path {
edge := graph.NewEdge(node, (*cmap)[c])
graph.SetEdge(edge)
}
}
}
// Create a graph structure from the references supplied by Nix.
func buildGraph(refs *RuntimeGraph, pop *Popularity) *simple.DirectedGraph {
cmap := make(map[string]*closure)
graph := simple.NewDirectedGraph()
// Insert all closures into the graph, as well as a fake root
// closure which serves as the top of the tree.
//
// A map from store paths to IDs is kept to actually insert
// edges below.
root := &closure{
GraphID: 0,
Path: "image_root",
}
graph.AddNode(root)
for idx, c := range refs.Graph {
node := &closure{
GraphID: int64(idx + 1), // inc because of root node
Path: c.Path,
Size: c.Size,
Refs: c.Refs,
}
// The packages `nss-cacert` and `iana-etc` are added
// by Nixery to *every single image* and should have a
// very high popularity.
//
// Other popularity values are populated from the data
// set assembled by Nixery's popcount.
id := node.DOTID()
if strings.HasPrefix(id, "nss-cacert") || strings.HasPrefix(id, "iana-etc") {
// glibc has ~300k references, these packages need *more*
node.Popularity = 500000
} else if p, ok := (*pop)[id]; ok {
node.Popularity = p
} else {
node.Popularity = 1
}
graph.AddNode(node)
cmap[c.Path] = node
}
// Insert the top-level closures with edges from the root
// node, then insert all edges for each closure.
for _, p := range refs.References.Graph {
edge := graph.NewEdge(root, cmap[p])
graph.SetEdge(edge)
}
for _, c := range cmap {
insertEdges(graph, &cmap, c)
}
return graph
}
// Extracts a subgraph starting at the specified root from the
// dominator tree. The subgraph is converted into a flat list of
// layers, each containing the store paths and merge rating.
func groupLayer(dt *flow.DominatorTree, root *closure) Layer {
size := root.Size
contents := []string{root.Path}
children := dt.DominatedBy(root.ID())
// This iteration does not use 'range' because the list being
// iterated is modified during the iteration (yes, I'm sorry).
for i := 0; i < len(children); i++ {
child := children[i].(*closure)
size += child.Size
contents = append(contents, child.Path)
children = append(children, dt.DominatedBy(child.ID())...)
}
// Contents are sorted to ensure that hashing is consistent
sort.Strings(contents)
return Layer{
Contents: contents,
MergeRating: uint64(root.Popularity) * size,
}
}
// Calculate the dominator tree of the entire package set and group
// each top-level subtree into a layer.
//
// Layers are merged together until they fit into the layer budget,
// based on their merge rating.
func dominate(budget int, graph *simple.DirectedGraph) []Layer {
dt := flow.Dominators(graph.Node(0), graph)
var layers []Layer
for _, n := range dt.DominatedBy(dt.Root().ID()) {
layers = append(layers, groupLayer(&dt, n.(*closure)))
}
sort.Slice(layers, func(i, j int) bool {
return layers[i].MergeRating < layers[j].MergeRating
})
if len(layers) > budget {
log.WithFields(log.Fields{
"layers": len(layers),
"budget": budget,
}).Info("ideal image exceeds layer budget")
}
for len(layers) > budget {
merged := layers[0].merge(layers[1])
layers[1] = merged
layers = layers[1:]
}
return layers
}
// GroupLayers applies the algorithm described above to its input and returns a
// list of layers, each consisting of a list of Nix store paths that it should
// contain.
func GroupLayers(refs *RuntimeGraph, pop *Popularity, budget int) []Layer {
graph := buildGraph(refs, pop)
return dominate(budget, graph)
}
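
For illustration, a minimal sketch of how the exported `GroupLayers` entry point might be driven, assuming the reference graph and popularity data have already been serialised to JSON. The import path, file names and layer budget below are illustrative assumptions, not taken from this code:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/google/nixery/layers" // import path assumed for this sketch
)

func main() {
	// Read the reference graph emitted by Nix (file name illustrative).
	raw, err := os.ReadFile("runtime-graph.json")
	if err != nil {
		log.Fatalf("reading runtime graph: %s", err)
	}

	var refs layers.RuntimeGraph
	if err := json.Unmarshal(raw, &refs); err != nil {
		log.Fatalf("parsing runtime graph: %s", err)
	}

	// Read the popularity data produced by popcount (file name illustrative).
	raw, err = os.ReadFile("popularity.json")
	if err != nil {
		log.Fatalf("reading popularity data: %s", err)
	}

	pop := make(layers.Popularity)
	if err := json.Unmarshal(raw, &pop); err != nil {
		log.Fatalf("parsing popularity data: %s", err)
	}

	// 96 is an arbitrary layer budget chosen for the sketch.
	for _, l := range layers.GroupLayers(&refs, &pop, 96) {
		fmt.Printf("%s: %d store paths, merge rating %d\n",
			l.Hash(), len(l.Contents), l.MergeRating)
	}
}
```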

View file

@ -1,108 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
package logs
// This file configures different log formatters via logrus. The
// standard formatter uses a structured JSON format that is compatible
// with Stackdriver Error Reporting.
//
// https://cloud.google.com/error-reporting/docs/formatting-error-messages
import (
"bytes"
"encoding/json"
log "github.com/sirupsen/logrus"
)
type stackdriverFormatter struct{}
type serviceContext struct {
Service string `json:"service"`
Version string `json:"version"`
}
type reportLocation struct {
FilePath string `json:"filePath"`
LineNumber int `json:"lineNumber"`
FunctionName string `json:"functionName"`
}
var nixeryContext = serviceContext{
Service: "nixery",
}
// isError determines whether an entry should be logged as an error
// (i.e. with attached `context`).
//
// This requires the caller information to be present on the log
// entry, as stacktraces are not available currently.
func isError(e *log.Entry) bool {
l := e.Level
return (l == log.ErrorLevel || l == log.FatalLevel || l == log.PanicLevel) &&
e.HasCaller()
}
// logSeverity formats the entry's severity into a format compatible
// with Stackdriver Logging.
//
// The two formats that are being mapped do not have an equivalent set
// of severities/levels, so the mapping is somewhat arbitrary for a
// handful of them.
//
// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#LogSeverity
func logSeverity(l log.Level) string {
switch l {
case log.TraceLevel:
return "DEBUG"
case log.DebugLevel:
return "DEBUG"
case log.InfoLevel:
return "INFO"
case log.WarnLevel:
return "WARNING"
case log.ErrorLevel:
return "ERROR"
case log.FatalLevel:
return "CRITICAL"
case log.PanicLevel:
return "EMERGENCY"
default:
return "DEFAULT"
}
}
func (f stackdriverFormatter) Format(e *log.Entry) ([]byte, error) {
msg := e.Data
msg["serviceContext"] = &nixeryContext
msg["message"] = &e.Message
msg["eventTime"] = &e.Time
msg["severity"] = logSeverity(e.Level)
if e, ok := msg[log.ErrorKey]; ok {
if err, isError := e.(error); isError {
msg[log.ErrorKey] = err.Error()
} else {
delete(msg, log.ErrorKey)
}
}
if isError(e) {
loc := reportLocation{
FilePath: e.Caller.File,
LineNumber: e.Caller.Line,
FunctionName: e.Caller.Function,
}
msg["context"] = &loc
}
b := new(bytes.Buffer)
err := json.NewEncoder(b).Encode(&msg)
return b.Bytes(), err
}
func Init(version string) {
nixeryContext.Version = version
log.SetReportCaller(true)
log.SetFormatter(stackdriverFormatter{})
}
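
A small usage sketch of this package, with the import path assumed for illustration; it installs the formatter and emits one plain entry and one error entry that picks up caller information:

```go
package main

import (
	"errors"

	log "github.com/sirupsen/logrus"

	"github.com/google/nixery/logs" // import path assumed for this sketch
)

func main() {
	// Switch logrus to the Stackdriver-compatible JSON formatter and
	// stamp every entry with the service version.
	logs.Init("dev")

	log.WithField("component", "example").Info("starting up")

	// Error-level entries carry caller information, so the formatter
	// attaches a `context` block usable by Error Reporting.
	log.WithError(errors.New("boom")).Error("something failed")
}
```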

View file

@ -1,135 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Package image implements logic for creating the image metadata
// (such as the image manifest and configuration).
package manifest
import (
"crypto/sha256"
"encoding/json"
"fmt"
"sort"
)
const (
// manifest constants
schemaVersion = 2
// media types
ManifestType = "application/vnd.docker.distribution.manifest.v2+json"
LayerType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
configType = "application/vnd.docker.container.image.v1+json"
// image config constants
os = "linux"
fsType = "layers"
)
type Entry struct {
MediaType string `json:"mediaType,omitempty"`
Size int64 `json:"size"`
Digest string `json:"digest"`
// These fields are internal to Nixery and not part of the
// serialised entry.
MergeRating uint64 `json:"-"`
TarHash string `json:",omitempty"`
}
type manifest struct {
SchemaVersion int `json:"schemaVersion"`
MediaType string `json:"mediaType"`
Config Entry `json:"config"`
Layers []Entry `json:"layers"`
}
type imageConfig struct {
Architecture string `json:"architecture"`
OS string `json:"os"`
RootFS struct {
FSType string `json:"type"`
DiffIDs []string `json:"diff_ids"`
} `json:"rootfs"`
Config struct {
Cmd []string `json:",omitempty"`
Env []string `json:",omitempty"`
} `json:"config"`
}
// ConfigLayer represents the configuration layer to be included in
// the manifest, containing its JSON-serialised content and SHA256
// hash.
type ConfigLayer struct {
Config []byte
SHA256 string
}
// configLayer creates an image configuration with the values set to
// the constant defaults.
//
// Outside of this module the image configuration is treated as an
// opaque blob and it is thus returned as an already serialised byte
// array and its SHA256-hash.
func configLayer(arch string, hashes []string, cmd string) ConfigLayer {
c := imageConfig{}
c.Architecture = arch
c.OS = os
c.RootFS.FSType = fsType
c.RootFS.DiffIDs = hashes
if cmd != "" {
c.Config.Cmd = []string{cmd}
}
c.Config.Env = []string{"SSL_CERT_FILE=/etc/ssl/certs/ca-bundle.crt"}
j, _ := json.Marshal(c)
return ConfigLayer{
Config: j,
SHA256: fmt.Sprintf("%x", sha256.Sum256(j)),
}
}
// Manifest creates an image manifest from the specified layer entries
// and returns its JSON-serialised form as well as the configuration
// layer.
//
// Callers do not need to set the media type for the layer entries.
func Manifest(arch string, layers []Entry, cmd string) (json.RawMessage, ConfigLayer) {
// Sort layers by their merge rating, from highest to lowest.
// This makes it likely for a contiguous chain of shared image
// layers to appear at the beginning of the layer list.
//
// Due to moby/moby#38446 Docker considers the order of layers
// when deciding which layers to download again.
sort.Slice(layers, func(i, j int) bool {
return layers[i].MergeRating > layers[j].MergeRating
})
hashes := make([]string, len(layers))
for i, l := range layers {
hashes[i] = l.TarHash
l.MediaType = LayerType
l.TarHash = ""
layers[i] = l
}
c := configLayer(arch, hashes, cmd)
m := manifest{
SchemaVersion: schemaVersion,
MediaType: ManifestType,
Config: Entry{
MediaType: configType,
Size: int64(len(c.Config)),
Digest: "sha256:" + c.SHA256,
},
Layers: layers,
}
j, _ := json.Marshal(m)
return json.RawMessage(j), c
}
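
For illustration, a hedged sketch of calling `Manifest` with two layer entries; the import path, digests, sizes, hashes and command below are placeholders:

```go
package main

import (
	"fmt"

	"github.com/google/nixery/manifest" // import path assumed for this sketch
)

func main() {
	// Two layer entries with placeholder digests, sizes and hashes.
	entries := []manifest.Entry{
		{
			Size:        1024,
			Digest:      "sha256:aaaa", // placeholder
			TarHash:     "sha256:bbbb", // placeholder
			MergeRating: 10,
		},
		{
			Size:        2048,
			Digest:      "sha256:cccc", // placeholder
			TarHash:     "sha256:dddd", // placeholder
			MergeRating: 99, // higher rating, sorted to the front
		},
	}

	m, cfg := manifest.Manifest("amd64", entries, "/bin/sh")
	fmt.Println(string(m))
	fmt.Println("config layer digest: sha256:" + cfg.SHA256)
}
```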

View file

@ -1,39 +0,0 @@
popcount
========
This script is used to count the popularity for each package in `nixpkgs`, by
determining how many other packages depend on it.
It skips over all packages that fail to build, are not cached, or are unfree,
but these omissions do not meaningfully affect the statistics.
It currently does not evaluate nested attribute sets (such as
`haskellPackages`).
## Usage
1. Generate a list of all top-level attributes in `nixpkgs`:
```shell
nix eval '(with builtins; toJSON (attrNames (import <nixpkgs> {})))' | jq -r | jq > all-top-level.json
```
2. Run `./popcount > all-runtime-deps.txt`
3. Collect and count the results with the following magic incantation:
```shell
cat all-runtime-deps.txt \
| sed -r 's|/nix/store/[a-z0-9]+-||g' \
| sort \
| uniq -c \
| sort -n -r \
| awk '{ print "{\"" $2 "\":" $1 "}"}' \
| jq -c -s '. | add | with_entries(select(.value > 1))' \
> your-output-file
```
In essence, this will trim Nix's store paths and hashes from the output,
count the occurrences of each package and return the output as JSON. All
packages that have no references other than themselves are removed from the
output.
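
The resulting file is a flat JSON object mapping package names to their reference counts, which is the shape consumed downstream as popularity data. A short Go sketch of reading it back, using the output file name from step 3:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

func main() {
	// "your-output-file" refers to the file produced by the pipeline above.
	raw, err := os.ReadFile("your-output-file")
	if err != nil {
		log.Fatalf("reading popularity data: %s", err)
	}

	counts := make(map[string]int)
	if err := json.Unmarshal(raw, &counts); err != nil {
		log.Fatalf("parsing popularity data: %s", err)
	}

	fmt.Printf("loaded reference counts for %d packages\n", len(counts))
}
```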

View file

@ -1,19 +0,0 @@
# Copyright 2022, 2024 The TVL Contributors
# SPDX-License-Identifier: Apache-2.0
{ buildGoModule }:
buildGoModule {
name = "nixery-popcount";
src = ./.;
vendorHash = null;
# https://nixos.org/manual/nixpkgs/stable/#buildGoPackage-migration
postPatch = ''
go mod init github.com/google/nixery/popcount
'';
doCheck = true;
}

View file

@ -1,280 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Popcount fetches popularity information for each store path in a
// given Nix channel from the upstream binary cache.
//
// It does this simply by inspecting the narinfo files, rather than
// attempting to deal with instantiation of the binary cache.
//
// This is *significantly* faster than attempting to realise the whole
// channel and then calling `nix path-info` on it.
//
// TODO(tazjin): Persist intermediate results (references for each
// store path) to speed up subsequent runs.
package main
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"regexp"
"strings"
)
var client http.Client
var pathexp = regexp.MustCompile("/nix/store/([a-z0-9]{32})-(.*)$")
var refsexp = regexp.MustCompile("(?m:^References: (.*)$)")
var refexp = regexp.MustCompile("^([a-z0-9]{32})-(.*)$")
type meta struct {
name string
url string
commit string
}
type item struct {
name string
hash string
}
func failOn(err error, msg string) {
if err != nil {
log.Fatalf("%s: %s", msg, err)
}
}
func channelMetadata(channel string) meta {
// This needs an HTTP client that does not follow redirects
// because the channel URL is used explicitly for other
// downloads.
c := http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
resp, err := c.Get(fmt.Sprintf("https://channels.nixos.org/%s", channel))
failOn(err, "failed to retrieve channel metadata")
loc, err := resp.Location()
failOn(err, "no redirect location given for channel")
// TODO(tazjin): These redirects are currently served as 301s, but
// should (and used to) be 302s. Check if/when this is fixed and
// update accordingly.
if !(resp.StatusCode == 301 || resp.StatusCode == 302) {
log.Fatalf("Expected redirect for channel, but received '%s'\n", resp.Status)
}
commitResp, err := c.Get(fmt.Sprintf("%s/git-revision", loc.String()))
failOn(err, "failed to retrieve commit for channel")
defer commitResp.Body.Close()
commit, err := ioutil.ReadAll(commitResp.Body)
failOn(err, "failed to read commit from response")
if commitResp.StatusCode != 200 {
log.Fatalf("non-success status code when fetching commit: %s (%v)", string(commit), commitResp.StatusCode)
}
return meta{
name: channel,
url: loc.String(),
commit: string(commit),
}
}
func downloadStorePaths(c *meta) []string {
resp, err := client.Get(fmt.Sprintf("%s/store-paths.xz", c.url))
failOn(err, "failed to download store-paths.xz")
defer resp.Body.Close()
cmd := exec.Command("xzcat")
stdin, err := cmd.StdinPipe()
failOn(err, "failed to open xzcat stdin")
stdout, err := cmd.StdoutPipe()
failOn(err, "failed to open xzcat stdout")
defer stdout.Close()
go func() {
defer stdin.Close()
io.Copy(stdin, resp.Body)
}()
err = cmd.Start()
failOn(err, "failed to start xzcat")
paths, err := ioutil.ReadAll(stdout)
failOn(err, "failed to read uncompressed store paths")
err = cmd.Wait()
failOn(err, "xzcat failed to decompress")
return strings.Split(string(paths), "\n")
}
func storePathToItem(path string) *item {
res := pathexp.FindStringSubmatch(path)
if len(res) != 3 {
return nil
}
return &item{
hash: res[1],
name: res[2],
}
}
func narInfoToRefs(narinfo string) []string {
all := refsexp.FindAllStringSubmatch(narinfo, 1)
if len(all) != 1 {
log.Fatalf("failed to parse narinfo:\n%s\nfound: %v\n", narinfo, all[0])
}
if len(all[0]) != 2 {
// no references found
return []string{}
}
refs := strings.Split(all[0][1], " ")
for i, s := range refs {
if s == "" {
continue
}
res := refexp.FindStringSubmatch(s)
refs[i] = res[2]
}
return refs
}
func fetchNarInfo(i *item) (string, error) {
file, err := ioutil.ReadFile("popcache/" + i.hash)
if err == nil {
return string(file), nil
}
resp, err := client.Get(fmt.Sprintf("https://cache.nixos.org/%s.narinfo", i.hash))
if err != nil {
return "", err
}
defer resp.Body.Close()
narinfo, err := ioutil.ReadAll(resp.Body)
// best-effort write the file to the cache
ioutil.WriteFile("popcache/"+i.hash, narinfo, 0644)
return string(narinfo), err
}
// downloader starts a worker that takes care of downloading narinfos
// for all paths received from the queue.
//
// If there is no data remaining in the queue, the downloader exits
// and informs the finaliser queue about having exited.
func downloader(queue chan *item, narinfos chan string, downloaders chan struct{}) {
for i := range queue {
ni, err := fetchNarInfo(i)
if err != nil {
log.Printf("couldn't fetch narinfo for %s: %s\n", i.name, err)
continue
}
narinfos <- ni
}
downloaders <- struct{}{}
}
// finaliser counts the number of downloaders that have exited and
// closes the narinfos queue to signal to the counters that no more
// elements will arrive.
func finaliser(count int, downloaders chan struct{}, narinfos chan string) {
for range downloaders {
count--
if count == 0 {
close(downloaders)
close(narinfos)
break
}
}
}
func main() {
if len(os.Args) == 1 {
log.Fatalf("Nix channel must be specified as first argument")
}
err := os.MkdirAll("popcache", 0755)
if err != nil {
log.Fatalf("Failed to create 'popcache' directory in current folder: %s\n", err)
}
count := 42 // concurrent downloader count
channel := os.Args[1]
log.Printf("Fetching metadata for channel '%s'\n", channel)
meta := channelMetadata(channel)
log.Printf("Pinned channel '%s' to commit '%s'\n", meta.name, meta.commit)
paths := downloadStorePaths(&meta)
log.Printf("Fetching references for %d store paths\n", len(paths))
// Download paths concurrently and receive their narinfos into
// a channel. Data is collated centrally into a map and
// serialised at the /very/ end.
downloadQueue := make(chan *item, len(paths))
for _, p := range paths {
if i := storePathToItem(p); i != nil {
downloadQueue <- i
}
}
close(downloadQueue)
// Set up a task tracking channel for parsing & counting
// narinfos, as well as a coordination channel for signaling
// that all downloads have finished
narinfos := make(chan string, 50)
downloaders := make(chan struct{}, count)
for i := 0; i < count; i++ {
go downloader(downloadQueue, narinfos, downloaders)
}
go finaliser(count, downloaders, narinfos)
counts := make(map[string]int)
for ni := range narinfos {
refs := narInfoToRefs(ni)
for _, ref := range refs {
if ref == "" {
continue
}
counts[ref] += 1
}
}
// Remove all self-references (i.e. packages not referenced by anyone else)
for k, v := range counts {
if v == 1 {
delete(counts, k)
}
}
bytes, _ := json.Marshal(counts)
outfile := fmt.Sprintf("popularity-%s-%s.json", meta.name, meta.commit)
err = ioutil.WriteFile(outfile, bytes, 0644)
if err != nil {
log.Fatalf("Failed to write output to '%s': %s\n", outfile, err)
}
log.Printf("Wrote output to '%s'\n", outfile)
}

View file

@ -1,18 +0,0 @@
# Copyright 2022 The TVL Contributors
# SPDX-License-Identifier: Apache-2.0
# This file builds a wrapper script called by Nixery to ask for the
# content information for a given image.
#
# The purpose of using a wrapper script is to ensure that the paths to
# all required Nix files are set correctly at runtime.
{ pkgs ? import <nixpkgs> { } }:
pkgs.writeShellScriptBin "nixery-prepare-image" ''
exec ${pkgs.nix}/bin/nix-build \
--show-trace \
--no-out-link "$@" \
--argstr loadPkgs ${./load-pkgs.nix} \
${./prepare-image.nix}
''

View file

@ -1,36 +0,0 @@
# Copyright 2022 The TVL Contributors
# SPDX-License-Identifier: Apache-2.0
# Load a Nix package set from one of the supported source types
# (nixpkgs, git, path).
{ srcType, srcArgs, importArgs ? { } }:
with builtins;
let
# If a nixpkgs channel is requested, it is retrieved from GitHub (as
# a tarball) and imported.
fetchImportChannel = channel:
let
url =
"https://github.com/NixOS/nixpkgs/archive/${channel}.tar.gz";
in
import (fetchTarball url) importArgs;
# If a git repository is requested, it is retrieved via
# builtins.fetchGit which defaults to the git configuration of the
# outside environment. This means that user-configured SSH
# credentials etc. are going to work as expected.
fetchImportGit = spec: import (fetchGit spec) importArgs;
# No special handling is used for paths, so users are expected to pass one
# that will work natively with Nix.
importPath = path: import (toPath path) importArgs;
in
if srcType == "nixpkgs" then
fetchImportChannel srcArgs
else if srcType == "git" then
fetchImportGit (fromJSON srcArgs)
else if srcType == "path" then
importPath srcArgs
else
throw ("Invalid package set source specification: ${srcType} (${srcArgs})")

View file

@ -1,198 +0,0 @@
# Copyright 2022 The TVL Contributors
# SPDX-License-Identifier: Apache-2.0
# This file contains a derivation that outputs structured information
# about the runtime dependencies of an image with a given set of
# packages. This is used by Nixery to determine the layer grouping and
# assemble each layer.
#
# In addition it creates and outputs a meta-layer with the symlink
# structure required for using the image together with the individual
# package layers.
{
# Description of the package set to be used (will be loaded by load-pkgs.nix)
srcType ? "nixpkgs"
, srcArgs ? "nixos-unstable"
, system ? "x86_64-linux"
, importArgs ? { }
, # Path to load-pkgs.nix
loadPkgs ? ./load-pkgs.nix
, # Packages to install by name (which must refer to top-level attributes of
# nixpkgs). This is passed in as a JSON-array in string form.
packages ? "[]"
}:
let
inherit (builtins)
foldl'
fromJSON
hasAttr
length
match
readFile
toFile
toJSON;
# Package set to use for sourcing utilities
nativePkgs = import loadPkgs { inherit srcType srcArgs importArgs; };
inherit (nativePkgs) coreutils jq openssl lib runCommand writeText symlinkJoin;
# Package set to use for packages to be included in the image. This
# package set is imported with the system set to the target
# architecture.
pkgs = import loadPkgs {
inherit srcType srcArgs;
importArgs = importArgs // {
inherit system;
};
};
# deepFetch traverses the top-level Nix package set to retrieve an item via a
# path specified in string form.
#
# For top-level items, the name of the key yields the result directly. Nested
# items are fetched by using dot-syntax, as in Nix itself.
#
# Due to a restriction of the registry API specification it is not possible to
# pass uppercase characters in an image name; however, the Nix package set
# makes use of camelCasing repeatedly (for example for `haskellPackages`).
#
# To work around this, if no value is found on the top-level a second lookup
# is done on the package set using lowercase-names. This is not done for
# nested sets, as they often have keys that only differ in case.
#
# For example, `deepFetch pkgs "xorg.xev"` retrieves `pkgs.xorg.xev` and
# `deepFetch pkgs "haskellpackages.stylish-haskell"` retrieves
# `haskellPackages.stylish-haskell`.
deepFetch = with lib; s: n:
let
path = splitString "." n;
err = { error = "not_found"; pkg = n; };
# The most efficient way I've found to do a lookup against
# case-differing versions of an attribute is to first construct a
# mapping of all lowercased attribute names to their differently cased
# equivalents.
#
# This map is then used for a second lookup if the top-level
# (case-sensitive) one does not yield a result.
hasUpper = str: (match ".*[A-Z].*" str) != null;
allUpperKeys = filter hasUpper (attrNames s);
lowercased = listToAttrs (map
(k: {
name = toLower k;
value = k;
})
allUpperKeys);
caseAmendedPath = map (v: if hasAttr v lowercased then lowercased."${v}" else v) path;
fetchLower = attrByPath caseAmendedPath err s;
in
attrByPath path fetchLower s;
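# Worked example (hedged, for illustration only): for a package set containing
# `haskellPackages`, the `lowercased` mapping above contains
#   { haskellpackages = "haskellPackages"; }
# so a request for "haskellpackages.stylish-haskell" is amended to the
# correctly-cased attribute path before the second lookup.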
# Workaround for a workaround in nixpkgs: unquoted identifiers in Nix
# cannot start with digits, but some package names do (such as
# `1password`).
#
# By nixpkgs convention these identifiers are prefixed with underscores
# (e.g. `_1password`); however, such names are not accepted by the
# Docker registry protocol.
#
# To make this work, we detect these kinds of packages and add the
# missing underscore.
needsUnderscore = pkg: (builtins.match "^[0-9].*" pkg) != null;
normalisedPackages = map (p: if needsUnderscore p then "_${p}" else p) (fromJSON packages);
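# For illustration (hedged): a request for packages '["1password", "htop"]'
# becomes [ "_1password" "htop" ] after this normalisation.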
# allContents contains all packages successfully retrieved by name
# from the package set, as well as any errors encountered while
# attempting to fetch a package.
#
# Accumulated error information is returned back to the server.
allContents =
# Folds over the results of 'deepFetch' on all requested packages to
# separate them into errors and content. This allows the program to
# terminate early and return only the errors if any are encountered.
let
splitter = attrs: res:
if hasAttr "error" res
then attrs // { errors = attrs.errors ++ [ res ]; }
else attrs // { contents = attrs.contents ++ [ res ]; };
init = { contents = [ ]; errors = [ ]; };
fetched = (map (deepFetch pkgs) normalisedPackages);
in
foldl' splitter init fetched;
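# For illustration (hedged): on success this evaluates to something like
#   { contents = [ /* requested derivations */ ]; errors = [ ]; }
# and on a failed lookup the errors list contains entries of the form
#   { error = "not_found"; pkg = "somepackage"; }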
# Contains the export references graph of all retrieved packages,
# which has information about all runtime dependencies of the image.
#
# This is used by Nixery to group closures into image layers.
runtimeGraph = runCommand "runtime-graph.json"
{
__structuredAttrs = true;
exportReferencesGraph.graph = allContents.contents;
PATH = "${coreutils}/bin";
builder = toFile "builder" ''
. .attrs.sh
cp .attrs.json ''${outputs[out]}
'';
} "";
# Create a symlink forest into all top-level store paths of the
# image contents.
contentsEnv = symlinkJoin {
name = "bulk-layers";
paths = allContents.contents;
# Provide a few essentials that many programs expect:
# - a /tmp directory,
# - a /usr/bin/env for shell scripts that require it.
#
# Note that in images that do not actually contain `coreutils`,
# /usr/bin/env will be a dangling symlink.
#
# TODO(tazjin): Don't link /usr/bin/env if coreutils is not included.
postBuild = ''
mkdir -p $out/tmp
mkdir -p $out/usr/bin
ln -s ${coreutils}/bin/env $out/usr/bin/env
'';
};
# Image layer that contains the symlink forest created above. This
# must be included in the image to ensure that the filesystem has a
# useful layout at runtime.
symlinkLayer = runCommand "symlink-layer.tar" { } ''
cp -r ${contentsEnv}/ ./layer
tar --transform='s|^\./||' -C layer --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 -cf $out .
'';
# Metadata about the symlink layer which is required for serving it.
# The tarball hash and size computed here are used both for inclusion in
# the manifest and for content-checking in the layer cache.
symlinkLayerMeta = fromJSON (builtins.unsafeDiscardStringContext (readFile (runCommand "symlink-layer-meta.json"
{
buildInputs = [ coreutils jq openssl ];
} ''
tarHash=$(sha256sum ${symlinkLayer} | cut -d ' ' -f1)
layerSize=$(stat --printf '%s' ${symlinkLayer})
jq -n -c --arg tarHash $tarHash --arg size $layerSize --arg path ${symlinkLayer} \
'{ size: ($size | tonumber), tarHash: $tarHash, path: $path }' >> $out
'')));
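# For illustration (hedged): per the jq invocation above, the resulting JSON
# has the shape
#   { "size": 12345, "tarHash": "<sha256>", "path": "/nix/store/...-symlink-layer.tar" }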
# Final output structure returned to Nixery if the build succeeded
buildOutput = {
runtimeGraph = fromJSON (builtins.unsafeDiscardStringContext (readFile runtimeGraph));
symlinkLayer = symlinkLayerMeta;
};
# Output structure returned if errors occurred during the build. Currently the
# only error type that is returned in a structured way is 'not_found'.
errorOutput = {
error = "not_found";
pkgs = map (err: err.pkg) allContents.errors;
};
in
writeText "build-output.json" (if (length allContents.errors) == 0
then toJSON buildOutput
else toJSON errorOutput
)
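# Hedged usage sketch (file name and package list are illustrative; Nixery
# normally drives this evaluation itself):
#
#   nix-build prepare-image.nix \
#     --argstr packages '["git", "htop"]' \
#     --argstr srcType nixpkgs \
#     --argstr srcArgs nixos-unstable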

View file

@ -1,59 +0,0 @@
#!/usr/bin/env bash
set -eou pipefail
# This integration test makes sure that the container image built
# for Nixery itself runs fine in Docker, and that images pulled
# from it work in Docker.
IMG=$(docker load -q -i "$(nix-build -A nixery-image)" | awk '{ print $3 }')
echo "Loaded Nixery image as ${IMG}"
# Run the built nixery docker image in the background, but keep printing its
# output as it occurs.
# We can't just mount a tmpfs to /var/cache/nixery, as tmpfs doesn't support
# user xattrs.
# So create a temporary directory in the current working directory, and hope
# it's backed by something supporting user xattrs.
# We'll notice if it isn't when Nixery starts complaining about not being
# able to set xattrs.
if [ -d var-cache-nixery ]; then rm -Rf var-cache-nixery; fi
mkdir var-cache-nixery
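# Optional sanity check (sketch, assuming the attr tools are installed): fail
# fast if the directory does not support user xattrs.
#   setfattr -n user.nixery-test -v 1 var-cache-nixery && \
#     setfattr -x user.nixery-test var-cache-nixery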
docker run --privileged --rm -p 8080:8080 --name nixery \
-e PORT=8080 \
--mount "type=bind,source=${PWD}/var-cache-nixery,target=/var/cache/nixery" \
-e NIXERY_CHANNEL=nixos-unstable \
-e NIXERY_STORAGE_BACKEND=filesystem \
-e STORAGE_PATH=/var/cache/nixery \
"${IMG}" &
# Give the container ~20 seconds to come up
set +e
attempts=0
echo -n "Waiting for Nixery to start ..."
until curl --fail --silent "http://localhost:8080/v2/"; do
[[ attempts -eq 30 ]] && echo "Nixery container failed to start!" && exit 1
((attempts++))
echo -n "."
sleep 1
done
set -e
# Pull and run an image of the current CPU architecture
case $(uname -m) in
x86_64)
docker run --rm localhost:8080/hello hello
;;
aarch64)
docker run --rm localhost:8080/arm64/hello hello
;;
esac
# Pull an image of the opposite CPU architecture (but without running it)
case $(uname -m) in
x86_64)
docker pull localhost:8080/arm64/hello
;;
aarch64)
docker pull localhost:8080/hello
;;
esac

View file

@ -1,13 +0,0 @@
# Copyright 2022 The TVL Contributors
# SPDX-License-Identifier: Apache-2.0
# Configures a shell environment that builds required local packages to
# run Nixery.
{ pkgs ? import <nixpkgs> { } }:
let nixery = import ./default.nix { inherit pkgs; };
in pkgs.stdenv.mkDerivation {
name = "nixery-dev-shell";
buildInputs = with pkgs; [ jq nixery.nixery-prepare-image ];
}
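# Hedged usage note: running `nix-shell` in this directory should drop you into
# a shell with jq and nixery-prepare-image available on PATH.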

View file

@ -1,99 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Filesystem storage backend for Nixery.
package storage
import (
"context"
"fmt"
"io"
"net/http"
"os"
"path"
"github.com/pkg/xattr"
log "github.com/sirupsen/logrus"
)
type FSBackend struct {
path string
}
func NewFSBackend() (*FSBackend, error) {
p := os.Getenv("STORAGE_PATH")
if p == "" {
return nil, fmt.Errorf("STORAGE_PATH must be set for filesystem storage")
}
p = path.Clean(p)
err := os.MkdirAll(p, 0755)
if err != nil {
return nil, fmt.Errorf("failed to create storage dir: %s", err)
}
return &FSBackend{p}, nil
}
func (b *FSBackend) Name() string {
return fmt.Sprintf("Filesystem (%s)", b.path)
}
func (b *FSBackend) Persist(ctx context.Context, key, contentType string, f Persister) (string, int64, error) {
full := path.Join(b.path, key)
dir := path.Dir(full)
err := os.MkdirAll(dir, 0755)
if err != nil {
log.WithError(err).WithField("path", dir).Error("failed to create storage directory")
return "", 0, err
}
file, err := os.OpenFile(full, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
log.WithError(err).WithField("file", full).Error("failed to write file")
return "", 0, err
}
defer file.Close()
err = xattr.Set(full, "user.mime_type", []byte(contentType))
if err != nil {
log.WithError(err).WithField("file", full).Error("failed to store file type in xattrs")
return "", 0, err
}
return f(file)
}
func (b *FSBackend) Fetch(ctx context.Context, key string) (io.ReadCloser, error) {
full := path.Join(b.path, key)
return os.Open(full)
}
func (b *FSBackend) Move(ctx context.Context, old, new string) error {
newpath := path.Join(b.path, new)
err := os.MkdirAll(path.Dir(newpath), 0755)
if err != nil {
return err
}
return os.Rename(path.Join(b.path, old), newpath)
}
func (b *FSBackend) Serve(digest string, r *http.Request, w http.ResponseWriter) error {
p := path.Join(b.path, "layers", digest)
log.WithFields(log.Fields{
"digest": digest,
"path": p,
}).Info("serving blob from filesystem")
contentType, err := xattr.Get(p, "user.mime_type")
if err != nil {
log.WithError(err).WithField("file", p).Error("failed to read file type from xattrs")
return err
}
w.Header().Add("Content-Type", string(contentType))
http.ServeFile(w, r, p)
return nil
}
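// Hedged usage sketch (not part of the original file; the key, content type,
// blob and hash variables are illustrative placeholders):
//
//	backend, err := NewFSBackend()
//	if err != nil {
//		// handle error
//	}
//	sha, size, err := backend.Persist(ctx, "layers/<digest>", "application/octet-stream",
//		func(w io.Writer) (string, int64, error) {
//			n, err := w.Write(blob)
//			return blobSha256, int64(n), err
//		})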

View file

@ -1,231 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Google Cloud Storage backend for Nixery.
package storage
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"time"
"cloud.google.com/go/storage"
log "github.com/sirupsen/logrus"
"golang.org/x/oauth2/google"
)
// HTTP client to use for direct calls to APIs that are not part of the SDK
var client = &http.Client{}
// API scope needed for renaming objects in GCS
const gcsScope = "https://www.googleapis.com/auth/devstorage.read_write"
type GCSBackend struct {
bucket string
handle *storage.BucketHandle
signing *storage.SignedURLOptions
}
// Constructs a new GCS bucket backend based on the configured
// environment variables.
func NewGCSBackend() (*GCSBackend, error) {
bucket := os.Getenv("GCS_BUCKET")
if bucket == "" {
return nil, fmt.Errorf("GCS_BUCKET must be configured for GCS usage")
}
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
log.WithError(err).Fatal("failed to set up Cloud Storage client")
}
handle := client.Bucket(bucket)
if _, err := handle.Attrs(ctx); err != nil {
log.WithError(err).WithField("bucket", bucket).Error("could not access configured bucket")
return nil, err
}
signing, err := signingOptsFromEnv()
if err != nil {
log.WithError(err).Error("failed to configure GCS bucket signing")
return nil, err
}
return &GCSBackend{
bucket: bucket,
handle: handle,
signing: signing,
}, nil
}
func (b *GCSBackend) Name() string {
return "Google Cloud Storage (" + b.bucket + ")"
}
func (b *GCSBackend) Persist(ctx context.Context, path, contentType string, f Persister) (string, int64, error) {
obj := b.handle.Object(path)
w := obj.NewWriter(ctx)
hash, size, err := f(w)
if err != nil {
log.WithError(err).WithField("path", path).Error("failed to write to GCS")
return hash, size, err
}
err = w.Close()
if err != nil {
log.WithError(err).WithField("path", path).Error("failed to complete GCS upload")
return hash, size, err
}
// GCS natively supports content types for objects, which will be
// used when serving them back.
if contentType != "" {
_, err = obj.Update(ctx, storage.ObjectAttrsToUpdate{
ContentType: contentType,
})
if err != nil {
log.WithError(err).WithField("path", path).Error("failed to update object attrs")
return hash, size, err
}
}
return hash, size, nil
}
func (b *GCSBackend) Fetch(ctx context.Context, path string) (io.ReadCloser, error) {
obj := b.handle.Object(path)
// Probe whether the file exists before trying to fetch it
_, err := obj.Attrs(ctx)
if err != nil {
return nil, err
}
return obj.NewReader(ctx)
}
// renameObject renames an object in the specified Cloud Storage
// bucket.
//
// The Go API for Cloud Storage does not support renaming objects, but
// the HTTP API does. The code below makes the relevant call manually.
func (b *GCSBackend) Move(ctx context.Context, old, new string) error {
creds, err := google.FindDefaultCredentials(ctx, gcsScope)
if err != nil {
return err
}
token, err := creds.TokenSource.Token()
if err != nil {
return err
}
// as per https://cloud.google.com/storage/docs/renaming-copying-moving-objects#rename
url := fmt.Sprintf(
"https://www.googleapis.com/storage/v1/b/%s/o/%s/rewriteTo/b/%s/o/%s",
url.PathEscape(b.bucket), url.PathEscape(old),
url.PathEscape(b.bucket), url.PathEscape(new),
)
req, err := http.NewRequest("POST", url, nil)
if err != nil {
return err
}
req.Header.Add("Authorization", "Bearer "+token.AccessToken)
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
// It seems that 'rewriteTo' copies objects instead of
// renaming/moving them, hence a deletion call afterwards is
// required.
if err = b.handle.Object(old).Delete(ctx); err != nil {
log.WithError(err).WithFields(log.Fields{
"new": new,
"old": old,
}).Warn("failed to delete renamed object")
// this error should not break renaming and is not returned
}
return nil
}
func (b *GCSBackend) Serve(digest string, r *http.Request, w http.ResponseWriter) error {
url, err := b.constructLayerUrl(digest)
if err != nil {
log.WithError(err).WithFields(log.Fields{
"digest": digest,
"bucket": b.bucket,
}).Error("failed to sign GCS URL")
return err
}
log.WithField("digest", digest).Info("redirecting blob request to GCS bucket")
w.Header().Set("Location", url)
w.WriteHeader(303)
return nil
}
// Configure GCS URL signing in the presence of a service account key
// (toggled if the user has set GOOGLE_APPLICATION_CREDENTIALS).
func signingOptsFromEnv() (*storage.SignedURLOptions, error) {
path := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
if path == "" {
// No credentials configured -> no URL signing
return nil, nil
}
key, err := ioutil.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("failed to read service account key: %s", err)
}
conf, err := google.JWTConfigFromJSON(key)
if err != nil {
return nil, fmt.Errorf("failed to parse service account key: %s", err)
}
log.WithField("account", conf.Email).Info("GCS URL signing enabled")
return &storage.SignedURLOptions{
Scheme: storage.SigningSchemeV4,
GoogleAccessID: conf.Email,
PrivateKey: conf.PrivateKey,
Method: "GET",
}, nil
}
// constructLayerUrl constructs the public URL of the layer object in the Cloud
// Storage bucket and signs it; Serve then redirects the client there.
//
// Signing the URL allows unauthenticated clients to retrieve objects from the
// bucket.
//
// In case signing is not configured, a redirect to storage.googleapis.com is
// issued, which means the underlying bucket objects need to be publicly
// accessible.
//
// The Docker client is known to follow redirects, but this might not be true
// for all other registry clients.
func (b *GCSBackend) constructLayerUrl(digest string) (string, error) {
log.WithField("layer", digest).Info("redirecting layer request to bucket")
object := "layers/" + digest
if b.signing != nil {
opts := *b.signing
opts.Expires = time.Now().Add(5 * time.Minute)
return storage.SignedURL(b.bucket, object, &opts)
} else {
return ("https://storage.googleapis.com/" + b.bucket + "/" + object), nil
}
}
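// Hedged illustration (not part of the original file): with signing
// configured, Serve ends up responding roughly with
//
//	HTTP/1.1 303 See Other
//	Location: https://storage.googleapis.com/<bucket>/layers/<digest>?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Signature=...
//
// whereas without signing the Location header points at the plain
// storage.googleapis.com URL and the bucket objects must be publicly readable.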

View file

@ -1,40 +0,0 @@
// Copyright 2022 The TVL Contributors
// SPDX-License-Identifier: Apache-2.0
// Package storage implements an interface that can be implemented by
// storage backends, such as Google Cloud Storage or the local
// filesystem.
package storage
import (
"context"
"io"
"net/http"
)
type Persister = func(io.Writer) (string, int64, error)
type Backend interface {
// Name returns the name of the storage backend, for use in
// log messages and such.
Name() string
// Persist provides a user-supplied function with a writer
// that stores data in the storage backend.
//
// It needs to return the SHA256 hash of the data written as
// well as the total number of bytes, as those are required
// for the image manifest.
Persist(ctx context.Context, path, contentType string, f Persister) (string, int64, error)
// Fetch retrieves data from the storage backend.
Fetch(ctx context.Context, path string) (io.ReadCloser, error)
// Move renames a path inside the storage backend. This is
// used for staging uploads while calculating their hashes.
Move(ctx context.Context, old, new string) error
// Serve provides a handler function to serve HTTP requests
// for objects in the storage backend.
Serve(digest string, r *http.Request, w http.ResponseWriter) error
}
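// Hedged sketch of how a caller might pick a concrete backend; the actual
// wiring in Nixery lives elsewhere, and the environment variable name is
// taken from the integration test (NIXERY_STORAGE_BACKEND).
//
//	func newBackend() (Backend, error) {
//		switch os.Getenv("NIXERY_STORAGE_BACKEND") {
//		case "gcs":
//			return NewGCSBackend()
//		case "filesystem":
//			return NewFSBackend()
//		default:
//			return nil, fmt.Errorf("unsupported storage backend")
//		}
//	}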

View file

@ -1,166 +0,0 @@
<!DOCTYPE html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="description" content="The Virus Lounge">
<link rel="stylesheet" type="text/css" href="https://static.tvl.fyi/latest/tvl.css" media="all">
<link rel="icon" type="image/webp" href="/favicon.webp">
<title>Nixery</title>
</head>
<body class="light">
<img src="./nixery-logo.png" alt="Nixery">
<hr>
<p>
Welcome to this instance of Nixery, an ad-hoc container image registry that provides
packages from the <a href="https://nixos.org/nix">Nix</a> package manager.
</p>
<p>
You can pull container images from this registry
at <code><span class="registry-hostname">nixery.dev</span></code> by appending any
packages that you need in the URL, separated by slashes.
</p>
<noscript>
<p class="cheddar-callout cheddar-tip">
<strong>NOTE:</strong> When pulling from a private Nixery instance,
replace <code>nixery.dev</code> in the above examples with your registry address.
</p>
</noscript>
<h2><a href="#demo" aria-hidden="true" class="anchor" id="demo"></a>Demo</h2>
<noscript>
<p>
The interactive demo needs JavaScript to run, but you can just read the Usage
instructions below instead.
</p>
</noscript>
<script src="https://asciinema.org/a/262583.js" id="asciicast-262583" async data-autoplay="true" data-loop="true"></script>
<h2><a href="#usage" aria-hidden="true" class="anchor" id="usage"></a>Usage</h2>
<p>
These usage examples assume that you use Docker, but should not be much different for
other OCI-compatible platforms.
</p>
<p>
Pull an image from this registry, separating each package you want included by a
slash:
</p>
<pre style="background-color:#f6f8fa;padding:16px;"><span style="color:#323232;">docker pull <span class="registry-hostname">nixery.dev</span>/shell/git/htop</span></pre>
<p>
This gives you an image with <code>git</code>, <code>htop</code> and an interactively
configured shell. You could run it like this:
</p>
<pre style="background-color:#f6f8fa;padding:16px;"><span style="color:#323232;">docker run -ti <span class="registry-hostname">nixery.dev</span>/shell/git/htop bash</span></pre>
<p>
Each path segment corresponds either to a key in the Nix package set, or a
meta-package that automatically expands to several other packages.
</p>
<p>
Meta-packages <strong>must</strong> be the first path component if they are used.
Currently there are only two meta-packages:
</p>
<ul>
<li>
<p>
<code>shell</code>, which provides a <code>bash</code>-shell with interactive
configuration and standard tools like <code>coreutils</code></p>
</li>
<li>
<p><code>arm64</code>, which provides ARM64 binaries</p>
</li>
</ul>
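<!-- Hedged example added for illustration; the image path mirrors the one
exercised by the integration test. -->
<p>
For example, combining the <code>arm64</code> meta-package with a package pull:
</p>
<pre style="background-color:#f6f8fa;padding:16px;"><span style="color:#323232;">docker pull <span class="registry-hostname">nixery.dev</span>/arm64/hello</span></pre>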
<h2><a href="#faq" aria-hidden="true" class="anchor" id="faq"></a>FAQ</h2>
<h3>
<a href="#how-does-this-work" aria-hidden="true" class="anchor" id="how-does-this-work"></a>
How does this work?
</h3>
<p>
The short version is that we use the Nix package manager and an optimised
<a href="https://tazj.in/blog/nixery-layers">layering strategy</a>.
</p>
<p>
Check out <a href="https://www.youtube.com/watch?v=pOI9H4oeXqA">the Nixery talk</a>
from NixCon 2019 for more information.
</p>
<h3>
<a href="#should-i-depend-on-nixerydev-in-production" aria-hidden="true" class="anchor" id="should-i-depend-on-nixerydev-in-production"></a>
Should I depend on <code>nixery.dev</code> in production?
</h3>
<p>
While we appreciate the enthusiasm, if you would like to use Nixery in your production
project we recommend setting up a private instance. The public Nixery
at <code>nixery.dev</code> is run on a best-effort basis and we make no guarantees
about availability.
</p>
<h3>
<a href="#who-made-this" aria-hidden="true" class="anchor" id="who-made-this"></a>
Who made this?
</h3>
<p>
Nixery was written by <a href="https://tazj.in">tazjin</a>, originally at Google.
These days Nixery is maintained by <a href="https://tvl.su">TVL</a>.
</p>
<p>
Nixery would not be possible without the many people that have contributed to Nix and
nixpkgs over time, maybe you could become one of them?
</p>
<h3>
<a href="#where-is-the-source-code-for-this" aria-hidden="true" class="anchor" id="where-is-the-source-code-for-this"></a>
Where is the source code for this?
</h3>
<p>
Nixery lives in the <a href="https://code.tvl.fyi/tree/tools/nixery">TVL
monorepo</a>. All development happens there and follows
the <a href="https://code.tvl.fyi/about/docs/CONTRIBUTING.md">TVL contribution
guidelines</a>.
</p>
<p>
We <em>mirror</em> the source code <a href="https://github.com/tazjin/nixery">to
Github</a> but do not guarantee that anyone will look at PRs or issues there.
</p>
<hr>
<footer>
<p class="footer">
<a class="uncoloured-link" href="https://at.tvl.fyi/?q=//tools/nixery">code</a>
|
<a class="uncoloured-link" href="https://cl.tvl.fyi/q/file:%2522%255Etools/nixery/.*%2522">reviews</a>
|
<a class="uncoloured-link" href="https://b.tvl.fyi/">bugs</a>
</p>
<p class="lod">ಠ_ಠ</p>
</footer>
<script>
/* Replace the hostnames above with the one at which this page runs. */
let hostname = window.location.hostname;
if (hostname != '') {
for (span of document.getElementsByClassName("registry-hostname")) {
span.textContent = hostname;
}
}
</script>
</body>

Binary file not shown.


View file

@ -1,29 +0,0 @@
# Configures a running Pulseaudio instance with an LADSP filter that
# creates a noise-cancelling sink.
#
# This can be used to, for example, cancel noise from an incoming
# video conferencing audio stream.
#
# There are some caveats: for example, this will not distinguish between
# noise from different participants, and I have no idea what happens if
# the default sink goes away.
#
# If this script is run while an NSFV sink exists, the existing sink
# will first be removed.
{ depot, pkgs, ... }:
let
inherit (pkgs) ripgrep pulseaudio;
inherit (depot.third_party) nsfv;
in
pkgs.writeShellScriptBin "nsfv-setup" ''
export PATH="${ripgrep}/bin:${pulseaudio}/bin:$PATH"
if pacmd list-sinks | rg librnnoise_ladspa.so >/dev/null; then
pactl unload-module module-ladspa-sink
fi
SINK=$(${pulseaudio}/bin/pacmd info | ${ripgrep}/bin/rg -r '$1' '^Default sink name: (.*)$')
echo "Setting up NSFV filtering to sink ''${SINK}"
${pulseaudio}/bin/pacmd load-module module-ladspa-sink sink_name=NSFV sink_master=''${SINK} label=noise_suppressor_mono plugin=${nsfv}/lib/ladspa/librnnoise_ladspa.so control=42 rate=48000
''
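# Hedged usage note (not part of the original script): once the NSFV sink
# exists, a playback stream can be routed through it with something like
# `pactl move-sink-input <index> NSFV`; stream indices can be listed via
# `pactl list sink-inputs`.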