chore(ops/modules): Cleanup leftovers from TVL

Change-Id: I979cb18f3b8d461d21424e8dae6b0b2d7407809d
Reviewed-on: https://cl.snix.dev/c/snix/+/30106
Tested-by: besadii
Reviewed-by: Ryan Lahfa <masterancpp@gmail.com>
Autosubmit: Ilan Joselevich <personal@ilanjoselevich.com>
Ilan Joselevich 2025-03-18 16:49:17 +00:00 committed by clbot
parent f8bdafd3f2
commit 580f03f6fd
21 changed files with 0 additions and 1113 deletions

@@ -1,38 +0,0 @@
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.atward;
  description = "atward - (attempt to) cleverly route queries";
in
{
  options.services.depot.atward = {
    enable = lib.mkEnableOption description;

    host = lib.mkOption {
      type = lib.types.str;
      default = "[::1]";
      description = "Host on which atward should listen";
    };

    port = lib.mkOption {
      type = lib.types.int;
      default = 28973;
      description = "Port on which atward should listen";
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.atward = {
      inherit description;
      script = "${depot.web.atward}/bin/atward";
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        DynamicUser = true;
        Restart = "always";
      };

      environment.ATWARD_LISTEN_ADDRESS = "${cfg.host}:${toString cfg.port}";
    };
  };
}
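
For reference, a minimal consumer of this module would have looked roughly like the sketch below; the relative import path is an assumption, the options are the ones defined above.

{
  imports = [ ./atward.nix ];

  # host and port keep their defaults ("[::1]" and 28973), so the service
  # ends up with ATWARD_LISTEN_ADDRESS="[::1]:28973".
  services.depot.atward.enable = true;
}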

@@ -1,25 +0,0 @@
# Automatically performs a scrub on all btrfs filesystems configured in
# `config.fileSystems` on a daily schedule (by default). Activated by importing.
{ config, lib, ... }:

{
  config = {
    services = {
      btrfs.autoScrub = {
        enable = true;
        interval = lib.mkDefault "*-*-* 03:30:00";

        # Gather all btrfs fileSystems; extra ones can be added via the NixOS
        # module merging mechanism, of course (see the sketch after this file).
        fileSystems = lib.concatLists (
          lib.mapAttrsToList
            (_: { fsType, mountPoint, ... }:
              if fsType == "btrfs" then [ mountPoint ] else [ ])
            config.fileSystems
        );
      };
    };
  };
}
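
Because fileSystems is a list-typed option, other modules can contribute extra scrub targets (and override the schedule, which is only a mkDefault) through ordinary NixOS option merging. A sketch, with an illustrative mount point:

{
  # Appended to the automatically gathered btrfs mount points.
  services.btrfs.autoScrub.fileSystems = [ "/mnt/extra-btrfs" ];

  # Overrides the mkDefault schedule set above.
  services.btrfs.autoScrub.interval = "weekly";
}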

@@ -1,110 +0,0 @@
# This is a fork of the nixpkgs module for Harmonia, which adds compatibility
# with Nix 2.3.
#
# We will upstream this eventually.
{ config, pkgs, lib, ... }:

let
  cfg = config.services.depot.harmonia;
  format = pkgs.formats.toml { };

  credentials = lib.imap0
    (i: signKeyPath: {
      id = "sign-key-${builtins.toString i}";
      path = signKeyPath;
    })
    cfg.signKeyPaths;
in
{
  options = {
    services.depot.harmonia = {
      enable = lib.mkEnableOption "Harmonia: Nix binary cache written in Rust";

      signKeyPaths = lib.mkOption {
        type = lib.types.listOf lib.types.path;
        default = [ ];
        description = "Paths to the signing keys to use for signing the cache";
      };

      package = lib.mkPackageOption pkgs "harmonia" { };

      settings = lib.mkOption {
        inherit (format) type;
        default = { };
        description = ''
          Settings to merge with the default configuration.
          For the default configuration, see <https://github.com/nix-community/harmonia/tree/master#configuration>.
        '';
      };
    };
  };

  config = lib.mkIf cfg.enable {
    users.users.harmonia = {
      isSystemUser = true;
      group = "harmonia";
    };
    users.groups.harmonia = { };

    systemd.services.harmonia = {
      description = "harmonia binary cache service";
      requires = [ "nix-daemon.socket" ];
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      environment = {
        CONFIG_FILE = format.generate "harmonia.toml" cfg.settings;
        SIGN_KEY_PATHS = lib.strings.concatMapStringsSep " "
          (credential: "%d/${credential.id}")
          credentials;
        # Note: it's important to set this for nix-store, because it wants to
        # use $HOME for a temporary cache dir. Bizarre failures will occur
        # otherwise.
        HOME = "/run/harmonia";
      };

      serviceConfig = {
        ExecStart = lib.getExe cfg.package;
        User = "harmonia";
        Group = "harmonia";
        Restart = "on-failure";
        PrivateUsers = true;
        DeviceAllow = [ "" ];
        UMask = "0066";
        RuntimeDirectory = "harmonia";
        LoadCredential = builtins.map (credential: "${credential.id}:${credential.path}") credentials;
        SystemCallFilter = [
          "@system-service"
          "~@privileged"
          "~@resources"
        ];
        CapabilityBoundingSet = "";
        ProtectKernelModules = true;
        ProtectKernelTunables = true;
        ProtectControlGroups = true;
        ProtectKernelLogs = true;
        ProtectHostname = true;
        ProtectClock = true;
        RestrictRealtime = true;
        MemoryDenyWriteExecute = true;
        ProcSubset = "pid";
        ProtectProc = "invisible";
        RestrictNamespaces = true;
        SystemCallArchitectures = "native";
        PrivateNetwork = false;
        PrivateTmp = true;
        PrivateDevices = true;
        PrivateMounts = true;
        NoNewPrivileges = true;
        ProtectSystem = "strict";
        ProtectHome = true;
        LockPersonality = true;
        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
        LimitNOFILE = 65536;
      };
    };
  };
}
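
A rough consumer sketch: each entry in signKeyPaths becomes a systemd credential named sign-key-&lt;n&gt;, whose runtime paths are handed to Harmonia via SIGN_KEY_PATHS. The import path, secret path, and the bind setting below are assumptions, not taken from this commit.

{
  imports = [ ./harmonia.nix ];

  services.depot.harmonia = {
    enable = true;
    signKeyPaths = [ "/run/agenix/nix-cache-key" ]; # illustrative secret path
    settings.bind = "[::]:5000"; # merged into the generated harmonia.toml
  };
}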

@@ -1,64 +0,0 @@
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.irccat;
  description = "irccat - forward messages to IRC";

  # irccat expects to read its configuration from the *current
  # directory*, and its configuration contains secrets.
  #
  # To make this work we construct the JSON configuration file and
  # then recursively merge it with an on-disk secret using jq on
  # service launch.
  configJson = pkgs.writeText "irccat.json" (builtins.toJSON cfg.config);

  # Merging the configuration file with the secrets and launching the main
  # application both need to happen in ExecStart=, due to
  # https://github.com/systemd/systemd/issues/19604#issuecomment-989279884
  mergeAndLaunch = pkgs.writeShellScript "merge-irccat-config" ''
    if [ ! -f "$CREDENTIALS_DIRECTORY/secrets" ]; then
      echo "irccat secrets file is missing"
      exit 1
    fi

    # jq's * is the recursive merge operator
    ${pkgs.jq}/bin/jq -s '.[0] * .[1]' ${configJson} "$CREDENTIALS_DIRECTORY/secrets" \
      > /var/lib/irccat/irccat.json

    exec ${depot.third_party.irccat}/bin/irccat
  '';
in
{
  options.services.depot.irccat = {
    enable = lib.mkEnableOption description;

    config = lib.mkOption {
      type = lib.types.attrsOf lib.types.anything; # varying value types
      description = "Configuration structure (unchecked!)";
    };

    secretsFile = lib.mkOption {
      type = lib.types.str;
      description = "Path to the secrets file to be merged";
      default = config.age.secretsDir + "/irccat";
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.irccat = {
      inherit description;
      wantedBy = [ "multi-user.target" ];
      after = [ "network-online.target" ];
      wants = [ "network-online.target" ];

      serviceConfig = {
        ExecStart = "${mergeAndLaunch}";
        DynamicUser = true;
        StateDirectory = "irccat";
        WorkingDirectory = "/var/lib/irccat";
        LoadCredential = "secrets:${cfg.secretsFile}";
        Restart = "always";
      };
    };
  };
}
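
The net effect of the jq merge is that the config option only needs the non-secret parts of irccat's JSON configuration; any key present in the secrets file overrides or extends the generated file at service start. A sketch with illustrative values (consult irccat's upstream documentation for the actual schema):

{
  services.depot.irccat = {
    enable = true;

    config = {
      tcp.listen = ":4722"; # hypothetical listener for incoming messages
      irc = {
        server = "irc.hackint.org:6697";
        tls = true;
        nick = "tvlbot";
        # Credentials live only in the age-encrypted secrets file and are
        # merged in by mergeAndLaunch.
      };
    };
  };
}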

@@ -1,33 +0,0 @@
# Configures the public josh instance for serving the depot.
{ config, depot, lib, pkgs, ... }:

let
  cfg = config.services.depot.josh;
in
{
  options.services.depot.josh = with lib; {
    enable = mkEnableOption "Enable josh for serving the depot";

    port = mkOption {
      description = "Port on which josh should listen";
      type = types.int;
      default = 5674;
    };
  };

  config = lib.mkIf cfg.enable {
    # Run josh for the depot.
    systemd.services.josh = {
      description = "josh - partial cloning of monorepos";
      wantedBy = [ "multi-user.target" ];
      path = [ pkgs.git pkgs.bash ];

      serviceConfig = {
        DynamicUser = true;
        StateDirectory = "josh";
        Restart = "always";
        ExecStart = "${pkgs.josh}/bin/josh-proxy --no-background --local /var/lib/josh --port ${toString cfg.port} --remote https://cl.tvl.fyi/ --require-auth";
      };
    };
  };
}
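
A sketch of how the proxy would typically be exposed, assuming nginx fronts it on the same machine; the virtual host name is illustrative.

{ config, ... }:
{
  services.depot.josh.enable = true;

  services.nginx.virtualHosts."code.example.org" = {
    enableACME = true;
    forceSSL = true;
    locations."/".proxyPass =
      "http://localhost:${toString config.services.depot.josh.port}";
  };
}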

@@ -1,102 +0,0 @@
# Configures a code search instance using Livegrep.
#
# We do not currently build Livegrep in Nix, because it's a complex,
# multi-language Bazel build and doesn't play nicely with Nix.
{ config, lib, pkgs, ... }:

let
  cfg = config.services.depot.livegrep;

  livegrepConfig = {
    name = "livegrep";

    fs_paths = [{
      name = "depot";
      path = "/depot";
      metadata.url_pattern = "https://code.tvl.fyi/tree/{path}?id={version}#n{lno}";
    }];

    repositories = [{
      name = "depot";
      path = "/depot";
      revisions = [ "HEAD" ];

      metadata = {
        url_pattern = "https://code.tvl.fyi/tree/{path}?id={version}#n{lno}";
        remote = "https://cl.tvl.fyi/depot.git";
      };
    }];
  };

  configFile = pkgs.writeText "livegrep-config.json" (builtins.toJSON livegrepConfig);

  # latest as of 2024-02-17
  image = "ghcr.io/livegrep/livegrep/base:033fa0e93c";
in
{
  options.services.depot.livegrep = with lib; {
    enable = mkEnableOption "Run livegrep code search for depot";

    port = mkOption {
      description = "Port on which livegrep web UI should listen";
      type = types.int;
      default = 5477; # lgrp
    };
  };

  config = lib.mkIf cfg.enable {
    virtualisation.oci-containers.containers.livegrep-codesearch = {
      inherit image;
      extraOptions = [ "--net=host" ];

      volumes = [
        "${configFile}:/etc/livegrep-config.json:ro"
        "/var/lib/gerrit/git/depot.git:/depot:ro"
      ];

      entrypoint = "/livegrep/bin/codesearch";
      cmd = [
        "-grpc"
        "0.0.0.0:5427" # lgcs
        "-reload_rpc"
        "-revparse"
        "/etc/livegrep-config.json"
      ];
    };

    virtualisation.oci-containers.containers.livegrep-frontend = {
      inherit image;
      dependsOn = [ "livegrep-codesearch" ];
      extraOptions = [ "--net=host" ];

      entrypoint = "/livegrep/bin/livegrep";
      cmd = [
        "-listen"
        "0.0.0.0:${toString cfg.port}"
        "-reload"
        "-connect"
        "localhost:5427"
        "-docroot"
        "/livegrep/web"
        # TODO(tazjin): docroot with styles etc.
      ];
    };

    systemd.services.livegrep-reindex = {
      script = "${pkgs.podman}/bin/podman exec livegrep-codesearch /livegrep/bin/livegrep-reload localhost:5427";
      serviceConfig.Type = "oneshot";
    };

    systemd.paths.livegrep-reindex = {
      description = "Executes a livegrep reindex if depot refs change";
      wantedBy = [ "multi-user.target" ];

      pathConfig = {
        PathChanged = [
          "/var/lib/gerrit/git/depot.git/packed-refs"
          "/var/lib/gerrit/git/depot.git/refs"
        ];
      };
    };
  };
}

@@ -1,106 +0,0 @@
# Runs the TVL Monitoring setup (currently Grafana + Prometheus).
{ depot, pkgs, config, lib, ... }:

{
  # Required for prometheus to be able to scrape stats
  services.nginx.statusPage = true;

  # Configure Prometheus & Grafana. Exporter configuration for
  # Prometheus is inside the respective service modules.
  services.prometheus = {
    enable = true;
    retentionTime = "90d";

    exporters = {
      node = {
        enable = true;

        enabledCollectors = [
          "logind"
          "processes"
          "systemd"
        ];
      };

      nginx = {
        enable = true;
        sslVerify = false;
        constLabels = [ "host=${config.networking.hostName}" ];
      };
    };

    scrapeConfigs = [
      {
        job_name = "node";
        scrape_interval = "5s";
        static_configs = [{
          targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
        }];
      }
      {
        job_name = "nginx";
        scrape_interval = "5s";
        static_configs = [{
          targets = [ "localhost:${toString config.services.prometheus.exporters.nginx.port}" ];
        }];
      }
    ];
  };

  services.grafana = {
    enable = true;

    settings = {
      server = {
        http_port = 4723; # "graf" on phone keyboard
        domain = "status.tvl.su";
        root_url = "https://status.tvl.su";
      };

      analytics.reporting_enabled = false;

      "auth.generic_oauth" = {
        enabled = true;
        client_id = "grafana";
        scopes = "openid profile email";
        name = "TVL";
        email_attribute_path = "mail";
        login_attribute_path = "sub";
        name_attribute_path = "displayName";
        auth_url = "https://auth.tvl.fyi/auth/realms/TVL/protocol/openid-connect/auth";
        token_url = "https://auth.tvl.fyi/auth/realms/TVL/protocol/openid-connect/token";
        api_url = "https://auth.tvl.fyi/auth/realms/TVL/protocol/openid-connect/userinfo";

        # Give lukegb, aspen, tazjin "Admin" rights.
        role_attribute_path = "((sub == 'lukegb' || sub == 'aspen' || sub == 'tazjin') && 'Admin') || 'Editor'";

        # Allow creating new Grafana accounts from OAuth accounts.
        allow_sign_up = true;
      };

      "auth.anonymous" = {
        enabled = true;
        org_name = "The Virus Lounge";
        org_role = "Viewer";
      };

      "auth.basic".enabled = false;

      auth = {
        oauth_auto_login = true;
        disable_login_form = true;
      };
    };

    provision = {
      enable = true;

      datasources.settings.datasources = [{
        name = "Prometheus";
        type = "prometheus";
        url = "http://localhost:9090";
      }];
    };
  };

  # Contains GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET.
  systemd.services.grafana.serviceConfig.EnvironmentFile = config.age.secretsDir + "/grafana";
}
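
Since scrapeConfigs is a list-typed option, other modules can contribute additional scrape jobs through normal option merging. A sketch with a hypothetical extra exporter:

{
  services.prometheus.scrapeConfigs = [{
    job_name = "blackbox"; # illustrative job, not part of this commit
    scrape_interval = "30s";
    static_configs = [{ targets = [ "localhost:9115" ]; }];
  }];
}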

@@ -1,54 +0,0 @@
# NixOS module to configure the Estonian e-ID software.
{ pkgs, ... }:

{
  services.pcscd.enable = true;

  # Tell p11-kit to load/proxy opensc-pkcs11.so, providing all available slots
  # (PIN1 for authentication/decryption, PIN2 for signing).
  environment.etc."pkcs11/modules/opensc-pkcs11".text = ''
    module: ${pkgs.opensc}/lib/opensc-pkcs11.so
  '';

  # Configure Firefox (in case users set `programs.firefox.enable = true;`)
  programs.firefox = {
    # Allow a possibly installed "Web eID" extension to do native messaging with
    # the "web-eid-app" native component.
    # Users not using `programs.firefox.enable` can override their firefox
    # derivation by setting `extraNativeMessagingHosts = [ pkgs.web-eid-app ]`.
    nativeMessagingHosts.packages = [ pkgs.web-eid-app ];

    # Configure Firefox to load smartcards via p11kit-proxy.
    # Users not using `programs.firefox.enable` can override their firefox
    # derivation by setting
    # `extraPolicies.SecurityDevices.p11-kit-proxy "${pkgs.p11-kit}/lib/p11-kit-proxy.so"`.
    policies.SecurityDevices.p11-kit-proxy = "${pkgs.p11-kit}/lib/p11-kit-proxy.so";
  };

  # Chromium users need a symlink to their (slightly different) .json file
  # in the native messaging hosts' manifest file location.
  environment.etc."chromium/native-messaging-hosts/eu.webeid.json".source = "${pkgs.web-eid-app}/share/web-eid/eu.webeid.json";
  environment.etc."opt/chrome/native-messaging-hosts/eu.webeid.json".source = "${pkgs.web-eid-app}/share/web-eid/eu.webeid.json";

  environment.systemPackages = with pkgs; [
    libdigidocpp.bin # provides digidoc-tool(1)
    qdigidoc

    # Wrapper script to tell Chrome/Chromium to use p11-kit-proxy to load
    # security devices, so they can be used for TLS client auth.
    # Each user needs to run this themselves; it does not work on a system level
    # due to a bug in Chromium:
    #
    # https://bugs.chromium.org/p/chromium/issues/detail?id=16387
    #
    # Firefox users can just set
    # extraPolicies.SecurityDevices.p11-kit-proxy "${pkgs.p11-kit}/lib/p11-kit-proxy.so";
    # when overriding the firefox derivation.
    (pkgs.writeShellScriptBin "setup-browser-eid" ''
      NSSDB="''${HOME}/.pki/nssdb"
      mkdir -p ''${NSSDB}

      ${pkgs.nssTools}/bin/modutil -force -dbdir sql:$NSSDB -add p11-kit-proxy \
        -libfile ${pkgs.p11-kit}/lib/p11-kit-proxy.so
    '')
  ];
}

@@ -1,68 +0,0 @@
# Run the owothia IRC bot.
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.owothia;
  description = "owothia - i'm a service owo";
in
{
  options.services.depot.owothia = {
    enable = lib.mkEnableOption description;

    secretsFile = lib.mkOption {
      type = lib.types.str;
      description = "File path from which systemd should read secrets";
      default = config.age.secretsDir + "/owothia";
    };

    owoChance = lib.mkOption {
      type = lib.types.int;
      description = "How likely is owo?";
      default = 200;
    };

    ircServer = lib.mkOption {
      type = lib.types.str;
      description = "IRC server hostname";
    };

    ircPort = lib.mkOption {
      type = lib.types.int;
      description = "IRC server port";
    };

    ircIdent = lib.mkOption {
      type = lib.types.str;
      description = "IRC username";
      default = "owothia";
    };

    ircChannels = lib.mkOption {
      type = with lib.types; listOf str;
      description = "IRC channels to join";
      default = [ "#tvl" ];
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.owothia = {
      inherit description;
      script = "${depot.fun.owothia}/bin/owothia";
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        DynamicUser = true;
        Restart = "always";
        EnvironmentFile = cfg.secretsFile;
      };

      environment = {
        OWO_CHANCE = toString cfg.owoChance;
        IRC_SERVER = cfg.ircServer;
        IRC_PORT = toString cfg.ircPort;
        IRC_IDENT = cfg.ircIdent;
        IRC_CHANNELS = builtins.toJSON cfg.ircChannels;
      };
    };
  };
}
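
ircServer and ircPort have no defaults, so a consuming host must set them; everything else can be left alone. A minimal sketch with illustrative values:

{
  services.depot.owothia = {
    enable = true;
    ircServer = "irc.hackint.org";
    ircPort = 6697;
    # owoChance keeps its default of 200.
  };
}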

@@ -1,28 +0,0 @@
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.paroxysm;
  description = "TVL's majestic IRC bot";
in
{
  options.services.depot.paroxysm.enable = lib.mkEnableOption description;

  config = lib.mkIf cfg.enable {
    systemd.services.paroxysm = {
      inherit description;
      script = "${depot.fun.paroxysm}/bin/paroxysm";
      wantedBy = [ "multi-user.target" ];

      environment = {
        PARX_DATABASE_URL = "postgresql://tvldb:tvldb@localhost/tvldb";
        PARX_IRC_CONFIG_PATH = "/var/lib/paroxysm/irc.toml";
      };

      serviceConfig = {
        DynamicUser = true;
        StateDirectory = "paroxysm";
        Restart = "always";
      };
    };
  };
}

@@ -1,52 +0,0 @@
{ config, lib, pkgs, depot, ... }:

let
  cfg = config.services.prometheus-fail2ban-exporter;
in
{
  options.services.prometheus-fail2ban-exporter = with lib; {
    enable = mkEnableOption "Prometheus Fail2ban Exporter";

    interval = mkOption {
      description = "Systemd calendar expression for how often to run the exporter";
      type = types.str;
      default = "minutely";
      example = "hourly";
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services."prometheus-fail2ban-exporter" = {
      wantedBy = [ "multi-user.target" ];
      after = [ "network.target" "fail2ban.service" ];

      serviceConfig = {
        User = "root";
        Type = "oneshot";
        ExecStart = pkgs.writeShellScript "prometheus-fail2ban-exporter" ''
          set -eo pipefail
          mkdir -p /var/lib/prometheus/node-exporter
          exec prometheus-fail2ban-exporter
        '';
      };

      path = [
        pkgs.fail2ban
        depot.third_party.prometheus-fail2ban-exporter
      ];
    };

    systemd.timers."prometheus-fail2ban-exporter" = {
      wantedBy = [ "multi-user.target" ];
      timerConfig.OnCalendar = cfg.interval;
    };

    services.prometheus.exporters.node = {
      enabledCollectors = [ "textfile" ];

      extraFlags = [
        "--collector.textfile.directory=/var/lib/prometheus/node-exporter"
      ];
    };
  };
}
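
The moving parts fit together as follows: the timer fires on the configured calendar expression, the oneshot service runs the exporter (which writes metrics under /var/lib/prometheus/node-exporter), and the node exporter's textfile collector publishes whatever lands there. A consumer sketch:

{
  services.prometheus-fail2ban-exporter = {
    enable = true;
    interval = "*:0/5"; # every five minutes instead of the minutely default
  };
}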

@@ -1,83 +0,0 @@
# A more modern module for running Quassel.
{ config, lib, pkgs, ... }:

let
  cfg = config.services.depot.quassel;

  quasselDaemon = pkgs.quassel.override {
    monolithic = false;
    enableDaemon = true;
    withKDE = false;
  };
in
{
  options.services.depot.quassel = with lib; {
    enable = mkEnableOption "Quassel IRC daemon";

    acmeHost = mkOption {
      description = "ACME host to use for the Quassel TLS certificate";
      type = lib.types.str;
    };

    bindAddresses = mkOption {
      description = "Addresses Quassel will bind to/listen on";
      default = [ "127.0.0.1" ];
    };

    logLevel = mkOption {
      description = "Log level for Quassel Core";
      default = "Info";
      type = lib.types.enum [
        "Debug"
        "Info"
        "Warning"
        "Error"
      ];
    };

    port = mkOption {
      default = 6698;
      description = ''
        The port number the Quassel daemon will be listening on.
      '';
    };
  };

  config = with lib; mkIf cfg.enable {
    networking.firewall.allowedTCPPorts = [ cfg.port ];

    systemd.services.quassel = {
      description = "Quassel IRC daemon";
      wantedBy = [ "multi-user.target" ];

      script = concatStringsSep " " [
        "${quasselDaemon}/bin/quasselcore"
        "--listen=${concatStringsSep "," cfg.bindAddresses}"
        "--port=${toString cfg.port}"
        "--configdir=/var/lib/quassel"
        "--require-ssl"
        "--ssl-cert=$CREDENTIALS_DIRECTORY/quassel.pem"
        "--loglevel=${cfg.logLevel}"
      ];

      serviceConfig = {
        Restart = "always";
        User = "quassel";
        Group = "quassel";
        StateDirectory = "quassel";

        # Avoid trouble with the ACME file permissions by using the
        # systemd credentials feature.
        LoadCredential = "quassel.pem:/var/lib/acme/${cfg.acmeHost}/full.pem";
      };
    };

    users = {
      users.quassel = {
        isSystemUser = true;
        group = "quassel";
      };

      groups.quassel = { };
    };
  };
}
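
acmeHost is required and must name an ACME certificate that already exists on the machine; Quassel then reads the certificate through the LoadCredential indirection above. A consumer sketch with an illustrative host name:

{
  services.depot.quassel = {
    enable = true;
    acmeHost = "irc.example.org";
    bindAddresses = [ "0.0.0.0" ]; # listen publicly rather than on loopback
  };
}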

@@ -1,61 +0,0 @@
# NixOS module for configuring the simple SMTP relay.
{ depot, pkgs, config, lib, ... }:

let
  inherit (builtins) attrValues mapAttrs;
  inherit (lib)
    concatStringsSep
    mkEnableOption
    mkIf
    mkOption
    types
    ;

  cfg = config.services.depot.smtprelay;
  description = "Simple SMTP relay";

  # Configuration values that are always overridden.
  #
  # - logging is pinned to stdout for journald compatibility
  # - secret config is loaded through systemd's credential loading facility
  overrideArgs = {
    logfile = "";
    config = "$CREDENTIALS_DIRECTORY/secrets";
  };

  # Creates the command line argument string for the service.
  prepareArgs = args:
    concatStringsSep " "
      (attrValues (mapAttrs (key: value: "-${key} \"${toString value}\"")
        (args // overrideArgs)));
in
{
  options.services.depot.smtprelay = {
    enable = mkEnableOption description;

    args = mkOption {
      type = types.attrsOf types.str;
      description = "Key value pairs for command line arguments";
    };

    secretsFile = mkOption {
      type = types.str;
      default = config.age.secretsDir + "/smtprelay";
    };
  };

  config = mkIf cfg.enable {
    systemd.services.smtprelay = {
      inherit description;
      script = "${pkgs.smtprelay}/bin/smtprelay ${prepareArgs cfg.args}";
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        Restart = "always";
        StateDirectory = "smtprelay";
        DynamicUser = true;
        LoadCredential = "secrets:${cfg.secretsFile}";
      };
    };
  };
}
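
To illustrate prepareArgs: with the (illustrative) args below, the flags render in Nix's alphabetical attribute order as

  -config "$CREDENTIALS_DIRECTORY/secrets" -hostname "relay.example.org" -listen "127.0.0.1:2525" -logfile ""

where -config and -logfile are pinned by overrideArgs regardless of what the user supplies.

{
  services.depot.smtprelay = {
    enable = true;
    args = {
      hostname = "relay.example.org";
      listen = "127.0.0.1:2525";
    };
  };
}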

@@ -1,40 +0,0 @@
# Run the Telegram<>IRC sync bot for the Volga Sprint channel.
#
# This module is written in a pretty ad-hoc style, as it is sort of a
# throwaway thing (will be removed again after the event).
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.teleirc;
  description = "IRC<>Telegram sync for Volga Sprint channel";

  configFile = builtins.toFile "teleirc.env" ''
    # connect through tvlbot's ZNC bouncer
    IRC_SERVER="localhost"
    IRC_PORT=2627
    IRC_USE_SSL=false
    IRC_CHANNEL="#volgasprint"
    IRC_BLACKLIST="tvlbot"
    IRC_BOT_NAME="tvlbot"
    IRC_BOT_REALNAME="TVL bot for Volga Sprint"
    IRC_BOT_IDENT="tvlbot"
    IRC_SEND_STICKER_EMOJI=false # look into this
    TELEGRAM_CHAT_ID=-1002153072030
  '';
in
{
  options.services.depot.teleirc.enable = lib.mkEnableOption description;

  config = lib.mkIf cfg.enable {
    systemd.services.teleirc = {
      inherit description;
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        DynamicUser = true;
        Restart = "always";
        EnvironmentFile = "/run/agenix/teleirc";
        ExecStart = "${depot.third_party.teleirc}/bin/teleirc -conf ${configFile}";
      };
    };
  };
}

@@ -1,22 +0,0 @@
{ config, lib, pkgs, ... }:

{
  options = {
    tvl.cache.enable = lib.mkEnableOption "the TVL binary cache";
    tvl.cache.builderball = lib.mkEnableOption "use experimental builderball cache";
  };

  config = lib.mkIf config.tvl.cache.enable {
    nix.settings = {
      trusted-public-keys = [
        "cache.tvl.su:kjc6KOMupXc1vHVufJUoDUYeLzbwSr9abcAKdn/U1Jk="
      ];

      substituters = [
        (if config.tvl.cache.builderball
         then "https://cache.tvl.fyi"
         else "https://cache.tvl.su")
      ];
    };
  };
}
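
A sketch of what the module resolves to with both switches on:

{
  tvl.cache.enable = true;
  tvl.cache.builderball = true;

  # Resulting nix.settings:
  #   substituters        = [ "https://cache.tvl.fyi" ]; # builderball variant
  #   trusted-public-keys = [ "cache.tvl.su:kjc6KOMupXc1vHVufJUoDUYeLzbwSr9abcAKdn/U1Jk=" ];
}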

@@ -1,71 +0,0 @@
# Configuration for the coordination server for net.tvl.fyi, a
# tailscale network run using headscale.
#
# All TVL members can join this network, which provides several exit
# nodes through which traffic can be routed.
#
# The coordination server is currently run on sanduny.tvl.su. It is
# managed manually; ping somebody with access ... for access.
#
# Servers should join using approximately this command:
#   tailscale up --login-server https://net.tvl.fyi --accept-dns=false --advertise-exit-node
#
# Clients should join using approximately this command:
#   tailscale up --login-server https://net.tvl.fyi --accept-dns=false
{ config, pkgs, ... }:

let
  acl = with builtins; toFile "headscale-acl.json" (toJSON {
    acls = [{
      action = "accept";
      src = [ "*" ];
      dst = [ "*:*" ];
    }];

    groups."group:builders" = [ "tvl" "tvl-builders" ];
    tagOwners."tag:builders" = [ "group:builders" ];
  });
in
{
  # TODO(tazjin): run embedded DERP server
  services.headscale = {
    enable = true;
    port = 4725; # hscl

    settings = {
      server_url = "https://net.tvl.fyi";
      dns.magic_dns = false;
      policy.path = acl;

      # TLS is handled by nginx
      tls_cert_path = null;
      tls_key_path = null;
    };
  };

  environment.systemPackages = [ pkgs.headscale ]; # admin CLI

  services.nginx.virtualHosts."net.tvl.fyi" = {
    serverName = "net.tvl.fyi";
    enableACME = true;
    forceSSL = true;

    # See https://github.com/juanfont/headscale/blob/v0.22.3/docs/reverse-proxy.md#nginx
    extraConfig = ''
      location / {
        proxy_pass http://localhost:${toString config.services.headscale.port};
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
        proxy_set_header Host $server_name;
        proxy_redirect http:// https://;
        proxy_buffering off;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $http_x_forwarded_proto;
        add_header Strict-Transport-Security "max-age=15552000; includeSubDomains" always;
      }
    '';
  };
}

@@ -1,21 +0,0 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."nixery.dev" = {
      serverName = "nixery.dev";
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        location / {
          proxy_pass http://localhost:${toString config.services.depot.nixery.port};
        }
      '';
    };
  };
}

@@ -1,27 +0,0 @@
# Redirect the hostname of a machine to its configuration in a web
# browser.
#
# Works by convention, assuming that the machine has its configuration
# at //ops/machines/${hostname}.
{ config, ... }:

let
  host = "${config.networking.hostName}.${config.networking.domain}";
in
{
  imports = [
    ./base.nix
  ];

  config.services.nginx.virtualHosts."${host}" = {
    serverName = host;
    addSSL = true; # SSL is not forced on these redirects
    enableACME = true;

    extraConfig = ''
      location = / {
        return 302 https://at.tvl.fyi/?q=%2F%2Fops%2Fmachines%2F${config.networking.hostName};
      }
    '';
  };
}

@@ -1,15 +0,0 @@
{ depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."volgasprint.org" = {
      enableACME = true;
      forceSSL = true;
      root = "${depot.web.volgasprint}";
    };
  };
}

@@ -1,15 +0,0 @@
{ depot, lib, pkgs, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."wigglydonke.rs" = {
      enableACME = true;
      forceSSL = true;
      root = "${depot.path + "/users/aspen/wigglydonke.rs"}";
    };
  };
}

@@ -1,78 +0,0 @@
# Profile for virtual machines on Yandex Cloud, intended for disk
# images.
#
# https://cloud.yandex.com/en/docs/compute/operations/image-create/custom-image
#
# TODO(tazjin): Upstream to nixpkgs once it works well.
{ config, lib, pkgs, modulesPath, ... }:

let
  cfg = config.virtualisation.yandexCloud;

  # Kernel modules required for interacting with the hypervisor. These
  # must be available during stage 1 boot and during normal operation,
  # as disks and network do not work without them.
  modules = [
    "virtio-net"
    "virtio-blk"
    "virtio-pci"
    "virtiofs"
  ];
in
{
  imports = [
    "${modulesPath}/profiles/headless.nix"
  ];

  options = {
    virtualisation.yandexCloud.rootPartitionUuid = with lib; mkOption {
      type = types.str;
      default = "C55A5EE2-E5FA-485C-B3AE-CC928429AB6B";

      description = ''
        UUID to use for the root partition of the disk image. Yandex
        Cloud requires that root partitions are mounted by UUID.

        Most users do not need to set this to a non-default value.
      '';
    };
  };

  config = {
    fileSystems."/" = {
      device = "/dev/disk/by-uuid/${lib.toLower cfg.rootPartitionUuid}";
      fsType = "ext4";
      autoResize = true;
    };

    boot = {
      loader.grub.device = "/dev/vda";
      initrd.kernelModules = modules;
      kernelModules = modules;

      kernelParams = [
        # Enable support for the serial console
        "console=ttyS0"
      ];

      growPartition = true;
    };

    environment.etc.securetty = {
      text = "ttyS0";
      mode = "0644";
    };

    systemd.services."serial-getty@ttyS0".enable = true;

    services.openssh.enable = true;

    system.build.yandexCloudImage = import (pkgs.path + "/nixos/lib/make-disk-image.nix") {
      inherit lib config pkgs;
      additionalSpace = "128M";
      format = "qcow2";
      partitionTableType = "legacy+gpt";
      rootGPUID = cfg.rootPartitionUuid;
    };
  };
}
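
Building the image itself goes through the system.build.yandexCloudImage attribute; a sketch of evaluating it standalone, assuming this profile lives next to the expression as ./yandex-cloud.nix:

let
  machine = import <nixpkgs/nixos> {
    configuration = {
      imports = [ ./yandex-cloud.nix ];
    };
  };
in
# Builds a qcow2 disk image suitable for upload as a custom Yandex Cloud image.
machine.config.system.build.yandexCloudImage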