feat(*): initialize new Snix infrastructure

Co-Authored-By: edef <edef@edef.eu>
Co-Authored-By: Ryan Lahfa <raito@lix.systems>
Change-Id: Ica1cda177a236814de900f50a8a61d288f58f519
This commit is contained in:
Florian Klink 2025-01-06 01:06:47 +01:00
parent 067eff3427
commit a52ea3675c
124 changed files with 27723 additions and 1631 deletions

View file

@ -9,5 +9,6 @@
imports = [
./automatic-gc.nix
./auto-deploy.nix
./raito-vm.nix
];
}

305
ops/modules/forgejo.nix Normal file
View file

@ -0,0 +1,305 @@
#
# Forgejo Git Backend taken from Lix configuration.
# Thanks to all the Lix core developers for this!
# vim: et:ts=2:sw=2:
#
{ depot, pkgs, lib, config, ... }:
let
  cfg = config.services.depot.forgejo;
  inherit (lib) types mkEnableOption mkOption mkIf;

  # Custom emoji pack assembled from the volpeon.ink packs plus chr-1x's
  # dragn-emoji set. Exposed as a single store path and symlinked into
  # Forgejo's custom assets directory via tmpfiles (see below); referenced by
  # ui.CUSTOM_EMOJIS / ui.REACTIONS.
  emojo =
    let
      # The volpeon packs ship both <name>.png and <name>_256.png;
      # keep only the 256px variant, renamed to the bare name.
      handlePostFetch = ''
        for i in $out/*_256.png; do
          mv $i $(echo $i | sed -E 's/_256//g')
        done
      '';
      drgn = pkgs.fetchzip {
        url = "https://volpeon.ink/emojis/drgn/drgn.zip";
        stripRoot = false;
        sha256 = "sha256-/2MpbxMJC92a4YhwG5rP6TsDC/q1Ng5fFq4xe2cBrrM=";
        postFetch = handlePostFetch;
      };
      neocat = pkgs.fetchzip {
        url = "https://volpeon.ink/emojis/neocat/neocat.zip";
        stripRoot = false;
        sha256 = "sha256-Irh6Mv6ICDkaaenIFf8Cm1AFkdZy0gRVbXqgnwpk3Qw=";
        postFetch = handlePostFetch;
      };
      neofox = pkgs.fetchzip {
        url = "https://volpeon.ink/emojis/neofox/neofox.zip";
        stripRoot = false;
        sha256 = "sha256-FSTVYP/Bt25JfLr/Ny1g9oI9aAvAYLYhct31j3XRXYc=";
        postFetch = handlePostFetch;
      };
      dragon = pkgs.fetchFromGitHub {
        owner = "chr-1x";
        repo = "dragn-emoji";
        rev = "969543d9918ce2f0794ccd1e41b276d1ab22f0d5";
        sha256 = "sha256-+40e9nKaIpQYZUiXh3Qe5jp2uvRbAQYDdXMGLEWHJio=";
        # This pack ships SVGs only; rasterize to 256px PNGs (matching the
        # other packs) and optimize the result.
        postFetch = ''
          for i in $out/*.svg; do
            ${pkgs.librsvg}/bin/rsvg-convert -h 256 $i > a.png;
            mv a.png $(echo $i | sed -E "s/svg$/png/");
            rm $i
          done
          ${pkgs.oxipng}/bin/oxipng -o max $out/*.png
        '';
      };
    in
    pkgs.symlinkJoin { name = "emojo"; paths = [ drgn neocat neofox dragon ]; };
in
{
  options.services.depot.forgejo = {
    enable = mkEnableOption "Forgejo Forge";
    # Public domain the forge is served under; used for DOMAIN and ROOT_URL.
    domain = mkOption {
      type = types.str;
    };
  };
  config = mkIf cfg.enable {
    # we have to use redis since we apparently have a "large instance" which
    # "leaks hilarious amounts of memory if you use the default configuration"
    services.redis = {
      package = pkgs.valkey;
      vmOverCommit = true;
      servers.forgejo = {
        enable = true;
        # disable persistence, so when redis inevitably OOMs due to
        # forgejo throwing too much in it, we don't restore the dataset
        # that caused the OOM, breaking the restart loop.
        save = [ ];
      };
    };
    systemd.services.redis-forgejo.serviceConfig = {
      Restart = "always";
    };
    # Forgejo uses redis as its cache backend, so order it after redis.
    systemd.services.forgejo = {
      after = [ "redis-forgejo.service" ];
      wants = [ "redis-forgejo.service" ];
    };
    services.forgejo = {
      enable = true;
      # Gerrit-integration patch set maintained in the Lix third_party tree.
      package = pkgs.forgejo.overrideAttrs (old: {
        patches = old.patches ++ (with depot.third_party.lix_forgejo.patches; [
          upstream_link
          signin_redirect
          api_dont_notify
          forgejo_is_now_gerrit_native
          forgejo_knows_about_gerrit
        ]);
      });
      # General settings.
      lfs.enable = true;
      # Make our checkout paths more in line with expectations by calling our user "git".
      user = "git";
      group = "git";
      # Secret mail config.
      # mailerPasswordFile = config.age.secrets.forgejoSmtpSecret.path;
      # Server and database config.
      settings = {
        # Sets the name in the titlebar, mostly.
        DEFAULT.APP_NAME = "Snix Project";
        # Settings for how we serve things.
        server = {
          DOMAIN = cfg.domain;
          # NOTE(review): PROTOCOL = "http" together with ENABLE_ACME = true
          # looks contradictory (ACME is only meaningful when Forgejo itself
          # terminates TLS) — confirm whether TLS is terminated here or by a
          # reverse proxy.
          PROTOCOL = "http";
          ENABLE_ACME = true;
          ACME_ACCEPTTOS = true;
          ACME_EMAIL = "acme@snix.dev";
          LANDING_PAGE = "explore";
          ROOT_URL = "https://${cfg.domain}";
          # open a server on localhost:6060 with pprof data
          # !! note: the documentation says that this causes forgejo serv to dump
          # random files in PPROF_DATA_PATH.
          # This documentation is wrong, ENABLE_PPROF only affects forgejo web,
          # and forgejo serv requires a --enable-pprof arg to do that. But it's
          # not causing perf problems right now so we don't care about that
          # anyway.
          ENABLE_PPROF = true;
        };
        # openid is not used in our setup
        openid = {
          ENABLE_OPENID_SIGNIN = false;
          ENABLE_OPENID_SIGNUP = false;
        };
        oauth2_client = {
          ENABLE_AUTO_REGISTRATION = true;
          REGISTER_EMAIL_CONFIRM = false;
          ACCOUNT_LINKING = "login";
          USERNAME = "nickname";
          OPENID_CONNECT_SCOPES = "email profile";
        };
        cache = {
          ADAPTER = "redis";
          HOST = "redis+socket://${config.services.redis.servers.forgejo.unixSocket}";
        };
        "cache.last_commit" = {
          ITEM_TTL = "24h"; # from default 8760h (1 year)
        };
        service = {
          # We previously ran with "disable registration" which doesn't actually
          # do anything to the OAuth login form, just the link account form. We
          # suspect that if the account has all the required metadata like email
          # to register cleanly, it doesn't use DISABLE_REGISTRATION at all.
          #
          # However this was probably relying on forgejo bugs, let's set it
          # unambiguously.
          DISABLE_REGISTRATION = false;
          ALLOW_ONLY_EXTERNAL_REGISTRATION = true;
          #REQUIRE_SIGNIN_VIEW = false;
          ENABLE_NOTIFY_MAIL = true;
          # Don't add org members as watchers on all repos, or indeed on new
          # repos either.
          #
          # See: https://github.com/bmackinney/gitea/commit/a9eb2167536cfa8f7b7a23f73e11c8edf5dc0dc0
          AUTO_WATCH_NEW_REPOS = false;
        };
        session = {
          # Put sessions in the DB so they survive restarts
          PROVIDER = "db";
          PROVIDER_CONFIG = "";
          # Cookie only works over https
          COOKIE_SECURE = true;
          # 5 day sessions
          SESSION_LIFE_TIME = 86400 * 5;
        };
        # Careful with these!
        security = {
          # Don't allow access to the install page; manage exclusively via Nix.
          INSTALL_LOCK = true;
          # Allow internal users with the right permissions to set up Git hooks.
          DISABLE_GIT_HOOKS = false;
        };
        # Note: PASSWD is set by NixOS up.
        # mailer = {
        #   ENABLED = true;
        #   PROTOCOL = "smtps";
        #   SMTP_ADDR = "";
        #   SMTP_PORT = 465;
        #   USER = "";
        #   FROM = "";
        # };
        ui = {
          # Add the used emojis from https://volpeon.ink/emojis/ as well as https://github.com/chr-1x/dragn-emoji
          CUSTOM_EMOJIS = builtins.readFile depot.third_party.lix_forgejo.custom_emojis;
          # Normal reaction emoji people always need.
          REACTIONS = "+1, -1, laugh, confused, heart, hooray, eyes, melting_face, neocat_scream_scared, neofox_scream_scared, drgn_scream, neocat_heart, neofox_heart, drgn_heart, neocat_floof_reach, neocat_pleading, neofox_floof_reach, neofox_pleading, drgn_pleading";
          # To protect privacy of users.
          SHOW_USER_EMAIL = false;
        };
        # No runners are configured.
        actions.ENABLED = false;
      };
      # Use a MySQL database, which we enable below.
      database = {
        type = "mysql";
        user = config.services.forgejo.user;
      };
    };
    # Inspired from Gerrit's way of doing things (from Lix).
    # Before starting Forgejo, we will re-converge any required information.
    # TODO: learn how to use update-oauth as well?
    systemd.services.forgejo-keys = {
      enable = true;
      before = [ "forgejo.service" ];
      wantedBy = [ "forgejo.service" ];
      after = [ "network.target" ];
      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = "true";
        WorkingDirectory = "/var/lib/forgejo";
        User = "git";
        Group = "git";
        Environment = [
          "FORGEJO_WORK_DIR=/var/lib/forgejo"
        ];
      };
      path = [ config.services.forgejo.package ];
      script = ''
        NAME="Snix project"
        PROVIDER="openidConnect"
        CLIENT_ID="forgejo"
        CLIENT_SECRET=$(cat ${config.age.secrets.forgejo-oauth-secret.path})
        DISCOVERY_URL="https://auth.snix.dev/realms/snix-project/.well-known/openid-configuration"
        SCOPES=("openid" "profile" "email")

        # Check if the OAuth2 source already exists
        if gitea admin auth list | grep -q "$NAME"; then
          echo "OAuth2 source '$NAME' already exists. Skipping creation."
          exit 0
        fi

        # Build one `--scopes <scope>` argument pair per entry. A previous
        # `$(printf -- '--scopes "%s" ' ...)` form left literal quote
        # characters in the arguments, since command substitution output is
        # word-split but never re-quoted.
        SCOPE_ARGS=()
        for scope in "''${SCOPES[@]}"; do
          SCOPE_ARGS+=(--scopes "$scope")
        done

        # Add the OAuth2 source. (A previous version also passed
        # `--icon-url "$ICON_URL"` with ICON_URL never defined, i.e. always
        # empty; the flag has been dropped.)
        gitea admin auth add-oauth \
          --name "$NAME" \
          --provider "$PROVIDER" \
          --key "$CLIENT_ID" \
          --secret "$CLIENT_SECRET" \
          --auto-discover-url "$DISCOVERY_URL" \
          "''${SCOPE_ARGS[@]}"
        echo "OAuth2 source '$NAME' added successfully."
      '';
    };
    # Create our user and group. This is necessary for any name that's
    # not "forgejo", due to the nix module config.
    users.users."${config.services.forgejo.group}" = {
      description = "Gitea Service";
      useDefaultShell = true;
      home = config.services.forgejo.stateDir;
      group = config.services.forgejo.group;
      # redis instance runs as redis-forgejo, so we need to be in that group to be able to connect
      extraGroups = [ "redis-forgejo" ];
      isSystemUser = true;
    };
    users.groups."${config.services.forgejo.group}" = { };
    # Enable the mysql server, which will provide the forgejo backing store.
    services.mysql.enable = lib.mkForce true;
    services.mysql.package = lib.mkForce pkgs.mariadb;
    # Expose the custom emoji pack as static assets inside Forgejo's
    # custom directory (served at /assets/img/emoji).
    systemd.tmpfiles.rules = let cfg = config.services.forgejo; in [
      "d '${cfg.customDir}/public/assets' 0750 ${cfg.user} ${cfg.group} - -"
      "d '${cfg.customDir}/public/assets/img' 0750 ${cfg.user} ${cfg.group} - -"
      "L+ '${cfg.customDir}/public/assets/img/emoji' - - - - ${emojo}"
    ];
  };
}

View file

@ -12,7 +12,7 @@ in
{
options.services.depot.gerrit-autosubmit = {
enable = lib.mkEnableOption description;
gerritUrl = mkStringOption "https://cl.tvl.fyi";
gerritUrl = mkStringOption "https://cl.snix.dev";
secretsFile = with lib; mkOption {
description = "Path to a systemd EnvironmentFile containing secrets";
@ -37,6 +37,7 @@ in
environment = {
GERRIT_URL = cfg.gerritUrl;
GERRIT_USERNAME = "clbot";
};
};
};

View file

@ -0,0 +1,76 @@
# Hardware/network profile for Hetzner Cloud virtual machines.
#
# Sets up systemd-networkd with DHCPv4 + static IPv6 on the WAN interface,
# enables the QEMU guest agent, and loads the virtio kernel modules needed
# to boot on Hetzner's KVM hypervisor.
{ config, lib, pkgs, modulesPath, ... }:
let
cfg = config.infra.hardware.hetzner-cloud;
inherit (lib) types mkOption mkEnableOption mkIf;
in
{
imports =
[ (modulesPath + "/profiles/qemu-guest.nix") ];
options.infra.hardware.hetzner-cloud = {
enable = mkEnableOption "the Hetzner Cloud hardware profile";
# Primary IPv6 address for the WAN interface.
# NOTE(review): presumably expected in CIDR notation (addr/prefix) as
# systemd-networkd's Address= requires — confirm against call sites.
ipv6 = mkOption {
type = types.str;
};
# Additional (floating) IP addresses bound on the same interface.
floatingIPs = mkOption {
type = types.listOf types.str;
default = [ ];
};
};
config = mkIf cfg.enable {
services.qemuGuest.enable = true;
systemd.network.enable = true;
# Addressing is managed explicitly by the network unit below,
# not by the global NixOS DHCP switch.
networking.useDHCP = lib.mkDefault false;
systemd.network.networks."10-wan" = {
matchConfig.Name = "enp1s0";
linkConfig.RequiredForOnline = true;
networkConfig = {
# DHCPv4 for the IPv4 only.
DHCP = "ipv4";
# IPv6 (and any floating IPs) are configured statically.
Address = [ cfg.ipv6 ] ++ cfg.floatingIPs;
};
# IPv6 default route via the link-local gateway.
routes = [
{
Gateway = "fe80::1";
}
];
# NOTE(review): presumably Hetzner's own recursive resolvers — confirm.
dns = [ "2a01:4ff:ff00::add:1" "2a01:4ff:ff00::add:2" ];
};
boot.loader.systemd-boot.enable = true;
# virtio devices needed early (console, entropy, ballooning).
boot.initrd.kernelModules = [
"virtio_balloon"
"virtio_console"
"virtio_rng"
];
# Storage/network/input drivers that may be required to mount the root fs.
boot.initrd.availableKernelModules = [
"9p"
"9pnet_virtio"
"ata_piix"
"nvme"
"sd_mod"
"sr_mod"
"uhci_hcd"
"virtio_blk"
"virtio_mmio"
"virtio_net"
"virtio_pci"
"virtio_scsi"
"xhci_pci"
"ahci"
];
boot.kernelModules = [ ];
boot.extraModulePackages = [ ];
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
};
}

View file

@ -1,8 +1,23 @@
# Configure public keys for SSH hosts known to TVL.
# Configure public keys for SSH hosts known to the snix project.
{ ... }:
{
programs.ssh.knownHosts = {
public01 = {
hostNames = [ "public01.infra.snix.dev" "git.snix.dev" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICzB7bqXWcv+sVokySvj1d74zRlVLSNqBw7/OY3c7QYd";
};
gerrit01 = {
hostNames = [ "gerrit01.infra.snix.dev" "cl.snix.dev" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN+RCLAExaM5EC70UsCPMtDT1Cfa80Ux/vex95fLk9S4";
};
build01 = {
hostNames = [ "build01.infra.snix.dev" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEteVaeN/FEAY8yyGWdAbv6+X6yv2m8+4F5qZEAhxW9f";
};
github = {
hostNames = [ "github.com" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl";

View file

@ -1,10 +1,13 @@
# Gerrit configuration for the TVL monorepo
# Gerrit configuration for the snix monorepo
{ depot, pkgs, config, lib, ... }:
let
cfg = config.services.gerrit;
besadiiWithConfig = name: pkgs.writeShellScript "besadii-hook" ''
gerritPackage = depot.third_party.nix-gerrit.gerrit_3_11;
gerritPlugins = depot.third_party.nix-gerrit.plugins_3_11;
besadiiWithConfig = name: pkgs.writeShellScript "besadii-gerrit01" ''
export BESADII_CONFIG=/run/agenix/gerrit-besadii-config
exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
'';
@ -16,10 +19,11 @@ let
'';
in
{
networking.firewall.allowedTCPPorts = [ 29418 ];
services.gerrit = {
enable = true;
listenAddress = "[::]:4778"; # 4778 - grrt
serverId = "4fdfa107-4df9-4596-8e0a-1d2bbdd96e36";
serverId = "b4813230-0b9b-46cb-b400-dcbed70f87e6";
builtinPlugins = [
"download-commands"
@ -27,28 +31,32 @@ in
"replication"
];
plugins = with depot.third_party.gerrit_plugins; [
code-owners
plugins = with gerritPlugins; [
# TODO: re-enable once we have figured out all the email situation.
# code-owners
oauth
depot.ops.gerrit-tvl
(depot.ops.gerrit-tvl {
gerrit = gerritPackage;
})
];
package = depot.third_party.gerrit;
package = gerritPackage;
jvmHeapLimit = "4g";
# In some NixOS channel bump, the default version of OpenJDK has
# changed to one that is incompatible with our current version of
# Gerrit.
#
# TODO(tazjin): Update Gerrit and remove this when possible.
jvmPackage = pkgs.openjdk17_headless;
# WARN(raito): keep this synchronized with the Gerrit version!
jvmPackage = pkgs.openjdk21_headless;
jvmOpts = [
# https://bugs.openjdk.org/browse/JDK-8170568 someday… !
"-Djava.net.preferIPv6Addresses=system"
];
settings = {
core.packedGitLimit = "100m";
log.jsonLogging = true;
log.textLogging = false;
sshd.advertisedAddress = "code.tvl.fyi:29418";
sshd.advertisedAddress = "cl.snix.dev:29418";
hooks.path = "${gerritHooks}";
cache.web_sessions.maxAge = "3 months";
plugins.allowRemoteAdmin = false;
@ -58,7 +66,7 @@ in
# Configures gerrit for being reverse-proxied by nginx as per
# https://gerrit-review.googlesource.com/Documentation/config-reverseproxy.html
gerrit = {
canonicalWebUrl = "https://cl.tvl.fyi";
canonicalWebUrl = "https://cl.snix.dev";
docUrl = "/Documentation";
};
@ -72,43 +80,43 @@ in
];
# Configure for cgit.
gitweb = {
type = "custom";
url = "https://code.tvl.fyi";
project = "/";
revision = "/commit/?id=\${commit}";
branch = "/log/?h=\${branch}";
tag = "/tag/?h=\${tag}";
roottree = "/tree/?h=\${commit}";
file = "/tree/\${file}?h=\${commit}";
filehistory = "/log/\${file}?h=\${branch}";
linkname = "cgit";
};
# gitweb = {
# type = "custom";
# url = "https://code.snix.dev";
# project = "/";
# revision = "/commit/?id=\${commit}";
# branch = "/log/?h=\${branch}";
# tag = "/tag/?h=\${tag}";
# roottree = "/tree/?h=\${commit}";
# file = "/tree/\${file}?h=\${commit}";
# filehistory = "/log/\${file}?h=\${branch}";
# linkname = "cgit";
# };
# Auto-link panettone bug links
commentlink.panettone = {
match = "b/(\\d+)";
link = "https://b.tvl.fyi/issues/$1";
};
# # Auto-link panettone bug links
# commentlink.panettone = {
# match = "b/(\\d+)";
# link = "https://b.tvl.fyi/issues/$1";
# };
# Auto-link other CLs
commentlink.gerrit = {
match = "cl/(\\d+)";
link = "https://cl.tvl.fyi/$1";
link = "https://cl.snix.dev/$1";
};
# Auto-link links to monotonically increasing revisions/commits
commentlink.revision = {
match = "r/(\\d+)";
link = "https://code.tvl.fyi/commit/?h=refs/r/$1";
};
# commentlink.revision = {
# match = "r/(\\d+)";
# link = "https://code.tvl.fyi/commit/?h=refs/r/$1";
# };
# Configures integration with Keycloak, which then integrates with a
# variety of backends.
auth.type = "OAUTH";
plugin.gerrit-oauth-provider-keycloak-oauth = {
root-url = "https://auth.tvl.fyi/auth";
realm = "TVL";
root-url = "https://auth.snix.dev/";
realm = "snix-project";
client-id = "gerrit";
# client-secret is set in /var/lib/gerrit/etc/secure.config.
};
@ -136,31 +144,34 @@ in
# $site_path/etc/secure.config and is *not* controlled by Nix.
#
# Receiving email is not currently supported.
sendemail = {
enable = true;
html = false;
connectTimeout = "10sec";
from = "TVL Code Review <tvlbot@tazj.in>";
includeDiff = true;
smtpEncryption = "none";
smtpServer = "localhost";
smtpServerPort = 2525;
};
# sendemail = {
# enable = true;
# html = false;
# connectTimeout = "10sec";
# from = "TVL Code Review <tvlbot@tazj.in>";
# includeDiff = true;
# smtpEncryption = "none";
# smtpServer = "localhost";
# smtpServerPort = 2525;
# };
};
# Replication of the depot repository to secondary machines, for
# serving cgit/josh.
# Replication of the snix repository to secondary machines, for
# serving forgejo.
replicationSettings = {
gerrit.replicateOnStartup = true;
remote.sanduny = {
url = "depot@sanduny.tvl.su:/var/lib/depot";
projects = "depot";
};
remote.bugry = {
url = "depot@bugry.tvl.fyi:/var/lib/depot";
projects = "depot";
# Replicate to our forgejo instance.
remote.forgejo = {
url = "git@git.snix.dev:snix/snix.git";
push = [ "+refs/heads/*:refs/heads/*" "+refs/tags/*:refs/tags/*" ];
timeout = 30;
threads = 3;
remoteNameStyle = "dash";
mirror = true;
# we are unsure if this should be private info
replicatePermissions = false;
projects = [ "snix" ];
};
};
};
@ -178,6 +189,52 @@ in
};
};
# Taken from Lix.
# Before starting gerrit, we'll want to create a "secure auth" file that contains our secrets.
systemd.services.gerrit-keys = {
enable = true;
before = [ "gerrit.service" ];
wantedBy = [ "gerrit.service" ];
after = [ "network.target" ];
serviceConfig = {
Type = "oneshot";
RemainAfterExit = "true";
WorkingDirectory = "/var/lib/gerrit";
};
path = [ pkgs.git ];
script = ''
CONF=etc/secure.config
# Ensure our config file is accessible to gerrit.
touch $CONF
chmod 600 $CONF
# Configure the SSH replication material
mkdir -p /var/lib/git/.ssh
cp ${config.age.secrets.gerrit-replication-key.path} /var/lib/git/.ssh/id_replication
cat > /var/lib/git/.ssh/config <<EOF
Host *
IdentityFile /var/lib/git/.ssh/id_replication
EOF
chmod 600 /var/lib/git/.ssh/id_replication
chmod 600 /var/lib/git/.ssh/config
chmod 700 /var/lib/git/.ssh
cp -L /etc/ssh/ssh_known_hosts /var/lib/git/.ssh/known_hosts
chmod 600 /var/lib/git/.ssh/known_hosts
chown -R git:git /var/lib/git/.ssh
# ... and finally, plop our secrets inside, and give the file to gerrit.
git config -f $CONF plugin.gerrit-oauth-provider-keycloak-oauth.client-secret \
"$(cat ${config.age.secrets.gerrit-oauth-secret.path})"
chown git:git $CONF
'';
};
services.depot.restic = {
paths = [ "/var/lib/gerrit" ];
exclude = [ "/var/lib/gerrit/tmp" ];

132
ops/modules/o11y/agent.nix Normal file
View file

@ -0,0 +1,132 @@
# Grafana Agent: per-host metrics scraper shipping to the central Mimir
# instance over remote_write.
#
# Other modules register exporters via
# `infra.monitoring.grafana-agent.exporters.<name>.port`; each entry becomes
# a Prometheus scrape job (job label = attribute name) targeting
# localhost:<port>.
{ depot
, config
, lib
, ...
}:
let
cfg = config.infra.monitoring.grafana-agent;
inherit (lib) mkEnableOption mkOption mkIf types;
# Secrets are exposed to the agent through systemd credentials.
passwordAsCredential = "\${CREDENTIALS_DIRECTORY}/password";
in
{
options.infra.monitoring.grafana-agent = {
# On by default: every host should report metrics.
enable = (mkEnableOption "Grafana Agent") // { default = true; };
exporters = mkOption {
description = ''
Set of additional exporters to scrape.
The attribute name will be used as `job_name`
internally, which ends up exported as `job` label
on all metrics of that exporter.
'';
type = types.attrsOf (types.submodule ({ config, name, ... }: {
options.port = mkOption {
description = "Exporter port";
type = types.int;
};
options.bearerTokenFile = mkOption {
description = "File containing a bearer token";
type = types.nullOr types.path;
default = null;
};
options.scrapeConfig = mkOption {
description = "Prometheus scrape config";
type = types.attrs;
};
# Derive the scrape config from port/name; merge in bearer-token
# authorization only when a token file is configured.
config.scrapeConfig = lib.mkMerge [{
job_name = name;
static_configs = [
{ targets = [ "localhost:${toString config.port}" ]; }
];
}
(lib.mkIf (config.bearerTokenFile != null) {
authorization.credentials_file = "\${CREDENTIALS_DIRECTORY}/${name}-bearer-token";
})];
options.secrets = mkOption {
description = "Secrets required for scrape config";
type = types.attrs;
internal = true;
default = { };
};
# Each exporter's token file is surfaced as a systemd credential
# named "<name>-bearer-token" (collected below).
config.secrets = lib.mkIf (config.bearerTokenFile != null) {
"${name}-bearer-token" = config.bearerTokenFile;
};
}));
default = { };
};
};
config = mkIf cfg.enable {
age.secrets.grafana-agent-password.file = depot.ops.secrets."grafana-agent-password.age";
services.grafana-agent = {
enable = true;
# Merge the remote_write password with all per-exporter secrets into
# the agent's credential set.
credentials = lib.mkMerge ([{ password = config.age.secrets.grafana-agent-password.path; }] ++
lib.mapAttrsToList (name: value: value.secrets) config.infra.monitoring.grafana-agent.exporters);
settings = {
metrics = {
global.remote_write = [
{
url = "https://mimir.snix.dev/api/v1/push";
basic_auth = {
username = "promtail";
password_file = passwordAsCredential;
};
}
];
# Tag every series with the reporting host.
global.external_labels = {
hostname = config.networking.hostName;
};
configs = [
{
name = config.networking.hostName;
scrape_configs = lib.mapAttrsToList (name: value: value.scrapeConfig) config.infra.monitoring.grafana-agent.exporters;
}
];
};
# Log shipping to Loki is currently disabled; kept for reference.
# logs = {
# global.clients = [
# {
# url = "https://loki.forkos.org/loki/api/v1/push";
# basic_auth = {
# username = "promtail";
# password_file = passwordAsCredential;
# };
# }
# ];
# configs = [
# {
# name = "journald";
# scrape_configs = [
# {
# job_name = "system";
# journal = {
# max_age = "12h";
# labels = {
# job = "systemd-journal";
# host = config.networking.hostName;
# };
# };
# relabel_configs = [
# {
# source_labels = [ "__journal__systemd_unit" ];
# target_label = "unit";
# }
# ];
# }
# ];
# }
# ];
# positions_directory = "\${STATE_DIRECTORY}/positions";
# };
integrations.node_exporter.enable_collectors = [
"processes"
"systemd"
];
};
};
};
}

View file

@ -0,0 +1,20 @@
# Relays Alertmanager webhook notifications to IRC (hackint, #snix).
{ config, depot, ... }:
{
  imports = [ depot.third_party.alertmanager-irc-relay.module ];

  services.alertmanager-irc-relay = {
    enable = true;
    # Secrets such as $CHANNEL_PASSWORD are substituted from this
    # environment file at runtime.
    environmentFiles = [
      config.age.secrets.alertmanager-irc-relay-environment.path
    ];
    settings = {
      irc_host = "irc.hackint.org";
      irc_port = 6697;
      irc_nickname = "silentfox";
      irc_channels = [{
        name = "#snix";
        password = "$CHANNEL_PASSWORD";
      }];
    };
  };
}

View file

View file

@ -0,0 +1,148 @@
# Grafana frontend (status.snix.dev): dashboards and datasources for the
# Mimir/Loki/Tempo observability stack, authenticated against the
# snix-project Keycloak realm via generic OAuth.
{ depot
, config
, lib
, ...
}:
let
cfg = config.services.depot.grafana;
inherit (lib) mkEnableOption mkIf;
in
{
options.services.depot.grafana.enable = mkEnableOption "Grafana frontend";
config = mkIf cfg.enable {
services = {
grafana = {
enable = true;
settings = {
server = {
domain = "status.snix.dev";
# Bound to loopback; TLS/virtual hosting is handled elsewhere.
http_addr = "127.0.0.1";
http_port = 2342;
root_url = "https://status.snix.dev/";
};
# Postgres over the local unix socket; DB/user provisioned below.
database = {
type = "postgres";
user = "grafana";
host = "/run/postgresql";
};
# Anyone may view dashboards without logging in.
"auth.anonymous" = {
enabled = true;
org_name = "Main Org.";
org_role = "Viewer";
};
"auth.generic_oauth" = {
enabled = true;
name = "snix SSO";
client_id = "grafana";
# $__file{} makes Grafana read the secret at runtime, keeping it
# out of the Nix store.
client_secret = "$__file{${config.age.secrets.grafana-oauth-secret.path}}";
auth_url = "https://auth.snix.dev/realms/snix-project/protocol/openid-connect/auth";
token_url = "https://auth.snix.dev/realms/snix-project/protocol/openid-connect/token";
api_url = "https://auth.snix.dev/realms/snix-project/protocol/openid-connect/userinfo";
login_attribute_path = "username";
email_attribute_path = "email";
name_attribute_path = "full_name";
scopes = [
"openid"
"profile"
"email"
"offline_access"
"roles"
];
allow_sign_up = true;
auto_login = true;
allow_assign_grafana_admin = true;
# Map Keycloak's grafana_roles claim onto Grafana roles,
# defaulting to Viewer.
role_attribute_path = "contains(grafana_roles[*], 'Admin') && 'GrafanaAdmin' || contains(grafana_roles[*], 'Editor') && 'Editor' || 'Viewer'";
};
dashboards.default_home_dashboard_path = "${depot.ops.dashboards.node_exporter}";
feature_toggles.enable = "autoMigrateOldPanels newVizTooltips";
security.angular_support_enabled = false;
};
# Declaratively provision dashboards and datasources.
provision = {
dashboards.settings = {
apiVersion = 1;
providers = [
{
name = "default";
options.path = depot.ops.dashboards.all;
}
];
};
datasources.settings = {
apiVersion = 1;
datasources = [
{
name = "Mimir";
type = "prometheus";
uid = "mimir";
access = "proxy";
url = "http://mimir.snix.dev:9009/prometheus";
isDefault = true;
}
{
name = "Loki";
type = "loki";
uid = "loki";
access = "proxy";
url = "http://loki.snix.dev:9090/";
}
{
name = "Tempo";
type = "tempo";
uid = "tempo";
access = "proxy";
url = "http://tempo.snix.dev:9190";
jsonData.streamingEnabled.search = true;
}
{
name = "Mimir Alertmanager";
type = "alertmanager";
uid = "mimir-alertmanager";
access = "proxy";
url = "http://mimir.snix.dev:9009/";
jsonData = {
handleGrafanaManagedAlerts = true;
implementation = "mimir";
};
}
# {
# name = "Pyroscope";
# type = "grafana-pyroscope-datasource";
# uid = "pyroscope";
# access = "proxy";
# url = "http://127.0.0.1:4040";
# }
];
};
};
};
# Backing database for Grafana itself.
postgresql = {
ensureDatabases = [ "grafana" ];
ensureUsers = [
{
name = "grafana";
ensureDBOwnership = true;
}
];
};
};
# Scrape Grafana's own metrics endpoint.
infra.monitoring.grafana-agent.exporters.grafana.port = 2342;
};
}

90
ops/modules/o11y/loki.nix Normal file
View file

@ -0,0 +1,90 @@
# Loki log store: single-node deployment with TSDB index and S3-compatible
# object storage (Hetzner object storage). S3 credentials are injected via
# environment variables expanded by --config.expand-env.
{ config
, lib
, ...
}:
let
cfg = config.services.depot.loki;
inherit (lib) mkEnableOption mkIf;
in
{
options.services.depot.loki.enable = mkEnableOption "Loki storage";
config = mkIf cfg.enable {
services.loki = {
enable = true;
# Allow ${VAR} references in the config to be expanded from the
# service environment (see EnvironmentFile below).
extraFlags = [ "--config.expand-env" ];
configuration = {
server = {
http_listen_port = 9090;
grpc_listen_port = 9096;
# 16M
grpc_server_max_recv_msg_size = 16777216;
grpc_server_max_send_msg_size = 16777216;
};
# Single-tenant setup; no per-request auth inside Loki itself.
auth_enabled = false;
common = {
storage.s3 = {
endpoint = "fsn1.your-objectstorage.com";
region = "fsn1";
bucketnames = "snix-loki";
secret_access_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
access_key_id = "\${S3_KEY_ID}";
s3forcepathstyle = true;
};
ring = {
kvstore.store = "memberlist";
# TODO: Such an ugly hack (pin the interfaces the ring may
# advertise on, so it doesn't pick a useless one).
instance_interface_names = [ "enp1s0" "lo" ];
};
# Single node, no redundancy.
replication_factor = 1;
};
memberlist = {
advertise_addr = "127.0.0.1";
cluster_label = "snix";
bind_port = 7947;
advertise_port = 7947;
};
storage_config.tsdb_shipper = {
active_index_directory = "/var/lib/loki/index";
cache_location = "/var/lib/loki/cache";
};
compactor = {
working_directory = "/var/lib/loki/compactor";
compaction_interval = "10m";
retention_enabled = true;
retention_delete_delay = "1s";
retention_delete_worker_count = 150;
delete_request_store = "filesystem";
};
# Keep logs for one week.
limits_config.retention_period = "1w";
schema_config = {
configs = [
{
from = "2024-07-01";
store = "tsdb";
object_store = "s3";
schema = "v13";
index = {
prefix = "index_";
period = "24h";
};
}
];
};
};
};
# Provides S3_KEY / S3_KEY_ID referenced above.
systemd.services.loki.serviceConfig.EnvironmentFile = [ config.age.secrets.loki-environment.path ];
infra.monitoring.grafana-agent.exporters.loki.port = 9090;
};
}

123
ops/modules/o11y/mimir.nix Normal file
View file

@ -0,0 +1,123 @@
# Mimir: Prometheus-compatible metrics store (plus its embedded
# Alertmanager), single-node, backed by S3-compatible object storage.
# Alert rules are validated with promtool at build time.
{ config
, lib
, pkgs
, ...
}:
let
cfg = config.services.depot.prometheus;
inherit (lib) mkEnableOption mkIf;
mimirPort = config.services.mimir.configuration.server.http_listen_port;
# Check the rule files in ./alerts with promtool before baking them into
# the ruler's local store; "anonymous" is the tenant directory name.
alerts = pkgs.runCommand "mimir-alerts-checked"
{
src = ./alerts;
nativeBuildInputs = with pkgs; [ prometheus.cli ];
} ''
promtool check rules $src/*
mkdir $out
cp -R $src $out/anonymous/
'';
in
{
options.services.depot.prometheus.enable = mkEnableOption "Prometheus scraper";
config = mkIf cfg.enable {
services.mimir = {
enable = true;
# Allow ${VAR} secrets in the config (see EnvironmentFile below).
extraFlags = [ "--config.expand-env=true" ];
configuration = {
# Run all components plus the alertmanager in one process.
target = "all,alertmanager";
multitenancy_enabled = false;
common.storage = {
backend = "s3";
s3 = {
endpoint = "fsn1.your-objectstorage.com";
bucket_name = "snix-mimir";
secret_access_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
access_key_id = "\${S3_KEY_ID}";
};
};
# TODO: Such an ugly hack (pin the interfaces each ring may
# advertise on, so gossip doesn't pick a useless one).
distributor.ring.instance_interface_names = [ "enp1s0" "lo" ];
ingester.ring.instance_interface_names = [ "enp1s0" "lo" ];
frontend.instance_interface_names = [ "enp1s0" "lo" ];
query_scheduler.ring.instance_interface_names = [ "enp1s0" "lo" ];
ruler.ring.instance_interface_names = [ "enp1s0" "lo" ];
compactor.sharding_ring.instance_interface_names = [ "enp1s0" "lo" ];
store_gateway.sharding_ring.instance_interface_names = [ "enp1s0" "lo" ];
memberlist = {
advertise_addr = "127.0.0.1";
cluster_label = "snix";
};
server = {
http_listen_port = 9009;
grpc_server_max_recv_msg_size = 104857600;
grpc_server_max_send_msg_size = 104857600;
grpc_server_max_concurrent_streams = 1000;
};
# Single node, no redundancy.
ingester.ring.replication_factor = 1;
distributor.instance_limits.max_ingestion_rate = 0; # unlimited
limits = {
ingestion_rate = 1000000; # can't set to unlimited :(
out_of_order_time_window = "12h";
max_global_series_per_user = 0; # unlimited
};
blocks_storage.backend = "s3";
# Serve the promtool-checked rules from the store path built above.
ruler_storage = {
backend = "local";
local.directory = alerts;
};
alertmanager = {
sharding_ring = {
replication_factor = 1;
# TODO: hack
instance_interface_names = [ "enp1s0" ];
};
# Default route: everything goes to the IRC relay webhook.
fallback_config_file = pkgs.writers.writeYAML "alertmanager.yaml" {
route = {
group_by = [ "alertname" ];
receiver = "irc";
};
receivers = [
{
name = "irc";
webhook_configs = [{
# Mimir can't expand environment variables in external config files,
# so work around it.
url_file = "/run/credentials/mimir.service/webhook-url";
}];
}
];
};
};
alertmanager_storage.backend = "filesystem";
ruler.alertmanager_url = "http://localhost:${toString mimirPort}/alertmanager";
};
};
systemd.services.mimir = {
# Mimir tries to determine its own IP address for gossip purposes,
# even when it's the only instance, and fails if it can't find one.
# Avoid that by ensuring it starts after the network is set up.
wants = [ "network-online.target" ];
after = [ "network-online.target" ];
serviceConfig = {
EnvironmentFile = [ config.age.secrets.mimir-environment.path ];
LoadCredential = [ "webhook-url:${config.age.secrets.mimir-webhook-url.path}" ];
};
};
infra.monitoring.grafana-agent.exporters.mimir.port = 9009;
};
}

View file

@ -0,0 +1,71 @@
# Tempo trace store: single-node, S3-backed, with an nginx-fronted OTLP/HTTP
# ingest endpoint at tempo.snix.dev (basic-auth protected).
{ config
, lib
, ...
}:
let
cfg = config.services.depot.tempo;
inherit (lib) mkEnableOption mkIf;
in
{
options.services.depot.tempo.enable = mkEnableOption "Tempo trace store";
config = mkIf cfg.enable {
services.tempo = {
enable = true;
# Allow ${VAR} secrets in the config (see EnvironmentFile below).
extraFlags = [ "--config.expand-env=true" ];
settings = {
multitenancy_enabled = false;
stream_over_http_enabled = true;
server = {
http_listen_port = 9190;
grpc_listen_port = 9195;
};
# NOTE(review): 4138 looks like a transposition of the conventional
# OTLP/HTTP port 4318. It is internally consistent (nginx below proxies
# to this same setting), so it works — but confirm it was intentional.
distributor.receivers.otlp.protocols.http.endpoint = "127.0.0.1:4138";
# TODO: S3
storage.trace = {
backend = "s3";
s3 = {
endpoint = "fsn1.your-objectstorage.com";
bucket = "snix-tempo";
secret_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
access_key = "\${S3_KEY_ID}";
};
wal.path = "/var/lib/tempo/traces-wal";
};
# Span-derived metrics are remote-written to the local Mimir (port 9009).
metrics_generator.storage = {
path = "/var/lib/tempo/metrics-wal";
remote_write = [
{
url = "http://127.0.0.1:9009/api/v1/push";
}
];
};
overrides.defaults.metrics_generator.processors = [ "span-metrics" ];
};
};
# Provides S3_KEY / S3_KEY_ID referenced above.
systemd.services.tempo.serviceConfig.EnvironmentFile = [ config.age.secrets.tempo-environment.path ];
# Publicly expose the OTLP ingest endpoint behind TLS + basic auth.
services.nginx = {
upstreams.tempo = {
servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" = { };
extraConfig = "keepalive 16;";
};
virtualHosts."tempo.snix.dev" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://tempo";
basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
};
};
};
infra.monitoring.grafana-agent.exporters.tempo.port = 9190;
};
}

76
ops/modules/raito-vm.nix Normal file
View file

@ -0,0 +1,76 @@
# Hardware/network profile for VMs hosted by Raito: static IPv6 WAN,
# optional NAT64 routing, and the virtio modules needed to boot the guest.
{ lib, config, ... }:
let
cfg = config.infra.hardware.raito-vm;
inherit (lib) mkEnableOption mkIf mkOption types;
in
{
options.infra.hardware.raito-vm = {
enable = mkEnableOption "Raito's VM hardware defaults";
networking = {
nat64.enable = mkEnableOption "the setup of NAT64 rules to the local NAT64 node";
wan = {
address = mkOption {
type = types.str;
description = "IPv6 prefix for WAN. Ask Raito when in doubt.";
};
mac = mkOption {
type = types.str;
description = "MAC address for the WAN interface.";
};
};
};
};
config = mkIf cfg.enable {
services.qemuGuest.enable = true;
systemd.network.enable = true;
# Addressing is managed explicitly by the units below.
networking.useDHCP = lib.mkDefault false;
systemd.network.networks."10-wan" = {
# Matches the interface renamed to "wan" by the link unit below.
matchConfig.Name = "wan";
linkConfig.RequiredForOnline = true;
networkConfig.Address = [ cfg.networking.wan.address ];
# Route the NAT64 well-known prefix via the local NAT64 gateway
# (only when NAT64 is enabled).
routes = mkIf cfg.networking.nat64.enable [
{
Destination = "64:ff9b::/96";
Gateway = "2001:bc8:38ee:100::100";
Scope = "site";
}
];
# Enable DNS64 resolvers from Google, I'm too lazy.
dns = mkIf cfg.networking.nat64.enable [ "2001:4860:4860::6464" "2001:4860:4860::64" ];
};
# Rename the interface with the configured MAC to "wan".
systemd.network.links."10-wan" = {
matchConfig.MACAddress = cfg.networking.wan.mac;
linkConfig.Name = "wan";
};
boot.loader.systemd-boot.enable = true;
# virtio devices needed early (console, entropy, ballooning).
boot.initrd.kernelModules = [
"virtio_balloon"
"virtio_console"
"virtio_rng"
];
# Storage/network drivers that may be required to mount the root fs.
boot.initrd.availableKernelModules = [
"9p"
"9pnet_virtio"
"ata_piix"
"nvme"
"sr_mod"
"uhci_hcd"
"virtio_blk"
"virtio_mmio"
"virtio_net"
"virtio_pci"
"virtio_scsi"
"xhci_pci"
];
};
}

View file

@ -1,13 +1,14 @@
# Configure restic backups to S3-compatible storage, in our case
# Yandex Cloud Storage.
# Hetzner Cloud object storage.
#
# When adding a new machine, the repository has to be initialised once. Refer to
# the Restic documentation for details on this process.
{ config, depot, lib, pkgs, ... }:
# Conventions:
# - restic's cache lives in /var/backup/restic/cache
# - repository password lives in `config.age.secrets.restic-repository-password.path`
# - object storage credentials in `config.age.secrets.restic-bucket-credentials.path`
{ config, lib, pkgs, ... }:
let
cfg = config.services.depot.restic;
description = "Restic backups to Yandex Cloud";
mkStringOption = default: lib.mkOption {
inherit default;
type = lib.types.str;
@ -15,10 +16,10 @@ let
in
{
options.services.depot.restic = {
enable = lib.mkEnableOption description;
bucketEndpoint = mkStringOption "storage.yandexcloud.net";
bucketName = mkStringOption "tvl-backups";
bucketCredentials = mkStringOption "/run/agenix/yc-restic";
enable = lib.mkEnableOption "the restic backups";
bucketEndpoint = mkStringOption "fsn1.your-objectstorage.com";
bucketName = mkStringOption "snix-backups";
bucketCredentials = mkStringOption config.age.secrets.restic-bucket-credentials.path;
repository = mkStringOption config.networking.hostName;
interval = mkStringOption "hourly";
@ -30,24 +31,24 @@ in
exclude = with lib; mkOption {
description = "Files that should be excluded from backups";
type = types.listOf types.str;
default = [ ];
};
};
config = lib.mkIf cfg.enable {
age.secrets = {
restic-password.file = depot.ops.secrets."restic-${config.networking.hostName}.age";
yc-restic.file = depot.ops.secrets."yc-restic.age";
};
systemd.services.restic = {
description = "Backups to Yandex Cloud";
description = "Backups to Hetzner Cloud";
script = "${pkgs.restic}/bin/restic backup ${lib.concatStringsSep " " cfg.paths}";
serviceConfig.ExecStartPre = pkgs.writeShellScript "init-repo" ''
${pkgs.restic}/bin/restic init && echo "Initializing the repository." || echo "Already initialized."
'';
environment = {
RESTIC_REPOSITORY = "s3:${cfg.bucketEndpoint}/${cfg.bucketName}/${cfg.repository}";
AWS_SHARED_CREDENTIALS_FILE = cfg.bucketCredentials;
RESTIC_PASSWORD_FILE = "/run/agenix/restic-password";
RESTIC_PASSWORD_FILE = config.age.secrets.restic-repository-password.path;
RESTIC_CACHE_DIR = "/var/backup/restic/cache";
RESTIC_EXCLUDE_FILE =

View file

@ -0,0 +1,112 @@
# Configuration for the snix buildkite agents.
#
# Launches `agentCount` Buildkite agents on this host, all in the "default"
# queue; the first `largeSlots` of them additionally advertise `large=true`
# so large jobs can be scheduled onto a subset of slots.
{ config
, depot
, pkgs
, lib
, ...
}:
let
  cfg = config.services.depot.buildkite;
  # Agent slot indices: 1..agentCount.
  agents = lib.range 1 cfg.agentCount;
  hostname = config.networking.hostName;
  description = "Buildkite agents for snix";
  # Wrap besadii under a different argv[0]; besadii dispatches on the
  # name it was invoked as (here: the Buildkite hook name).
  besadiiWithConfig =
    name:
    pkgs.writeShellScript "besadii-${hostname}" ''
      export BESADII_CONFIG=/run/agenix/buildkite-besadii-config
      exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
    '';
  # All Buildkite hooks are actually besadii, but it's being invoked
  # with different names.
  buildkiteHooks = pkgs.runCommand "buildkite-hooks" { } ''
    mkdir -p $out/bin
    ln -s ${besadiiWithConfig "post-command"} $out/bin/post-command
  '';
  # Git credential helper handing out besadii's Gerrit HTTP password.
  credentialHelper = pkgs.writeShellScriptBin "git-credential-gerrit-creds" ''
    echo 'username=besadii'
    echo "password=$(jq -r '.gerritPassword' /run/agenix/buildkite-besadii-config)"
  '';
in
{
  options.services.depot.buildkite = {
    enable = lib.mkEnableOption description;
    agentCount = lib.mkOption {
      type = lib.types.int;
      description = "Number of Buildkite agents to launch";
    };
    largeSlots = lib.mkOption {
      type = lib.types.int;
      # By default every agent accepts large jobs.
      default = cfg.agentCount;
      description = "Number of agents with 'large=true'";
    };
  };

  config = lib.mkIf cfg.enable {
    # Run the Buildkite agents using the default upstream module.
    services.buildkite-agents = builtins.listToAttrs (
      map
        (n: rec {
          # One agent per slot, named "<hostname>-<n>".
          name = "${hostname}-${toString n}";
          value =
            {
              inherit name;
              enable = true;
              tokenPath = config.age.secretsDir + "/buildkite-agent-token";
              privateSshKeyPath = config.age.secretsDir + "/buildkite-private-key";
              hooks.post-command = "${buildkiteHooks}/bin/post-command";
              tags.queue = "default";
              hooks.environment = ''
                export PATH=$PATH:/run/wrappers/bin
              '';
              tags.hostname = hostname;
              # all agents support small jobs
              tags.small = "true";
              runtimePackages = with pkgs; [
                bash
                coreutils
                credentialHelper
                curl
                git
                gnutar
                gzip
                jq
                nix
              ];
            }
            # Only the first `largeSlots` agents also accept large jobs.
            // (lib.optionalAttrs (n <= cfg.largeSlots) {
              tags.large = "true";
            });
        })
        agents
    );

    # Set up a group for all Buildkite agent users
    users = {
      groups.buildkite-agents = { };
      users = builtins.listToAttrs (
        map
          (n: rec {
            name = "buildkite-agent-${hostname}-${toString n}";
            value = {
              isSystemUser = true;
              # Override the upstream module's per-agent group with one
              # shared group for all agents on this host.
              group = lib.mkForce "buildkite-agents";
              extraGroups = [
                name
                "docker"
              ];
            };
          })
          agents
      );
    };
  };
}

83
ops/modules/stalwart.nix Normal file
View file

@ -0,0 +1,83 @@
# Stalwart is an all-in-one mailserver in Rust.
# https://stalw.art/
{ config, lib, ... }:
let
  inherit (lib) mkOption mkEnableOption mkIf types;
  cfg = config.services.depot.stalwart;
  # Reuse the NixOS-level ACME certificate for the mail domain; fail loudly
  # at evaluation time if it was never requested elsewhere.
  certs = config.security.acme.certs.${cfg.mailDomain} or (throw "NixOS-level ACME was not enabled for `${cfg.mailDomain}`: mailserver cannot autoconfigure!");
  # "ip" -> "ip:port", for building Stalwart listener bind lists.
  mkBind = port: ip: "${ip}:${toString port}";
in
{
  options.services.depot.stalwart = {
    enable = mkEnableOption "Stalwart Mail server";
    # Public addresses the mail listeners bind to.
    listenAddresses = mkOption {
      type = types.listOf types.str;
      default = [
        "49.12.112.149"
        "[2a01:4f8:c013:3e62::2]"
      ];
    };
    mailDomain = mkOption {
      type = types.str;
      description = "The email domain, i.e. the part after @";
      example = "snix.dev";
    };
  };

  config = mkIf cfg.enable {
    # Open only from the listen addresses.
    # NOTE(review): `allowedTCPPorts` opens these ports on all interfaces,
    # not only the listen addresses above, and port 25 has no matching
    # listener defined below — confirm this is intended.
    networking.firewall.allowedTCPPorts = [ 25 587 143 443 ];

    services.stalwart-mail = {
      enable = true;
      settings = {
        # Point Stalwart at the cert material maintained by NixOS ACME.
        certificate.letsencrypt = {
          cert = "file://${certs.directory}/fullchain.pem";
          private-key = "file://${certs.directory}/key.pem";
        };
        server = {
          hostname = cfg.mailDomain;
          # implicit = false: STARTTLS on the plaintext ports rather than
          # implicit-TLS listeners.
          tls = {
            certificate = "letsencrypt";
            enable = true;
            implicit = false;
          };
          listener = {
            # Submission (587), IMAP (143) and the HTTPS management
            # endpoint (443), each bound on every configured address.
            smtp = {
              bind = map (mkBind 587) cfg.listenAddresses;
              protocol = "smtp";
            };
            imap = {
              bind = map (mkBind 143) cfg.listenAddresses;
              protocol = "imap";
            };
            mgmt = {
              bind = map (mkBind 443) cfg.listenAddresses;
              protocol = "https";
            };
          };
        };
        session = {
          rcpt = {
            directory = "in-memory";
            # Allow this server to be used as a relay for authenticated principals.
            relay = [
              { "if" = "!is_empty(authenticated_as)"; "then" = true; }
              { "else" = false; }
            ];
          };
          auth = {
            mechanisms = [ "PLAIN" ];
            directory = "in-memory";
          };
        };
        jmap.directory = "in-memory";
        queue.outbound.next-hop = [ "local" ];
        # Accounts live in Stalwart's in-memory directory backend.
        directory.in-memory = {
          type = "memory";
        };
      };
    };
  };
}

View file

@ -1,95 +0,0 @@
# Configuration for the TVL buildkite agents.
#
# Launches `agentCount` Buildkite agents on this host; the first
# `largeSlots` of them additionally advertise the `large=true` tag.
{ config, depot, pkgs, lib, ... }:
let
  cfg = config.services.depot.buildkite;
  # Agent slot indices: 1..agentCount.
  agents = lib.range 1 cfg.agentCount;
  description = "Buildkite agents for TVL";
  hostname = config.networking.hostName;
  # Wrap besadii under a different argv[0]; besadii dispatches on the
  # name it was invoked as (here: the Buildkite hook name).
  besadiiWithConfig = name: pkgs.writeShellScript "besadii-${hostname}" ''
    export BESADII_CONFIG=/run/agenix/buildkite-besadii-config
    exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
  '';
  # All Buildkite hooks are actually besadii, but it's being invoked
  # with different names.
  buildkiteHooks = pkgs.runCommand "buildkite-hooks" { } ''
    mkdir -p $out/bin
    ln -s ${besadiiWithConfig "post-command"} $out/bin/post-command
  '';
  # Git credential helper handing out the buildkite user's Gerrit password.
  credentialHelper = pkgs.writeShellScriptBin "git-credential-gerrit-creds" ''
    echo 'username=buildkite'
    echo "password=$(jq -r '.gerritPassword' /run/agenix/buildkite-besadii-config)"
  '';
in
{
  options.services.depot.buildkite = {
    enable = lib.mkEnableOption description;
    agentCount = lib.mkOption {
      type = lib.types.int;
      description = "Number of Buildkite agents to launch";
    };
    largeSlots = lib.mkOption {
      type = lib.types.int;
      # By default every agent accepts large jobs.
      default = cfg.agentCount;
      description = "Number of agents with 'large=true'";
    };
  };

  config = lib.mkIf cfg.enable {
    # Run the Buildkite agents using the default upstream module.
    services.buildkite-agents = builtins.listToAttrs (map
      (n: rec {
        # One agent per slot, named "<hostname>-<n>".
        name = "${hostname}-${toString n}";
        value = {
          inherit name;
          enable = true;
          tokenPath = config.age.secretsDir + "/buildkite-agent-token";
          privateSshKeyPath = config.age.secretsDir + "/buildkite-private-key";
          hooks.post-command = "${buildkiteHooks}/bin/post-command";
          hooks.environment = ''
            export PATH=$PATH:/run/wrappers/bin
          '';
          tags.hostname = hostname;
          # all agents support small jobs
          tags.small = "true";
          runtimePackages = with pkgs; [
            bash
            coreutils
            credentialHelper
            curl
            git
            gnutar
            gzip
            jq
            nix
          ];
        # Only the first `largeSlots` agents also accept large jobs.
        } // (lib.optionalAttrs (n <= cfg.largeSlots) {
          tags.large = "true";
        });
      })
      agents);

    # Set up a group for all Buildkite agent users
    users = {
      groups.buildkite-agents = { };
      users = builtins.listToAttrs (map
        (n: rec {
          name = "buildkite-agent-${hostname}-${toString n}";
          value = {
            isSystemUser = true;
            # Shared group for all agents instead of the upstream
            # module's per-agent group.
            group = lib.mkForce "buildkite-agents";
            extraGroups = [ name "docker" ];
          };
        })
        agents);
    };
  };
}

View file

@ -0,0 +1,52 @@
# TLS-terminating reverse proxy for the Keycloak SSO instance serving the
# "snix-project" realm, including URL-rewriting shims for clients that
# hardcode an /auth base path or expect GitLab-style OAuth endpoints.
#
# Fix: dropped the unused `config` module argument (the `...` pattern
# still absorbs it, so the module system interface is unchanged).
{ ... }:
let
  host = "auth.snix.dev";
  realm = "snix-project";
in
{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."${host}" = {
      serverName = host;
      enableACME = true;
      forceSSL = true;
      locations."/" = {
        recommendedProxySettings = true;
        # Keycloak listens locally on port 9091.
        proxyPass = "http://127.0.0.1:9091";
        extraConfig = ''
          proxy_pass_header Authorization;
          proxy_busy_buffers_size 512k;
          proxy_buffers 4 512k;
          proxy_buffer_size 256k;
          # Allow clients with Auth hardcoded to use our base path.
          #
          # XXX: ok so this is horrible. For some reason gerrit explodes if
          # it receives a redirect when doing auth. But we need to redirect
          # the browser to reuse sessions. Thus, user agent scanning.
          if ($http_user_agent ~* "^Java.*$") {
            rewrite ^/auth/(.*)$ /$1 last;
          }
          rewrite ^/auth/(.*)$ /$1 redirect;
          # Hacks to make us compatible with authenticators that expect GitLab's format.
          rewrite ^/realms/${realm}/protocol/openid-connect/api/v4/user$ /realms/${realm}/protocol/openid-connect/userinfo;
          rewrite ^/realms/${realm}/protocol/openid-connect/oauth/authorize$ /realms/${realm}/protocol/openid-connect/auth?scope=openid%20email%20profile;
          rewrite ^/realms/${realm}/protocol/openid-connect/oauth/token$ /realms/${realm}/protocol/openid-connect/token;
        '';
      };
      # Forward our admin address to our default realm.
      locations."= /admin".extraConfig = "return 302 https://${host}/admin/snix-project/console/;";
      locations."= /superadmin".extraConfig = "return 302 https://${host}/admin/master/console/;";
      # Forward our root address to the account management portal.
      locations."= /".extraConfig = "return 302 https://${host}/realms/${realm}/account;";
    };
  };
}

View file

@ -2,9 +2,11 @@
{
config = {
networking.firewall.allowedTCPPorts = [ 80 443 ];
security.acme = {
acceptTerms = true;
defaults.email = "letsencrypt@tvl.su";
defaults.email = "acme@snix.dev";
};
services.nginx = {

View file

@ -8,12 +8,11 @@
config = {
services.nginx.virtualHosts."cl-shortlink" = {
serverName = "cl";
extraConfig = "return 302 https://cl.tvl.fyi$request_uri;";
extraConfig = "return 302 https://cl.snix.dev$request_uri;";
};
services.nginx.virtualHosts.gerrit = {
serverName = "cl.tvl.fyi";
serverAliases = [ "cl.tvl.su" ];
serverName = "cl.snix.dev";
enableACME = true;
forceSSL = true;
@ -21,7 +20,7 @@
location / {
proxy_pass http://localhost:4778;
proxy_set_header X-Forwarded-For $remote_addr;
# The :443 suffix is a workaround for https://b.tvl.fyi/issues/88.
# The :443 suffix is a workaround for https://b.snix.dev/issues/88.
proxy_set_header Host $host:443;
}

View file

@ -1,82 +0,0 @@
# nginx vhost for the TVL code browser: cgit for the web view, josh for
# Git clone/fetch traffic, plus static go-import metadata so `go get`
# can resolve the tvix Go module paths.
{ depot, pkgs, config, ... }:
{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts.cgit = {
      serverName = "code.tvl.fyi";
      serverAliases = [ "code.tvl.su" ];
      enableACME = true;
      forceSSL = true;
      extraConfig = ''
        if ($http_user_agent ~ (ClaudeBot|GPTBot|Amazonbot)) {
          return 403;
        }
        location = /go-get/tvix/build-go {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/build-go git https://code.tvl.fyi/depot.git:/tvix/build-go.git"></html>''};
        }
        location = /go-get/tvix/castore-go {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/castore-go git https://code.tvl.fyi/depot.git:/tvix/castore-go.git"></html>''};
        }
        location = /go-get/tvix/store-go {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/store-go git https://code.tvl.fyi/depot.git:/tvix/store-go.git"></html>''};
        }
        location = /go-get/tvix/nar-bridge {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/nar-bridge git https://code.tvl.fyi/depot.git:/tvix/nar-bridge.git"></html>''};
        }
        location = /tvix/build-go {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/build-go;
          }
        }
        location = /tvix/castore-go {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/castore-go;
          }
        }
        location = /tvix/store-go {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/store-go;
          }
        }
        location = /tvix/nar-bridge {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/nar-bridge;
          }
        }
        # Git operations on depot.git hit josh
        location /depot.git {
          proxy_pass http://127.0.0.1:${toString config.services.depot.josh.port};
        }
        # Git clone operations on '/' should be redirected to josh now.
        location = /info/refs {
          return 302 https://code.tvl.fyi/depot.git/info/refs$is_args$args;
        }
        # Static assets must always hit the root.
        location ~ ^/(favicon\.ico|cgit\.(css|png))$ {
          proxy_pass http://localhost:2448;
        }
        # Everything else is forwarded to cgit for the web view
        location / {
          proxy_pass http://localhost:2448/cgit.cgi/depot/;
        }
      '';
    };
  };
}

View file

@ -0,0 +1,26 @@
# TLS-terminating reverse proxy in front of the local Forgejo instance
# at git.snix.dev.
{ ... }:
let
  # Forgejo's local HTTP listener.
  forgejoBackend = "http://127.0.0.1:3000";
in
{
  imports = [ ./base.nix ];

  config = {
    services.nginx.virtualHosts.forgejo = {
      serverName = "git.snix.dev";
      forceSSL = true;
      enableACME = true;
      locations."/" = {
        proxyPass = forgejoBackend;
        extraConfig = ''
          proxy_ssl_server_name on;
          proxy_pass_header Authorization;
          # This has to be sufficiently large for uploading layers of
          # non-broken docker images.
          client_max_body_size 1G;
        '';
      };
    };
  };
}

View file

@ -0,0 +1,22 @@
# TLS ingress for Loki: exposes only the log push endpoint, protected by
# basic auth, for remote log shippers.
{ config, ... }:
let
  # Loki's local HTTP listen port, taken from its own configuration.
  lokiPort = config.services.loki.configuration.server.http_listen_port;
in
{
  imports = [ ./base.nix ];

  services.nginx = {
    upstreams.loki = {
      servers."127.0.0.1:${toString lokiPort}" = { };
      # Keep idle connections to the upstream alive between requests.
      extraConfig = "keepalive 16;";
    };

    virtualHosts."loki.snix.dev" = {
      forceSSL = true;
      enableACME = true;
      locations."/loki/api/v1/push" = {
        proxyPass = "http://loki";
        basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
      };
    };
  };
}

View file

@ -0,0 +1,25 @@
# ACME-only nginx vhost for mail.snix.dev.
#
# Fix: dropped the unused `config` module argument (the `...` pattern
# still absorbs it, so the module system interface is unchanged).
{ ... }:
{
  imports = [
    ./base.nix
  ];

  config = {
    # Listen on a special IPv4 & IPv6 specialized for mail.
    # This NGINX has only one role: obtain TLS/SSL certificates for the mailserver.
    # All the TLS, IMAP, SMTP stuff is handled directly by the mailserver runtime.
    # This is why you will not see any `stream { }` block here.
    services.nginx.virtualHosts.stalwart = {
      serverName = "mail.snix.dev";
      enableACME = true;
      forceSSL = true;
      listenAddresses = [
        "127.0.0.2"
        "49.12.112.149"
        "[2a01:4f8:c013:3e62::2]"
      ];
    };
  };
}

View file

@ -0,0 +1,24 @@
# TLS ingress for Mimir: exposes only the remote-write push endpoint,
# protected by basic auth, for metric shippers.
{ config, ... }:
{
  imports = [ ./base.nix ];

  services.nginx = {
    upstreams.mimir = {
      # Mimir's local HTTP listen port, read from its own configuration.
      servers."127.0.0.1:${toString config.services.mimir.configuration.server.http_listen_port}" = { };
      # Keep idle connections to the upstream alive between requests.
      extraConfig = "keepalive 16;";
    };

    virtualHosts."mimir.snix.dev" = {
      forceSSL = true;
      enableACME = true;
      locations."/api/v1/push" = {
        proxyPass = "http://mimir";
        basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
      };
    };
  };
}

View file

@ -0,0 +1,25 @@
# TLS-terminating reverse proxy in front of the local Grafana instance;
# the vhost name and upstream address come from Grafana's own settings.
{ config, ... }:
let
  grafanaServer = config.services.grafana.settings.server;
in
{
  imports = [
    ./base.nix
  ];

  config.services.nginx = {
    enable = true;
    virtualHosts."${grafanaServer.domain}" = {
      forceSSL = true;
      enableACME = true;
      locations."/" = {
        # Proxy websockets as well (used by Grafana's live endpoints).
        proxyWebsockets = true;
        proxyPass = "http://${grafanaServer.http_addr}:${toString grafanaServer.http_port}";
      };
    };
  };
}

View file

@ -1,25 +0,0 @@
# nginx vhosts for the TVL Grafana ("status") instance: a redirect from
# the legacy status.tvl.fyi name plus the TLS proxy on status.tvl.su.
{ config, ... }:
{
  imports = [
    ./base.nix
  ];

  config = {
    # Redirect (302) the old hostname to the canonical one.
    services.nginx.virtualHosts."status-fyi" = {
      serverName = "status.tvl.fyi";
      enableACME = true;
      extraConfig = "return 302 https://status.tvl.su$request_uri;";
    };

    services.nginx.virtualHosts.grafana = {
      serverName = "status.tvl.su";
      enableACME = true;
      forceSSL = true;
      locations."/" = {
        # Grafana's HTTP port comes from its own settings.
        proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
      };
    };
  };
}

View file

@ -0,0 +1,22 @@
# TLS ingress for Grafana Tempo: proxies to the OTLP/HTTP receiver,
# protected by basic auth, for remote trace shippers.
{ config, ... }:
let
  # Tempo's OTLP/HTTP receiver endpoint ("host:port") from its settings.
  otlpHttpEndpoint = config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint;
in
{
  imports = [ ./base.nix ];

  services.nginx = {
    upstreams.tempo = {
      servers."${otlpHttpEndpoint}" = { };
      # Keep idle connections to the upstream alive between requests.
      extraConfig = "keepalive 16;";
    };

    virtualHosts."tempo.snix.dev" = {
      forceSSL = true;
      enableACME = true;
      locations."/" = {
        proxyPass = "http://tempo";
        basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
      };
    };
  };
}