chore(*): drop everything that is not required for Tvix
Co-Authored-By: edef <edef@edef.eu>
Co-Authored-By: Ryan Lahfa <raito@lix.systems>
Change-Id: I9817214c3122e49d694c5e41818622a08d9dfe45
parent bd91cac1f3
commit df4500ea2b
2905 changed files with 34 additions and 493328 deletions
@@ -1,55 +0,0 @@
# Configuration for running the TVL cgit instance using thttpd.
{ config, depot, lib, pkgs, ... }:

let
  cfg = config.services.depot.cgit;

  userConfig =
    if builtins.isNull cfg.user then {
      DynamicUser = true;
    } else {
      User = cfg.user;
      Group = cfg.user;
    };
in
{
  options.services.depot.cgit = with lib; {
    enable = mkEnableOption "Run cgit web interface for depot";

    port = mkOption {
      description = "Port on which cgit should listen";
      type = types.int;
      default = 2448;
    };

    repo = mkOption {
      description = "Path to depot's .git folder on the machine";
      type = types.str;
      default = "/var/lib/gerrit/git/depot.git/";
    };

    user = mkOption {
      description = ''
        User to use for the cgit service. It is expected that this is
        also the name of the user's primary group.
      '';

      type = with types; nullOr str;
      default = null;
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.cgit = {
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        Restart = "on-failure";

        ExecStart = depot.web.cgit-tvl.override {
          inherit (cfg) port repo;
        };
      } // userConfig;
    };
  };
}
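For context, hosts consumed the module above through its services.depot.cgit options. A minimal sketch of such a host configuration (the user value here is hypothetical; leaving it at the default null selects DynamicUser instead):

{
  services.depot.cgit = {
    enable = true;
    # Hypothetical fixed service user; with the default (null),
    # userConfig above falls back to DynamicUser = true.
    user = "git";
  };
}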
@@ -1,29 +0,0 @@
{ depot, config, pkgs, lib, ... }:

let
  cfg = config.services.depot.cheddar;
  description = "cheddar - markdown/highlighting server";
in
{
  options.services.depot.cheddar = with lib; {
    enable = mkEnableOption description;
    port = mkOption {
      description = "Port on which cheddar should listen";
      type = types.int;
      default = 4238;
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.cheddar-server = {
      inherit description;
      wantedBy = [ "multi-user.target" ];
      script = "${depot.tools.cheddar}/bin/cheddar --listen 0.0.0.0:${toString cfg.port} --sourcegraph-server";

      serviceConfig = {
        DynamicUser = true;
        Restart = "always";
      };
    };
  };
}
@@ -9,6 +9,5 @@
  imports = [
    ./automatic-gc.nix
    ./auto-deploy.nix
    ./tvl-cache.nix
  ];
}
@@ -1,148 +0,0 @@
# public-inbox configuration for depot@tvl.su
#
# The account itself is a Yandex 360 account in the tvl.su organisation, which
# is accessed via IMAP. Yandex takes care of spam filtering for us, so there is
# no particular SpamAssassin or other configuration.
{ config, depot, lib, pkgs, ... }:

let
  cfg = config.services.depot.inbox;

  imapConfig = pkgs.writeText "offlineimaprc" ''
    [general]
    accounts = depot

    [Account depot]
    localrepository = Local
    remoterepository = Remote

    [Repository Local]
    type = Maildir
    localfolders = /var/lib/public-inbox/depot-imap

    [Repository Remote]
    type = IMAP
    ssl = yes
    sslcacertfile = /etc/ssl/certs/ca-bundle.crt
    remotehost = imap.yandex.ru
    remoteuser = depot@tvl.su
    remotepassfile = /var/run/agenix/depot-inbox-imap
  '';
in
{
  options.services.depot.inbox = with lib; {
    enable = mkEnableOption "Enable public-inbox for depot@tvl.su";

    depotPath = mkOption {
      description = "path to local depot replica";
      type = types.str;
      default = "/var/lib/depot";
    };
  };

  config = lib.mkIf cfg.enable {
    # Having nginx *and* other services use ACME certificates for the
    # same hostname is unsupported in NixOS without resorting to doing
    # all ACME configuration manually.
    #
    # To work around this, we duplicate the TLS certificate used by
    # nginx to a location that is readable by public-inbox daemons.
    systemd.services.inbox-cert-sync = {
      startAt = "daily";

      script = ''
        ${pkgs.coreutils}/bin/install -D -g ${config.users.groups."public-inbox".name} -m 0440 \
          /var/lib/acme/inbox.tvl.su/fullchain.pem /var/lib/public-inbox/tls/fullchain.pem

        ${pkgs.coreutils}/bin/install -D -g ${config.users.groups."public-inbox".name} -m 0440 \
          /var/lib/acme/inbox.tvl.su/key.pem /var/lib/public-inbox/tls/key.pem
      '';
    };

    services.public-inbox = {
      enable = true;

      http.enable = true;
      http.port = 8053;

      imap = {
        enable = true;
        port = 993;
        cert = "/var/lib/public-inbox/tls/fullchain.pem";
        key = "/var/lib/public-inbox/tls/key.pem";
      };

      nntp = {
        enable = true;
        port = 563;
        cert = "/var/lib/public-inbox/tls/fullchain.pem";
        key = "/var/lib/public-inbox/tls/key.pem";
      };

      inboxes.depot = rec {
        address = [
          "depot@tvl.su" # primary address
          "depot@tazj.in" # legacy address
        ];

        description = "TVL depot development (mail to depot@tvl.su)";
        coderepo = [ "depot" ];
        url = "https://inbox.tvl.su/depot";

        watch = [
          "maildir:/var/lib/public-inbox/depot-imap/INBOX/"
        ];

        newsgroup = "su.tvl.depot";
      };

      settings.coderepo.depot = {
        dir = cfg.depotPath;
        cgitUrl = "https://code.tvl.fyi";
      };

      settings.publicinbox = {
        wwwlisting = "all";
        nntpserver = [ "inbox.tvl.su" ];
        imapserver = [ "inbox.tvl.su" ];

        depot.obfuscate = true;
        noObfuscate = [
          "tvl.su"
          "tvl.fyi"
        ];
      };
    };

    networking.firewall.allowedTCPPorts = [
      993 # imap
      563 # nntp
    ];

    age.secrets.depot-inbox-imap = {
      file = depot.ops.secrets."depot-inbox-imap.age";
      mode = "0440";
      group = config.users.groups."public-inbox".name;
    };

    systemd.services.offlineimap-depot = {
      description = "download mail for depot@tvl.su";
      wantedBy = [ "multi-user.target" ];
      startAt = "minutely";

      script = ''
        mkdir -p /var/lib/public-inbox/depot-imap
        ${pkgs.offlineimap}/bin/offlineimap -c ${imapConfig}
      '';

      serviceConfig = {
        Type = "oneshot";

        # Run in the same user context as public-inbox itself to avoid
        # permissions trouble.
        User = config.users.users."public-inbox".name;
        Group = config.users.groups."public-inbox".name;
      };
    };
  };
}
@@ -1,45 +0,0 @@
# Configuration for receiving a depot replica from Gerrit's
# replication plugin.
#
# This only prepares the user and folder for receiving the replica,
# but Gerrit configuration still needs to be modified in addition.
{ config, depot, lib, pkgs, ... }:

let
  cfg = config.services.depot.replica;
in
{
  options.services.depot.replica = with lib; {
    enable = mkEnableOption "Receive depot git replica from Gerrit";

    key = mkOption {
      description = "Public key to use for replication";
      type = types.str;
      default = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFFab9O1xaQ1TCyn+CxmXHexdlLzURREG+UR3Qdi3BvH";
    };

    path = mkOption {
      description = "Replication destination path (will be created)";
      type = types.str;
      default = "/var/lib/depot";
    };
  };

  config = lib.mkIf cfg.enable {
    users.groups.depot = { };

    users.users.depot = {
      group = "depot";
      isSystemUser = true;
      createHome = true;
      home = cfg.path;
      homeMode = "755"; # everyone can read depot
      openssh.authorizedKeys.keys = lib.singleton cfg.key;
      shell = pkgs.bashInteractive; # gerrit needs to run shell commands
    };

    environment.systemPackages = [
      pkgs.git
    ];
  };
}
@@ -1,26 +0,0 @@
# Configures journaldriver to forward to the tvl-fyi GCP project from
# TVL machines.
{ config, depot, lib, pkgs, ... }:

{
  imports = [
    (depot.third_party.agenix.src + "/modules/age.nix")
  ];

  age.secrets.journaldriver.file = depot.ops.secrets."journaldriver.age";

  services.journaldriver = {
    enable = true;
    googleCloudProject = "tvl-fyi";
    logStream = config.networking.hostName;
  };

  # Override the systemd service defined in the nixpkgs module to use
  # the credentials provided by agenix.
  systemd.services.journaldriver = {
    serviceConfig = {
      LoadCredential = "journaldriver.json:/run/agenix/journaldriver";
      ExecStart = lib.mkForce "${pkgs.coreutils}/bin/env GOOGLE_APPLICATION_CREDENTIALS=\"\${CREDENTIALS_DIRECTORY}/journaldriver.json\" ${depot.ops.journaldriver}/bin/journaldriver";
    };
  };
}
@@ -3,21 +3,6 @@

{
  programs.ssh.knownHosts = {
    sanduny = {
      hostNames = [ "sanduny.tvl.su" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOag0XhylaTVhmT6HB8EN2Fv5Ymrc4ZfypOXONUkykTX";
    };

    bugry = {
      hostNames = [ "bugry.tvl.fyi" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGqG6sITyJ/UsQ/RtYqmmMvTT4r4sppadoQIz5SvA+5J";
    };

    nevsky = {
      hostNames = [ "nevsky.tvl.fyi" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHQe7M+G8Id3ZD7j+I07TCUV1o12q1vpsOXHRlcPSEfa";
    };

    github = {
      hostNames = [ "github.com" ];
      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl";
@@ -1,44 +0,0 @@
# NixOS module to run Nixery, currently with local-storage as the
# backend for storing/serving image layers.
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.nixery;
  description = "Nixery - container images on-demand";
  nixpkgsSrc = depot.third_party.sources.nixpkgs;
  storagePath = "/var/lib/nixery/${nixpkgsSrc.rev}";
in
{
  options.services.depot.nixery = {
    enable = lib.mkEnableOption description;

    port = lib.mkOption {
      type = lib.types.int;
      default = 45243; # "image"
      description = "Port on which Nixery should listen";
    };
  };

  config = lib.mkIf cfg.enable {
    systemd.services.nixery = {
      inherit description;
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        DynamicUser = true;
        StateDirectory = "nixery";
        Restart = "always";
        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p ${storagePath}";
        ExecStart = "${depot.tools.nixery.nixery}/bin/server";
      };

      environment = {
        PORT = toString cfg.port;
        NIXERY_PKGS_PATH = nixpkgsSrc.outPath;
        NIXERY_STORAGE_BACKEND = "filesystem";
        NIX_TIMEOUT = "60"; # seconds
        STORAGE_PATH = storagePath;
      };
    };
  };
}
@@ -1,119 +0,0 @@
{ depot, config, lib, pkgs, ... }:

let
  cfg = config.services.depot.panettone;
in
{
  options.services.depot.panettone = with lib; {
    enable = mkEnableOption "Panettone issue tracker";

    port = mkOption {
      description = "Port on which Panettone should listen";
      type = types.int;
      default = 7268;
    };

    dbHost = mkOption {
      description = "Postgresql host to connect to for Panettone";
      type = types.str;
      default = "localhost";
    };

    dbName = mkOption {
      description = "Name of the database for Panettone";
      type = types.str;
      default = "panettone";
    };

    dbUser = mkOption {
      description = "Name of the database user for Panettone";
      type = types.str;
      default = "panettone";
    };

    secretsFile = mkOption {
      description = ''
        Path to a file containing secrets, in the format accepted
        by systemd's EnvironmentFile
      '';
      type = types.str;
      default = config.age.secretsDir + "/panettone";
    };

    irccatHost = mkOption {
      description = "Hostname for the irccat instance";
      type = types.str;
      default = "localhost";
    };

    irccatPort = mkOption {
      description = "Port for the irccat instance";
      type = types.int;
      default = 4722;
    };

    irccatChannel = mkOption {
      description = "IRC channels to post to via irccat";
      type = types.str;
    };
  };

  config = lib.mkIf cfg.enable {
    assertions = [{
      assertion =
        cfg.dbHost != "localhost" || config.services.postgresql.enable;
      message = "Panettone requires a postgresql database";
    }
      {
        assertion =
          cfg.dbHost != "localhost" || config.services.postgresql.enableTCPIP;
        message = "Panettone can only connect to the postgresql database over TCP";
      }
      {
        assertion =
          cfg.dbHost != "localhost" || (lib.any
            (user: user.name == cfg.dbUser)
            config.services.postgresql.ensureUsers);
        message = "Panettone requires a database user";
      }
      {
        assertion =
          cfg.dbHost != "localhost" || (lib.any
            (db: db == cfg.dbName)
            config.services.postgresql.ensureDatabases);
        message = "Panettone requires a database";
      }];

    systemd.services.panettone = {
      wantedBy = [ "multi-user.target" ];
      script = "${depot.web.panettone}/bin/panettone";

      serviceConfig = {
        DynamicUser = true;
        Restart = "always";
        EnvironmentFile = cfg.secretsFile;
      };

      environment = {
        PANETTONE_PORT = toString cfg.port;
        PGHOST = "localhost";
        PGUSER = cfg.dbUser;
        PGDATABASE = cfg.dbName;
        IRCCATHOST = cfg.irccatHost;
        IRCCATPORT = toString cfg.irccatPort;
        ISSUECHANNEL = cfg.irccatChannel;
      };
    };

    systemd.services.panettone-fixer = {
      description = "Restart panettone regularly to work around b/225";
      wantedBy = [ "multi-user.target" ];
      script = "${pkgs.systemd}/bin/systemctl restart panettone";
      serviceConfig.Type = "oneshot";

      # We don't exactly know how frequently this occurs, but
      # _probably_ not more than hourly.
      startAt = "hourly";
    };
  };
}
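The four assertions above only bite when dbHost stays at its "localhost" default, in which case the host must also run a matching local PostgreSQL. A hedged sketch of a host configuration that satisfies them (the channel name is hypothetical; the option has no default and must be set):

{
  services.depot.panettone = {
    enable = true;
    irccatChannel = "#tvl"; # hypothetical value
  };

  # Satisfies the module's assertions for the default dbHost = "localhost":
  services.postgresql = {
    enable = true;
    enableTCPIP = true;
    ensureDatabases = [ "panettone" ];
    ensureUsers = [{ name = "panettone"; }];
  };
}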
@@ -1,81 +0,0 @@
# Configures an OpenLDAP instance for TVL
#
# TODO(tazjin): Configure ldaps://
{ depot, lib, pkgs, ... }:

with depot.nix.yants;

let
  user = struct {
    username = string;
    email = string;
    password = string;
    displayName = option string;
  };

  toLdif = defun [ user string ] (u: ''
    dn: cn=${u.username},ou=users,dc=tvl,dc=fyi
    objectClass: organizationalPerson
    objectClass: inetOrgPerson
    sn: ${u.username}
    cn: ${u.username}
    displayName: ${u.displayName or u.username}
    mail: ${u.email}
    userPassword: ${u.password}
  '');

  inherit (depot.ops) users;

in
{
  services.openldap = {
    enable = true;

    settings.children = {
      "olcDatabase={1}mdb".attrs = {
        objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
        olcDatabase = "{1}mdb";
        olcDbDirectory = "/var/lib/openldap/db";
        olcSuffix = "dc=tvl,dc=fyi";
        olcAccess = "to * by * read";
        olcRootDN = "cn=admin,dc=tvl,dc=fyi";
        olcRootPW = "{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$OfcgkOQ96VQ3aJj7NfA9vQ$oS6HQOkYl/bUYg4SejpltQYy7kvqx/RUxvoR4zo1vXU";
      };

      "cn=module{0}".attrs = {
        objectClass = "olcModuleList";
        olcModuleLoad = "argon2";
      };

      "cn=schema".includes =
        map (schema: "${pkgs.openldap}/etc/schema/${schema}.ldif")
          [ "core" "cosine" "inetorgperson" "nis" ];
    };

    # Contents are immutable at runtime, and adding user accounts etc.
    # is done statically in the LDIF-formatted contents in this folder.
    declarativeContents."dc=tvl,dc=fyi" = ''
      dn: dc=tvl,dc=fyi
      dc: tvl
      o: TVL LDAP server
      description: Root entry for tvl.fyi
      objectClass: top
      objectClass: dcObject
      objectClass: organization

      dn: ou=users,dc=tvl,dc=fyi
      ou: users
      description: All users in TVL
      objectClass: top
      objectClass: organizationalUnit

      dn: ou=groups,dc=tvl,dc=fyi
      ou: groups
      description: All groups in TVL
      objectClass: top
      objectClass: organizationalUnit

      ${lib.concatStringsSep "\n" (map toLdif users)}
    '';
  };
}
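As an illustration of toLdif: for a hypothetical user record { username = "alice"; email = "alice@example.com"; password = "{ARGON2}…"; } with no displayName (so the `or` fallback to the username applies), the function renders an entry like:

dn: cn=alice,ou=users,dc=tvl,dc=fyi
objectClass: organizationalPerson
objectClass: inetOrgPerson
sn: alice
cn: alice
displayName: alice
mail: alice@example.com
userPassword: {ARGON2}…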
@@ -1,83 +0,0 @@
# Standard NixOS users for TVL machines, as well as configuration that
# should follow along when they are added to a machine.
{ depot, pkgs, ... }:

{
  users = {
    users.tazjin = {
      isNormalUser = true;
      extraGroups = [ "git" "wheel" ];
      shell = pkgs.fish;
      openssh.authorizedKeys.keys = depot.users.tazjin.keys.all;
    };

    users.lukegb = {
      isNormalUser = true;
      extraGroups = [ "git" "wheel" ];
      openssh.authorizedKeys.keys = depot.users.lukegb.keys.all;
    };

    users.aspen = {
      isNormalUser = true;
      extraGroups = [ "git" "wheel" ];
      openssh.authorizedKeys.keys = [ depot.users.aspen.keys.whitby ];
    };

    users.edef = {
      isNormalUser = true;
      extraGroups = [ "git" ];
      openssh.authorizedKeys.keys = depot.users.edef.keys.all;
    };

    users.qyliss = {
      isNormalUser = true;
      description = "Alyssa Ross";
      extraGroups = [ "git" ];
      openssh.authorizedKeys.keys = depot.users.qyliss.keys.all;
    };

    users.eta = {
      isNormalUser = true;
      extraGroups = [ "git" ];
      openssh.authorizedKeys.keys = depot.users.eta.keys.whitby;
    };

    users.cynthia = {
      isNormalUser = true; # I'm normal OwO :3
      extraGroups = [ "git" ];
      openssh.authorizedKeys.keys = depot.users.cynthia.keys.all;
    };

    users.firefly = {
      isNormalUser = true;
      extraGroups = [ "git" ];
      openssh.authorizedKeys.keys = depot.users.firefly.keys.whitby;
    };

    users.sterni = {
      isNormalUser = true;
      extraGroups = [ "git" "wheel" ];
      openssh.authorizedKeys.keys = depot.users.sterni.keys.all;
    };

    users.flokli = {
      isNormalUser = true;
      extraGroups = [ "git" "wheel" ];
      openssh.authorizedKeys.keys = depot.users.flokli.keys.all;
    };
  };

  programs.fish.enable = true;

  environment.systemPackages = with pkgs; [
    alacritty.terminfo
    foot.terminfo
    rxvt-unicode-unwrapped.terminfo
    kitty.terminfo
  ];

  security.sudo.extraRules = [{
    groups = [ "wheel" ];
    commands = [{ command = "ALL"; options = [ "NOPASSWD" ]; }];
  }];
}
@@ -1,33 +0,0 @@
# Serve atward, the query redirection ... thing.
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    # Short link support (i.e. plain http://at) for users with a
    # configured tvl.fyi/tvl.su search domain.
    services.nginx.virtualHosts."at-shortlink" = {
      serverName = "at";
      extraConfig = "return 302 https://atward.tvl.fyi$request_uri;";
    };

    services.nginx.virtualHosts."atward" = {
      serverName = "atward.tvl.fyi";
      enableACME = true;
      forceSSL = true;

      serverAliases = [
        "atward.tvl.su"
        "at.tvl.fyi"
        "at.tvl.su"
      ];

      locations."/" = {
        proxyPass = "http://localhost:${toString config.services.depot.atward.port}";
      };
    };
  };
}
@@ -1,28 +0,0 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."auth.tvl.fyi" = {
      serverName = "auth.tvl.fyi";
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        # increase buffer size for large headers
        proxy_buffers 8 16k;
        proxy_buffer_size 16k;

        location / {
          proxy_pass http://localhost:${toString config.services.keycloak.settings.http-port};
          proxy_set_header X-Forwarded-For $remote_addr;
          proxy_set_header X-Forwarded-Proto https;
          proxy_set_header Host $host;
        }
      '';
    };
  };
}
@@ -1,32 +0,0 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."b-shortlink" = {
      serverName = "b";
      extraConfig = "return 302 https://b.tvl.fyi$request_uri;";
    };

    services.nginx.virtualHosts."b.tvl.fyi" = {
      serverName = "b.tvl.fyi";
      serverAliases = [ "b.tvl.su" ];
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        # Forward short links to issues to the issue itself (b/32)
        location ~ ^/(\d+)$ {
          return 302 https://b.tvl.fyi/issues$request_uri;
        }

        location / {
          proxy_pass http://localhost:${toString config.services.depot.panettone.port};
        }
      '';
    };
  };
}
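Illustrative effect of the numeric location block above: a request for https://b.tvl.fyi/32 (the b/32 shorthand used in commit messages) is answered with a 302 to https://b.tvl.fyi/issues/32, while every other path is proxied straight to Panettone.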
@@ -1,55 +0,0 @@
# Publicly serve builderball cache. This is an experimental setup, and separate
# from the "normal" harmonia cache on cache.tvl.su.
{ config, ... }:

let
  # This attrset forms a linked list of hosts, which delegate ACME fallbacks to
  # each other. These *must* form a circle, otherwise we may end up walking only
  # part of the ring.
  #
  # TODO: remove whitby from here, it is gone; leaving this code for now for
  # easier discovery when reconfiguring this.
  acmeFallback = host: ({
    whitby = "nevsky.cache.tvl.fyi";
    nevsky = "whitby.cache.tvl.fyi"; # GOTO 1
  })."${host}";
in
{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."cache.tvl.fyi" = {
      serverName = "cache.tvl.fyi";
      enableACME = true;
      forceSSL = true;

      # This enables fetching TLS certificates for the same domain on different
      # hosts. This config is kind of messy; it would be nice to generate a
      # correct ring from the depot fixpoint, but this may be impossible due to
      # infinite recursion. Please read the comment on `acmeFallback` above.
      #
      # TODO: whitby is gone, this is not needed at the moment
      # acmeFallbackHost = acmeFallback config.networking.hostName;

      extraConfig = ''
        location = /cache-key.pub {
          alias /run/agenix/nix-cache-pub;
        }

        location = / {
          proxy_pass http://${config.services.depot.harmonia.settings.bind};
        }

        location / {
          proxy_pass http://localhost:${toString config.services.depot.builderball.port};
        }
      '';
    };

    # participating hosts should use their local cache, otherwise they might end
    # up querying themselves from afar for data they don't have.
    networking.extraHosts = "127.0.0.1 cache.tvl.fyi";
  };
}
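Read concretely, the two-entry ring above means whitby would delegate ACME fallback to nevsky.cache.tvl.fyi and nevsky back to whitby.cache.tvl.fyi (hence the "GOTO 1" joke): because a certificate request may land on any host, the mapping must form a closed cycle or some hosts' fallbacks would never be reachable.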
@@ -1,25 +0,0 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."cache.tvl.su" = {
      serverName = "cache.tvl.su";
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        location = /cache-key.pub {
          alias /run/agenix/nix-cache-pub;
        }

        location / {
          proxy_pass http://${config.services.depot.harmonia.settings.bind};
        }
      '';
    };
  };
}
@@ -1,66 +0,0 @@
# This configuration redirects from the previous Sourcegraph instance to
# livegrep/cgit where appropriate.
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."cs.tvl.fyi" = {
      serverName = "cs.tvl.fyi";
      serverAliases = [ "cs.tvl.su" ];
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        set $lineno "";

        # depot root
        location = /depot {
          return 301 https://code.tvl.fyi/tree/;
        }

        # folder/file on canon
        location ~ ^/depot/-/(blob|tree)/([^\s]*)$ {
          set $path $2;
          if ($args ~ ^L(\d+)(-\d+)?$) {
            set $lineno "#n$1";
          }

          return 302 https://code.tvl.fyi/tree/$path$lineno;
        }

        # folder/file on specific commit
        location ~ ^/depot@([a-f0-9]+)/-/(blob|tree)/([^\s]*)$ {
          set $commit $1;
          set $path $3;

          if ($args ~ ^L(\d+)(-\d+)?$) {
            set $lineno "#n$1";
          }

          return 302 https://code.tvl.fyi/tree/$path?id=$commit$lineno;
        }

        # commit info
        location ~ ^/depot/-/commit/([a-f0-9]+)$ {
          set $commit $1;
          return 302 https://code.tvl.fyi/commit/?id=$commit;
        }

        # search handler
        # This only redirects to the new search, it doesn't try to parse and
        # rewrite the query.
        location /search {
          return 302 https://grep.tvl.fyi/search;
        }

        location / {
          return 404 "TVL code search has moved to grep.tvl.fyi and we could not figure out how to rewrite your query. Sorry!";
        }
      '';
    };
  };
}
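To make the regex handlers above concrete, here is how some hypothetical Sourcegraph-style URLs would be rewritten (paths and commit hash invented for illustration):

/depot/-/blob/foo/bar.nix?L42  ->  302 https://code.tvl.fyi/tree/foo/bar.nix#n42
/depot@abc123/-/tree/foo       ->  302 https://code.tvl.fyi/tree/foo?id=abc123
/depot/-/commit/abc123         ->  302 https://code.tvl.fyi/commit/?id=abc123
/search?q=anything             ->  302 https://grep.tvl.fyi/search (query string dropped)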
@@ -1,22 +0,0 @@
{ pkgs, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    # Ensure the directory for deployment diffs exists.
    systemd.tmpfiles.rules = [
      "d /var/html/deploys.tvl.fyi/diff 0755 nginx nginx -"
    ];

    services.nginx.virtualHosts."deploys.tvl.fyi" = {
      enableACME = true;
      forceSSL = true;
      root = "/var/html/deploys.tvl.fyi";
    };

    services.depot.restic.paths = [ "/var/html/deploys.tvl.fyi" ];
  };
}
@@ -1,19 +0,0 @@
# Experimental configuration for manually running Livegrep.
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."grep.tvl.fyi" = {
      enableACME = true;
      forceSSL = true;

      locations."/" = {
        proxyPass = "http://127.0.0.1:${toString config.services.depot.livegrep.port}";
      };
    };
  };
}
@@ -1,31 +0,0 @@
{ config, depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."inbox.tvl.su" = {
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        # nginx is incapable of serving a single file at /, hence this hack:
        location = / {
          index /landing-page;
        }

        location = /landing-page {
          types { } default_type "text/html; charset=utf-8";
          alias ${depot.web.inbox};
        }

        # rest of requests is proxied to public-inbox-httpd
        location / {
          proxy_pass http://localhost:${toString config.services.public-inbox.http.port};
        }
      '';
    };
  };
}
@@ -1,26 +0,0 @@
# per-host addresses for publicly reachable caches, for use with builderball
# TODO(tazjin): merge with the public cache module; but needs ACME fixes
{ config, lib, ... }:

{
  imports = [
    ./base.nix
  ];

  config = lib.mkIf config.services.depot.harmonia.enable {
    services.nginx.virtualHosts."${config.networking.hostName}.cache.tvl.fyi" = {
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        location = /cache-key.pub {
          alias /run/agenix/nix-cache-pub;
        }

        location / {
          proxy_pass http://${config.services.depot.harmonia.settings.bind};
        }
      '';
    };
  };
}
@@ -1,19 +0,0 @@
{ depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."signup.tvl.fyi" = {
      root = depot.web.pwcrypt;
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
      '';
    };
  };
}
@@ -1,42 +0,0 @@
# Host the static assets at static.tvl.fyi
#
# All assets are served from $base/$drvhash/$file, but can also be
# included with `latest/` which will return a (non-permanent!)
# redirect to the real location.
#
# For all purposes within depot, using the drvhash of web.static is
# recommended.
{ depot, pkgs, ... }:

let staticHash = depot.web.static.drvHash;
in {
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."static.tvl.fyi" = {
      serverAliases = [ "static.tvl.su" ];
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        location = / {
          add_header Content-Type text/plain;
          return 200 "looking for tvl.fyi or tvl.su?";
        }

        location /latest {
          rewrite ^/latest/(.*) /${staticHash}/$1 redirect;
        }

        location /${staticHash}/ {
          alias ${depot.web.static}/;
          expires max;
          add_header Access-Control-Allow-Origin "*";
          add_header Cache-Control "public";
        }
      '';
    };
  };
}
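Illustrative request flow for the rules above (file name hypothetical): GET /latest/logo.png receives a temporary redirect to /<drvHash of web.static>/logo.png, and the hashed path itself is served with `expires max`, so only the `latest/` indirection ever needs revalidation while the content-addressed paths cache forever.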
@@ -1,25 +0,0 @@
{ depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."todo.tvl.fyi" = {
      serverName = "todo.tvl.fyi";
      serverAliases = [ "todo.tvl.su" ];
      root = depot.web.todolist;
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;

        location ~* \.(webp|woff2)$ {
          add_header Cache-Control "public, max-age=31536000";
        }
      '';
    };
  };
}
@@ -1,46 +0,0 @@
{ depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."tvix.dev" = {
      serverName = "tvix.dev";
      enableACME = true;
      forceSSL = true;
      root = depot.tvix.website;
    };

    services.nginx.virtualHosts."bolt.tvix.dev" = {
      root = depot.web.tvixbolt;
      enableACME = true;
      forceSSL = true;
    };

    # old domain, serve redirect
    services.nginx.virtualHosts."tvixbolt.tvl.su" = {
      enableACME = true;
      forceSSL = true;
      extraConfig = "return 301 https://bolt.tvix.dev$request_uri;";
    };

    services.nginx.virtualHosts."docs.tvix.dev" = {
      serverName = "docs.tvix.dev";
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        location = / {
          # until we have a better default page here
          return 301 https://docs.tvix.dev/rust/tvix_eval/index.html;
        }

        location /rust/ {
          alias ${depot.tvix.rust-docs}/;
        }
      '';
    };
  };
}
@@ -1,47 +0,0 @@
{ depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."tvl.fyi" = {
      serverName = "tvl.fyi";
      root = depot.web.tvl;
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;

        rewrite ^/builds/?$ https://buildkite.com/tvl/depot/ last;

        rewrite ^/monorepo-doc/?$ https://docs.google.com/document/d/1nnyByXcH0F6GOmEezNOUa2RFelpeRpDToBLYD_CtjWE/edit?usp=sharing last;

        rewrite ^/irc/?$ ircs://irc.hackint.org:6697/#tvl last;
        rewrite ^/webchat/?$ https://webirc.hackint.org/#ircs://irc.hackint.org/#tvl last;

        location ~* \.(webp|woff2)$ {
          add_header Cache-Control "public, max-age=31536000";
        }

        location /blog {
          if ($request_uri ~ ^/(.*)\.html$) {
            return 302 /$1;
          }

          try_files $uri $uri.html $uri/ =404;
        }

        location = /blog {
          return 302 /#blog;
        }

        location = /blog/ {
          return 302 /#blog;
        }
      '';
    };
  };
}
@@ -1,20 +0,0 @@
{ depot, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."tvl.su" = {
      serverName = "tvl.su";
      root = depot.corp.website;
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
      '';
    };
  };
}