feat(*): initialize new Snix infrastructure

Co-Authored-By: edef <edef@edef.eu>
Co-Authored-By: Ryan Lahfa <raito@lix.systems>
Change-Id: Ica1cda177a236814de900f50a8a61d288f58f519

parent 067eff3427
commit a52ea3675c
124 changed files with 27723 additions and 1631 deletions
@@ -23,6 +23,15 @@ let
   '';

   exceptions = [
+    # machines is allowed to access //users for several reasons:
+    #
+    # 1. User SSH keys are set in //users.
+    # 2. Some personal websites or demo projects are served from it.
+    [ "ops" "machines" "gerrit01" ]
+    [ "ops" "machines" "public01" ]
+    [ "ops" "machines" "build01" ]
+    [ "ops" "machines" "meta01" ]
+
     # Due to evaluation order this also affects these targets.
     # TODO(tazjin): Can this one be removed somehow?
     [ "ops" "nixos" ]
@@ -210,7 +210,7 @@ func changeShouldBeSkipped(onlyDisplay string, changeSubject string) bool {
 }

 func patchSetURL(c gerritevents.Change, p gerritevents.PatchSet) string {
-	return fmt.Sprintf("https://cl.tvl.fyi/%d", c.Number)
+	return fmt.Sprintf("https://cl.snix.dev/%d", c.Number)
 }

 func main() {
49  ops/buildkite/snix.tf  (new file)
@@ -0,0 +1,49 @@
+# Buildkite configuration for snix.
+
+terraform {
+  required_providers {
+    buildkite = {
+      source = "buildkite/buildkite"
+    }
+  }
+
+  backend "s3" {
+    endpoints = {
+      s3 = "https://s3.dualstack.eu-central-1.amazonaws.com"
+    }
+
+    bucket = "snix-tfstate"
+    key = "terraform/snix-buildkite"
+    region = "eu-central-1"
+
+    skip_credentials_validation = true
+    skip_metadata_api_check = true
+    skip_requesting_account_id = true
+  }
+}
+
+provider "buildkite" {
+  organization = "snix"
+}
+
+resource "buildkite_cluster" "primary" {
+  name = "Primary cluster"
+  description = "Build everything and deploy"
+  emoji = "🚀"
+  color = "#bada55"
+
+}
+
+resource "buildkite_pipeline" "snix" {
+  name = "snix"
+  description = "Run full CI pipeline of the Snix monorepository."
+  repository = "https://cl.snix.dev/snix"
+  steps = file("./steps-snix.yml")
+  default_branch = "refs/heads/canon"
+  cluster_id = buildkite_cluster.primary.id
+}
+
+resource "buildkite_cluster_queue" "default" {
+  cluster_id = buildkite_cluster.primary.id
+  key = "default"
+}
@@ -1,4 +0,0 @@
----
-steps:
-  - label: ":buildkite: Upload pipeline"
-    command: "buildkite-agent pipeline upload"
@@ -1,4 +0,0 @@
----
-steps:
-  - label: ":buildkite: Upload pipeline"
-    command: "buildkite-agent pipeline upload"
@ -1,52 +0,0 @@
|
||||||
# Buildkite configuration for TVL.
|
|
||||||
|
|
||||||
terraform {
|
|
||||||
required_providers {
|
|
||||||
buildkite = {
|
|
||||||
source = "buildkite/buildkite"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
backend "s3" {
|
|
||||||
endpoints = {
|
|
||||||
s3 = "https://objects.dc-sto1.glesys.net"
|
|
||||||
}
|
|
||||||
bucket = "tvl-state"
|
|
||||||
key = "terraform/tvl-buildkite"
|
|
||||||
region = "glesys"
|
|
||||||
|
|
||||||
skip_credentials_validation = true
|
|
||||||
skip_region_validation = true
|
|
||||||
skip_metadata_api_check = true
|
|
||||||
skip_requesting_account_id = true
|
|
||||||
skip_s3_checksum = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
provider "buildkite" {
|
|
||||||
organization = "tvl"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "buildkite_pipeline" "depot" {
|
|
||||||
name = "depot"
|
|
||||||
description = "Run full CI pipeline of the depot, TVL's monorepo."
|
|
||||||
repository = "https://cl.tvl.fyi/depot"
|
|
||||||
steps = file("./steps-depot.yml")
|
|
||||||
default_branch = "refs/heads/canon"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "buildkite_pipeline" "tvix" {
|
|
||||||
name = "tvix"
|
|
||||||
description = "Tvix, an exported subset of TVL depot"
|
|
||||||
repository = "https://code.tvl.fyi/depot.git:workspace=views/tvix.git"
|
|
||||||
steps = file("./steps-tvix.yml")
|
|
||||||
default_branch = "canon"
|
|
||||||
}
|
|
||||||
|
|
||||||
resource "buildkite_pipeline" "tvl_kit" {
|
|
||||||
name = "tvl-kit"
|
|
||||||
description = "TVL Kit, an exported subset of TVL depot"
|
|
||||||
repository = "https://code.tvl.fyi/depot.git:workspace=views/kit.git"
|
|
||||||
steps = file("./steps-tvl-kit.yml")
|
|
||||||
default_branch = "canon"
|
|
||||||
}
|
|
||||||
3  ops/dashboards/README.md  (new file)
@@ -0,0 +1,3 @@
+# Declarative dashboards in JSON Grafana format for `status.snix.dev`
+
+Add new dashboards to `default.nix` and they will be picked up automatically.
4  ops/dashboards/default.nix  (new file)
@@ -0,0 +1,4 @@
+{ ... }: {
+  node_exporter = ./json/node_exporter.json;
+  all = ./json;
+}
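As a concrete illustration of the README's instruction, registering a further dashboard would presumably mean dropping its JSON export into `./json` and adding one more attribute to `ops/dashboards/default.nix`. The `gerrit` name and `./json/gerrit.json` path below are hypothetical examples, not files added by this commit:

```nix
# ops/dashboards/default.nix — hypothetical extension, not part of this commit
{ ... }: {
  node_exporter = ./json/node_exporter.json;
  # Hypothetical additional dashboard, exported from Grafana as JSON.
  gerrit = ./json/gerrit.json;
  all = ./json;
}
```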
23899  ops/dashboards/json/node_exporter.json  (new file; diff suppressed because it is too large)
@@ -1,7 +1,7 @@
 { pkgs, ... }:

 pkgs.stdenv.mkDerivation {
-  name = "deploy-whitby";
+  name = "deploy-machine";

   phases = [ "installPhase" "installCheckPhase" ];

@@ -11,7 +11,7 @@ pkgs.stdenv.mkDerivation {

   installPhase = ''
     mkdir -p $out/bin
-    makeWrapper ${./deploy-whitby.sh} $out/bin/deploy-whitby.sh \
+    makeWrapper ${./deploy-machine.sh} $out/bin/deploy-machine.sh \
       --prefix PATH : ${with pkgs; lib.makeBinPath [
         ansi2html
         git
@@ -26,6 +26,6 @@ pkgs.stdenv.mkDerivation {

   doInstallCheck = true;
   installCheckPhase = ''
-    shellcheck $out/bin/deploy-whitby.sh
+    shellcheck $out/bin/deploy-machine.sh
   '';
 }
@@ -1,8 +1,14 @@
 #!/usr/bin/env bash
 set -Ceuo pipefail

-HTML_ROOT="${HTML_ROOT:-/var/html/deploys.tvl.fyi}"
-URL_BASE="${URL_BASE:-https://deploys.tvl.fyi/diff}"
+DOMAIN="snix.dev"
+FORGE_URL="cl.snix.dev"
+REPO="tvix"
+TARGET_SYSTEM="gerrit01"
+TARGET_IRC_CHANNEL="#tvix-dev"
+
+HTML_ROOT="${HTML_ROOT:-/var/html/deploys."$DOMAIN"}"
+URL_BASE="${URL_BASE:-https://deploys."$DOMAIN"/diff}"
 IRCCAT_PORT="${IRCCAT_PORT:-4722}"

 drv_hash() {
@@ -28,11 +34,11 @@ cleanup() {
 }
 trap cleanup EXIT

-git clone https://cl.tvl.fyi/depot "$worktree_dir" --reference /depot
+git clone https://"$FORGE_URL"/"$REPO" "$worktree_dir" --reference /"$REPO"
 git -C "$worktree_dir" checkout "$new_rev"

 current=$(nix show-derivation /run/current-system | jq -r 'keys | .[0]')
-new=$(nix-instantiate -A ops.nixos.whitbySystem "$worktree_dir")
+new=$(nix-instantiate -A ops.nixos."$TARGET_SYSTEM" "$worktree_dir")

 diff_filename="$(drv_hash "$current")..$(drv_hash "$new").html"
 nvd --color always diff "$current" "$new" \
@@ -40,7 +46,7 @@ nvd --color always diff "$current" "$new" \
   >| "$HTML_ROOT/diff/$diff_filename"
 chmod a+r "$HTML_ROOT/diff/$diff_filename"

-echo "#tvl whitby is being deployed! system diff: $URL_BASE/$diff_filename" \
+echo "$TARGET_IRC_CHANNEL $TARGET_SYSTEM is being deployed! system diff: $URL_BASE/$diff_filename" \
   | nc -w 5 -N localhost "$IRCCAT_PORT"

 # TODO(grfn): Actually do the deploy
3  ops/dns/.gitignore  (new file, vendored)
@@ -0,0 +1,3 @@
+.terraform*
+terraform.tfstate*
+.envrc
@@ -2,10 +2,18 @@ DNS configuration
 =================

 This folder contains configuration for our DNS zones. The zones are hosted with
-Google Cloud DNS, which supports zone-file based import/export.
+Digital Ocean DNS, which possesses a Terraform provider for DNS records.

-Currently there is no automation to deploy these zones, but CI will check their
-integrity.
+Secrets are needed for applying this. The encrypted file
+`//ops/secrets/tf-dns.age` contains `export` calls which should be
+sourced, for example via `direnv`, by users with the appropriate
+credentials.

-*Note: While each zone file specifies an SOA record, it only exists to satisfy
-`named-checkzone`. Cloud DNS manages this record for us.*
+Here is an example `direnv` configuration:
+
+```
+# //ops/secrets/.envrc
+source_up
+eval $(age --decrypt -i ~/.ssh/id_ed25519 $(git rev-parse --show-toplevel)/ops/secrets/tf-dns.age)
+watch_file $(git rev-parse --show-toplevel)/secrets/tf-dns.age
+```
@@ -1,5 +1,4 @@
-# Performs simple (local-only) validity checks on DNS zones.
-{ depot, pkgs, ... }:
+{ depot, lib, pkgs, ... }:

 let
   checkZone = zone: file: pkgs.runCommand "${zone}-check" { } ''
@@ -7,8 +6,19 @@ let
   '';

 in
-depot.nix.readTree.drvTargets {
-  nixery-dev = checkZone "nixery.dev" ./nixery.dev.zone;
-  tvl-fyi = checkZone "tvl.fyi" ./tvl.fyi.zone;
-  tvl-su = checkZone "tvl.su" ./tvl.su.zone;
+depot.nix.readTree.drvTargets rec {
+  # Provide a Terraform wrapper with the right provider installed.
+  terraform = pkgs.terraform.withPlugins (p: [
+    p.digitalocean
+  ]);
+
+  validate = {
+    snix-dev = checkZone "snix.dev" ./snix.dev.zone;
+    snix-systems = checkZone "snix.systems" ./snix.systems.zone;
+    terraform = depot.tools.checks.validateTerraform {
+      inherit terraform;
+      name = "dns";
+      src = lib.cleanSource ./.;
+    };
+  };
 }
112  ops/dns/dns-snix-dev.tf  (new file)
|
|
@ -0,0 +1,112 @@
|
||||||
|
# DNS configuration for snix.dev
|
||||||
|
|
||||||
|
resource "digitalocean_domain" "snix_dev" {
|
||||||
|
name = "snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Infrastructure records
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_gerrit01" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
name = "gerrit01.infra"
|
||||||
|
value = var.gerrit01_ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_public01" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
name = "public01.infra"
|
||||||
|
value = var.public01_ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_build01" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
name = "build01.infra"
|
||||||
|
value = var.build01_ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_meta01_v4" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "A"
|
||||||
|
name = "meta01.infra"
|
||||||
|
value = var.meta01_ipv4
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_meta01_v6" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
name = "meta01.infra"
|
||||||
|
value = var.meta01_ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_gerrit01_v4" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "A"
|
||||||
|
name = "gerrit01.infra"
|
||||||
|
value = var.gerrit01_ipv4
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_gerrit01_v6" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
name = "gerrit01.infra"
|
||||||
|
value = var.gerrit01_ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_public01_v4" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "A"
|
||||||
|
name = "public01.infra"
|
||||||
|
value = var.public01_ipv4
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_infra_public01_v6" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
name = "public01.infra"
|
||||||
|
value = var.public01_ipv6
|
||||||
|
}
|
||||||
|
|
||||||
|
# Email records
|
||||||
|
resource "digitalocean_record" "snix_dev_mail_v4" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "A"
|
||||||
|
value = "49.12.112.149"
|
||||||
|
name = "mail"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "digitalocean_record" "snix_dev_mail_v6" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "AAAA"
|
||||||
|
value = "2a01:4f8:c013:3e62::2"
|
||||||
|
name = "mail"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Explicit records for all services running on public01
|
||||||
|
resource "digitalocean_record" "snix_dev_public01" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "CNAME"
|
||||||
|
value = "public01.infra.snix.dev."
|
||||||
|
name = each.key
|
||||||
|
for_each = toset(local.public01_services)
|
||||||
|
}
|
||||||
|
|
||||||
|
# Explicit records for all services running on gerrit01
|
||||||
|
resource "digitalocean_record" "snix_dev_gerrit01" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "CNAME"
|
||||||
|
value = "gerrit01.infra.snix.dev."
|
||||||
|
name = each.key
|
||||||
|
for_each = toset(local.gerrit01_services)
|
||||||
|
}
|
||||||
|
|
||||||
|
# Explicit records for all services running on meta01
|
||||||
|
resource "digitalocean_record" "snix_dev_meta01" {
|
||||||
|
domain = digitalocean_domain.snix_dev.id
|
||||||
|
type = "CNAME"
|
||||||
|
value = "meta01.infra.snix.dev."
|
||||||
|
name = each.key
|
||||||
|
for_each = toset(local.meta01_services)
|
||||||
|
}
|
||||||
81  ops/dns/main.tf  (new file)
|
|
@ -0,0 +1,81 @@
|
||||||
|
# Configure snix DNS resources.
|
||||||
|
|
||||||
|
terraform {
|
||||||
|
required_providers {
|
||||||
|
digitalocean = {
|
||||||
|
source = "digitalocean/digitalocean"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
backend "s3" {
|
||||||
|
endpoints = {
|
||||||
|
s3 = "https://s3.dualstack.eu-central-1.amazonaws.com"
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket = "snix-tfstate"
|
||||||
|
key = "terraform/snix-dns"
|
||||||
|
region = "eu-central-1"
|
||||||
|
|
||||||
|
skip_credentials_validation = true
|
||||||
|
skip_metadata_api_check = true
|
||||||
|
skip_requesting_account_id = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "sni_proxy_ipv4" {
|
||||||
|
type = string
|
||||||
|
default = "163.172.69.160"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "public01_ipv6" {
|
||||||
|
type = string
|
||||||
|
default = "2a01:4f8:c013:3e62::1"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "public01_ipv4" {
|
||||||
|
type = string
|
||||||
|
default = "49.13.70.233"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "gerrit01_ipv6" {
|
||||||
|
type = string
|
||||||
|
default = "2a01:4f8:c17:6188::1"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "gerrit01_ipv4" {
|
||||||
|
type = string
|
||||||
|
default = "138.199.144.184"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "build01_ipv6" {
|
||||||
|
type = string
|
||||||
|
default = "2001:bc8:38ee:100:7000::20"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "meta01_ipv4" {
|
||||||
|
type = string
|
||||||
|
default = "142.132.184.228"
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "meta01_ipv6" {
|
||||||
|
type = string
|
||||||
|
default = "2a01:4f8:c013:4a58::1"
|
||||||
|
}
|
||||||
|
|
||||||
|
locals {
|
||||||
|
public01_services = [
|
||||||
|
"auth",
|
||||||
|
"git",
|
||||||
|
"status"
|
||||||
|
]
|
||||||
|
|
||||||
|
gerrit01_services = [
|
||||||
|
"cl"
|
||||||
|
]
|
||||||
|
|
||||||
|
meta01_services = [
|
||||||
|
"mimir",
|
||||||
|
"loki",
|
||||||
|
"tempo"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
@ -1,10 +0,0 @@
|
||||||
;; Google Cloud DNS zone for nixery.dev
|
|
||||||
nixery.dev. 21600 IN SOA ns-cloud-b1.googledomains.com. cloud-dns-hostmaster.google.com. 5 21600 3600 259200 300
|
|
||||||
nixery.dev. 21600 IN NS ns-cloud-b1.googledomains.com.
|
|
||||||
nixery.dev. 21600 IN NS ns-cloud-b2.googledomains.com.
|
|
||||||
nixery.dev. 21600 IN NS ns-cloud-b3.googledomains.com.
|
|
||||||
nixery.dev. 21600 IN NS ns-cloud-b4.googledomains.com.
|
|
||||||
|
|
||||||
;; Records for pointing nixery.dev to whitby
|
|
||||||
nixery.dev. 300 IN A 49.12.129.211
|
|
||||||
nixery.dev. 300 IN AAAA 2a01:4f8:242:5b21:0:feed:edef:beef
|
|
||||||
|
|
@ -1,39 +0,0 @@
|
||||||
;; Google Cloud DNS zone for tvl.fyi.
|
|
||||||
;;
|
|
||||||
;; This zone is hosted in the project 'tvl-fyi', and registered via
|
|
||||||
;; Google Domains.
|
|
||||||
tvl.fyi. 21600 IN SOA ns-cloud-b1.googledomains.com. cloud-dns-hostmaster.google.com. 20 21600 3600 259200 300
|
|
||||||
tvl.fyi. 21600 IN NS ns-cloud-b1.googledomains.com.
|
|
||||||
tvl.fyi. 21600 IN NS ns-cloud-b2.googledomains.com.
|
|
||||||
tvl.fyi. 21600 IN NS ns-cloud-b3.googledomains.com.
|
|
||||||
tvl.fyi. 21600 IN NS ns-cloud-b4.googledomains.com.
|
|
||||||
|
|
||||||
;; Mail forwarding (via domains.google)
|
|
||||||
tvl.fyi. 3600 IN MX 5 gmr-smtp-in.l.google.com.
|
|
||||||
tvl.fyi. 3600 IN MX 10 alt1.gmr-smtp-in.l.google.com.
|
|
||||||
tvl.fyi. 3600 IN MX 20 alt2.gmr-smtp-in.l.google.com.
|
|
||||||
tvl.fyi. 3600 IN MX 30 alt3.gmr-smtp-in.l.google.com.
|
|
||||||
tvl.fyi. 3600 IN MX 40 alt4.gmr-smtp-in.l.google.com.
|
|
||||||
|
|
||||||
;; Landing website is hosted on whitby on the apex.
|
|
||||||
tvl.fyi. 21600 IN A 49.12.129.211
|
|
||||||
tvl.fyi. 21600 IN AAAA 2a01:4f8:242:5b21:0:feed:edef:beef
|
|
||||||
|
|
||||||
;; TVL infrastructure
|
|
||||||
whitby.tvl.fyi. 21600 IN A 49.12.129.211
|
|
||||||
whitby.tvl.fyi. 21600 IN AAAA 2a01:4f8:242:5b21:0:feed:edef:beef
|
|
||||||
|
|
||||||
;; TVL services
|
|
||||||
at.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
atward.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
b.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
cache.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
cl.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
code.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
cs.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
deploys.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
images.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
login.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
static.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
status.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
todo.tvl.fyi. 21600 IN CNAME whitby.tvl.fyi.
|
|
||||||
|
|
@ -1,51 +0,0 @@
|
||||||
;; Google Cloud DNS for tvl.su.
|
|
||||||
;;
|
|
||||||
;; This zone is hosted in the project 'tvl-fyi', and registered via
|
|
||||||
;; NIC.RU.
|
|
||||||
;;
|
|
||||||
;; This zone is mostly identical to tvl.fyi and will eventually become
|
|
||||||
;; the primary zone.
|
|
||||||
tvl.su. 21600 IN SOA ns-cloud-b1.googledomains.com. cloud-dns-hostmaster.google.com. 33 21600 3600 259200 300
|
|
||||||
tvl.su. 21600 IN NS ns-cloud-b1.googledomains.com.
|
|
||||||
tvl.su. 21600 IN NS ns-cloud-b2.googledomains.com.
|
|
||||||
tvl.su. 21600 IN NS ns-cloud-b3.googledomains.com.
|
|
||||||
tvl.su. 21600 IN NS ns-cloud-b4.googledomains.com.
|
|
||||||
|
|
||||||
;; Landing website is hosted on whitby on the apex.
|
|
||||||
tvl.su. 21600 IN A 49.12.129.211
|
|
||||||
tvl.su. 21600 IN AAAA 2a01:4f8:242:5b21:0:feed:edef:beef
|
|
||||||
|
|
||||||
;; TVL infrastructure
|
|
||||||
whitby.tvl.su. 21600 IN A 49.12.129.211
|
|
||||||
whitby.tvl.su. 21600 IN AAAA 2a01:4f8:242:5b21:0:feed:edef:beef
|
|
||||||
|
|
||||||
;; TVL services
|
|
||||||
at.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
atward.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
b.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
cache.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
cl.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
code.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
cs.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
images.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
login.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
static.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
status.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
todo.tvl.su. 21600 IN CNAME whitby.tvl.su.
|
|
||||||
|
|
||||||
;; Google Workspaces domain verification
|
|
||||||
tvl.su. 21600 IN TXT "google-site-verification=3ksTBzFK3lZlzD3ddBfpaHs9qasfAiYBmvbW2T_ejH4"
|
|
||||||
|
|
||||||
;; Google Workspaces email configuration
|
|
||||||
tvl.su. 21600 IN MX 1 aspmx.l.google.com.
|
|
||||||
tvl.su. 21600 IN MX 5 alt1.aspmx.l.google.com.
|
|
||||||
tvl.su. 21600 IN MX 5 alt2.aspmx.l.google.com.
|
|
||||||
tvl.su. 21600 IN MX 10 alt3.aspmx.l.google.com.
|
|
||||||
tvl.su. 21600 IN MX 10 alt4.aspmx.l.google.com.
|
|
||||||
tvl.su. 21600 IN TXT "v=spf1 include:_spf.google.com ~all"
|
|
||||||
google._domainkey.tvl.su. 21600 IN TXT ("v=DKIM1; k=rsa; p=MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAlqCbnGa8oPwrudJK60l6MJj3NBnwj8wAPXNGtYy2SXrOBi7FT+ySwW7ATpfv6Xq9zGDUWJsENPUlFmvDiUs7Qi4scnNvSO1L+sDseB9/q1m3gMFVnTuieDO/" "T+KKkg0+uYgMM7YX5PahsAAJJ+EMb/r4afl3tcBMPR64VveKQ0hiSHA4zIYPsB9FB+b8S5C46uyY0r6WR7IzGjq2Gzb1do0kxvaKItTITWLSImcUu5ZZuXOUKJb441frVBWur5lXaYuedkxb1IRTTK0V/mBODE1D7k73MxGrqlzaMPdCqz+c3hRE18WVUkBTYjANVXDrs3yzBBVxaIAeu++vkO6BvQIDAQAB")
|
|
||||||
|
|
||||||
;; Google Workspaces site aliases
|
|
||||||
docs.tvl.su. 21600 IN CNAME ghs.googlehosted.com.
|
|
||||||
groups.tvl.su. 21600 IN CNAME ghs.googlehosted.com.
|
|
||||||
mail.tvl.su. 21600 IN CNAME ghs.googlehosted.com.
|
|
||||||
|
|
@@ -1,8 +1,9 @@
 { depot, pkgs, lib, ... }:
+{ gerrit }:

 let
   classPath = lib.concatStringsSep ":" [
-    "${depot.third_party.gerrit}/share/api/extension-api_deploy.jar"
+    "${gerrit}/share/api/extension-api_deploy.jar"
   ];
 in
 pkgs.stdenvNoCC.mkDerivation rec {
@@ -1,8 +1,8 @@
 // vim: set noai ts=2 sw=2 et: */

-// This is a read-only Buildkite token: it was generated by lukegb@, and has
+// This is a read-only Buildkite token: it was generated and installed by flokli@ and has
 // read_builds, read_build_logs, and read_pipelines permissions.
-const BUILDKITE_TOKEN = 'a150658fb61062e432f13a032962d70fa9352088';
+const BUILDKITE_TOKEN = 'bkua_fbb743ba597d89caf522876289705f6571bb599d';

 function encodeParams(p) {
   const pieces = [];
@@ -77,25 +77,25 @@ function jobStateToCheckRunStatus(state) {
   return status;
 }

-const tvlChecksProvider = {
+const snixChecksProvider = {
   async fetch(change) {
     let {patchsetSha, repo} = change;

     const experiments = window.ENABLED_EXPERIMENTS || [];
-    if (experiments.includes("UiFeature__tvl_check_debug")) {
+    if (experiments.includes("UiFeature__snix_check_debug")) {
       patchsetSha = '76692104f58b849b1503a8d8a700298003fa7b5f';
-      repo = 'depot';
+      repo = 'snix';
     }

-    if (repo !== 'depot') {
-      // We only handle TVL's depot at the moment.
+    if (repo !== 'snix') {
+      // We only handle snix's depot at the moment.
       return {responseCode: 'OK'};
     }

     const params = {
       commit: patchsetSha,
     };
-    const url = `https://api.buildkite.com/v2/organizations/tvl/pipelines/depot/builds?${encodeParams(params)}`;
+    const url = `https://api.buildkite.com/v2/organizations/snix/pipelines/snix/builds?${encodeParams(params)}`;
     const resp = await fetch(url, {
       headers: {
         Authorization: `Bearer ${BUILDKITE_TOKEN}`,
@@ -183,7 +183,7 @@ const tvlChecksProvider = {
   };

 Gerrit.install(plugin => {
-  console.log('TVL plugin initialising');
+  console.log('snix plugin initialising');

-  plugin.checks().register(tvlChecksProvider);
+  plugin.checks().register(snixChecksProvider);
 });
3  ops/hcloud/.gitignore  (new file, vendored)
@@ -0,0 +1,3 @@
+.terraform*
+terraform.tfstate*
+.envrc
20  ops/hcloud/README.md  (new file)
@@ -0,0 +1,20 @@
+Hetzner cloud configuration
+===========================
+
+This contains Terraform configuration for setting up our Hetzner cloud resources, except S3; see `//ops/hetzner-s3` for that.
+
+Through `//tools/depot-deps` a `tf-hcloud` binary is made available
+which contains a Terraform binary pre-configured with the correct
+providers. This is automatically on your `$PATH` through `direnv`.
+
+However, secrets still need to be loaded to access the Terraform state
+and speak to the Hetzner API. These are available to certain users
+through `//ops/secrets`.
+
+This can be done with separate direnv configuration, for example:
+
+```
+# //ops/buildkite/.envrc
+source_up
+eval $(age --decrypt -i ~/.ssh/id_ed25519 $(git rev-parse --show-toplevel)/ops/secrets/tf-hcloud.age)
+```
13  ops/hcloud/default.nix  (new file)
@@ -0,0 +1,13 @@
+{ depot, lib, pkgs, ... }:
+
+depot.nix.readTree.drvTargets rec {
+  terraform = pkgs.terraform.withPlugins (p: [
+    p.hcloud
+  ]);
+
+  validate = depot.tools.checks.validateTerraform {
+    inherit terraform;
+    name = "hcloud";
+    src = lib.cleanSource ./.;
+  };
+}
1  ops/hcloud/raito.pub  (new file)
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcEkYM1r8QVNM/G5CxJInEdoBCWjEHHDdHlzDYNSUIdHHsn04QY+XI67AdMCm8w30GZnLUIj5RiJEWXREUApby0GrfxGGcy8otforygfgtmuUKAUEHdU2MMwrQI7RtTZ8oQ0USRGuqvmegxz3l5caVU7qGvBllJ4NUHXrkZSja2/51vq80RF4MKkDGiz7xUTixI2UcBwQBCA/kQedKV9G28EH+1XfvePqmMivZjl+7VyHsgUVj9eRGA1XWFw59UPZG8a7VkxO/Eb3K9NF297HUAcFMcbY6cPFi9AaBgu3VC4eetDnoN/+xT1owiHi7BReQhGAy/6cdf7C/my5ehZwD raito@RaitoBezarius-Laptop-OverDrive
134  ops/hcloud/snix.tf  (new file)
|
|
@ -0,0 +1,134 @@
|
||||||
|
# Hetzner cloud configuration for snix
|
||||||
|
|
||||||
|
terraform {
|
||||||
|
required_providers {
|
||||||
|
hcloud = {
|
||||||
|
source = "hetznercloud/hcloud"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
backend "s3" {
|
||||||
|
endpoints = {
|
||||||
|
s3 = "https://s3.dualstack.eu-central-1.amazonaws.com"
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket = "snix-tfstate"
|
||||||
|
key = "terraform/snix-hcloud"
|
||||||
|
region = "eu-central-1"
|
||||||
|
|
||||||
|
skip_credentials_validation = true
|
||||||
|
skip_metadata_api_check = true
|
||||||
|
skip_requesting_account_id = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
provider "hcloud" { }
|
||||||
|
|
||||||
|
resource "hcloud_ssh_key" "raito" {
|
||||||
|
name = "raito"
|
||||||
|
public_key = file("./raito.pub")
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO: pipe it from nix ssh keys
|
||||||
|
#
|
||||||
|
resource "hcloud_server" "meta01" {
|
||||||
|
name = "meta01.infra.snix.dev"
|
||||||
|
image = "debian-12"
|
||||||
|
# Observability stacks can eat quite a lot of RAM.
|
||||||
|
server_type = "cx32"
|
||||||
|
datacenter = "fsn1-dc14"
|
||||||
|
ssh_keys = [ hcloud_ssh_key.raito.id ]
|
||||||
|
public_net {
|
||||||
|
ipv4_enabled = true
|
||||||
|
ipv6_enabled = true
|
||||||
|
}
|
||||||
|
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [ ssh_keys ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "meta01-v6" {
|
||||||
|
server_id = hcloud_server.meta01.id
|
||||||
|
ip_address = hcloud_server.meta01.ipv6_address
|
||||||
|
dns_ptr = "meta01.infra.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "meta01-v4" {
|
||||||
|
server_id = hcloud_server.meta01.id
|
||||||
|
ip_address = hcloud_server.meta01.ipv4_address
|
||||||
|
dns_ptr = "meta01.infra.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_floating_ip" "mail" {
|
||||||
|
type = "ipv4"
|
||||||
|
server_id = hcloud_server.public01.id
|
||||||
|
description = "IPv4 for mail hosting"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_server" "public01" {
|
||||||
|
name = "public01.infra.snix.dev"
|
||||||
|
image = "debian-12"
|
||||||
|
server_type = "cx22"
|
||||||
|
datacenter = "fsn1-dc14"
|
||||||
|
ssh_keys = [ hcloud_ssh_key.raito.id ]
|
||||||
|
public_net {
|
||||||
|
ipv4_enabled = true
|
||||||
|
ipv6_enabled = true
|
||||||
|
}
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [ ssh_keys ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "mail-v4" {
|
||||||
|
floating_ip_id = hcloud_floating_ip.mail.id
|
||||||
|
ip_address = hcloud_floating_ip.mail.ip_address
|
||||||
|
dns_ptr = "mail.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "mail-v6" {
|
||||||
|
server_id = hcloud_server.public01.id
|
||||||
|
# Hardcoded because I don't want to compute it via Terraform.
|
||||||
|
ip_address = "2a01:4f8:c013:3e62::2"
|
||||||
|
dns_ptr = "mail.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "public01-v4" {
|
||||||
|
server_id = hcloud_server.public01.id
|
||||||
|
ip_address = hcloud_server.public01.ipv4_address
|
||||||
|
dns_ptr = "public01.infra.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "public01-v6" {
|
||||||
|
server_id = hcloud_server.public01.id
|
||||||
|
ip_address = hcloud_server.public01.ipv6_address
|
||||||
|
dns_ptr = "public01.infra.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_server" "gerrit01" {
|
||||||
|
name = "gerrit01.infra.snix.dev"
|
||||||
|
image = "debian-12"
|
||||||
|
server_type = "cpx31"
|
||||||
|
datacenter = "fsn1-dc14"
|
||||||
|
ssh_keys = [ hcloud_ssh_key.raito.id ]
|
||||||
|
public_net {
|
||||||
|
ipv4_enabled = true
|
||||||
|
ipv6_enabled = true
|
||||||
|
}
|
||||||
|
lifecycle {
|
||||||
|
ignore_changes = [ ssh_keys ]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "gerrit01-v6" {
|
||||||
|
server_id = hcloud_server.gerrit01.id
|
||||||
|
ip_address = hcloud_server.gerrit01.ipv6_address
|
||||||
|
dns_ptr = "gerrit01.infra.snix.dev"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "hcloud_rdns" "gerrit01-v4" {
|
||||||
|
server_id = hcloud_server.gerrit01.id
|
||||||
|
ip_address = hcloud_server.gerrit01.ipv4_address
|
||||||
|
dns_ptr = "gerrit01.infra.snix.dev"
|
||||||
|
}
|
||||||
3  ops/hetzner-s3/.gitignore  (new file, vendored)
@@ -0,0 +1,3 @@
+.terraform*
+terraform.tfstate*
+.envrc
21  ops/hetzner-s3/README.md  (new file)
@@ -0,0 +1,21 @@
+Hetzner S3 configuration
+========================
+
+This contains Terraform configuration for setting up our Hetzner S3
+buckets.
+
+Through `//tools/depot-deps` a `tf-hetzner-s3` binary is made available
+which contains a Terraform binary pre-configured with the correct
+providers. This is automatically on your `$PATH` through `direnv`.
+
+However, secrets still need to be loaded to access the Terraform state
+and speak to the Hetzner API. These are available to certain users
+through `//ops/secrets`.
+
+This can be done with separate direnv configuration, for example:
+
+```
+# //ops/buildkite/.envrc
+source_up
+eval $(age --decrypt -i ~/.ssh/id_ed25519 $(git rev-parse --show-toplevel)/ops/secrets/tf-hetzner-s3.age)
+```
13  ops/hetzner-s3/default.nix  (new file)
@@ -0,0 +1,13 @@
+{ depot, lib, pkgs, ... }:
+
+depot.nix.readTree.drvTargets rec {
+  terraform = pkgs.terraform.withPlugins (p: [
+    p.minio
+  ]);
+
+  validate = depot.tools.checks.validateTerraform {
+    inherit terraform;
+    name = "hetzner-s3";
+    src = lib.cleanSource ./.;
+  };
+}
63  ops/hetzner-s3/snix.tf  (new file)
|
|
@ -0,0 +1,63 @@
|
||||||
|
# Hetzner S3 configuration for snix
|
||||||
|
# https://docs.hetzner.com/storage/object-storage/getting-started/creating-a-bucket-minio-terraform/
|
||||||
|
|
||||||
|
terraform {
|
||||||
|
required_providers {
|
||||||
|
minio = {
|
||||||
|
source = "aminueza/minio"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
backend "s3" {
|
||||||
|
endpoints = {
|
||||||
|
s3 = "https://s3.dualstack.eu-central-1.amazonaws.com"
|
||||||
|
}
|
||||||
|
|
||||||
|
bucket = "snix-tfstate"
|
||||||
|
key = "terraform/snix-hetzner-s3"
|
||||||
|
region = "eu-central-1"
|
||||||
|
|
||||||
|
skip_credentials_validation = true
|
||||||
|
skip_metadata_api_check = true
|
||||||
|
skip_requesting_account_id = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Hetzner access keys, not to be confused with the state S3.
|
||||||
|
variable "access_key" {}
|
||||||
|
|
||||||
|
variable "secret_key" {
|
||||||
|
sensitive = true
|
||||||
|
}
|
||||||
|
|
||||||
|
provider "minio" {
|
||||||
|
minio_server = "fsn1.your-objectstorage.com"
|
||||||
|
minio_user = "${var.access_key}"
|
||||||
|
minio_password = "${var.secret_key}"
|
||||||
|
minio_region = "fsn1"
|
||||||
|
minio_ssl = true
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "minio_s3_bucket" "mimir" {
|
||||||
|
bucket = "snix-mimir"
|
||||||
|
acl = "private"
|
||||||
|
object_locking = false
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "minio_s3_bucket" "loki" {
|
||||||
|
bucket = "snix-loki"
|
||||||
|
acl = "private"
|
||||||
|
object_locking = false
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "minio_s3_bucket" "tempo" {
|
||||||
|
bucket = "snix-tempo"
|
||||||
|
acl = "private"
|
||||||
|
object_locking = false
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "minio_s3_bucket" "backups" {
|
||||||
|
bucket = "snix-backups"
|
||||||
|
acl = "private"
|
||||||
|
object_locking = false
|
||||||
|
}
|
||||||
|
|
@@ -1,48 +1,82 @@
 # All Keycloak clients, that is applications which authenticate
 # through Keycloak.
 #
-# Includes first-party (i.e. TVL-hosted) and third-party clients.
+# Includes first-party (i.e. snix-hosted) and third-party clients.

 resource "keycloak_openid_client" "grafana" {
-  realm_id = keycloak_realm.tvl.id
+  realm_id = keycloak_realm.snix.id
   client_id = "grafana"
   name = "Grafana"
   enabled = true
   access_type = "CONFIDENTIAL"
   standard_flow_enabled = true
-  base_url = "https://status.tvl.su"
+  base_url = "https://status.snix.dev"
+  full_scope_allowed = true

   valid_redirect_uris = [
-    "https://status.tvl.su/*",
+    "https://status.snix.dev/*",
+  ]
+}
+
+resource "keycloak_openid_client_default_scopes" "grafana_default_scopes" {
+  realm_id = keycloak_realm.snix.id
+  client_id = keycloak_openid_client.grafana.id
+
+  default_scopes = [
+    "profile",
+    "email",
+    "roles",
+    "web-origins",
   ]
 }

 resource "keycloak_openid_client" "gerrit" {
-  realm_id = keycloak_realm.tvl.id
+  realm_id = keycloak_realm.snix.id
   client_id = "gerrit"
-  name = "TVL Gerrit"
+  name = "snix Gerrit"
   enabled = true
   access_type = "CONFIDENTIAL"
   standard_flow_enabled = true
-  base_url = "https://cl.tvl.fyi"
-  description = "TVL's code review tool"
+  base_url = "https://cl.snix.dev"
+  description = "snix project's code review tool"
   direct_access_grants_enabled = true
   exclude_session_state_from_auth_response = false

   valid_redirect_uris = [
-    "https://cl.tvl.fyi/*",
+    "https://cl.snix.dev/*",
   ]

   web_origins = [
-    "https://cl.tvl.fyi",
+    "https://cl.snix.dev",
+  ]
+}
+
+resource "keycloak_openid_client" "forgejo" {
+  realm_id = keycloak_realm.snix.id
+  client_id = "forgejo"
+  name = "snix Forgejo"
+  enabled = true
+  access_type = "CONFIDENTIAL"
+  standard_flow_enabled = true
+  base_url = "https://git.snix.dev"
+  description = "snix project's code browsing, search and issue tracker"
+  direct_access_grants_enabled = true
+  exclude_session_state_from_auth_response = false
+
+  valid_redirect_uris = [
+    "https://git.snix.dev/*",
+  ]
+
+  web_origins = [
+    "https://git.snix.dev",
   ]
 }

 resource "keycloak_saml_client" "buildkite" {
-  realm_id = keycloak_realm.tvl.id
+  realm_id = keycloak_realm.snix.id
   client_id = "https://buildkite.com"
   name = "Buildkite"
-  base_url = "https://buildkite.com/sso/tvl"
+  base_url = "https://buildkite.com/sso/snix"

   client_signature_required = false
   assertion_consumer_post_url = "https://buildkite.com/sso/~/1531aca5-f49c-4151-8832-a451e758af4c/saml/consume"
@@ -53,7 +87,7 @@ resource "keycloak_saml_client" "buildkite" {
 }

 resource "keycloak_saml_user_attribute_protocol_mapper" "buildkite_email" {
-  realm_id = keycloak_realm.tvl.id
+  realm_id = keycloak_realm.snix.id
   client_id = keycloak_saml_client.buildkite.id
   name = "buildkite-email-mapper"
   user_attribute = "email"
@@ -62,24 +96,10 @@ resource "keycloak_saml_user_attribute_protocol_mapper" "buildkite_email" {
 }

 resource "keycloak_saml_user_attribute_protocol_mapper" "buildkite_name" {
-  realm_id = keycloak_realm.tvl.id
+  realm_id = keycloak_realm.snix.id
   client_id = keycloak_saml_client.buildkite.id
   name = "buildkite-name-mapper"
   user_attribute = "displayName"
   saml_attribute_name = "name"
   saml_attribute_name_format = "Unspecified"
 }
-
-resource "keycloak_openid_client" "panettone" {
-  realm_id = keycloak_realm.tvl.id
-  client_id = "panettone"
-  name = "Panettone"
-  enabled = true
-  access_type = "CONFIDENTIAL"
-  standard_flow_enabled = true
-
-  valid_redirect_uris = [
-    "https://b.tvl.fyi/auth",
-    "http://localhost:6161/auth",
-  ]
-}
@@ -1,6 +1,4 @@
-# Configure TVL Keycloak instance.
-#
-# TODO(tazjin): Configure GitLab IDP
+# Configure snix's Keycloak instance.

 terraform {
   required_providers {
@@ -11,43 +9,37 @@ terraform {

   backend "s3" {
     endpoints = {
-      s3 = "https://objects.dc-sto1.glesys.net"
+      s3 = "https://s3.dualstack.eu-central-1.amazonaws.com"
     }
-    bucket = "tvl-state"
-    key = "terraform/tvl-keycloak"
-    region = "glesys"
+
+    bucket = "snix-tfstate"
+    key = "terraform/snix-keycloak"
+    region = "eu-central-1"

     skip_credentials_validation = true
-    skip_region_validation = true
     skip_metadata_api_check = true
     skip_requesting_account_id = true
-    skip_s3_checksum = true
   }
 }

 provider "keycloak" {
   client_id = "terraform"
-  url = "https://auth.tvl.fyi"
+  url = "https://auth.snix.dev"
+  # NOTE: Docs mention this applies to "users of the legacy distribution of keycloak".
+  # However, we get a "failed to perform initial login to Keycloak: error
+  # sending POST request to https://auth.tvl.fyi/realms/master/protocol/openid-connect/token: 404 Not Found"
+  # if we don't set this.
+  base_path = "/auth"
 }

-resource "keycloak_realm" "tvl" {
-  realm = "TVL"
+resource "keycloak_realm" "snix" {
+  realm = "snix-project"
   enabled = true
-  display_name = "The Virus Lounge"
+  display_name = "The snix project"
   default_signature_algorithm = "RS256"

-  smtp_server {
-    from = "tvlbot@tazj.in"
-    from_display_name = "The Virus Lounge"
-    host = "127.0.0.1"
-    port = "25"
-    reply_to = "depot@tvl.su"
-    ssl = false
-    starttls = false
-  }
+  # smtp_server {
+  #   from = "tvlbot@tazj.in"
+  #   from_display_name = "The Virus Lounge"
+  #   host = "127.0.0.1"
+  #   port = "25"
+  #   reply_to = "depot@tvl.su"
+  #   ssl = false
+  #   starttls = false
+  # }
 }
100  ops/keycloak/permissions.tf  (new file)
|
|
@ -0,0 +1,100 @@
|
||||||
|
# This sets the permissions for various groups and users.
|
||||||
|
|
||||||
|
# TODO: Realm-level composite roles
|
||||||
|
# resource "keycloak_role" "is_local_admin" {
|
||||||
|
# composite_roles = [
|
||||||
|
# keycloak_role.blablabla.id
|
||||||
|
# ]
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# resource "keycloak_role" "can_manage_trusted_contributors" {
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# # WARNING: This gives the user PII access.
|
||||||
|
# resource "keycloak_role" "can_manage_snix" {
|
||||||
|
# }
|
||||||
|
|
||||||
|
# Realm-level groups to bestow to users.
|
||||||
|
resource "keycloak_group" "snix_core_team" {
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
name = "snix core team"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "keycloak_group_roles" "snix_core_team_roles" {
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
group_id = keycloak_group.snix_core_team.id
|
||||||
|
|
||||||
|
role_ids = [
|
||||||
|
# keycloak_role.is_local_admin,
|
||||||
|
# keycloak_role.can_manage_snix,
|
||||||
|
keycloak_role.grafana_admin.id,
|
||||||
|
# keycloak_role.forgejo_admin.id,
|
||||||
|
# keycloak_role.gerrit_admin.id
|
||||||
|
# keycloak_role.wiki_admin.id
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "keycloak_group_memberships" "snix_core_team_members" {
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
group_id = keycloak_group.snix_core_team.id
|
||||||
|
|
||||||
|
members = [
|
||||||
|
"raitobezarius",
|
||||||
|
"edef"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "keycloak_group" "trusted_contributors" {
|
||||||
|
name = "trusted contributors"
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "keycloak_group_roles" "trusted_contributors_roles" {
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
group_id = keycloak_group.trusted_contributors.id
|
||||||
|
|
||||||
|
role_ids = [
|
||||||
|
keycloak_role.grafana_editor.id
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "keycloak_group" "wiki_editors" {
|
||||||
|
name = "wiki editors"
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
}
|
||||||
|
|
||||||
|
# Application-level roles.
|
||||||
|
|
||||||
|
# Grafana
|
||||||
|
|
||||||
|
resource "keycloak_role" "grafana_editor" {
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
client_id = keycloak_openid_client.grafana.id
|
||||||
|
name = "Editor"
|
||||||
|
description = "Can edit things in Grafana"
|
||||||
|
}
|
||||||
|
|
||||||
|
resource "keycloak_role" "grafana_admin" {
|
||||||
|
realm_id = keycloak_realm.snix.id
|
||||||
|
client_id = keycloak_openid_client.grafana.id
|
||||||
|
name = "Admin"
|
||||||
|
description = "Can admin things in Grafana"
|
||||||
|
}
|
||||||
|
|
||||||
|
# TODO:
|
||||||
|
# Forgejo
|
||||||
|
|
||||||
|
# resource "keycloak_role" "forgejo_admin" {
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# resource "keycloak_role" "forgejo_trusted_contributor" {
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# # Gerrit
|
||||||
|
#
|
||||||
|
# resource "keycloak_role" "gerrit_admin" {
|
||||||
|
# }
|
||||||
|
#
|
||||||
|
# resource "keycloak_role" "gerrit_trusted_contributor" {
|
||||||
|
# }
|
||||||
|
|
@@ -6,44 +6,20 @@ variable "github_client_secret" {
   type = string
 }

-resource "keycloak_ldap_user_federation" "tvl_ldap" {
-  name = "tvl-ldap"
-  realm_id = keycloak_realm.tvl.id
-  enabled = true
-  connection_url = "ldap://localhost"
-  users_dn = "ou=users,dc=tvl,dc=fyi"
-  username_ldap_attribute = "cn"
-  uuid_ldap_attribute = "cn"
-  rdn_ldap_attribute = "cn"
-  full_sync_period = 86400
-  trust_email = true
-
-  user_object_classes = [
-    "inetOrgPerson",
-    "organizationalPerson",
-  ]
-
-  lifecycle {
-    # Without this, terraform wants to recreate the resource.
-    ignore_changes = [
-      delete_default_mappers
-    ]
-  }
-}
-
 # keycloak_oidc_identity_provider.github will be destroyed
 # (because keycloak_oidc_identity_provider.github is not in configuration)
 resource "keycloak_oidc_identity_provider" "github" {
   alias = "github"
   provider_id = "github"
-  client_id = "Iv23liXfGNIr7InMg5Uo"
+  client_id = "Ov23liKpXqs0aPaVgDpg"
   client_secret = var.github_client_secret
-  realm = keycloak_realm.tvl.id
+  realm = keycloak_realm.snix.id
   backchannel_supported = false
   gui_order = "1"
   store_token = false
   sync_mode = "IMPORT"
   trust_email = true
+  default_scopes = "openid user:email"

   # These default to built-in values for the `github` provider_id.
   authorization_url = ""
@@ -1,6 +1,12 @@
 { depot, ... }:

 (with depot.ops.machines; [
-  meta01
+  # Gerrit instance
+  gerrit01
+  # Public-facing services
   public01
+  # Build machine
+  build01
+  # Observability stack and internal software
+  meta01
 ])
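For orientation, registering a further machine would presumably mean creating `ops/machines/<name>/default.nix` in the same shape as the modules added below and listing it in `all-systems.nix` above. The following sketch of a hypothetical `cache01` only mirrors the header pattern visible in `build01` and `gerrit01`; it is not part of this commit:

```nix
# ops/machines/cache01/default.nix — hypothetical sketch, not in this commit
{ depot, lib, pkgs, ... }: # readTree options
{ config, ... }: # passed by module system
{
  imports = [
    (depot.third_party.agenix.src + "/modules/age.nix")
  ];

  networking = {
    hostName = "cache01";
    domain = "infra.snix.dev";
  };

  system.stateVersion = "25.05";
}
```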
94  ops/machines/build01/default.nix  (new file)
|
|
@ -0,0 +1,94 @@
|
||||||
|
{ depot, lib, pkgs, ... }: # readTree options
|
||||||
|
{ config, ... }: # passed by module system
|
||||||
|
let
|
||||||
|
mod = name: depot.path.origSrc + ("/ops/modules/" + name);
|
||||||
|
in
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
(mod "o11y/agent.nix")
|
||||||
|
(mod "snix-buildkite.nix")
|
||||||
|
(mod "known-hosts.nix")
|
||||||
|
|
||||||
|
(depot.third_party.agenix.src + "/modules/age.nix")
|
||||||
|
];
|
||||||
|
|
||||||
|
# Machine model taken from project Floral.
|
||||||
|
boot.isContainer = true;
|
||||||
|
|
||||||
|
# XXX: There's currently no way to remove the "problematic" entries (trying
|
||||||
|
# to override the /proc, /sys, /dev, ... mounts from systemd-nspawn) while
|
||||||
|
# also keeping the entry for the wrappers dir.
|
||||||
|
boot.specialFileSystems = lib.mkForce {
|
||||||
|
"/run/wrappers" = {
|
||||||
|
fsType = "tmpfs";
|
||||||
|
options = [ "nodev" "mode=755" "size=${config.security.wrapperDirSize}" ];
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.depot.buildkite = {
|
||||||
|
enable = true;
|
||||||
|
agentCount = 32;
|
||||||
|
};
|
||||||
|
|
||||||
|
boot.loader.initScript.enable = true;
|
||||||
|
system.switch.enableNg = false;
|
||||||
|
nix.package = pkgs.nix_2_3;
|
||||||
|
|
||||||
|
networking = {
|
||||||
|
useNetworkd = true;
|
||||||
|
useHostResolvConf = false;
|
||||||
|
|
||||||
|
hostName = "build01";
|
||||||
|
domain = "infra.snix.dev";
|
||||||
|
nameservers = [
|
||||||
|
"2001:4860:4860::6464"
|
||||||
|
"2001:4860:4860::64"
|
||||||
|
];
|
||||||
|
|
||||||
|
interfaces.host0.ipv6 = {
|
||||||
|
addresses = [
|
||||||
|
{ address = "2001:bc8:38ee:100:7000::20"; prefixLength = 64; }
|
||||||
|
];
|
||||||
|
routes = [
|
||||||
|
{ address = "64:ff9b::"; via = "2001:bc8:38ee:100::100"; prefixLength = 96; }
|
||||||
|
];
|
||||||
|
};
|
||||||
|
|
||||||
|
firewall.allowPing = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets =
|
||||||
|
let
|
||||||
|
secretFile = name: depot.ops.secrets."${name}.age";
|
||||||
|
in
|
||||||
|
{
|
||||||
|
buildkite-agent-token = {
|
||||||
|
file = secretFile "buildkite-agent-token";
|
||||||
|
mode = "0440";
|
||||||
|
group = "buildkite-agents";
|
||||||
|
};
|
||||||
|
buildkite-private-key = {
|
||||||
|
file = secretFile "buildkite-ssh-private-key";
|
||||||
|
mode = "0440";
|
||||||
|
group = "buildkite-agents";
|
||||||
|
};
|
||||||
|
buildkite-besadii-config = {
|
||||||
|
file = secretFile "buildkite-besadii-config";
|
||||||
|
mode = "0440";
|
||||||
|
group = "buildkite-agents";
|
||||||
|
};
|
||||||
|
buildkite-graphql-token = {
|
||||||
|
file = secretFile "buildkite-graphql-token";
|
||||||
|
mode = "0440";
|
||||||
|
group = "buildkite-agents";
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
services.openssh.enable = true;
|
||||||
|
time.timeZone = "UTC";
|
||||||
|
users.users.root.openssh.authorizedKeys.keys = with depot.users; flokli.keys.all ++ edef.keys.all ++ raito.keys.all;
|
||||||
|
users.groups.kvm = { };
|
||||||
|
users.users.root.extraGroups = [ "kvm" ];
|
||||||
|
|
||||||
|
system.stateVersion = "25.05";
|
||||||
|
}
|
||||||
122  ops/machines/gerrit01/default.nix  (new file)
|
|
@ -0,0 +1,122 @@
|
||||||
|
{ depot, lib, pkgs, ... }: # readTree options
|
||||||
|
{ config, ... }: # passed by module system
|
||||||
|
|
||||||
|
let
|
||||||
|
mod = name: depot.path.origSrc + ("/ops/modules/" + name);
|
||||||
|
in
|
||||||
|
{
|
||||||
|
imports = [
|
||||||
|
./disko.nix
|
||||||
|
|
||||||
|
(mod "hetzner-cloud.nix")
|
||||||
|
(mod "restic.nix")
|
||||||
|
(mod "o11y/agent.nix")
|
||||||
|
(mod "gerrit-autosubmit.nix")
|
||||||
|
(mod "monorepo-gerrit.nix")
|
||||||
|
(mod "www/cl.snix.dev.nix")
|
||||||
|
(mod "known-hosts.nix")
|
||||||
|
|
||||||
|
(depot.third_party.agenix.src + "/modules/age.nix")
|
||||||
|
(depot.third_party.disko.src + "/module.nix")
|
||||||
|
];
|
||||||
|
|
||||||
|
infra.hardware.hetzner-cloud = {
|
||||||
|
enable = true;
|
||||||
|
ipv6 = "2a01:4f8:c17:6188::1/64";
|
||||||
|
};
|
||||||
|
|
||||||
|
networking = {
|
||||||
|
hostName = "gerrit01";
|
||||||
|
domain = "infra.snix.dev";
|
||||||
|
};
|
||||||
|
|
||||||
|
# Disable background git gc system-wide, as it has a tendency to break CI.
|
||||||
|
environment.etc."gitconfig".source = pkgs.writeText "gitconfig" ''
|
||||||
|
[gc]
|
||||||
|
autoDetach = false
|
||||||
|
'';
|
||||||
|
|
||||||
|
time.timeZone = "UTC";
|
||||||
|
|
||||||
|
programs.mtr.enable = true;
|
||||||
|
programs.mosh.enable = true;
|
||||||
|
services.openssh = {
|
||||||
|
enable = true;
|
||||||
|
settings = {
|
||||||
|
PasswordAuthentication = false;
|
||||||
|
KbdInteractiveAuthentication = false;
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
# Automatically collect garbage from the Nix store.
|
||||||
|
services.depot.automatic-gc = {
|
||||||
|
enable = true;
|
||||||
|
interval = "daily";
|
||||||
|
diskThreshold = 5; # GiB
|
||||||
|
maxFreed = 3; # GiB
|
||||||
|
preserveGenerations = "30d";
|
||||||
|
};
|
||||||
|
|
||||||
|
age.secrets =
|
||||||
|
let
|
||||||
|
secretFile = name: depot.ops.secrets."${name}.age";
|
||||||
|
in
|
||||||
|
{
|
||||||
|
gerrit-oauth-secret.file = secretFile "gerrit-oauth-secret";
|
||||||
|
gerrit-replication-key.file = secretFile "gerrit-replication-key";
|
||||||
|
gerrit-autosubmit.file = secretFile "gerrit-autosubmit";
|
||||||
|
gerrit-besadii-config = {
|
||||||
|
file = secretFile "buildkite-besadii-config";
|
||||||
|
owner = "git";
|
||||||
|
};
|
||||||
|
restic-repository-password.file = secretFile "restic-repository-password";
|
||||||
|
restic-bucket-credentials.file = secretFile "restic-bucket-credentials";
|
||||||
|
};
|
||||||
|
|
||||||
|
services.depot = {
|
||||||
|
gerrit-autosubmit.enable = true;
|
||||||
|
restic.enable = true;
|
||||||
|
};
|
||||||
|
|
||||||
|
services.fail2ban.enable = true;
|
||||||
|
|
||||||
|
environment.systemPackages = (with pkgs; [
|
||||||
|
bat
|
||||||
|
bb
|
||||||
|
curl
|
||||||
|
direnv
|
||||||
|
fd
|
||||||
|
git
|
||||||
|
htop
|
||||||
|
hyperfine
|
||||||
|
jq
|
||||||
|
nano
|
||||||
|
nvd
|
||||||
|
ripgrep
|
||||||
|
tree
|
||||||
|
unzip
|
||||||
|
vim
|
||||||
|
]) ++ (with depot; [
|
||||||
|
ops.deploy-machine
|
||||||
|
]);
|
||||||
|
|
||||||
|
# Required for prometheus to be able to scrape stats
|
||||||
|
services.nginx.statusPage = true;
|
||||||
|
|
||||||
|
users = {
|
||||||
|
# Set up a user & group for git shenanigans
|
||||||
|
groups.git = { };
|
||||||
|
users.git = {
|
||||||
|
group = "git";
|
||||||
|
isSystemUser = true;
|
||||||
|
createHome = true;
|
||||||
|
home = "/var/lib/git";
|
||||||
|
};
|
||||||
|
users.root.openssh.authorizedKeys.keys = with depot.users; flokli.keys.all ++ edef.keys.all ++ raito.keys.all;
|
||||||
|
};
|
||||||
|
|
||||||
|
boot.initrd.systemd.enable = true;
|
||||||
|
zramSwap.enable = true;
|
||||||
|
|
||||||
|
system.stateVersion = "25.05";
|
||||||
|
}
|
||||||
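The mod helper used in the imports above resolves module file names against the monorepo source tree; a minimal sketch of the expansion (illustrative only, assuming depot.path.origSrc points at the repository root):

let
  mod = name: depot.path.origSrc + ("/ops/modules/" + name);
in
mod "restic.nix"
# => <repository root>/ops/modules/restic.nix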
ops/machines/gerrit01/disko.nix (new file, 84 lines)
@@ -0,0 +1,84 @@
let
  disk = "/dev/sda";
  targetFsType = "xfs";
  swapSizeInGb = 16;
in
{
  disko.devices = {
    disk = {
      ${disk} = {
        device = "${disk}";
        type = "disk";
        content = {
          type = "gpt";
          partitions = {
            ESP = {
              size = "1G";
              type = "EF00";
              priority = 100;
              content = {
                type = "filesystem";
                format = "vfat";
                mountpoint = "/boot";
                mountOptions = [ "umask=0077" ];
              };
            };
            PRIMARY = {
              # Take the next available range.
              start = "0";
              end = "-${toString swapSizeInGb}G";
              content = {
                type = "lvm_pv";
                vg = "vg_${targetFsType}";
              };
            };
            SWAP = {
              # Start from the SWAP area.
              start = "-${toString swapSizeInGb}G";
              size = "100%";
              content = {
                type = "swap";
              };
            };
          };
        };
      };
    };
    lvm_vg = {
      "vg_${targetFsType}" = {
        type = "lvm_vg";
        lvs = {
          ROOT = {
            name = "ROOT";
            size = "2G";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/";
            };
          };
          NIX = {
            name = "NIX";
            size = "40%FREE";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/nix";
              mountOptions = [ "noatime" ];
            };
          };
          VAR = {
            name = "VAR";
            size = "100%FREE";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/var";
              mountOptions = [ "noatime" ];
            };
          };
        };
      };
    };
  };
}
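A minimal sketch of what the partition boundaries above evaluate to, purely to illustrate the string interpolation (the attribute names here are illustrative, not disko options):

let swapSizeInGb = 16; in {
  primaryEnd = "-${toString swapSizeInGb}G"; # "-16G": stop 16 GiB before the end of the disk
  swapStart = "-${toString swapSizeInGb}G";  # the swap partition then fills that final 16 GiB
}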
ops/machines/meta01/default.nix (new file, 145 lines)
@@ -0,0 +1,145 @@
{ depot, lib, pkgs, ... }: # readTree options
{ config, ... }: # passed by module system

let
  mod = name: depot.path.origSrc + ("/ops/modules/" + name);
in
{
  imports = [
    ./disko.nix

    (mod "hetzner-cloud.nix")
    (mod "o11y/agent.nix")
    (mod "o11y/mimir.nix")
    (mod "o11y/loki.nix")
    (mod "o11y/tempo.nix")
    (mod "o11y/alertmanager-irc-relay.nix")
    (mod "known-hosts.nix")
    (mod "clbot.nix")

    (mod "www/mimir.snix.dev.nix")
    (mod "www/loki.snix.dev.nix")
    (mod "www/tempo.snix.dev.nix")

    (depot.third_party.agenix.src + "/modules/age.nix")
    (depot.third_party.disko.src + "/module.nix")
  ];

  infra.hardware.hetzner-cloud = {
    enable = true;
    ipv6 = "2a01:4f8:c013:4a58::1/64";
  };

  networking = {
    hostName = "meta01";
    domain = "infra.snix.dev";
  };

  time.timeZone = "UTC";

  programs.mtr.enable = true;
  programs.mosh.enable = true;
  services.openssh = {
    enable = true;
    settings = {
      PasswordAuthentication = false;
      KbdInteractiveAuthentication = false;
    };
  };

  # Automatically collect garbage from the Nix store.
  services.depot.automatic-gc = {
    enable = true;
    interval = "daily";
    diskThreshold = 5; # GiB
    maxFreed = 3; # GiB
    preserveGenerations = "30d";
  };

  services.depot = {
    # TODO: make it possible to do `alertmanager.enable = true;`
    prometheus.enable = true;
    loki.enable = true;
    tempo.enable = true;
    clbot = {
      enable = false;
      channels = {
        "#snix" = { };

        flags = {
          gerrit_host = "cl.snix.dev:29418";
          gerrit_ssh_auth_username = "clbot";
          # gerrit_ssh_auth_key = config.age.secrets.clbot-ssh-private-key.path;

          irc_server = "irc.hackint.org:6697";
          irc_tls = true;
          irc_user = "snixbot";
          irc_nick = "snixbot";

          notify_branches = "canon,refs/meta/config";
          notify_repo = "snix";

          irc_pass = "$CLBOT_PASS";
        };
      };
    };
  };

  networking.nftables.enable = true;
  networking.firewall.extraInputRules = ''
    # Prometheus, Loki, Tempo
    ip6 saddr { 2a01:4f8:c013:3e62::1 } tcp dport { 9009, 9090, 9190 } accept
    ip saddr { 49.13.70.233 } tcp dport { 9009, 9090, 9190 } accept
  '';

  age.secrets =
    let
      secretFile = name: depot.ops.secrets."${name}.age";
    in
    {
      mimir-environment.file = secretFile "mimir-environment";
      # Yes, they are literally the same: Hetzner Cloud has no support for per-bucket keys.
      loki-environment.file = secretFile "mimir-environment";
      tempo-environment.file = secretFile "mimir-environment";
      metrics-push-htpasswd.file = secretFile "metrics-push-htpasswd";
      metrics-push-htpasswd.owner = "nginx";
      mimir-webhook-url.file = secretFile "mimir-webhook-url";
      alertmanager-irc-relay-environment.file = secretFile "alertmanager-irc-relay-environment";
      restic-repository-password.file = secretFile "restic-repository-password";
      restic-bucket-credentials.file = secretFile "restic-bucket-credentials";
    };

  services.fail2ban.enable = true;

  environment.systemPackages = (with pkgs; [
    bat
    bb
    curl
    direnv
    fd
    git
    htop
    hyperfine
    jq
    nano
    nvd
    ripgrep
    tree
    unzip
    vim
  ]) ++ (with depot; [
    ops.deploy-machine
  ]);

  # Required for prometheus to be able to scrape stats
  services.nginx.statusPage = true;

  users = {
    users.root.openssh.authorizedKeys.keys = with depot.users; flokli.keys.all ++ edef.keys.all ++ raito.keys.all;
  };

  boot.initrd.systemd.enable = true;
  zramSwap.enable = true;

  system.stateVersion = "25.05";
}
ops/machines/meta01/disko.nix (new file, 84 lines)
@@ -0,0 +1,84 @@
let
  disk = "/dev/sda";
  targetFsType = "xfs";
  swapSizeInGb = 8;
in
{
  disko.devices = {
    disk = {
      ${disk} = {
        device = "${disk}";
        type = "disk";
        content = {
          type = "gpt";
          partitions = {
            ESP = {
              size = "1G";
              type = "EF00";
              priority = 100;
              content = {
                type = "filesystem";
                format = "vfat";
                mountpoint = "/boot";
                mountOptions = [ "umask=0077" ];
              };
            };
            PRIMARY = {
              # Take the next available range.
              start = "0";
              end = "-${toString swapSizeInGb}G";
              content = {
                type = "lvm_pv";
                vg = "vg_${targetFsType}";
              };
            };
            SWAP = {
              # Start from the SWAP area.
              start = "-${toString swapSizeInGb}G";
              size = "100%";
              content = {
                type = "swap";
              };
            };
          };
        };
      };
    };
    lvm_vg = {
      "vg_${targetFsType}" = {
        type = "lvm_vg";
        lvs = {
          ROOT = {
            name = "ROOT";
            size = "2G";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/";
            };
          };
          NIX = {
            name = "NIX";
            size = "40%FREE";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/nix";
              mountOptions = [ "noatime" ];
            };
          };
          VAR = {
            name = "VAR";
            size = "100%FREE";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/var";
              mountOptions = [ "noatime" ];
            };
          };
        };
      };
    };
  };
}
ops/machines/public01/default.nix (new file, 206 lines)
@@ -0,0 +1,206 @@
{ depot, lib, pkgs, ... }: # readTree options
{ config, ... }: # passed by module system

let
  mod = name: depot.path.origSrc + ("/ops/modules/" + name);
in
{
  imports = [
    ./disko.nix

    (mod "hetzner-cloud.nix")
    (mod "forgejo.nix")
    (mod "restic.nix")
    # (mod "stalwart.nix")
    # Automatically enable metric and log collection.
    (mod "o11y/agent.nix")
    (mod "o11y/grafana.nix")
    (mod "www/status.snix.dev.nix")
    (mod "www/auth.snix.dev.nix")
    (mod "www/git.snix.dev.nix")
    # (mod "www/mail.snix.dev.nix")
    (mod "known-hosts.nix")

    (depot.third_party.agenix.src + "/modules/age.nix")
    (depot.third_party.disko.src + "/module.nix")
  ];

  infra.hardware.hetzner-cloud = {
    enable = true;
    ipv6 = "2a01:4f8:c013:3e62::1/64";
    # Additional IPs.
    floatingIPs = [
      "49.12.112.149/32"
    ];
  };

  networking = {
    hostName = "public01";
    domain = "infra.snix.dev";
  };

  time.timeZone = "UTC";

  programs.mtr.enable = true;
  programs.mosh.enable = true;
  services.openssh = {
    enable = true;
    settings = {
      PasswordAuthentication = false;
      KbdInteractiveAuthentication = false;
    };
  };

  services.depot = {
    # Automatically collect garbage from the Nix store.
    automatic-gc = {
      enable = true;
      interval = "daily";
      diskThreshold = 5; # GiB
      maxFreed = 3; # GiB
      preserveGenerations = "30d";
    };
    forgejo = {
      enable = true;
      domain = "git.snix.dev";
    };
    grafana.enable = true;
    # stalwart = {
    #   enable = true;
    #   mailDomain = "mail.snix.dev";
    # };
    # Configure backups to Hetzner Cloud
    restic = {
      enable = true;
      paths = [
        "/var/backup/postgresql"
        "/var/backup/mysql"
        "/var/lib/grafana"
        "/var/lib/forgejo"
      ];
    };
  };

  services.postgresqlBackup = {
    enable = true;
    databases = [
      "keycloak"
    ];
  };

  services.mysqlBackup = {
    enable = true;
    databases = [
      "forgejo"
    ];
  };

  services.keycloak = {
    enable = true;

    settings = {
      http-port = 9091;
      hostname = "auth.snix.dev";
      proxy-headers = "xforwarded";
      http-enabled = true;

      # https://www.keycloak.org/docs/latest/server_admin/#_fine_grain_permissions
      features = "admin-fine-grained-authz";
    };

    # This will be immediately changed, so no harm in having it here.
    # It's just a one-time-use random set of characters.
    initialAdminPassword = "TUxLWjndUZQGQ0A3ws0LfUs1DYRdAVcK";

    database = {
      type = "postgresql";
      createLocally = true;
      passwordFile = config.age.secrets.keycloak-db-password.path;
    };
  };

  systemd.services.keycloak.serviceConfig.Environment = [
    # https://bugs.openjdk.org/browse/JDK-8170568 someday… !
    "JAVA_OPTS_APPEND=-Djava.net.preferIPv6Addresses=system"
  ];

  age.secrets =
    let
      secretFile = name: depot.ops.secrets."${name}.age";
    in
    {
      forgejo-oauth-secret = {
        file = secretFile "forgejo-oauth-secret";
        mode = "0440";
        group = "git";
      };
      grafana-oauth-secret = {
        file = secretFile "grafana-oauth-secret";
        mode = "0440";
        owner = "grafana";
      };
      keycloak-db-password.file = secretFile "keycloak-db-password";
      restic-repository-password.file = secretFile "restic-repository-password";
      restic-bucket-credentials.file = secretFile "restic-bucket-credentials";
    };

  # Start the Gerrit->IRC bot
  # services.depot.clbot = {
  #   enable = true;
  #   channels = {
  #     "#snix-dev" = { };
  #   };

  #   # See //fun/clbot for details.
  #   flags = {
  #     gerrit_host = "cl.tvl.fyi:29418";
  #     gerrit_ssh_auth_username = "clbot";
  #     gerrit_ssh_auth_key = config.age.secretsDir + "/clbot-ssh";

  #     irc_server = "localhost:${toString config.services.znc.config.Listener.l.Port}";
  #     irc_user = "tvlbot";
  #     irc_nick = "tvlbot";

  #     notify_branches = "canon,refs/meta/config";
  #     notify_repo = "depot";

  #     # This secret is read from an environment variable, which is
  #     # populated by a systemd EnvironmentFile.
  #     irc_pass = "$CLBOT_PASS";
  #   };
  # };

  services.fail2ban.enable = true;

  environment.systemPackages = (with pkgs; [
    bat
    bb
    curl
    direnv
    fd
    git
    htop
    hyperfine
    jq
    nano
    nvd
    ripgrep
    tree
    unzip
    vim
  ]) ++ (with depot; [
    ops.deploy-machine
  ]);

  # Required for prometheus to be able to scrape stats
  services.nginx.statusPage = true;

  users = {
    users.root.openssh.authorizedKeys.keys = with depot.users; flokli.keys.all ++ edef.keys.all ++ raito.keys.all;
  };

  boot.initrd.systemd.enable = true;
  zramSwap.enable = true;

  system.stateVersion = "25.05";
}
ops/machines/public01/disko.nix (new file, 84 lines)
@@ -0,0 +1,84 @@
let
  disk = "/dev/sda";
  targetFsType = "xfs";
  swapSizeInGb = 16;
in
{
  disko.devices = {
    disk = {
      ${disk} = {
        device = "${disk}";
        type = "disk";
        content = {
          type = "gpt";
          partitions = {
            ESP = {
              size = "1G";
              type = "EF00";
              priority = 100;
              content = {
                type = "filesystem";
                format = "vfat";
                mountpoint = "/boot";
                mountOptions = [ "umask=0077" ];
              };
            };
            PRIMARY = {
              # Take the next available range.
              start = "0";
              end = "-${toString swapSizeInGb}G";
              content = {
                type = "lvm_pv";
                vg = "vg_${targetFsType}";
              };
            };
            SWAP = {
              # Start from the SWAP area.
              start = "-${toString swapSizeInGb}G";
              size = "100%";
              content = {
                type = "swap";
              };
            };
          };
        };
      };
    };
    lvm_vg = {
      "vg_${targetFsType}" = {
        type = "lvm_vg";
        lvs = {
          ROOT = {
            name = "ROOT";
            size = "2G";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/";
            };
          };
          NIX = {
            name = "NIX";
            size = "40%FREE";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/nix";
              mountOptions = [ "noatime" ];
            };
          };
          VAR = {
            name = "VAR";
            size = "100%FREE";
            content = {
              type = "filesystem";
              format = targetFsType;
              mountpoint = "/var";
              mountOptions = [ "noatime" ];
            };
          };
        };
      };
    };
  };
}
@@ -9,5 +9,6 @@
   imports = [
     ./automatic-gc.nix
     ./auto-deploy.nix
+    ./raito-vm.nix
   ];
 }
ops/modules/forgejo.nix (new file, 305 lines)
@@ -0,0 +1,305 @@
#
# Forgejo Git Backend taken from Lix configuration.
# Thanks to all the Lix core developers for this!
# vim: et:ts=2:sw=2:
#
{ depot, pkgs, lib, config, ... }:
let
  cfg = config.services.depot.forgejo;
  inherit (lib) types mkEnableOption mkOption mkIf;
  emojo =
    let
      handlePostFetch = ''
        for i in $out/*_256.png; do
          mv $i $(echo $i | sed -E 's/_256//g')
        done
      '';
      drgn = pkgs.fetchzip {
        url = "https://volpeon.ink/emojis/drgn/drgn.zip";
        stripRoot = false;
        sha256 = "sha256-/2MpbxMJC92a4YhwG5rP6TsDC/q1Ng5fFq4xe2cBrrM=";
        postFetch = handlePostFetch;
      };
      neocat = pkgs.fetchzip {
        url = "https://volpeon.ink/emojis/neocat/neocat.zip";
        stripRoot = false;
        sha256 = "sha256-Irh6Mv6ICDkaaenIFf8Cm1AFkdZy0gRVbXqgnwpk3Qw=";
        postFetch = handlePostFetch;
      };
      neofox = pkgs.fetchzip {
        url = "https://volpeon.ink/emojis/neofox/neofox.zip";
        stripRoot = false;
        sha256 = "sha256-FSTVYP/Bt25JfLr/Ny1g9oI9aAvAYLYhct31j3XRXYc=";
        postFetch = handlePostFetch;
      };
      dragon = pkgs.fetchFromGitHub {
        owner = "chr-1x";
        repo = "dragn-emoji";
        rev = "969543d9918ce2f0794ccd1e41b276d1ab22f0d5";
        sha256 = "sha256-+40e9nKaIpQYZUiXh3Qe5jp2uvRbAQYDdXMGLEWHJio=";
        postFetch = ''
          for i in $out/*.svg; do
            ${pkgs.librsvg}/bin/rsvg-convert -h 256 $i > a.png;
            mv a.png $(echo $i | sed -E "s/svg$/png/");
            rm $i
          done
          ${pkgs.oxipng}/bin/oxipng -o max $out/*.png
        '';
      };
    in
    pkgs.symlinkJoin { name = "emojo"; paths = [ drgn neocat neofox dragon ]; };
in
{
  options.services.depot.forgejo = {
    enable = mkEnableOption "Forgejo Forge";

    domain = mkOption {
      type = types.str;
    };
  };

  config = mkIf cfg.enable {
    # we have to use redis since we apparently have a "large instance" which
    # "leaks hilarious amounts of memory if you use the default configuration"
    services.redis = {
      package = pkgs.valkey;

      vmOverCommit = true;
      servers.forgejo = {
        enable = true;
        # disable persistence, so when redis inevitably OOMs due to
        # forgejo throwing too much in it, we don't restore the dataset
        # that caused the OOM, breaking the restart loop.
        save = [ ];
      };
    };
    systemd.services.redis-forgejo.serviceConfig = {
      Restart = "always";
    };
    systemd.services.forgejo = {
      after = [ "redis-forgejo.service" ];
      wants = [ "redis-forgejo.service" ];
    };

    services.forgejo = {
      enable = true;

      package = pkgs.forgejo.overrideAttrs (old: {
        patches = old.patches ++ (with depot.third_party.lix_forgejo.patches; [
          upstream_link
          signin_redirect
          api_dont_notify
          forgejo_is_now_gerrit_native
          forgejo_knows_about_gerrit
        ]);
      });

      # General settings.
      lfs.enable = true;

      # Make our checkout paths more in line with expectations by calling our user "git".
      user = "git";
      group = "git";

      # Secret mail config.
      # mailerPasswordFile = config.age.secrets.forgejoSmtpSecret.path;

      # Server and database config.
      settings = {

        # Sets the name in the titlebar, mostly.
        DEFAULT.APP_NAME = "Snix Project";

        # Settings for how we serve things.
        server = {
          DOMAIN = cfg.domain;
          PROTOCOL = "http";
          ENABLE_ACME = true;
          ACME_ACCEPTTOS = true;
          ACME_EMAIL = "acme@snix.dev";
          LANDING_PAGE = "explore";
          ROOT_URL = "https://${cfg.domain}";

          # open a server on localhost:6060 with pprof data
          # !! note: the documentation says that this causes forgejo serv to dump
          # random files in PPROF_DATA_PATH.
          # This documentation is wrong, ENABLE_PPROF only affects forgejo web,
          # and forgejo serv requires a --enable-pprof arg to do that. But it's
          # not causing perf problems right now so we don't care about that
          # anyway.
          ENABLE_PPROF = true;
        };

        # openid is not used in our setup
        openid = {
          ENABLE_OPENID_SIGNIN = false;
          ENABLE_OPENID_SIGNUP = false;
        };

        oauth2_client = {
          ENABLE_AUTO_REGISTRATION = true;
          REGISTER_EMAIL_CONFIRM = false;
          ACCOUNT_LINKING = "login";
          USERNAME = "nickname";
          OPENID_CONNECT_SCOPES = "email profile";
        };

        cache = {
          ADAPTER = "redis";
          HOST = "redis+socket://${config.services.redis.servers.forgejo.unixSocket}";
        };
        "cache.last_commit" = {
          ITEM_TTL = "24h"; # from default 8760h (1 year)
        };

        service = {
          # We previously ran with "disable registration" which doesn't actually
          # do anything to the OAuth login form, just the link account form. We
          # suspect that if the account has all the required metadata like email
          # to register cleanly, it doesn't use DISABLE_REGISTRATION at all.
          #
          # However this was probably relying on forgejo bugs, let's set it
          # unambiguously.
          DISABLE_REGISTRATION = false;
          ALLOW_ONLY_EXTERNAL_REGISTRATION = true;

          #REQUIRE_SIGNIN_VIEW = false;
          ENABLE_NOTIFY_MAIL = true;

          # Don't add org members as watchers on all repos, or indeed on new
          # repos either.
          #
          # See: https://github.com/bmackinney/gitea/commit/a9eb2167536cfa8f7b7a23f73e11c8edf5dc0dc0
          AUTO_WATCH_NEW_REPOS = false;
        };

        session = {
          # Put sessions in the DB so they survive restarts
          PROVIDER = "db";
          PROVIDER_CONFIG = "";

          # Cookie only works over https
          COOKIE_SECURE = true;

          # 5 day sessions
          SESSION_LIFE_TIME = 86400 * 5;
        };

        # Careful with these!
        security = {
          # Don't allow access to the install page; manage exclusively via Nix.
          INSTALL_LOCK = true;

          # Allow internal users with the right permissions to set up Git hooks.
          DISABLE_GIT_HOOKS = false;
        };

        # Note: PASSWD is set up by NixOS.
        # mailer = {
        #   ENABLED = true;
        #   PROTOCOL = "smtps";
        #   SMTP_ADDR = "";
        #   SMTP_PORT = 465;
        #   USER = "";
        #   FROM = "";
        # };

        ui = {
          # Add the used emojis from https://volpeon.ink/emojis/ as well as https://github.com/chr-1x/dragn-emoji
          CUSTOM_EMOJIS = builtins.readFile depot.third_party.lix_forgejo.custom_emojis;
          # Normal reaction emoji people always need.
          REACTIONS = "+1, -1, laugh, confused, heart, hooray, eyes, melting_face, neocat_scream_scared, neofox_scream_scared, drgn_scream, neocat_heart, neofox_heart, drgn_heart, neocat_floof_reach, neocat_pleading, neofox_floof_reach, neofox_pleading, drgn_pleading";

          # To protect privacy of users.
          SHOW_USER_EMAIL = false;
        };

        # No runners are configured.
        actions.ENABLED = false;
      };

      # Use a MySQL database, which we enable below.
      database = {
        type = "mysql";
        user = config.services.forgejo.user;
      };
    };

    # Inspired from Gerrit's way of doing things (from Lix).
    # Before starting Forgejo, we will re-converge any required information.
    # TODO: learn how to use update-oauth as well?
    systemd.services.forgejo-keys = {
      enable = true;

      before = [ "forgejo.service" ];
      wantedBy = [ "forgejo.service" ];
      after = [ "network.target" ];

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = "true";
        WorkingDirectory = "/var/lib/forgejo";
        User = "git";
        Group = "git";
        Environment = [
          "FORGEJO_WORK_DIR=/var/lib/forgejo"
        ];
      };

      path = [ config.services.forgejo.package ];

      script = ''
        NAME="Snix project"
        PROVIDER="openidConnect"
        CLIENT_ID="forgejo"
        CLIENT_SECRET=$(cat ${config.age.secrets.forgejo-oauth-secret.path})
        DISCOVERY_URL="https://auth.snix.dev/realms/snix-project/.well-known/openid-configuration"
        SCOPES=("openid" "profile" "email")

        # Check if the OAuth2 source already exists
        if gitea admin auth list | grep -q "$NAME"; then
          echo "OAuth2 source '$NAME' already exists. Skipping creation."
          exit 0
        fi

        # Add the OAuth2 source
        gitea admin auth add-oauth \
          --name "$NAME" \
          --provider "$PROVIDER" \
          --key "$CLIENT_ID" \
          --secret "$CLIENT_SECRET" \
          --auto-discover-url "$DISCOVERY_URL" \
          $(printf -- '--scopes "%s" ' "''${SCOPES[@]}") \
          --icon-url "$ICON_URL"

        echo "OAuth2 source '$NAME' added successfully."
      '';
    };

    # Create our user and group. This is necessary for any name that's
    # not "forgejo", due to the nix module config.
    users.users."${config.services.forgejo.group}" = {
      description = "Gitea Service";
      useDefaultShell = true;

      home = config.services.forgejo.stateDir;
      group = config.services.forgejo.group;

      # redis instance runs as redis-forgejo, so we need to be in that group to be able to connect
      extraGroups = [ "redis-forgejo" ];

      isSystemUser = true;
    };
    users.groups."${config.services.forgejo.group}" = { };

    # Enable the mysql server, which will provide the forgejo backing store.
    services.mysql.enable = lib.mkForce true;
    services.mysql.package = lib.mkForce pkgs.mariadb;

    systemd.tmpfiles.rules = let cfg = config.services.forgejo; in [
      "d '${cfg.customDir}/public/assets' 0750 ${cfg.user} ${cfg.group} - -"
      "d '${cfg.customDir}/public/assets/img' 0750 ${cfg.user} ${cfg.group} - -"
      "L+ '${cfg.customDir}/public/assets/img/emoji' - - - - ${emojo}"
    ];
  };
}
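A minimal usage sketch of the module above, matching how public01 enables it elsewhere in this commit:

services.depot.forgejo = {
  enable = true;
  domain = "git.snix.dev"; # see ops/machines/public01/default.nix
};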
@@ -12,7 +12,7 @@ in
 {
   options.services.depot.gerrit-autosubmit = {
     enable = lib.mkEnableOption description;
-    gerritUrl = mkStringOption "https://cl.tvl.fyi";
+    gerritUrl = mkStringOption "https://cl.snix.dev";

     secretsFile = with lib; mkOption {
       description = "Path to a systemd EnvironmentFile containing secrets";
@@ -37,6 +37,7 @@ in

       environment = {
         GERRIT_URL = cfg.gerritUrl;
+        GERRIT_USERNAME = "clbot";
       };
     };
   };
ops/modules/hetzner-cloud.nix (new file, 76 lines)
@@ -0,0 +1,76 @@
{ config, lib, pkgs, modulesPath, ... }:
let
  cfg = config.infra.hardware.hetzner-cloud;
  inherit (lib) types mkOption mkEnableOption mkIf;
in
{
  imports =
    [ (modulesPath + "/profiles/qemu-guest.nix") ];

  options.infra.hardware.hetzner-cloud = {
    enable = mkEnableOption "the Hetzner Cloud hardware profile";

    ipv6 = mkOption {
      type = types.str;
    };

    floatingIPs = mkOption {
      type = types.listOf types.str;
      default = [ ];
    };
  };

  config = mkIf cfg.enable {
    services.qemuGuest.enable = true;
    systemd.network.enable = true;
    networking.useDHCP = lib.mkDefault false;

    systemd.network.networks."10-wan" = {
      matchConfig.Name = "enp1s0";
      linkConfig.RequiredForOnline = true;
      networkConfig = {
        # DHCPv4 for the IPv4 only.
        DHCP = "ipv4";
        Address = [ cfg.ipv6 ] ++ cfg.floatingIPs;
      };

      routes = [
        {
          Gateway = "fe80::1";
        }
      ];

      dns = [ "2a01:4ff:ff00::add:1" "2a01:4ff:ff00::add:2" ];
    };

    boot.loader.systemd-boot.enable = true;

    boot.initrd.kernelModules = [
      "virtio_balloon"
      "virtio_console"
      "virtio_rng"
    ];

    boot.initrd.availableKernelModules = [
      "9p"
      "9pnet_virtio"
      "ata_piix"
      "nvme"
      "sd_mod"
      "sr_mod"
      "uhci_hcd"
      "virtio_blk"
      "virtio_mmio"
      "virtio_net"
      "virtio_pci"
      "virtio_scsi"
      "xhci_pci"
      "ahci"
    ];

    boot.kernelModules = [ ];
    boot.extraModulePackages = [ ];

    nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
  };
}
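A minimal usage sketch of this hardware profile, mirroring the machine configurations above (the addresses shown are the ones public01 uses; floatingIPs defaults to an empty list):

infra.hardware.hetzner-cloud = {
  enable = true;
  ipv6 = "2a01:4f8:c013:3e62::1/64";
  floatingIPs = [ "49.12.112.149/32" ];
};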
@@ -1,8 +1,23 @@
-# Configure public keys for SSH hosts known to TVL.
+# Configure public keys for SSH hosts known to the snix project.
 { ... }:

 {
   programs.ssh.knownHosts = {
+    public01 = {
+      hostNames = [ "public01.infra.snix.dev" "git.snix.dev" ];
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICzB7bqXWcv+sVokySvj1d74zRlVLSNqBw7/OY3c7QYd";
+    };
+
+    gerrit01 = {
+      hostNames = [ "gerrit01.infra.snix.dev" "cl.snix.dev" ];
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN+RCLAExaM5EC70UsCPMtDT1Cfa80Ux/vex95fLk9S4";
+    };
+
+    build01 = {
+      hostNames = [ "build01.infra.snix.dev" ];
+      publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEteVaeN/FEAY8yyGWdAbv6+X6yv2m8+4F5qZEAhxW9f";
+    };
+
     github = {
       hostNames = [ "github.com" ];
       publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl";
@@ -1,10 +1,13 @@
-# Gerrit configuration for the TVL monorepo
+# Gerrit configuration for the snix monorepo
 { depot, pkgs, config, lib, ... }:

 let
   cfg = config.services.gerrit;

-  besadiiWithConfig = name: pkgs.writeShellScript "besadii-hook" ''
+  gerritPackage = depot.third_party.nix-gerrit.gerrit_3_11;
+  gerritPlugins = depot.third_party.nix-gerrit.plugins_3_11;
+
+  besadiiWithConfig = name: pkgs.writeShellScript "besadii-gerrit01" ''
     export BESADII_CONFIG=/run/agenix/gerrit-besadii-config
     exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
   '';
@@ -16,10 +19,11 @@ let
   '';
 in
 {
+  networking.firewall.allowedTCPPorts = [ 29418 ];
   services.gerrit = {
     enable = true;
     listenAddress = "[::]:4778"; # 4778 - grrt
-    serverId = "4fdfa107-4df9-4596-8e0a-1d2bbdd96e36";
+    serverId = "b4813230-0b9b-46cb-b400-dcbed70f87e6";

     builtinPlugins = [
       "download-commands"
@@ -27,28 +31,32 @@ in
       "replication"
     ];

-    plugins = with depot.third_party.gerrit_plugins; [
-      code-owners
+    plugins = with gerritPlugins; [
+      # TODO: re-enable once we have figured out all the email situation.
+      # code-owners
       oauth
-      depot.ops.gerrit-tvl
+      (depot.ops.gerrit-tvl {
+        gerrit = gerritPackage;
+      })
     ];

-    package = depot.third_party.gerrit;
+    package = gerritPackage;

     jvmHeapLimit = "4g";

-    # In some NixOS channel bump, the default version of OpenJDK has
-    # changed to one that is incompatible with our current version of
-    # Gerrit.
-    #
-    # TODO(tazjin): Update Gerrit and remove this when possible.
-    jvmPackage = pkgs.openjdk17_headless;
+    # WARN(raito): keep this synchronized with the Gerrit version!
+    jvmPackage = pkgs.openjdk21_headless;
+
+    jvmOpts = [
+      # https://bugs.openjdk.org/browse/JDK-8170568 someday… !
+      "-Djava.net.preferIPv6Addresses=system"
+    ];

     settings = {
       core.packedGitLimit = "100m";
       log.jsonLogging = true;
       log.textLogging = false;
-      sshd.advertisedAddress = "code.tvl.fyi:29418";
+      sshd.advertisedAddress = "cl.snix.dev:29418";
       hooks.path = "${gerritHooks}";
       cache.web_sessions.maxAge = "3 months";
       plugins.allowRemoteAdmin = false;
@@ -58,7 +66,7 @@ in
       # Configures gerrit for being reverse-proxied by nginx as per
       # https://gerrit-review.googlesource.com/Documentation/config-reverseproxy.html
       gerrit = {
-        canonicalWebUrl = "https://cl.tvl.fyi";
+        canonicalWebUrl = "https://cl.snix.dev";
         docUrl = "/Documentation";
       };

@@ -72,43 +80,43 @@ in
       ];

       # Configure for cgit.
-      gitweb = {
-        type = "custom";
-        url = "https://code.tvl.fyi";
-        project = "/";
-        revision = "/commit/?id=\${commit}";
-        branch = "/log/?h=\${branch}";
-        tag = "/tag/?h=\${tag}";
-        roottree = "/tree/?h=\${commit}";
-        file = "/tree/\${file}?h=\${commit}";
-        filehistory = "/log/\${file}?h=\${branch}";
-        linkname = "cgit";
-      };
+      # gitweb = {
+      #   type = "custom";
+      #   url = "https://code.snix.dev";
+      #   project = "/";
+      #   revision = "/commit/?id=\${commit}";
+      #   branch = "/log/?h=\${branch}";
+      #   tag = "/tag/?h=\${tag}";
+      #   roottree = "/tree/?h=\${commit}";
+      #   file = "/tree/\${file}?h=\${commit}";
+      #   filehistory = "/log/\${file}?h=\${branch}";
+      #   linkname = "cgit";
+      # };

-      # Auto-link panettone bug links
-      commentlink.panettone = {
-        match = "b/(\\d+)";
-        link = "https://b.tvl.fyi/issues/$1";
-      };
+      # # Auto-link panettone bug links
+      # commentlink.panettone = {
+      #   match = "b/(\\d+)";
+      #   link = "https://b.tvl.fyi/issues/$1";
+      # };

       # Auto-link other CLs
       commentlink.gerrit = {
         match = "cl/(\\d+)";
-        link = "https://cl.tvl.fyi/$1";
+        link = "https://cl.snix.dev/$1";
       };

       # Auto-link links to monotonically increasing revisions/commits
-      commentlink.revision = {
-        match = "r/(\\d+)";
-        link = "https://code.tvl.fyi/commit/?h=refs/r/$1";
-      };
+      # commentlink.revision = {
+      #   match = "r/(\\d+)";
+      #   link = "https://code.tvl.fyi/commit/?h=refs/r/$1";
+      # };

       # Configures integration with Keycloak, which then integrates with a
       # variety of backends.
       auth.type = "OAUTH";
       plugin.gerrit-oauth-provider-keycloak-oauth = {
-        root-url = "https://auth.tvl.fyi/auth";
-        realm = "TVL";
+        root-url = "https://auth.snix.dev/";
+        realm = "snix-project";
         client-id = "gerrit";
         # client-secret is set in /var/lib/gerrit/etc/secure.config.
       };
@@ -136,31 +144,34 @@ in
       # $site_path/etc/secure.config and is *not* controlled by Nix.
       #
       # Receiving email is not currently supported.
-      sendemail = {
-        enable = true;
-        html = false;
-        connectTimeout = "10sec";
-        from = "TVL Code Review <tvlbot@tazj.in>";
-        includeDiff = true;
-        smtpEncryption = "none";
-        smtpServer = "localhost";
-        smtpServerPort = 2525;
-      };
+      # sendemail = {
+      #   enable = true;
+      #   html = false;
+      #   connectTimeout = "10sec";
+      #   from = "TVL Code Review <tvlbot@tazj.in>";
+      #   includeDiff = true;
+      #   smtpEncryption = "none";
+      #   smtpServer = "localhost";
+      #   smtpServerPort = 2525;
+      # };
     };

-    # Replication of the depot repository to secondary machines, for
-    # serving cgit/josh.
+    # Replication of the snix repository to secondary machines, for
+    # serving forgejo.
     replicationSettings = {
       gerrit.replicateOnStartup = true;

-      remote.sanduny = {
-        url = "depot@sanduny.tvl.su:/var/lib/depot";
-        projects = "depot";
-      };
-
-      remote.bugry = {
-        url = "depot@bugry.tvl.fyi:/var/lib/depot";
-        projects = "depot";
+      # Replicate to our forgejo instance.
+      remote.forgejo = {
+        url = "git@git.snix.dev:snix/snix.git";
+        push = [ "+refs/heads/*:refs/heads/*" "+refs/tags/*:refs/tags/*" ];
+        timeout = 30;
+        threads = 3;
+        remoteNameStyle = "dash";
+        mirror = true;
+        # we are unsure if this should be private info
+        replicatePermissions = false;
+        projects = [ "snix" ];
       };
     };
   };
@@ -178,6 +189,52 @@ in
     };
   };

+  # Taken from Lix.
+  # Before starting gerrit, we'll want to create a "secure auth" file that contains our secrets.
+  systemd.services.gerrit-keys = {
+    enable = true;
+
+    before = [ "gerrit.service" ];
+    wantedBy = [ "gerrit.service" ];
+    after = [ "network.target" ];
+
+    serviceConfig = {
+      Type = "oneshot";
+      RemainAfterExit = "true";
+      WorkingDirectory = "/var/lib/gerrit";
+    };
+
+    path = [ pkgs.git ];
+
+    script = ''
+      CONF=etc/secure.config
+
+      # Ensure our config file is accessible to gerrit.
+      touch $CONF
+      chmod 600 $CONF
+
+      # Configure the SSH replication material
+      mkdir -p /var/lib/git/.ssh
+      cp ${config.age.secrets.gerrit-replication-key.path} /var/lib/git/.ssh/id_replication
+      cat > /var/lib/git/.ssh/config <<EOF
+      Host *
+        IdentityFile /var/lib/git/.ssh/id_replication
+      EOF
+      chmod 600 /var/lib/git/.ssh/id_replication
+      chmod 600 /var/lib/git/.ssh/config
+      chmod 700 /var/lib/git/.ssh
+      cp -L /etc/ssh/ssh_known_hosts /var/lib/git/.ssh/known_hosts
+      chmod 600 /var/lib/git/.ssh/known_hosts
+      chown -R git:git /var/lib/git/.ssh
+
+      # ... and finally, plop our secrets inside, and give the file to gerrit.
+      git config -f $CONF plugin.gerrit-oauth-provider-keycloak-oauth.client-secret \
+        "$(cat ${config.age.secrets.gerrit-oauth-secret.path})"
+
+      chown git:git $CONF
+    '';
+  };
+
   services.depot.restic = {
     paths = [ "/var/lib/gerrit" ];
     exclude = [ "/var/lib/gerrit/tmp" ];
|
||||||
132
ops/modules/o11y/agent.nix
Normal file
132
ops/modules/o11y/agent.nix
Normal file
|
@@ -0,0 +1,132 @@
{ depot
, config
, lib
, ...
}:
let
  cfg = config.infra.monitoring.grafana-agent;
  inherit (lib) mkEnableOption mkOption mkIf types;
  passwordAsCredential = "\${CREDENTIALS_DIRECTORY}/password";
in
{
  options.infra.monitoring.grafana-agent = {
    enable = (mkEnableOption "Grafana Agent") // { default = true; };

    exporters = mkOption {
      description = ''
        Set of additional exporters to scrape.

        The attribute name will be used as `job_name`
        internally, which ends up exported as `job` label
        on all metrics of that exporter.
      '';
      type = types.attrsOf (types.submodule ({ config, name, ... }: {
        options.port = mkOption {
          description = "Exporter port";
          type = types.int;
        };
        options.bearerTokenFile = mkOption {
          description = "File containing a bearer token";
          type = types.nullOr types.path;
          default = null;
        };

        options.scrapeConfig = mkOption {
          description = "Prometheus scrape config";
          type = types.attrs;
        };
        config.scrapeConfig = lib.mkMerge [{
          job_name = name;
          static_configs = [
            { targets = [ "localhost:${toString config.port}" ]; }
          ];
        }
          (lib.mkIf (config.bearerTokenFile != null) {
            authorization.credentials_file = "\${CREDENTIALS_DIRECTORY}/${name}-bearer-token";
          })];

        options.secrets = mkOption {
          description = "Secrets required for scrape config";
          type = types.attrs;
          internal = true;
          default = { };
        };
        config.secrets = lib.mkIf (config.bearerTokenFile != null) {
          "${name}-bearer-token" = config.bearerTokenFile;
        };
      }));
      default = { };
    };
  };

  config = mkIf cfg.enable {
    age.secrets.grafana-agent-password.file = depot.ops.secrets."grafana-agent-password.age";

    services.grafana-agent = {
      enable = true;
      credentials = lib.mkMerge ([{ password = config.age.secrets.grafana-agent-password.path; }] ++
        lib.mapAttrsToList (name: value: value.secrets) config.infra.monitoring.grafana-agent.exporters);
      settings = {
        metrics = {
          global.remote_write = [
            {
              url = "https://mimir.snix.dev/api/v1/push";
              basic_auth = {
                username = "promtail";
                password_file = passwordAsCredential;
              };
            }
          ];
          global.external_labels = {
            hostname = config.networking.hostName;
          };
          configs = [
            {
              name = config.networking.hostName;
              scrape_configs = lib.mapAttrsToList (name: value: value.scrapeConfig) config.infra.monitoring.grafana-agent.exporters;
            }
          ];
        };
        # logs = {
        #   global.clients = [
        #     {
        #       url = "https://loki.forkos.org/loki/api/v1/push";
        #       basic_auth = {
        #         username = "promtail";
        #         password_file = passwordAsCredential;
        #       };
        #     }
        #   ];
        #   configs = [
        #     {
        #       name = "journald";
        #       scrape_configs = [
        #         {
        #           job_name = "system";
        #           journal = {
        #             max_age = "12h";
        #             labels = {
        #               job = "systemd-journal";
        #               host = config.networking.hostName;
        #             };
        #           };
        #           relabel_configs = [
        #             {
        #               source_labels = [ "__journal__systemd_unit" ];
        #               target_label = "unit";
        #             }
        #           ];
        #         }
        #       ];
        #     }
        #   ];
        #   positions_directory = "\${STATE_DIRECTORY}/positions";
        # };
        integrations.node_exporter.enable_collectors = [
          "processes"
          "systemd"
        ];
      };
    };
  };
}
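A minimal sketch of how other modules hook into the exporters option above: the first line is how o11y/grafana.nix below registers itself, while the second block is a hypothetical exporter whose scrape needs a bearer token (name and secret are illustrative):

# Plain exporter scraped on localhost:2342 (taken from o11y/grafana.nix):
infra.monitoring.grafana-agent.exporters.grafana.port = 2342;

# Hypothetical exporter requiring a bearer token:
infra.monitoring.grafana-agent.exporters.my-exporter = {
  port = 9100;
  bearerTokenFile = config.age.secrets.my-exporter-token.path;
};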
20
ops/modules/o11y/alertmanager-irc-relay.nix
Normal file
20
ops/modules/o11y/alertmanager-irc-relay.nix
Normal file
|
@@ -0,0 +1,20 @@
{ config, depot, ... }: {
  imports = [
    depot.third_party.alertmanager-irc-relay.module
  ];

  services.alertmanager-irc-relay = {
    enable = true;
    settings = {
      irc_host = "irc.hackint.org";
      irc_port = 6697;
      irc_nickname = "silentfox";
      irc_channels = [
        { name = "#snix"; password = "$CHANNEL_PASSWORD"; }
      ];
    };
    environmentFiles = [
      config.age.secrets.alertmanager-irc-relay-environment.path
    ];
  };
}
0 ops/modules/o11y/alerts/.gitkeep (new, empty file)
ops/modules/o11y/grafana.nix (new file, 148 lines)
@@ -0,0 +1,148 @@
{ depot
, config
, lib
, ...
}:
let
  cfg = config.services.depot.grafana;
  inherit (lib) mkEnableOption mkIf;
in
{
  options.services.depot.grafana.enable = mkEnableOption "Grafana frontend";

  config = mkIf cfg.enable {
    services = {
      grafana = {
        enable = true;

        settings = {
          server = {
            domain = "status.snix.dev";
            http_addr = "127.0.0.1";
            http_port = 2342;
            root_url = "https://status.snix.dev/";
          };

          database = {
            type = "postgres";
            user = "grafana";
            host = "/run/postgresql";
          };

          "auth.anonymous" = {
            enabled = true;
            org_name = "Main Org.";
            org_role = "Viewer";
          };

          "auth.generic_oauth" = {
            enabled = true;

            name = "snix SSO";
            client_id = "grafana";
            client_secret = "$__file{${config.age.secrets.grafana-oauth-secret.path}}";

            auth_url = "https://auth.snix.dev/realms/snix-project/protocol/openid-connect/auth";
            token_url = "https://auth.snix.dev/realms/snix-project/protocol/openid-connect/token";
            api_url = "https://auth.snix.dev/realms/snix-project/protocol/openid-connect/userinfo";

            login_attribute_path = "username";
            email_attribute_path = "email";
            name_attribute_path = "full_name";

            scopes = [
              "openid"
              "profile"
              "email"
              "offline_access"
              "roles"
            ];

            allow_sign_up = true;
            auto_login = true;
            allow_assign_grafana_admin = true;

            role_attribute_path = "contains(grafana_roles[*], 'Admin') && 'GrafanaAdmin' || contains(grafana_roles[*], 'Editor') && 'Editor' || 'Viewer'";
          };

          dashboards.default_home_dashboard_path = "${depot.ops.dashboards.node_exporter}";

          feature_toggles.enable = "autoMigrateOldPanels newVizTooltips";
          security.angular_support_enabled = false;
        };

        provision = {
          dashboards.settings = {
            apiVersion = 1;
            providers = [
              {
                name = "default";
                options.path = depot.ops.dashboards.all;
              }
            ];
          };

          datasources.settings = {
            apiVersion = 1;
            datasources = [
              {
                name = "Mimir";
                type = "prometheus";
                uid = "mimir";
                access = "proxy";
                url = "http://mimir.snix.dev:9009/prometheus";
                isDefault = true;
              }
              {
                name = "Loki";
                type = "loki";
                uid = "loki";
                access = "proxy";
                url = "http://loki.snix.dev:9090/";
              }
              {
                name = "Tempo";
                type = "tempo";
                uid = "tempo";
                access = "proxy";
                url = "http://tempo.snix.dev:9190";
                jsonData.streamingEnabled.search = true;
              }
              {
                name = "Mimir Alertmanager";
                type = "alertmanager";
                uid = "mimir-alertmanager";
                access = "proxy";
                url = "http://mimir.snix.dev:9009/";
                jsonData = {
                  handleGrafanaManagedAlerts = true;
                  implementation = "mimir";
                };
              }

              # {
              #   name = "Pyroscope";
              #   type = "grafana-pyroscope-datasource";
              #   uid = "pyroscope";
              #   access = "proxy";
              #   url = "http://127.0.0.1:4040";
              # }
            ];
          };
        };
      };

      postgresql = {
        ensureDatabases = [ "grafana" ];
        ensureUsers = [
          {
            name = "grafana";
            ensureDBOwnership = true;
          }
        ];
      };
    };

    infra.monitoring.grafana-agent.exporters.grafana.port = 2342;
  };
}
90  ops/modules/o11y/loki.nix  Normal file

@ -0,0 +1,90 @@
{ config
, lib
, ...
}:
let
  cfg = config.services.depot.loki;
  inherit (lib) mkEnableOption mkIf;
in
{
  options.services.depot.loki.enable = mkEnableOption "Loki storage";

  config = mkIf cfg.enable {
    services.loki = {
      enable = true;
      extraFlags = [ "--config.expand-env" ];

      configuration = {
        server = {
          http_listen_port = 9090;
          grpc_listen_port = 9096;

          # 16M
          grpc_server_max_recv_msg_size = 16777216;
          grpc_server_max_send_msg_size = 16777216;
        };

        auth_enabled = false;

        common = {
          storage.s3 = {
            endpoint = "fsn1.your-objectstorage.com";
            region = "fsn1";
            bucketnames = "snix-loki";
            secret_access_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
            access_key_id = "\${S3_KEY_ID}";
            s3forcepathstyle = true;
          };
          ring = {
            kvstore.store = "memberlist";
            # TODO: Such an ugly hack.
            instance_interface_names = [ "enp1s0" "lo" ];
          };
          replication_factor = 1;
        };

        memberlist = {
          advertise_addr = "127.0.0.1";
          cluster_label = "snix";
          bind_port = 7947;
          advertise_port = 7947;
        };

        storage_config.tsdb_shipper = {
          active_index_directory = "/var/lib/loki/index";
          cache_location = "/var/lib/loki/cache";
        };

        compactor = {
          working_directory = "/var/lib/loki/compactor";
          compaction_interval = "10m";
          retention_enabled = true;
          retention_delete_delay = "1s";
          retention_delete_worker_count = 150;
          delete_request_store = "filesystem";
        };

        limits_config.retention_period = "1w";

        schema_config = {
          configs = [
            {
              from = "2024-07-01";
              store = "tsdb";
              object_store = "s3";
              schema = "v13";
              index = {
                prefix = "index_";
                period = "24h";
              };
            }
          ];
        };
      };
    };

    systemd.services.loki.serviceConfig.EnvironmentFile = [ config.age.secrets.loki-environment.path ];

    infra.monitoring.grafana-agent.exporters.loki.port = 9090;
  };
}
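For context, a minimal sketch (not part of this commit) of the secret wiring this module assumes: `--config.expand-env` makes Loki substitute `${S3_KEY}` / `${S3_KEY_ID}` from its process environment, which the systemd `EnvironmentFile` populates from the agenix-decrypted file. The relative secret path below is hypothetical.

  # Hypothetical sketch: declaring the agenix secret referenced above.
  # The decrypted file is expected to be env-style, e.g.:
  #   S3_KEY=<secret access key>
  #   S3_KEY_ID=<access key id>
  { ... }:
  {
    age.secrets.loki-environment.file = ./secrets/loki-environment.age; # path is an assumption
  }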
123  ops/modules/o11y/mimir.nix  Normal file

@ -0,0 +1,123 @@
{ config
, lib
, pkgs
, ...
}:
let
  cfg = config.services.depot.prometheus;
  inherit (lib) mkEnableOption mkIf;

  mimirPort = config.services.mimir.configuration.server.http_listen_port;

  alerts = pkgs.runCommand "mimir-alerts-checked"
    {
      src = ./alerts;
      nativeBuildInputs = with pkgs; [ prometheus.cli ];
    } ''
    promtool check rules $src/*
    mkdir $out
    cp -R $src $out/anonymous/
  '';
in
{
  options.services.depot.prometheus.enable = mkEnableOption "Prometheus scraper";

  config = mkIf cfg.enable {
    services.mimir = {
      enable = true;
      extraFlags = [ "--config.expand-env=true" ];
      configuration = {
        target = "all,alertmanager";

        multitenancy_enabled = false;

        common.storage = {
          backend = "s3";
          s3 = {
            endpoint = "fsn1.your-objectstorage.com";
            bucket_name = "snix-mimir";
            secret_access_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
            access_key_id = "\${S3_KEY_ID}";
          };
        };

        # TODO: Such an ugly hack.
        distributor.ring.instance_interface_names = [ "enp1s0" "lo" ];
        ingester.ring.instance_interface_names = [ "enp1s0" "lo" ];
        frontend.instance_interface_names = [ "enp1s0" "lo" ];
        query_scheduler.ring.instance_interface_names = [ "enp1s0" "lo" ];
        ruler.ring.instance_interface_names = [ "enp1s0" "lo" ];
        compactor.sharding_ring.instance_interface_names = [ "enp1s0" "lo" ];
        store_gateway.sharding_ring.instance_interface_names = [ "enp1s0" "lo" ];

        memberlist = {
          advertise_addr = "127.0.0.1";
          cluster_label = "snix";
        };

        server = {
          http_listen_port = 9009;
          grpc_server_max_recv_msg_size = 104857600;
          grpc_server_max_send_msg_size = 104857600;
          grpc_server_max_concurrent_streams = 1000;
        };

        ingester.ring.replication_factor = 1;

        distributor.instance_limits.max_ingestion_rate = 0; # unlimited
        limits = {
          ingestion_rate = 1000000; # can't set to unlimited :(
          out_of_order_time_window = "12h";
          max_global_series_per_user = 0; # unlimited
        };

        blocks_storage.backend = "s3";
        ruler_storage = {
          backend = "local";
          local.directory = alerts;
        };

        alertmanager = {
          sharding_ring = {
            replication_factor = 1;
            # TODO: hack
            instance_interface_names = [ "enp1s0" ];
          };
          fallback_config_file = pkgs.writers.writeYAML "alertmanager.yaml" {
            route = {
              group_by = [ "alertname" ];
              receiver = "irc";
            };
            receivers = [
              {
                name = "irc";
                webhook_configs = [{
                  # Mimir can't expand environment variables in external config files,
                  # so work around it.
                  url_file = "/run/credentials/mimir.service/webhook-url";
                }];
              }
            ];
          };
        };
        alertmanager_storage.backend = "filesystem";

        ruler.alertmanager_url = "http://localhost:${toString mimirPort}/alertmanager";
      };
    };

    systemd.services.mimir = {
      # Mimir tries to determine its own IP address for gossip purposes,
      # even when it's the only instance, and fails if it can't find one.
      # Avoid that by ensuring it starts after the network is set up.
      wants = [ "network-online.target" ];
      after = [ "network-online.target" ];
      serviceConfig = {
        EnvironmentFile = [ config.age.secrets.mimir-environment.path ];
        LoadCredential = [ "webhook-url:${config.age.secrets.mimir-webhook-url.path}" ];
      };
    };

    infra.monitoring.grafana-agent.exporters.mimir.port = 9009;
  };
}
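A short aside on the workaround above: systemd's `LoadCredential=` copies the decrypted webhook URL into the service's credential directory, which is why the Alertmanager fallback config can point `url_file` at `/run/credentials/mimir.service/webhook-url`. A minimal illustration of the same mapping in isolation (unit name and source path are illustrative, not taken from this commit):

  # Illustrative only: "webhook-url:<source path>" under LoadCredential becomes
  # /run/credentials/<unit>.service/webhook-url at runtime.
  {
    systemd.services.example.serviceConfig.LoadCredential = [
      "webhook-url:/run/agenix/mimir-webhook-url" # source path is an assumption
    ];
  }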
71  ops/modules/o11y/tempo.nix  Normal file

@ -0,0 +1,71 @@
{ config
, lib
, ...
}:
let
  cfg = config.services.depot.tempo;
  inherit (lib) mkEnableOption mkIf;
in
{
  options.services.depot.tempo.enable = mkEnableOption "Tempo trace store";

  config = mkIf cfg.enable {
    services.tempo = {
      enable = true;
      extraFlags = [ "--config.expand-env=true" ];
      settings = {
        multitenancy_enabled = false;
        stream_over_http_enabled = true;

        server = {
          http_listen_port = 9190;
          grpc_listen_port = 9195;
        };
        distributor.receivers.otlp.protocols.http.endpoint = "127.0.0.1:4138";

        # TODO: S3
        storage.trace = {
          backend = "s3";
          s3 = {
            endpoint = "fsn1.your-objectstorage.com";
            bucket = "snix-tempo";
            secret_key = "\${S3_KEY}"; # This is a secret injected via an environment variable
            access_key = "\${S3_KEY_ID}";
          };
          wal.path = "/var/lib/tempo/traces-wal";
        };

        metrics_generator.storage = {
          path = "/var/lib/tempo/metrics-wal";
          remote_write = [
            {
              url = "http://127.0.0.1:9009/api/v1/push";
            }
          ];
        };

        overrides.defaults.metrics_generator.processors = [ "span-metrics" ];
      };
    };

    systemd.services.tempo.serviceConfig.EnvironmentFile = [ config.age.secrets.tempo-environment.path ];

    services.nginx = {
      upstreams.tempo = {
        servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" = { };
        extraConfig = "keepalive 16;";
      };

      virtualHosts."tempo.snix.dev" = {
        enableACME = true;
        forceSSL = true;
        locations."/" = {
          proxyPass = "http://tempo";
          basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
        };
      };
    };

    infra.monitoring.grafana-agent.exporters.tempo.port = 9190;
  };
}
76  ops/modules/raito-vm.nix  Normal file

@ -0,0 +1,76 @@
{ lib, config, ... }:
let
  cfg = config.infra.hardware.raito-vm;
  inherit (lib) mkEnableOption mkIf mkOption types;
in
{
  options.infra.hardware.raito-vm = {
    enable = mkEnableOption "Raito's VM hardware defaults";

    networking = {
      nat64.enable = mkEnableOption "the setup of NAT64 rules to the local NAT64 node";

      wan = {
        address = mkOption {
          type = types.str;
          description = "IPv6 prefix for WAN. Ask Raito when in doubt.";
        };
        mac = mkOption {
          type = types.str;
          description = "MAC address for the WAN interface.";
        };
      };
    };
  };

  config = mkIf cfg.enable {
    services.qemuGuest.enable = true;
    systemd.network.enable = true;
    networking.useDHCP = lib.mkDefault false;

    systemd.network.networks."10-wan" = {
      matchConfig.Name = "wan";
      linkConfig.RequiredForOnline = true;
      networkConfig.Address = [ cfg.networking.wan.address ];

      routes = mkIf cfg.networking.nat64.enable [
        {
          Destination = "64:ff9b::/96";
          Gateway = "2001:bc8:38ee:100::100";
          Scope = "site";
        }
      ];

      # Enable DNS64 resolvers from Google; I'm too lazy.
      dns = mkIf cfg.networking.nat64.enable [ "2001:4860:4860::6464" "2001:4860:4860::64" ];
    };

    systemd.network.links."10-wan" = {
      matchConfig.MACAddress = cfg.networking.wan.mac;
      linkConfig.Name = "wan";
    };

    boot.loader.systemd-boot.enable = true;

    boot.initrd.kernelModules = [
      "virtio_balloon"
      "virtio_console"
      "virtio_rng"
    ];

    boot.initrd.availableKernelModules = [
      "9p"
      "9pnet_virtio"
      "ata_piix"
      "nvme"
      "sr_mod"
      "uhci_hcd"
      "virtio_blk"
      "virtio_mmio"
      "virtio_net"
      "virtio_pci"
      "virtio_scsi"
      "xhci_pci"
    ];
  };
}
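A minimal usage sketch for this module (not part of this commit; all values are placeholders):

  {
    infra.hardware.raito-vm = {
      enable = true;
      networking.nat64.enable = true;
      networking.wan = {
        address = "2001:db8::2/64";  # placeholder WAN address
        mac = "52:54:00:00:00:01";   # placeholder MAC
      };
    };
  }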
@ -1,13 +1,14 @@
 # Configure restic backups to S3-compatible storage, in our case
-# Yandex Cloud Storage.
+# Hetzner Cloud object storage.
 #
-# When adding a new machine, the repository has to be initialised once. Refer to
-# the Restic documentation for details on this process.
-{ config, depot, lib, pkgs, ... }:
+# Conventions:
+# - restic's cache lives in /var/backup/restic/cache
+# - repository password lives in `config.age.secrets.restic-repository-password.path`
+# - object storage credentials in `config.age.secrets.restic-bucket-credentials.path`
+{ config, lib, pkgs, ... }:
 
 let
   cfg = config.services.depot.restic;
-  description = "Restic backups to Yandex Cloud";
   mkStringOption = default: lib.mkOption {
     inherit default;
     type = lib.types.str;

@ -15,10 +16,10 @@
 in
 {
   options.services.depot.restic = {
-    enable = lib.mkEnableOption description;
-    bucketEndpoint = mkStringOption "storage.yandexcloud.net";
-    bucketName = mkStringOption "tvl-backups";
-    bucketCredentials = mkStringOption "/run/agenix/yc-restic";
+    enable = lib.mkEnableOption "the restic backups";
+    bucketEndpoint = mkStringOption "fsn1.your-objectstorage.com";
+    bucketName = mkStringOption "snix-backups";
+    bucketCredentials = mkStringOption config.age.secrets.restic-bucket-credentials.path;
     repository = mkStringOption config.networking.hostName;
     interval = mkStringOption "hourly";

@ -30,24 +31,24 @@
     exclude = with lib; mkOption {
       description = "Files that should be excluded from backups";
       type = types.listOf types.str;
+      default = [ ];
     };
   };
 
   config = lib.mkIf cfg.enable {
-    age.secrets = {
-      restic-password.file = depot.ops.secrets."restic-${config.networking.hostName}.age";
-      yc-restic.file = depot.ops.secrets."yc-restic.age";
-    };
-
     systemd.services.restic = {
-      description = "Backups to Yandex Cloud";
+      description = "Backups to Hetzner Cloud";
 
       script = "${pkgs.restic}/bin/restic backup ${lib.concatStringsSep " " cfg.paths}";
 
+      serviceConfig.ExecStartPre = pkgs.writeShellScript "init-repo" ''
+        ${pkgs.restic}/bin/restic init && echo "Initializing the repository." || echo "Already initialized."
+      '';
+
       environment = {
         RESTIC_REPOSITORY = "s3:${cfg.bucketEndpoint}/${cfg.bucketName}/${cfg.repository}";
         AWS_SHARED_CREDENTIALS_FILE = cfg.bucketCredentials;
-        RESTIC_PASSWORD_FILE = "/run/agenix/restic-password";
+        RESTIC_PASSWORD_FILE = config.age.secrets.restic-repository-password.path;
         RESTIC_CACHE_DIR = "/var/backup/restic/cache";
 
         RESTIC_EXCLUDE_FILE =
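A hedged usage sketch of the refreshed module (not part of this commit), assuming the `paths` option referenced by the backup script is defined elsewhere in this file:

  {
    services.depot.restic = {
      enable = true;
      paths = [ "/var/lib/example-service" ]; # placeholder backup path
      exclude = [ "*.tmp" ];                  # placeholder exclude pattern
    };
  }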
112  ops/modules/snix-buildkite.nix  Normal file

@ -0,0 +1,112 @@
# Configuration for the snix buildkite agents.
{ config
, depot
, pkgs
, lib
, ...
}:

let
  cfg = config.services.depot.buildkite;
  agents = lib.range 1 cfg.agentCount;
  hostname = config.networking.hostName;
  description = "Buildkite agents for snix";

  besadiiWithConfig =
    name:
    pkgs.writeShellScript "besadii-${hostname}" ''
      export BESADII_CONFIG=/run/agenix/buildkite-besadii-config
      exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
    '';

  # All Buildkite hooks are actually besadii, but it's being invoked
  # with different names.
  buildkiteHooks = pkgs.runCommand "buildkite-hooks" { } ''
    mkdir -p $out/bin
    ln -s ${besadiiWithConfig "post-command"} $out/bin/post-command
  '';

  credentialHelper = pkgs.writeShellScriptBin "git-credential-gerrit-creds" ''
    echo 'username=besadii'
    echo "password=$(jq -r '.gerritPassword' /run/agenix/buildkite-besadii-config)"
  '';
in
{
  options.services.depot.buildkite = {
    enable = lib.mkEnableOption description;

    agentCount = lib.mkOption {
      type = lib.types.int;
      description = "Number of Buildkite agents to launch";
    };

    largeSlots = lib.mkOption {
      type = lib.types.int;
      default = cfg.agentCount;
      description = "Number of agents with 'large=true'";
    };
  };

  config = lib.mkIf cfg.enable {
    # Run the Buildkite agents using the default upstream module.
    services.buildkite-agents = builtins.listToAttrs (
      map
        (n: rec {
          name = "${hostname}-${toString n}";
          value =
            {
              inherit name;
              enable = true;
              tokenPath = config.age.secretsDir + "/buildkite-agent-token";
              privateSshKeyPath = config.age.secretsDir + "/buildkite-private-key";
              hooks.post-command = "${buildkiteHooks}/bin/post-command";
              tags.queue = "default";
              hooks.environment = ''
                export PATH=$PATH:/run/wrappers/bin
              '';

              tags.hostname = hostname;

              # all agents support small jobs
              tags.small = "true";

              runtimePackages = with pkgs; [
                bash
                coreutils
                credentialHelper
                curl
                git
                gnutar
                gzip
                jq
                nix
              ];
            }
            // (lib.optionalAttrs (n <= cfg.largeSlots) {
              tags.large = "true";
            });
        })
        agents
    );

    # Set up a group for all Buildkite agent users
    users = {
      groups.buildkite-agents = { };
      users = builtins.listToAttrs (
        map
          (n: rec {
            name = "buildkite-agent-${hostname}-${toString n}";
            value = {
              isSystemUser = true;
              group = lib.mkForce "buildkite-agents";
              extraGroups = [
                name
                "docker"
              ];
            };
          })
          agents
      );
    };
  };
}
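A minimal usage sketch for this module (not part of this commit; counts are placeholders):

  {
    services.depot.buildkite = {
      enable = true;
      agentCount = 8;  # placeholder: total agents on this host
      largeSlots = 2;  # placeholder: subset of agents tagged large=true
    };
  }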
83  ops/modules/stalwart.nix  Normal file

@ -0,0 +1,83 @@
# Stalwart is an all-in-one mailserver in Rust.
# https://stalw.art/
{ config, lib, ... }:
let
  inherit (lib) mkOption mkEnableOption mkIf types;
  cfg = config.services.depot.stalwart;
  certs = config.security.acme.certs.${cfg.mailDomain} or (throw "NixOS-level ACME was not enabled for `${cfg.mailDomain}`: mailserver cannot autoconfigure!");
  mkBind = port: ip: "${ip}:${toString port}";
in
{
  options.services.depot.stalwart = {
    enable = mkEnableOption "Stalwart Mail server";

    listenAddresses = mkOption {
      type = types.listOf types.str;
      default = [
        "49.12.112.149"
        "[2a01:4f8:c013:3e62::2]"
      ];
    };

    mailDomain = mkOption {
      type = types.str;
      description = "The email domain, i.e. the part after @";
      example = "snix.dev";
    };
  };

  config = mkIf cfg.enable {
    # Open only from the listen addresses.
    networking.firewall.allowedTCPPorts = [ 25 587 143 443 ];
    services.stalwart-mail = {
      enable = true;
      settings = {
        certificate.letsencrypt = {
          cert = "file://${certs.directory}/fullchain.pem";
          private-key = "file://${certs.directory}/key.pem";
        };
        server = {
          hostname = cfg.mailDomain;
          tls = {
            certificate = "letsencrypt";
            enable = true;
            implicit = false;
          };
          listener = {
            smtp = {
              bind = map (mkBind 587) cfg.listenAddresses;
              protocol = "smtp";
            };
            imap = {
              bind = map (mkBind 143) cfg.listenAddresses;
              protocol = "imap";
            };
            mgmt = {
              bind = map (mkBind 443) cfg.listenAddresses;
              protocol = "https";
            };
          };
        };
        session = {
          rcpt = {
            directory = "in-memory";
            # Allow this server to be used as a relay for authenticated principals.
            relay = [
              { "if" = "!is_empty(authenticated_as)"; "then" = true; }
              { "else" = false; }
            ];
          };
          auth = {
            mechanisms = [ "PLAIN" ];
            directory = "in-memory";
          };
        };
        jmap.directory = "in-memory";
        queue.outbound.next-hop = [ "local" ];
        directory.in-memory = {
          type = "memory";
        };
      };
    };
  };
}
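A hedged usage sketch for the Stalwart module (not part of this commit; the domain value follows the option's own example and may not match the actual deployment):

  {
    services.depot.stalwart = {
      enable = true;
      mailDomain = "snix.dev"; # assumption, taken from the option's example
    };
  }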
@ -1,95 +0,0 @@
# Configuration for the TVL buildkite agents.
{ config, depot, pkgs, lib, ... }:

let
  cfg = config.services.depot.buildkite;
  agents = lib.range 1 cfg.agentCount;
  description = "Buildkite agents for TVL";
  hostname = config.networking.hostName;

  besadiiWithConfig = name: pkgs.writeShellScript "besadii-${hostname}" ''
    export BESADII_CONFIG=/run/agenix/buildkite-besadii-config
    exec -a ${name} ${depot.ops.besadii}/bin/besadii "$@"
  '';

  # All Buildkite hooks are actually besadii, but it's being invoked
  # with different names.
  buildkiteHooks = pkgs.runCommand "buildkite-hooks" { } ''
    mkdir -p $out/bin
    ln -s ${besadiiWithConfig "post-command"} $out/bin/post-command
  '';

  credentialHelper = pkgs.writeShellScriptBin "git-credential-gerrit-creds" ''
    echo 'username=buildkite'
    echo "password=$(jq -r '.gerritPassword' /run/agenix/buildkite-besadii-config)"
  '';
in
{
  options.services.depot.buildkite = {
    enable = lib.mkEnableOption description;

    agentCount = lib.mkOption {
      type = lib.types.int;
      description = "Number of Buildkite agents to launch";
    };

    largeSlots = lib.mkOption {
      type = lib.types.int;
      default = cfg.agentCount;
      description = "Number of agents with 'large=true'";
    };
  };

  config = lib.mkIf cfg.enable {
    # Run the Buildkite agents using the default upstream module.
    services.buildkite-agents = builtins.listToAttrs (map
      (n: rec {
        name = "${hostname}-${toString n}";
        value = {
          inherit name;
          enable = true;
          tokenPath = config.age.secretsDir + "/buildkite-agent-token";
          privateSshKeyPath = config.age.secretsDir + "/buildkite-private-key";
          hooks.post-command = "${buildkiteHooks}/bin/post-command";
          hooks.environment = ''
            export PATH=$PATH:/run/wrappers/bin
          '';

          tags.hostname = hostname;

          # all agents support small jobs
          tags.small = "true";

          runtimePackages = with pkgs; [
            bash
            coreutils
            credentialHelper
            curl
            git
            gnutar
            gzip
            jq
            nix
          ];
        } // (lib.optionalAttrs (n <= cfg.largeSlots) {
          tags.large = "true";
        });
      })
      agents);

    # Set up a group for all Buildkite agent users
    users = {
      groups.buildkite-agents = { };
      users = builtins.listToAttrs (map
        (n: rec {
          name = "buildkite-agent-${hostname}-${toString n}";
          value = {
            isSystemUser = true;
            group = lib.mkForce "buildkite-agents";
            extraGroups = [ name "docker" ];
          };
        })
        agents);
    };
  };
}
52  ops/modules/www/auth.snix.dev.nix  Normal file

@ -0,0 +1,52 @@
{ config, ... }:
let
  host = "auth.snix.dev";
  realm = "snix-project";
in
{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."${host}" = {
      serverName = host;
      enableACME = true;
      forceSSL = true;

      locations."/" = {
        recommendedProxySettings = true;
        proxyPass = "http://127.0.0.1:9091";
        extraConfig = ''
          proxy_pass_header Authorization;

          proxy_busy_buffers_size 512k;
          proxy_buffers 4 512k;
          proxy_buffer_size 256k;

          # Allow clients with Auth hardcoded to use our base path.
          #
          # XXX: ok so this is horrible. For some reason gerrit explodes if
          # it receives a redirect when doing auth. But we need to redirect
          # the browser to reuse sessions. Thus, user agent scanning.
          if ($http_user_agent ~* "^Java.*$") {
            rewrite ^/auth/(.*)$ /$1 last;
          }
          rewrite ^/auth/(.*)$ /$1 redirect;

          # Hacks to make us compatible with authenticators that expect GitLab's format.
          rewrite ^/realms/${realm}/protocol/openid-connect/api/v4/user$ /realms/${realm}/protocol/openid-connect/userinfo;
          rewrite ^/realms/${realm}/protocol/openid-connect/oauth/authorize$ /realms/${realm}/protocol/openid-connect/auth?scope=openid%20email%20profile;
          rewrite ^/realms/${realm}/protocol/openid-connect/oauth/token$ /realms/${realm}/protocol/openid-connect/token;
        '';
      };

      # Forward our admin address to our default realm.
      locations."= /admin".extraConfig = "return 302 https://${host}/admin/snix-project/console/;";
      locations."= /superadmin".extraConfig = "return 302 https://${host}/admin/master/console/;";

      # Forward our root address to the account management portal.
      locations."= /".extraConfig = "return 302 https://${host}/realms/${realm}/account;";
    };
  };
}
@ -2,9 +2,11 @@
 
 {
   config = {
+    networking.firewall.allowedTCPPorts = [ 80 443 ];
+
     security.acme = {
       acceptTerms = true;
-      defaults.email = "letsencrypt@tvl.su";
+      defaults.email = "acme@snix.dev";
     };
 
     services.nginx = {
@ -8,12 +8,11 @@
   config = {
     services.nginx.virtualHosts."cl-shortlink" = {
       serverName = "cl";
-      extraConfig = "return 302 https://cl.tvl.fyi$request_uri;";
+      extraConfig = "return 302 https://cl.snix.dev$request_uri;";
     };
 
     services.nginx.virtualHosts.gerrit = {
-      serverName = "cl.tvl.fyi";
-      serverAliases = [ "cl.tvl.su" ];
+      serverName = "cl.snix.dev";
       enableACME = true;
       forceSSL = true;

@ -21,7 +20,7 @@
         location / {
           proxy_pass http://localhost:4778;
           proxy_set_header X-Forwarded-For $remote_addr;
-          # The :443 suffix is a workaround for https://b.tvl.fyi/issues/88.
+          # The :443 suffix is a workaround for https://b.snix.dev/issues/88.
           proxy_set_header Host $host:443;
         }
@ -1,82 +0,0 @@
{ depot, pkgs, config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts.cgit = {
      serverName = "code.tvl.fyi";
      serverAliases = [ "code.tvl.su" ];
      enableACME = true;
      forceSSL = true;

      extraConfig = ''
        if ($http_user_agent ~ (ClaudeBot|GPTBot|Amazonbot)) {
          return 403;
        }

        location = /go-get/tvix/build-go {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/build-go git https://code.tvl.fyi/depot.git:/tvix/build-go.git"></html>''};
        }

        location = /go-get/tvix/castore-go {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/castore-go git https://code.tvl.fyi/depot.git:/tvix/castore-go.git"></html>''};
        }

        location = /go-get/tvix/store-go {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/store-go git https://code.tvl.fyi/depot.git:/tvix/store-go.git"></html>''};
        }

        location = /go-get/tvix/nar-bridge {
          alias ${pkgs.writeText "go-import-metadata.html" ''<html><meta name="go-import" content="code.tvl.fyi/tvix/nar-bridge git https://code.tvl.fyi/depot.git:/tvix/nar-bridge.git"></html>''};
        }

        location = /tvix/build-go {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/build-go;
          }
        }

        location = /tvix/castore-go {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/castore-go;
          }
        }

        location = /tvix/store-go {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/store-go;
          }
        }

        location = /tvix/nar-bridge {
          if ($args ~* "/?go-get=1") {
            return 302 /go-get/tvix/nar-bridge;
          }
        }

        # Git operations on depot.git hit josh
        location /depot.git {
          proxy_pass http://127.0.0.1:${toString config.services.depot.josh.port};
        }

        # Git clone operations on '/' should be redirected to josh now.
        location = /info/refs {
          return 302 https://code.tvl.fyi/depot.git/info/refs$is_args$args;
        }

        # Static assets must always hit the root.
        location ~ ^/(favicon\.ico|cgit\.(css|png))$ {
          proxy_pass http://localhost:2448;
        }

        # Everything else is forwarded to cgit for the web view
        location / {
          proxy_pass http://localhost:2448/cgit.cgi/depot/;
        }
      '';
    };
  };
}
26  ops/modules/www/git.snix.dev.nix  Normal file

@ -0,0 +1,26 @@
{ ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts.forgejo = {
      serverName = "git.snix.dev";
      enableACME = true;
      forceSSL = true;
      locations."/" = {
        proxyPass = "http://127.0.0.1:3000";
        extraConfig = ''
          proxy_ssl_server_name on;
          proxy_pass_header Authorization;

          # This has to be sufficiently large for uploading layers of
          # non-broken docker images.
          client_max_body_size 1G;
        '';
      };
    };
  };
}
22  ops/modules/www/loki.snix.dev.nix  Normal file

@ -0,0 +1,22 @@
{ config, ... }:
{
  imports = [
    ./base.nix
  ];

  services.nginx = {
    upstreams.loki = {
      servers."127.0.0.1:${toString config.services.loki.configuration.server.http_listen_port}" = { };
      extraConfig = "keepalive 16;";
    };

    virtualHosts."loki.snix.dev" = {
      enableACME = true;
      forceSSL = true;
      locations."/loki/api/v1/push" = {
        proxyPass = "http://loki";
        basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
      };
    };
  };
}
25  ops/modules/www/mail.snix.dev.nix  Normal file

@ -0,0 +1,25 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    # Listen on a special IPv4 & IPv6 specialized for mail.
    # This NGINX has only one role: obtain TLS/SSL certificates for the mailserver.
    # All the TLS, IMAP, SMTP stuff is handled directly by the mailserver runtime.
    # This is why you will not see any `stream { }` block here.
    services.nginx.virtualHosts.stalwart = {
      serverName = "mail.snix.dev";
      enableACME = true;
      forceSSL = true;

      listenAddresses = [
        "127.0.0.2"
        "49.12.112.149"
        "[2a01:4f8:c013:3e62::2]"
      ];
    };
  };
}
24  ops/modules/www/mimir.snix.dev.nix  Normal file

@ -0,0 +1,24 @@
{ config, ... }:
let
  mimirPort = config.services.mimir.configuration.server.http_listen_port;
in
{
  imports = [
    ./base.nix
  ];
  services.nginx = {
    upstreams.mimir = {
      servers."127.0.0.1:${toString mimirPort}" = { };
      extraConfig = "keepalive 16;";
    };

    virtualHosts."mimir.snix.dev" = {
      enableACME = true;
      forceSSL = true;
      locations."/api/v1/push" = {
        proxyPass = "http://mimir";
        basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
      };
    };
  };
}
25  ops/modules/www/status.snix.dev.nix  Normal file

@ -0,0 +1,25 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx =
      let
        scfg = config.services.grafana.settings.server;
      in
      {
        enable = true;
        virtualHosts."${scfg.domain}" = {
          enableACME = true;
          forceSSL = true;
          locations."/" = {
            proxyPass = "http://${scfg.http_addr}:${toString scfg.http_port}";
            proxyWebsockets = true;
          };
        };
      };
  };
}
@ -1,25 +0,0 @@
{ config, ... }:

{
  imports = [
    ./base.nix
  ];

  config = {
    services.nginx.virtualHosts."status-fyi" = {
      serverName = "status.tvl.fyi";
      enableACME = true;
      extraConfig = "return 302 https://status.tvl.su$request_uri;";
    };

    services.nginx.virtualHosts.grafana = {
      serverName = "status.tvl.su";
      enableACME = true;
      forceSSL = true;

      locations."/" = {
        proxyPass = "http://localhost:${toString config.services.grafana.settings.server.http_port}";
      };
    };
  };
}
22  ops/modules/www/tempo.snix.dev.nix  Normal file

@ -0,0 +1,22 @@
{ config, ... }:
{
  imports = [
    ./base.nix
  ];

  services.nginx = {
    upstreams.tempo = {
      servers."${config.services.tempo.settings.distributor.receivers.otlp.protocols.http.endpoint}" = { };
      extraConfig = "keepalive 16;";
    };

    virtualHosts."tempo.snix.dev" = {
      enableACME = true;
      forceSSL = true;
      locations."/" = {
        proxyPass = "http://tempo";
        basicAuthFile = config.age.secrets.metrics-push-htpasswd.path;
      };
    };
  };
}
@ -60,5 +60,9 @@ in rec {
   '';
 
   # Systems that should be built in CI
-  meta.ci.targets = [ ];
+  gerrit01System = nixosFor depot.ops.machines.gerrit01;
+  public01System = nixosFor depot.ops.machines.public01;
+  build01System = nixosFor depot.ops.machines.build01;
+  meta01System = nixosFor depot.ops.machines.meta01;
+  meta.ci.targets = [ "gerrit01System" "public01System" "build01System" "meta01System" ];
 }
12
ops/secrets/alertmanager-irc-relay-environment.age
Normal file
12
ops/secrets/alertmanager-irc-relay-environment.age
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung MFR57YIw4IZk4ZGFyCDV/gr+iso4XLL40MmESBr8NQI
|
||||||
|
6iyYl9pMtA309N3wQ5N7jA+rUN3DAcXq++dS5RFfaZc
|
||||||
|
-> X25519 q2U7kDMrPfI1a4XyJV2IJ+gxIiRX/xNIs9cgKNs2Ym4
|
||||||
|
2E6TQubnQ4QmJt5t8PNiN3bQHtM9WR+QapVljYnOkEw
|
||||||
|
-> ssh-ed25519 C2zWnA ppTx3QL3a1xHqcYnfJkW5u0NtpCMPwL52lkLXYRB6is
|
||||||
|
8Vyz/NgMYICueDaGDVQ962atmeI7JvTweMXjQoQLm8s
|
||||||
|
-> ssh-ed25519 LzO4tw 2Z3IkWkmDSuGWtcuYEEiGVBB8olZMI6f5Ut14bZqgW8
|
||||||
|
lRZExatm6jOLprlpSioWALwMHRurll48QNIXn7GwN7o
|
||||||
|
--- oXp6GuozUs9gwzmGng/e0rDlerNKZJIi49ss+ZMAXRE
|
||||||
|
,MÝ
|
||||||
|
<EFBFBD>‰ï¤¾Ê†Ê5¸ì‹ëÔ[ëÉ3kËŸ€HÏþ-æ—ãßûfå.vów€0†€R_e„Dk±-
|
||||||
|
|
@ -1,20 +1,11 @@
|
||||||
age-encryption.org/v1
|
age-encryption.org/v1
|
||||||
-> ssh-ed25519 OkGqLg WT+iZEFDR8xC1ypj5lLjCc8Q8a3E/LSE29a8SyGpGwg
|
-> ssh-ed25519 +qVung ZFmXZSq+DvoNCgZHoqGmc0oVoxotWOnSNgIWrv5GLVw
|
||||||
1bwMz/pZPhrIpSXoWTda0ehVg9uHUA5LXu9ZOAp+jmE
|
x2lbZWRxOorYSlThNalW8F06vixFjB4cxvRoHbIMENU
|
||||||
-> ssh-ed25519 xR+E/Q 4LfYYJalhmJVWa3Edzy57LOeJAEKWazCNkhTlJisEVc
|
-> X25519 80hLko4Ont8T66KbHpegXfIcnbp8yNjS1cojiG7mvDY
|
||||||
Ab4PDdOHafkTcjRIzTs/hG92ueSF762TSIqsLTfM0oA
|
IbJYd0v8HdLhW+BziRD01Fmo94cDGR+0icvroonLlmo
|
||||||
-> ssh-ed25519 dcsaLw RZyn6l7iV4BWo5SX/8qf54un21EMAfypdLUAfPpmdnE
|
-> ssh-ed25519 C2zWnA XWgQLVQhfXI0H85TDhWur6AMeN5n60rHIbjF2T4N0Eo
|
||||||
f15CElv+PEWR6C3O8V2qbBe+RFgm/sfhwwSWgbYK14Q
|
kzyRRjCPW9cLi37l+2E1kNbr5dzTUHgMH5oaFwoNqS0
|
||||||
-> ssh-ed25519 zcCuhA 6eKqLkucV2KO9SEAFa4Lprq/+Hawi4EDkcZ83ktIbB8
|
-> ssh-ed25519 3T2Xig O5spWbLSVZyYNhd45L+voflabSnO4mq/8pWjpvO9kng
|
||||||
cJUyoe+e528ycKpmZbXb43QCixWudUCoVQYIFcy7UvM
|
92q0DaERvVViCfEN3nW1lcXdQ1vbWndLfX6CW1ysoVs
|
||||||
-> ssh-ed25519 1SxhRA XSaYWJCKyuT0G8DOTEVBfRUp8SGJuMfCkwZcwG2BC3U
|
--- LiNoXoSE5LMHeQAXBWkn6hSEdEI94id7pm8UzLDN6Lo
|
||||||
x95KAtE1txHaE/DiAL1SRGKt/aoaGpCyyCdqDF1v3vY
|
ŒÇê2dÄ=pÀ•£ÞOSâæ¥E8¡¬!§ëñsrZ £:àOÄÖ•ŸcCs'Yˆ¦•ð
|
||||||
-> ssh-ed25519 ch/9tw csU+Xmy01gzEtIeF8YubJWpdCLPUefnS25TqnAO3+C4
|
|
||||||
tuDdDnwq35mdYFnZ0PTeGf/+wAbfrJKOpdtZvO2QX+c
|
|
||||||
-> ssh-ed25519 CpJBgQ hQzuceRkrMcq8anAXFDfEzpx50K+eP5vSy4bgmGMC28
|
|
||||||
AfQORHySKKic2mkNTx8n/prxR8lbv6md28VV+Yjl0do
|
|
||||||
-> ssh-ed25519 aXKGcg kTkolPXSztb9g9xhpC/hDMwvbnsdkU36Mp/Zxk55QXc
|
|
||||||
7LeNOwPwyCgHGV6pedl6XqXiKwsAVCjvfEMuChnwUN4
|
|
||||||
--- zbFSUb7Js+C+da2a14MNu/TZhpw7psLfD9EfK+awlEQ
|
|
||||||
¦åDËÿ˜Rœ—@ˆDñƒOx凿™[Vxß
|
|
||||||
$²‹lC°Ù.°ŒWìœ]<y™½5¬ò‘C0~f
|
|
||||||
13
ops/secrets/buildkite-besadii-config.age
Normal file
13
ops/secrets/buildkite-besadii-config.age
Normal file
|
|
@ -0,0 +1,13 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung 3fkpILP2v3cya4RR/kfpBWEDZtmo1jn5d6L3EgvVVFk
|
||||||
|
6MsF0wGt/wfJTIgI0ahothPc2br3CplwRcoaDwLIkCQ
|
||||||
|
-> X25519 n21xXX/pcmB/+3PG+wlMuq6gaNwuERCvQqxDMxhepGk
|
||||||
|
VnbRBpA2zlIsrldqrJDYoSqivP69D5AVZ0xvo2w7dHQ
|
||||||
|
-> ssh-ed25519 C2zWnA ixzmDD0Ilj76ukzutqTLDeTBIIvvPIFW89UUJEE4slU
|
||||||
|
0IicUwhBo+9c+OxFge/UOYxRNfhzDt+Q2h+RagQnXfo
|
||||||
|
-> ssh-ed25519 x3gRmg ni3Jasf1IV71UVgcTFbf/atIOddr2lolLjurLpiNG0s
|
||||||
|
DA8WzuqXRMyY7gY62a7KNx04B54rV3g2tcNi2MiYmbA
|
||||||
|
-> ssh-ed25519 3T2Xig cg5Ki93tWlaURC/KRqE32oExDnvfcvEIfKNIJC3dKyg
|
||||||
|
ZfzJVZ2Bm6VVZUd6xOq77xbp5BWMcHdtAEB4LgdYIOg
|
||||||
|
--- TjhYtdAV2Qit8SgQ1ktCQxN7nbq96EcEdjAdQ8nKcu4
|
||||||
|
‡¾ïâáOÄï7_8ûj嬂ÅLëõ‰8Êœöçû§vÙõåÙ0Êd <64><&=âÍ-™Ä”J^;”\h–lmÈåׄ»lÌøùìDú«aQ¯(5§¸Ç2Ã~Xã@qØs7ÇÊ ä-yU‚«vkðÛqÕ‡¦Íÿ÷'Eqc<11>R`GÍUÂZ
Ð&mb'ˆ#
Ò´ÄÑ<C384>’¶ü¼²Òƒ$7kæŸ ŧ¸£Ø®æÅy뛺¡Èy/"÷· ,Ýö{‡c°Ö9çó\4ÜD[së¾<C3AB>å£êÍ0áù^bîTÔ‡ô a;©õgž4¯Pýå:‡ZÁ?úÆ2'6jœ[£ àG›klMƒ<k‡1ûn ã1n[9—$GX˜ê^dhîÛBR¬0–(fêµF®lî•›ó_/Fù¦÷ý§ê˜4Â…ÊI>Ø<>&Rµ¨KLÉŽo¼/Ž087ôP-«yŸþžðHí¼(ò”šFjÈÆ¶jµŸ
|
||||||
|
|
@ -1,19 +1,11 @@
|
||||||
age-encryption.org/v1
|
age-encryption.org/v1
|
||||||
-> ssh-ed25519 OkGqLg p2b6PpJcKcBQS6nUBtN33TTY/WhSkZyX11Qfr0uyji0
|
-> ssh-ed25519 +qVung MkmafLpQl6prK08B32McpcLtwx72k0bQ01oVtATnFgQ
|
||||||
YxLBGMuR4TYQkpyTZt/rjfNglqGCAPW6VqcSGDwUZJ4
|
7E8fi0BHzulXux6xPLP+hw6ugSOuZXPrWHGNxpf4IcM
|
||||||
-> ssh-ed25519 xR+E/Q 0rbUBI7F1Me6kkeeB5v7JLLXTvg4PlUiuVbo0LOlSg8
|
-> X25519 q7N6ltSbhrdmFFOttgg2KB5AQ7fsrXlogMNM+eYf/ng
|
||||||
9Np5qNztl7mQvM4r22icdmJsHisF2pnjmrefJ1FBKyc
|
KNiuCAjTK6/c8f2EIXolNC6nx8UycYTy4/L2ovnEi9E
|
||||||
-> ssh-ed25519 dcsaLw 3rf1PBUTKxMNdAwq5nfknBH5gtA/s1iOOc6p+U/0x0A
|
-> ssh-ed25519 C2zWnA U7793cywgqfxK+oeBawtbLazIjap/5v9MKfIDHzAsQ4
|
||||||
tO4pzdD+z+6Npm9l3gVgLO71VJmiVSGq3FGaaWfSNzk
|
1kQphItEbxQbIN9kbvcypgDh6Glzbvaz9CSSv3Flhq0
|
||||||
-> ssh-ed25519 zcCuhA Ad5xMaTCB8pcxy/X31vKsNhC3uCex/2+ykQ/1BdPMRE
|
-> ssh-ed25519 3T2Xig HEo6B3qYzxQlKosmFaIpjV917tNaoZaphDDH4e9enD4
|
||||||
ENXTFjlPqNRARONR6lfRdpQdYxH7Pnu28JOBNN3eM2Q
|
7FD4U32n7mUzBGNvx/kjeFbzxR4ntixqiQoethtQPm8
|
||||||
-> ssh-ed25519 1SxhRA UCsz/7KohWfkOBK66YafcU93GLCrihY29Pnzdy4TID8
|
--- Jae1QioQwGwCJzOvRdlvMUVRCj9g4lt7YiR0AZxkAU4
|
||||||
RUvL9DqAc8a9okBKlnADYyNpyABbuHXinn+Uit2OKy4
|
þþ[½w?ò <20>§g©_,áÚU?V‹.€†º-I<ô!ÀEÉâŒù?™uÄë¡(+¬f»Àc!°U»Ç ßÿ2ãÀµœQr
|
||||||
-> ssh-ed25519 ch/9tw 6y9zzfdWun5WV5IQsWHSnEI6VhWvwWMuBfQRHvnnxg0
|
|
||||||
FPIxsRo6cUYZ4jK03Lbj1kLkrEZsIa32p2IczSZTQvI
|
|
||||||
-> ssh-ed25519 CpJBgQ zEsWfAt2HUk3wHtnFzF0D4aKy4isM4AQLEdbkWh++j4
|
|
||||||
3m50cqrB2FlkZy3dKT9UcCaeOlXsOyz+v2p0PD8n9hw
|
|
||||||
-> ssh-ed25519 aXKGcg +KQkCsVWzXl6Ed6KIv3jGU4UlQnKmgS2A47esh245ms
|
|
||||||
fmrn4wUggVtnU0xLyVYqNcnScd/ZdECYVylrRID399s
|
|
||||||
--- S0uHuiqWz9M3xubQTE4OU39h74ENmRBD4p896amTuFU
|
|
||||||
•³ë½L È bd&Ôú½êãK‡Þ+0¶7ô»"õ“\z‚ è. ôöY¥llŒ
p~ἫpLCEg÷²ÕI
|
|
||||||
Binary file not shown.
11
ops/secrets/forgejo-oauth-secret.age
Normal file
11
ops/secrets/forgejo-oauth-secret.age
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung 8iqOuW6HDFzo0OdoyM0E0hSIl/Ow0e5/PV7z0hkzCgU
|
||||||
|
DYyC80e9XV/a91NBgkD+mOQfc4TCKJrMjL7z+/DAeWM
|
||||||
|
-> X25519 C6/8kewhvSzFZG7ElpgVz8Pji3sKIpnvjH9PJWXZ0mM
|
||||||
|
znSY1QbscbPqlEcATRyeJBUwwLyXWyT7i/CQu/XJg2s
|
||||||
|
-> ssh-ed25519 C2zWnA Z2/nUTlz/ryC2CHeVuhmry9eIV2Oe8lH0hTpeyJfD1c
|
||||||
|
GsE3eELzxivFKYfw2MXC/jJhdP6tooGVqUCfUjVSfzM
|
||||||
|
-> ssh-ed25519 uZGziw EwZG9bpYuh8610nNV/9iP7v3c9WE82sijCvNbtoNTRM
|
||||||
|
7yO2Bblf1fbGIuwfa3hF7T+xUmrEAtueDGTPtGkv1mE
|
||||||
|
--- aZjZHNae86WtaWj/KYnLH12DsryG8WPsxLEBULJj1+A
|
||||||
|
üÌuáõ †²Î#ýe–AÂÝÿ¥¾p<C2BE>é†^ñïfÌá««%líj-ráqö‹q¬ÞXÉ^Ì;&äÎh¨Ð+Pë7
|
||||||
11
ops/secrets/gerrit-oauth-secret.age
Normal file
11
ops/secrets/gerrit-oauth-secret.age
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung CNWkmpktIsB/XR4m20Zhhqb54CkxizUW8EU/UdYZ9DI
|
||||||
|
Xt7Rjr9MzceUIvRqkszaBsrrYtgj9mVCYeKZmgw6Fm8
|
||||||
|
-> X25519 UhegFmsgtO0412BIIyUhWKsokIuIVdzprxUzJQ4AWBA
|
||||||
|
mzaUhweKSBbtSNlsoLOOQPUiYg7lubiZAjBaPApyMtg
|
||||||
|
-> ssh-ed25519 C2zWnA qGtHvxEIG70n6DBaPIomDwZ61/UvUHk10SblLCaaL2A
|
||||||
|
xFkJfWcpCE9+YwpQR5HE8KA7kEZy6UL5X78hFWvC5mI
|
||||||
|
-> ssh-ed25519 x3gRmg 4BBGKNHHg7JBUeZ2bfeIQq5tSZBHP40ukJCX3yOLjgk
|
||||||
|
JPVfhyh2G9XC4vt66Sv9Mu1dtJjuxadnASLo2aaLf8k
|
||||||
|
--- Ttka8+om8tOvGOGDQu/xm0q1jLUdtmYLuKkDKB7u0ok
|
||||||
|
9øp …ÊŠÔÐàö<C3A0>dŒú?Vº?eBÂ^%r3ñíWÔ|”Wf™àÀJäÐ’4ö»‡pœ¿¦ÿÖOb³n›·gÄdÊA
|
||||||
BIN
ops/secrets/gerrit-replication-key.age
Normal file
BIN
ops/secrets/gerrit-replication-key.age
Normal file
Binary file not shown.
17
ops/secrets/grafana-agent-password.age
Normal file
17
ops/secrets/grafana-agent-password.age
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung M6L0Lr7nf8C2Bvq3yK9BpDkbFShdYa8xJJNJfw6fSEM
|
||||||
|
j/s8WqJlEySvUr52noQi8yclurRNWl8E/jKKpBiapyY
|
||||||
|
-> X25519 pbd5f6XiLXiFQ6uV4P071j2Q6qCQzrK3aF3ln3C532k
|
||||||
|
HvrV8RvjysTn8eSMqGVKwhCxjTEnvdm7hmde4hyCLRI
|
||||||
|
-> ssh-ed25519 C2zWnA myuJkAJcbRLYNZJJk6UCu+lp5DjmesusJdpE2FbjNA4
|
||||||
|
6YgIR3q7+27SQxHlKDJLLcESge21IaZcXXw0pkz0hSg
|
||||||
|
-> ssh-ed25519 x3gRmg 2uQHWIxs9okVv+kSJaLXeTibUIsVzuFkLjluClzINQQ
|
||||||
|
q1tCF2imWqStdjDsiUkmbl2jPYza4Gtht1IUw75uzpg
|
||||||
|
-> ssh-ed25519 uZGziw Da2diR7zKn6aBbpJdqlTDow7wuICg0uS8hpvDr6bxSs
|
||||||
|
mX/4Z66pX+kpA1Uw9pGxzdlEOdRmFrzaMIdCQH04XMg
|
||||||
|
-> ssh-ed25519 3T2Xig yHEKJv/U07xcpAwCHlDTVKLcCIs8/eJ4fpm6ul9mb2M
|
||||||
|
aHVfdIQcBlAoWuGJGqTfZUB/tROk1ZHlle/1BqDySGA
|
||||||
|
-> ssh-ed25519 LzO4tw b46U8tzzshDbSAUlUVRoVMPd0mUHDgoEPhCH12Ew73Q
|
||||||
|
EnlrYB+Hf47svM4Ha3BQVRIYGI+XaWUKLzbRvAa3M2I
|
||||||
|
--- p5jfa1L6lRIgt/Twyi3EaFAWiUVrDLo051N2a61qkUw
|
||||||
|
Øz'Ùy–#Ûá€Þ^&¶+7:®ÖÇgÀÑÂwtìyó4ŽjÆHZï]‘ÏÞGä|áÝFÞ2بÊJ
|
||||||
12
ops/secrets/grafana-oauth-secret.age
Normal file
12
ops/secrets/grafana-oauth-secret.age
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung Jv9hZHfNQeWjgop6+YbfmYHCgRByjfyXmTvxoA9vZ2U
|
||||||
|
lzwj3VNiFkjNgix5k7HLhqC9tt+poR+EilsEKBHQVaU
|
||||||
|
-> X25519 f0zBSraN1uTvZNfybRJpDhDiXnohF5XDBZe1GCnGmhc
|
||||||
|
+2kNv3oO5flAxp8ESSXYu968tTBLEzg2K1YJP3KJlL0
|
||||||
|
-> ssh-ed25519 C2zWnA 8jygqU5lD8UrybSLuE1gw0VM8YMwT2nqFkcykelZFUo
|
||||||
|
7YJH6vUi1FSpIBZ9L5lchw0OPjokC8QTJZ+fRBNUQgE
|
||||||
|
-> ssh-ed25519 uZGziw LkSTjIdlHXZCeVwiBatnbKZgmgYIJ2kzpXd4Kq2lpCA
|
||||||
|
3GcixBxUmcwXGWhUB8lgGbrLtg+j3QTeCWtVL9i/t8w
|
||||||
|
--- F2fzakv5un2T3gOOGi8aDqaFW666P765JkeBNuxDRTI
|
||||||
|
Ϩcã•ñZV÷Г
|
||||||
|
÷Ú®`<60>ñìúáÇú-'È¢€+DMß=s\KUÚóbªÕù~T°C)cµHàãZm
|
||||||
BIN
ops/secrets/keycloak-db-password.age
Normal file
BIN
ops/secrets/keycloak-db-password.age
Normal file
Binary file not shown.
|
|
@ -1,19 +0,0 @@
|
||||||
age-encryption.org/v1
|
|
||||||
-> ssh-ed25519 OkGqLg 85hbcQ9r29CC95B1CXO+uftm7ywhTeWCpklX4hOc2gU
|
|
||||||
7EO8O5/eg1noB8nbl9XL+m8WAvLp6QnA25CiTsp5jfY
|
|
||||||
-> ssh-ed25519 xR+E/Q Hefp9fWCq9sWdgyKp3gNEO1p9yWFK4sYX8xMxkyy9G4
|
|
||||||
JXofip2LGkJFDBb+6DegoFGDPjk8FGF+AqaAy5FPqwk
|
|
||||||
-> ssh-ed25519 dcsaLw IUoPTD1SfnY/wXXFcIc6h47fea6ukWAurUmfqwTQOAs
|
|
||||||
G/YeKUk8IQXBQ1q8338HxUg2vXqmh8LOIHSX4Qn1CFo
|
|
||||||
-> ssh-ed25519 zcCuhA 2LrAbe+Jpsg6gFzbnx3ppDesbQSWqzHs2uOv9szb80U
|
|
||||||
idJNMv6Lf0k2NsfOcm7it8LwPYxjdq7+LS7PUzQ89Qg
|
|
||||||
-> ssh-ed25519 1SxhRA idcz/kk9WyIA4I2NwzzPiMX0AmXkV3FTHxoE12n2eWQ
|
|
||||||
e+4am77QT0fDv9Xgci4L+VsgFyKT4ZHjB0FWe76hV3g
|
|
||||||
-> ssh-ed25519 ch/9tw RNZWeD7W18wpcpBksipmib6vhHmaCP5iQeK4uLHU604
|
|
||||||
bH/PJprw6+jEktmPnS3OrGMtJ/XHYVZQoQRdReLkLYM
|
|
||||||
-> ssh-ed25519 CpJBgQ w9gTapqMBoJl+C4sWIGIDCZpemRCEu1iDUUWFt2rW0A
|
|
||||||
1nYU4UiHYT9vPASYHwunK2Td+acAmjzRpFpLioNneJc
|
|
||||||
-> ssh-ed25519 aXKGcg evsnA8cq5xz+0GdKT7cBZWckBpX+w05yLOOaOL4+0BM
|
|
||||||
LigkUyewAl+O0KBKuykbwKzFTCY5n6lnCcarl2Vu0NE
|
|
||||||
--- KhA9LmsAYYPMrJrsJYZyEq04LvrMZkJaCL5Bt5ruYD0
|
|
||||||
VÚOÜ{$®ÞÁ[ônÔýG7
/ÀgÔ0¾“ÊqCnˆé9ÝÃÙ(g¼ô<C2BC>¨®áb@¢
|
|
||||||
11
ops/secrets/metrics-push-htpasswd.age
Normal file
11
ops/secrets/metrics-push-htpasswd.age
Normal file
|
|
@ -0,0 +1,11 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung sFiPxBiVIYJPK2dYZmzCJ4Xv6x0qPmAjBmegh3EN2RE
|
||||||
|
oyQ7LKEKQK8R1fDFq5v9gLh4ZSYsRBKijz8jq638QKM
|
||||||
|
-> X25519 MAlDEEtm8yz+mtLnsWTSw/iDMn9SsY20inGM4gwKsWc
|
||||||
|
5l7cw0zHMOKXkYKxvFGNYGqMuLk8KQJKOCUnHNlQVaU
|
||||||
|
-> ssh-ed25519 C2zWnA du7PyTSMnqJCQH/TXLh2uzhdjmnQbh6KxRJ5M5W9fxI
|
||||||
|
GRaZU3cCe/wHNmnrMP5EeSf0Z3xtV/XRY41jc+fooUA
|
||||||
|
-> ssh-ed25519 LzO4tw lQVDe2IUXkk30rn1C7LEnBAE92v1Tx/zTyiLT45DZHw
|
||||||
|
HfynROGBmyICXVs0Gc+/yTlFazuz2WyCq80Y2ciNhwc
|
||||||
|
--- 5ars0rPwU9G2blh2eOKmGt28AdawIPXAWuZrd79rKDw
|
||||||
|
d<EFBFBD>{Ë‘ÛHøÒ^nožz”<<3C>CLý~îMˆ\EeN¯ï~µŸðW;"úÓYDpŽ7 tðˆ+ì-J¼ýã‰|q«¡Þ?/ß¹jFRX õªØãÏbÁฬjÂ,šuË'šë<C5A1>Ë¿ÓLE@¥æ
|
||||||
12
ops/secrets/mimir-environment.age
Normal file
12
ops/secrets/mimir-environment.age
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung P6nn2lXFAdzZmq9Uca1Y5V+yUuNe1NtTsb9v1GEpLTE
|
||||||
|
iX//2OZXlnc5a8GDnFqc2AwvnE5F934YqrIHmasKXjI
|
||||||
|
-> X25519 TTeB7TDM1XHpDU+5yn65j/wUKH1AA2qhv7FOWgxv3X4
|
||||||
|
fS3BZ7dbToKruan7N8HW2YsHSCvy803Mdqc9VymonRE
|
||||||
|
-> ssh-ed25519 C2zWnA YzTQJhSKUp9PsRjyY0NchhsdOCO29f1Gy6MyoxNJmmM
|
||||||
|
Tr/K0YK+NrNvTuamgU3QANHMW4gFSzTX3fj3iJ27MNQ
|
||||||
|
-> ssh-ed25519 LzO4tw 2Ea1tYhWtNV72FCqoZx7E4B0KhDxVBv88nhyFyBxkwk
|
||||||
|
tYBPAYF4T40TIABaAZ79pCtJ9XGPRN4N7sDIlQreizg
|
||||||
|
--- +0LKKglhNwUNcFTCFPXyjUSdao0xG6t18Z/2o/XIHj8
|
||||||
|
(MLDîÖ=vümèø±j®óðIê4No@a5S¶“àéÑÑ×ùÑ+%
|
||||||
|
ÐD±[¯>µÄg¤ºÌjMÍ´D5’(‚å&ie)‚æ`µ™¿>)äè¬wñqÑèowô:r³!å|¯øïz]ã{L‡°ëû
|
||||||
12
ops/secrets/mimir-webhook-url.age
Normal file
12
ops/secrets/mimir-webhook-url.age
Normal file
|
|
@ -0,0 +1,12 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung GkyQbgnYLKVP5aStgR2xpTBWLQjRRrB0iZFh34bz6Ek
|
||||||
|
5MkXZmVlI77MlI7Q4u7rJpXlOvy0lhSX0zMRlf5PnsE
|
||||||
|
-> X25519 Ulq3EUtAo75G3++IwtcJ0HTV/WUJLxRD1CSlhDvBMjY
|
||||||
|
YnYrw6Kgy9H9M3tS80wHr3aMDA4UJaZZqFVTFy5GL4k
|
||||||
|
-> ssh-ed25519 C2zWnA V89wrBq9DMsnDc5OuO6Sd5Lah1r+QmVLjoQZE51ymzU
|
||||||
|
On5m4AXJFBo5Egk66tqxViK2/C8/taLukADxwRpn7gU
|
||||||
|
-> ssh-ed25519 LzO4tw 3rGT8uh5vI8uN1NrhSovG3KjVSLJBbxLrwz7vvOQVms
|
||||||
|
DugUB6q10ckKY9MocAhrYiVwYrW5fhC7MgXnIdHbjfk
|
||||||
|
--- h69fCsqmWgpINlubOaE0nq+pYwizAaMQjiuibM1hAXg
|
||||||
|
Æ`ŠÈqÝ/㨆\,íài±â+HU|«»Áß–ã-æ!<21>ïŽ\ôõ
|
||||||
|
Êéi„w¶ë˜'a˜P‚…‹
|
||||||
|
|
@ -9,6 +9,7 @@ let
   inherit (depot.nix.yants)
     attrs
     any
+    either
     defun
     list
     path

@ -17,7 +18,8 @@ let
     struct
     ;
   ssh-pubkey = restrict "SSH pubkey" (lib.hasPrefix "ssh-") string;
-  agenixSecret = struct "agenixSecret" { publicKeys = list ssh-pubkey; };
+  age-pubkey = restrict "age pubkey" (lib.hasPrefix "age") string;
+  agenixSecret = struct "agenixSecret" { publicKeys = list (either age-pubkey ssh-pubkey); };
 in
 
 defun [ path (attrs agenixSecret) (attrs any) ]
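With the widened agenixSecret type, each entry in the secrets attrset may now mix SSH and age recipients. A minimal sketch of the accepted shape (not part of this commit; the key material below is placeholder):

  {
    "example-secret.age".publicKeys = [
      "ssh-ed25519 AAAA... user@host" # placeholder SSH recipient
      "age1qqqq..."                   # placeholder age recipient
    ];
  }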
BIN
ops/secrets/restic-bucket-credentials.age
Normal file
BIN
ops/secrets/restic-bucket-credentials.age
Normal file
Binary file not shown.
17
ops/secrets/restic-repository-password.age
Normal file
17
ops/secrets/restic-repository-password.age
Normal file
|
|
@ -0,0 +1,17 @@
|
||||||
|
age-encryption.org/v1
|
||||||
|
-> ssh-ed25519 +qVung fEPHUIHGP1lq7BefcyrrBhsh2pIcfBYOp8JdA8gLlF8
|
||||||
|
xfSkbL06XJnDWwMYbYbDb9aL1ZiMFADPJMsA9Yc8OGc
|
||||||
|
-> X25519 7A8fombypOBpzi0kY9KEaXXUaK2/TqZxDdA7xdBZq3s
|
||||||
|
O+zKJvjLihSHagyexDWPb7B6BYXWtqDG5jNe1Cy7elE
|
||||||
|
-> ssh-ed25519 C2zWnA THArgNXfZTE7IEDSx5btgH2M26LEWf7xil0fpvyG/HU
|
||||||
|
IEhcR42wwsfZBzFomZQjyX3aTxM72Mq/9lYJypH4fCI
|
||||||
|
-> ssh-ed25519 x3gRmg LppryFQR0QVRXNudhnH1xagauUB3qJku3tGwnP7uwnM
|
||||||
|
E4Np7eNxtUWgCgeNeKKRHRUw3e2n7UJiVRRM6Rq6M1M
|
||||||
|
-> ssh-ed25519 uZGziw omZBRMsKsgRgvV18Kx1RKrrT79T5Ec6AJLLqyZ8NcDY
|
||||||
|
xofz1/6hFIYy9cX5xh59EYN5yXnTnoeLpqoaqF91NWA
|
||||||
|
-> ssh-ed25519 3T2Xig UK5jbvRneWnyiWXIr7LOUofglIcLixZ1Kk3azGtLaSA
|
||||||
|
tTzIXc8Z3rxyudWGRifZXdva4ThElZryfWQk/jO7Cl4
|
||||||
|
-> ssh-ed25519 LzO4tw plLskc3SFPm2sXhZMLAPTC6abHbe4JP5+/utI29SI1Y
|
||||||
|
qpna8eExCro5pqImBFDCOC4A6tSxp788qZU3U1ugqb8
|
||||||
|
--- qyCRIv4thNmOpjMx3QHMxFFT04kLyvUiGnt7Goj4ri0
|
||||||
|
›<EFBFBD>ο$C[Fe&>G¶›}©Ÿ%V66²2À¯w2<77>)Ö¤°Í<C2B0>
¬%;XvØzœJôଚR¹_/j½(¹yEqD]›
|
||||||
|
|
@ -1,67 +1,59 @@
 let
-  tazjin = [
-    # tverskoy
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM1fGWz/gsq+ZeZXjvUrV+pBlanw1c3zJ9kLTax9FWQy"
-
-    # zamalek
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDBRXeb8EuecLHP0bW4zuebXp4KRnXgJTZfeVWXQ1n1R"
-
-    # khamovnik
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID1ptE5HvGSXxSXo+aHBTKa5PBlAM1HqmpzWz0yAhHLj"
-
-    # arbat
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ1Eai0p7eF7XML5wokqF4GlVZM+YXEORfs/GPGwEky7"
+  raito = [
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICaw9ihTG7ucB8P38XdalEWev8+q96e2yNm4B+/I9IJp"
   ];
 
-  aspen = [
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMcBGBoWd5pPIIQQP52rcFOQN3wAY0J/+K2fuU6SffjA "
+  edef = [
+    "age1n8vj5s4s9vyl8cq76q3mxaj5yxhmeuzh3puffp27j59e6vsj9frq34f90r"
   ];
 
-  sterni = [
-    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJk+KvgvI2oJTppMASNUfMcMkA2G5ZNt+HnWDzaXKLlo"
+  flokli = [
+    "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPTVTXOutUZZjXLB0lUSgeKcSY/8mxKkC0ingGK1whD2 flokli"
   ];
 
-  flokli = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPTVTXOutUZZjXLB0lUSgeKcSY/8mxKkC0ingGK1whD2 flokli";
-
-  sanduny = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOag0XhylaTVhmT6HB8EN2Fv5Ymrc4ZfypOXONUkykTX";
-  nevsky = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHQe7M+G8Id3ZD7j+I07TCUV1o12q1vpsOXHRlcPSEfa";
-  bugry = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGqG6sITyJ/UsQ/RtYqmmMvTT4r4sppadoQIz5SvA+5J";
-
-  admins = tazjin ++ aspen ++ sterni;
-  allHosts = [ sanduny nevsky bugry ];
-  for = hosts: {
-    publicKeys = hosts ++ admins;
-  };
+  gerrit01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN+RCLAExaM5EC70UsCPMtDT1Cfa80Ux/vex95fLk9S4 root@gerrit01";
+  public01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICzB7bqXWcv+sVokySvj1d74zRlVLSNqBw7/OY3c7QYd root@public01";
+  build01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEteVaeN/FEAY8yyGWdAbv6+X6yv2m8+4F5qZEAhxW9f root@build01";
+  meta01 = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINj2csTShq5PsmB/T0596TASyf7VImD4592HEqaYHgKh root@meta01";
+
+  superadmins = raito ++ edef ++ flokli;
+
+  allDefault.publicKeys = superadmins ++ [ gerrit01 public01 build01 meta01 ];
+  terraform.publicKeys = superadmins;
+  gerrit01Default.publicKeys = superadmins ++ [ gerrit01 ];
+  public01Default.publicKeys = superadmins ++ [ public01 ];
+  build01Default.publicKeys = superadmins ++ [ build01 ];
+  meta01Default.publicKeys = superadmins ++ [ meta01 ];
+  ciDefault.publicKeys = superadmins ++ [ gerrit01 build01 ];
 in
 {
-  "besadii.age" = for [ nevsky ];
-  "buildkite-agent-token.age" = for [ nevsky ];
-  "buildkite-graphql-token.age" = for [ nevsky ];
-  "buildkite-ssh-private-key.age" = for [ nevsky ];
-  "clbot-ssh.age" = for [ nevsky ];
-  "clbot.age" = for [ nevsky ];
-  "depot-inbox-imap.age" = for [ sanduny ];
-  "depot-replica-key.age" = for [ nevsky ];
-  "gerrit-autosubmit.age" = for [ nevsky ];
-  "gerrit-secrets.age" = for [ nevsky ];
-  "grafana.age" = for [ nevsky ];
-  "irccat.age" = for [ nevsky ];
-  "journaldriver.age" = for allHosts;
-  "keycloak-db.age" = for [ nevsky ];
-  "nix-cache-priv.age" = for [ nevsky ];
-  "nix-cache-pub.age" = for [ nevsky ];
-  "owothia.age" = for [ nevsky ];
-  "panettone.age" = for [ nevsky ];
-  "restic-bugry.age" = for [ bugry ];
-  "restic-nevsky.age" = for [ nevsky ];
-  "restic-sanduny.age" = for [ sanduny ];
-  "smtprelay.age" = for [ nevsky ];
-  "teleirc.age" = for [ nevsky ];
-  "tf-buildkite.age" = for [ /* humans only */ ];
-  "tf-glesys.age" = for [ /* humans only */ ];
-  "tf-keycloak.age" = for [ flokli ];
-  "tvl-alerts-bot-telegram-token.age" = for [ nevsky ];
-  "wg-bugry.age" = for [ bugry ];
-  "wg-nevsky.age" = for [ nevsky ];
-  "yc-restic.age" = for [ nevsky sanduny bugry ];
+  "grafana-agent-password.age" = allDefault;
+
+  "restic-repository-password.age" = allDefault;
+  "restic-bucket-credentials.age" = allDefault;
+
+  "keycloak-db-password.age" = public01Default;
+  "gerrit-oauth-secret.age" = gerrit01Default;
+  "gerrit-replication-key.age" = gerrit01Default;
+  "gerrit-autosubmit.age" = gerrit01Default;
+
+  "forgejo-oauth-secret.age" = public01Default;
+  "grafana-oauth-secret.age" = public01Default;
+
+  "buildkite-agent-token.age" = build01Default;
+  "buildkite-ssh-private-key.age" = build01Default;
+  "buildkite-besadii-config.age" = ciDefault;
+  "buildkite-graphql-token.age" = build01Default;
+
+  "metrics-push-htpasswd.age" = meta01Default;
+  "alertmanager-irc-relay-environment.age" = meta01Default;
+  "mimir-environment.age" = meta01Default;
+  "mimir-webhook-url.age" = meta01Default;
+  "loki-environment.age" = meta01Default;
+
+  "tf-dns.age" = terraform;
+  "tf-keycloak.age" = terraform;
+  "tf-hcloud.age" = terraform;
+  "tf-hetzner-s3.age" = terraform;
+  "tf-buildkite.age" = terraform;
 }
||||||
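For orientation, a minimal sketch (not part of this commit) of how one of the secrets declared above could be consumed on build01. It assumes a plain agenix setup on the host and the nixpkgs Buildkite agent module; treat the option paths, the relative file path, and the agent name as assumptions rather than the actual Snix wiring.

{ config, ... }:
{
  # agenix convention: decrypt the .age file at activation and expose its path.
  # The relative path assumes this module lives next to ops/secrets.
  age.secrets.buildkite-agent-token.file = ./buildkite-agent-token.age;

  # Hand the decrypted token to a (hypothetical) Buildkite agent definition.
  services.buildkite-agents.agent.tokenPath =
    config.age.secrets.buildkite-agent-token.path;
}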
Binary file not shown.
ops/secrets/tf-dns.age (new file, 9 lines)
@@ -0,0 +1,9 @@
age-encryption.org/v1
-> ssh-ed25519 +qVung bfXcCFJcGZMG7wn8/9WXcZNZ5GlinAZWMAcC1NhQ1Fk
NNw4bHM7t3UFYapqdSwfY5y+2vAVUsEtP42KUYRO5QI
-> X25519 wNi7DKA0ego6INa6mKuqy3JDfj3bt6EAz5wdcBIuwF4
mnTNxiy4NXCBb6L8SKbFSfyBaVt9q2bq33DxHh7RhaI
-> ssh-ed25519 C2zWnA JsfydhJKmS72cyDYruJiq0AXStdZRfTeluZg7iSbS10
kUrGkroP+sGLvHZKtOOsZg+PO18VjdEqgcIUlPiQbp4
--- KHJEkrcyHBIUPQaLKydcvd3uxue+2hDkJK4zze4BRYc
(binary payload not shown)
ops/secrets/tf-hcloud.age (new file, binary): binary file not shown
ops/secrets/tf-hetzner-s3.age (new file, binary): binary file not shown
third_party/alertmanager-irc-relay/default.nix (new file, vendored, 4 lines)
@@ -0,0 +1,4 @@
{ depot, ... }: {
  package = import ./package.nix;
  module = import ./module.nix;
}
third_party/alertmanager-irc-relay/module.nix (new file, vendored, 53 lines)
@@ -0,0 +1,53 @@
{ config
, lib
, pkgs
, ...
}:
let
  cfg = config.services.alertmanager-irc-relay;
  yaml = pkgs.formats.yaml { };
  configFile = yaml.generate "config.yaml" cfg.settings;
  inherit (lib) mkEnableOption mkIf types mkOption mapAttrs mkPackageOption;
in
{
  options.services.alertmanager-irc-relay = {
    enable = mkEnableOption "Alertmanager IRC relay";
    package = mkPackageOption pkgs "alertmanager-irc-relay" { };
    settings = mkOption {
      type = types.attrsOf yaml.type;
    };
    environmentFiles = mkOption {
      type = types.listOf types.path;
    };
  };

  config = mkIf cfg.enable {
    systemd.services.alertmanager-irc-relay = {
      description = "Alertmanager IRC Relay Service";
      after = [ "network.target" ];
      wantedBy = [ "multi-user.target" ];

      serviceConfig = {
        ExecStart = "${lib.getExe cfg.package} --config ${configFile}";
        Restart = "always";
        DynamicUser = true;
        ProtectSystem = "strict";
        ProtectHome = true;
        NoNewPrivileges = true;
        EnvironmentFile = cfg.environmentFiles;
      };
    };

    services.alertmanager-irc-relay.settings = mapAttrs (_: lib.mkDefault) {
      http_host = "localhost";
      http_port = 8000;

      msg_once_per_alert_group = true;
      use_privmsg = false;

      msg_template = "Alert {{ .Labels.alertname }} on {{ .Labels.instance }} is {{ .Status }}";
      alert_buffer_size = 2048;
    };
  };
}
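A minimal usage sketch (not part of this commit): enabling this module on a host, assuming the depot exposes this directory as depot.third_party.alertmanager-irc-relay, that the host decrypts alertmanager-irc-relay-environment.age via agenix, and that the irc_* keys match the upstream example configuration; all of these are assumptions.

{ depot, config, ... }:
{
  imports = [ depot.third_party.alertmanager-irc-relay.module ];

  services.alertmanager-irc-relay = {
    enable = true;

    # The module defaults above (http_host, msg_template, ...) still apply
    # through lib.mkDefault; only network-specific settings are added here.
    settings = {
      irc_host = "irc.hackint.org"; # hypothetical IRC network
      irc_port = 6697;
      irc_nickname = "snix-alerts"; # hypothetical nickname
      irc_channels = [ { name = "#monitoring"; } ]; # hypothetical channel
    };

    # Extra environment for the service; the module wires this list into
    # systemd's EnvironmentFile, e.g. for credentials kept out of the store.
    environmentFiles = [ config.age.secrets.alertmanager-irc-relay-environment.path ];
  };
}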
third_party/alertmanager-irc-relay/package.nix (new file, vendored, 29 lines)
@@ -0,0 +1,29 @@
{ lib
, buildGoModule
, fetchFromGitHub
,
}:

buildGoModule rec {
  pname = "alertmanager-irc-relay";
  version = "0.5.1";

  src = fetchFromGitHub {
    owner = "google";
    repo = "alertmanager-irc-relay";
    rev = "v${version}";
    hash = "sha256-Rl7o2QPa/IU1snlx/LiJxQok9pnkw9XANnJsu41vNlY=";
  };

  vendorHash = "sha256-KX+TR0n14+95lldF+0KUo5DbqOKpUDaZNuKMBf0KHFQ=";

  ldflags = [ "-s" "-w" ];

  meta = {
    description = "Send Prometheus Alerts to IRC using Webhooks";
    homepage = "https://github.com/google/alertmanager-irc-relay";
    license = lib.licenses.asl20;
    maintainers = with lib.maintainers; [ raitobezarius ];
    mainProgram = "alertmanager-irc-relay";
  };
}
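A minimal sketch (not part of this commit) of building this derivation directly against nixpkgs, outside whatever depot wiring normally imports it:

# build.nix: instantiate the vendored package with a plain nixpkgs.
let
  pkgs = import <nixpkgs> { };
in
pkgs.callPackage ./package.nix { }

Running nix-build build.nix should then produce the relay binary, assuming the pinned rev and hashes still resolve upstream.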
Some files were not shown because too many files have changed in this diff.