chore(*): drop everything that is not required for Tvix

Co-Authored-By: edef <edef@edef.eu>
Co-Authored-By: Ryan Lahfa <raito@lix.systems>
Change-Id: I9817214c3122e49d694c5e41818622a08d9dfe45
Florian Klink 2025-01-05 17:12:30 +01:00
parent bd91cac1f3
commit df4500ea2b
2905 changed files with 34 additions and 493328 deletions

@@ -1,3 +0,0 @@
.terraform*
terraform.tfstate*
.envrc

@@ -1,20 +0,0 @@
Terraform for GleSYS
======================
This contains the Terraform configuration for deploying TVL's
infrastructure at [GleSYS](https://glesys.com). This includes object
storage (e.g. for backups and Terraform state) and DNS.
Secrets are needed for applying this. The encrypted file
`//ops/secrets/tf-glesys.age` contains `export` calls which should be
sourced, for example via `direnv`, by users with the appropriate
credentials.
An example `direnv` configuration used by tazjin is this:
```
# //ops/secrets/.envrc
source_up
eval $(age --decrypt -i ~/.ssh/id_ed25519 $(git rev-parse --show-toplevel)/ops/secrets/tf-glesys.age)
watch_file $(git rev-parse --show-toplevel)/ops/secrets/tf-glesys.age
```

@@ -1,15 +0,0 @@
{ depot, lib, pkgs, ... }:
depot.nix.readTree.drvTargets rec {
# Provide a Terraform wrapper with the right provider installed.
terraform = pkgs.terraform.withPlugins (_: [
depot.third_party.terraform-provider-glesys
]);
validate = depot.tools.checks.validateTerraform {
inherit terraform;
name = "glesys";
src = lib.cleanSource ./.;
env.GLESYS_TOKEN = "ci-dummy";
};
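# Hypothetical usage from a depot checkout (the attribute path is
# assumed here, not taken from this file):
#
#   nix-build -A ops.glesys.terraform && ./result/bin/terraform plan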
}

@@ -1,44 +0,0 @@
# DNS configuration for nixery.dev
#
# TODO(tazjin): Figure out what to do with //ops/dns for this. I'd
# like to keep zonefiles in case we move providers again, but maybe
# generate something from them. Not sure yet.
resource "glesys_dnsdomain" "nixery_dev" {
name = "nixery.dev"
}
resource "glesys_dnsdomain_record" "nixery_dev_apex_A" {
domain = glesys_dnsdomain.nixery_dev.id
host = "@"
type = "A"
data = var.bugry_ipv4
}
resource "glesys_dnsdomain_record" "nixery_dev_apex_AAAA" {
domain = glesys_dnsdomain.nixery_dev.id
host = "@"
type = "AAAA"
data = var.bugry_ipv6
}
resource "glesys_dnsdomain_record" "nixery_dev_NS1" {
domain = glesys_dnsdomain.nixery_dev.id
host = "@"
type = "NS"
data = "ns1.namesystem.se."
}
resource "glesys_dnsdomain_record" "nixery_dev_NS2" {
domain = glesys_dnsdomain.nixery_dev.id
host = "@"
type = "NS"
data = "ns2.namesystem.se."
}
resource "glesys_dnsdomain_record" "nixery_dev_NS3" {
domain = glesys_dnsdomain.nixery_dev.id
host = "@"
type = "NS"
data = "ns3.namesystem.se."
}

@@ -1,54 +0,0 @@
# DNS configuration for tvix.dev
resource "glesys_dnsdomain" "tvix_dev" {
name = "tvix.dev"
}
resource "glesys_dnsdomain_record" "tvix_dev_apex_A" {
domain = glesys_dnsdomain.tvix_dev.id
host = "@"
type = "A"
data = var.bugry_ipv4
}
resource "glesys_dnsdomain_record" "tvix_dev_apex_AAAA" {
domain = glesys_dnsdomain.tvix_dev.id
host = "@"
type = "AAAA"
data = var.bugry_ipv6
}
resource "glesys_dnsdomain_record" "tvix_dev_bolt_CNAME" {
domain = glesys_dnsdomain.tvix_dev.id
host = "bolt"
type = "CNAME"
data = "bugry.tvl.fyi."
}
resource "glesys_dnsdomain_record" "tvix_dev_docs_CNAME" {
domain = glesys_dnsdomain.tvix_dev.id
host = "docs"
type = "CNAME"
data = "bugry.tvl.fyi."
}
resource "glesys_dnsdomain_record" "tvix_dev_NS1" {
domain = glesys_dnsdomain.tvix_dev.id
host = "@"
type = "NS"
data = "ns1.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvix_dev_NS2" {
domain = glesys_dnsdomain.tvix_dev.id
host = "@"
type = "NS"
data = "ns2.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvix_dev_NS3" {
domain = glesys_dnsdomain.tvix_dev.id
host = "@"
type = "NS"
data = "ns3.namesystem.se."
}

@@ -1,156 +0,0 @@
# DNS configuration for tvl.fyi
resource "glesys_dnsdomain" "tvl_fyi" {
name = "tvl.fyi"
}
resource "glesys_dnsdomain_record" "tvl_fyi_NS1" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "NS"
data = "ns1.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvl_fyi_NS2" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "NS"
data = "ns2.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvl_fyi_NS3" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "NS"
data = "ns3.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvl_fyi_apex_A" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "A"
data = var.bugry_ipv4
}
resource "glesys_dnsdomain_record" "tvl_fyi_apex_AAAA" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "AAAA"
data = var.bugry_ipv6
}
resource "glesys_dnsdomain_record" "tvl_fyi_nevsky_A" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "nevsky"
type = "A"
data = var.nevsky_ipv4
}
resource "glesys_dnsdomain_record" "tvl_fyi_nevsky_AAAA" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "nevsky"
type = "AAAA"
data = var.nevsky_ipv6
}
resource "glesys_dnsdomain_record" "tvl_fyi_bugry_A" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "bugry"
type = "A"
data = var.bugry_ipv4
}
resource "glesys_dnsdomain_record" "tvl_fyi_bugry_AAAA" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "bugry"
type = "AAAA"
data = var.bugry_ipv6
}
# Explicit records for all services running on nevsky
resource "glesys_dnsdomain_record" "tvl_fyi_nevsky_services" {
domain = glesys_dnsdomain.tvl_fyi.id
type = "CNAME"
data = "nevsky.tvl.fyi."
host = each.key
for_each = toset(local.nevsky_services)
}
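# Each entry in local.nevsky_services (e.g. "auth", "cl") thereby
# yields a record like auth.tvl.fyi -> nevsky.tvl.fyi.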
# Explicit records for all services running on bugry
resource "glesys_dnsdomain_record" "tvl_fyi_bugry_services" {
domain = glesys_dnsdomain.tvl_fyi.id
type = "CNAME"
data = "bugry.tvl.fyi."
host = each.key
for_each = toset(local.bugry_services)
}
resource "glesys_dnsdomain_record" "tvl_fyi_net_CNAME" {
domain = glesys_dnsdomain.tvl_fyi.id
type = "CNAME"
data = "sanduny.tvl.su."
host = "net"
}
# Binary cache round-robin setup (experimental; only on .fyi)
resource "glesys_dnsdomain_record" "cache_tvl_fyi_A" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "cache"
type = "A"
data = each.key
for_each = toset([var.nevsky_ipv4])
}
resource "glesys_dnsdomain_record" "cache_tvl_fyi_AAAA" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "cache"
type = "AAAA"
data = each.key
for_each = toset([var.nevsky_ipv6])
}
# Builderball cache records
resource "glesys_dnsdomain_record" "tvl_fyi_cache_nevsky_CNAME" {
domain = glesys_dnsdomain.tvl_fyi.id
type = "CNAME"
data = "nevsky.tvl.fyi."
host = "nevsky.cache"
}
# Google Domains mail forwarding configuration (no sending)
resource "glesys_dnsdomain_record" "tvl_fyi_MX_5" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "MX"
data = "5 gmr-smtp-in.l.google.com."
}
resource "glesys_dnsdomain_record" "tvl_fyi_MX_10" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "MX"
data = "10 alt1.gmr-smtp-in.l.google.com."
}
resource "glesys_dnsdomain_record" "tvl_fyi_MX_20" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "MX"
data = "20 alt2.gmr-smtp-in.l.google.com."
}
resource "glesys_dnsdomain_record" "tvl_fyi_MX_30" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "MX"
data = "30 alt3.aspmx.l.google.com."
}
resource "glesys_dnsdomain_record" "tvl_fyi_MX_40" {
domain = glesys_dnsdomain.tvl_fyi.id
host = "@"
type = "MX"
data = "40 alt4.gmr-smtp-in.l.google.com."
}

@@ -1,139 +0,0 @@
# DNS configuration for tvl.su
resource "glesys_dnsdomain" "tvl_su" {
name = "tvl.su"
}
resource "glesys_dnsdomain_record" "tvl_su_NS1" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "NS"
data = "ns1.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvl_su_NS2" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "NS"
data = "ns2.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvl_su_NS3" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "NS"
data = "ns3.namesystem.se."
}
resource "glesys_dnsdomain_record" "tvl_su_apex_A" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "A"
data = var.bugry_ipv4
}
resource "glesys_dnsdomain_record" "tvl_su_apex_AAAA" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "AAAA"
data = var.bugry_ipv6
}
resource "glesys_dnsdomain_record" "tvl_su_sanduny_A" {
domain = glesys_dnsdomain.tvl_su.id
host = "sanduny"
type = "A"
data = var.sanduny_ipv4
}
resource "glesys_dnsdomain_record" "tvl_su_sanduny_AAAA" {
domain = glesys_dnsdomain.tvl_su.id
host = "sanduny"
type = "AAAA"
data = var.sanduny_ipv6
}
resource "glesys_dnsdomain_record" "cache_tvl_su_nevsky_CNAME" {
domain = glesys_dnsdomain.tvl_su.id
host = "cache"
type = "CNAME"
data = "nevsky.tvl.fyi."
}
# Explicit records for all services running on nevsky
resource "glesys_dnsdomain_record" "tvl_su_nevsky_services" {
domain = glesys_dnsdomain.tvl_su.id
type = "CNAME"
data = "nevsky.tvl.fyi."
host = each.key
for_each = toset(local.nevsky_services)
}
# Explicit records for all services running on bugry
resource "glesys_dnsdomain_record" "tvl_su_bugry_services" {
domain = glesys_dnsdomain.tvl_su.id
type = "CNAME"
data = "bugry.tvl.fyi."
host = each.key
for_each = toset(local.bugry_services)
}
# historical tvixbolt.tvl.su record, redirects to bolt.tvix.dev
resource "glesys_dnsdomain_record" "tvix_su_tvixbolt_CNAME" {
domain = glesys_dnsdomain.tvl_su.id
host = "tvixbolt"
type = "CNAME"
data = "nevsky.tvl.fyi."
}
resource "glesys_dnsdomain_record" "tvl_su_inbox_CNAME" {
domain = glesys_dnsdomain.tvl_su.id
type = "CNAME"
data = "sanduny.tvl.su."
host = "inbox.tvl.su."
}
resource "glesys_dnsdomain_record" "tvl_su_TXT_google_site" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "TXT"
data = "google-site-verification=3ksTBzFK3lZlzD3ddBfpaHs9qasfAiYBmvbW2T_ejH4"
}
# Yandex 360 setup
resource "glesys_dnsdomain_record" "tvl_su_TXT_yandex" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "TXT"
data = "yandex-verification: b99c43b7838949dc"
}
resource "glesys_dnsdomain_record" "tvl_su_MX_yandex" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "MX"
data = "10 mx.yandex.net."
}
resource "glesys_dnsdomain_record" "tvl_su_TXT_yandex_spf" {
domain = glesys_dnsdomain.tvl_su.id
host = "@"
type = "TXT"
data = "v=spf1 redirect=_spf.yandex.net"
}
resource "glesys_dnsdomain_record" "tvl_su_TXT_yandex_dkim" {
domain = glesys_dnsdomain.tvl_su.id
host = "mail._domainkey"
type = "TXT"
data = "v=DKIM1; k=rsa; t=s; p=MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDaRdWF8BtCHlTTQN8O+E5Qn27FVIpUEAdk1uq2vdIKh1Un/3NfdWtxStcS1Mf0iEprt1Fb4zgWOkDlPi+hH/UZqiC9QNeNqEBGMB9kgJyfsUt6cDCIVGvn8PT9JcZW1jxSziOj8nUWB4noqbaVcQNqNbwtaHPm3aifwKwScxVO7wIDAQAB"
}
resource "glesys_dnsdomain_record" "tvl_su_CNAME_yandex_mail" {
domain = glesys_dnsdomain.tvl_su.id
host = "mail"
type = "CNAME"
data = "domain.mail.yandex.net."
}

@@ -1,105 +0,0 @@
# Configure TVL resources hosted with GleSYS.
#
# Most importantly:
# - all of our DNS
# - object storage (e.g. backups)
terraform {
required_providers {
glesys = {
source = "depot/glesys"
}
}
backend "s3" {
endpoints = {
s3 = "https://objects.dc-sto1.glesys.net"
}
bucket = "tvl-state"
key = "terraform/tvl-glesys"
region = "glesys"
skip_credentials_validation = true
skip_region_validation = true
skip_metadata_api_check = true
skip_requesting_account_id = true
skip_s3_checksum = true
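# (these skip_* flags are needed because the state bucket is
# S3-compatible GleSYS object storage, not actual AWS S3)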
}
}
provider "glesys" {
userid = "cl26117" # generated by GleSYS
}
resource "glesys_objectstorage_instance" "tvl-backups" {
description = "tvl-backups"
datacenter = "dc-sto1"
}
resource "glesys_objectstorage_instance" "tvl-state" {
description = "tvl-state"
datacenter = "dc-sto1"
}
resource "glesys_objectstorage_credential" "terraform-state" {
instanceid = glesys_objectstorage_instance.tvl-state.id
description = "key for terraform state"
}
resource "glesys_objectstorage_credential" "litestream" {
instanceid = glesys_objectstorage_instance.tvl-state.id
description = "key for litestream"
}
variable "nevsky_ipv4" {
type = string
default = "188.225.81.75"
}
variable "nevsky_ipv6" {
type = string
default = "2a03:6f00:2:514b:0:feed:edef:beef"
}
variable "bugry_ipv4" {
type = string
default = "91.199.149.239"
}
variable "bugry_ipv6" {
type = string
default = "2a03:6f00:2:514b:5bc7:95ef:0:2"
}
variable "sanduny_ipv4" {
type = string
default = "85.119.82.231"
}
variable "sanduny_ipv6" {
type = string
default = "2001:ba8:1f1:f109::feed:edef:beef"
}
locals {
# Hostnames of all public services on nevsky
nevsky_services = [
"auth",
"b",
"cl",
"code",
"cs",
"deploys", # TODO: unsupported (b/437)
"grep",
"status",
]
# Hostnames of all public services on bugry
bugry_services = [
"at",
"atward",
"signup",
"static",
"todo",
]
}

@@ -1,3 +0,0 @@
result
/target
**/*.rs.bk

@@ -1,626 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
dependencies = [
"memchr",
]
[[package]]
name = "anyhow"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
[[package]]
name = "base64"
version = "0.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
[[package]]
name = "bitflags"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de"
[[package]]
name = "build-env"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e068f31938f954b695423ecaf756179597627d0828c0d3e48c0a722a8b23cf9e"
[[package]]
name = "cc"
version = "1.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57b6a275aa2903740dc87da01c62040406b8812552e97129a63ea8850a17c6e6"
dependencies = [
"shlex",
]
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "crimp"
version = "4087.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ead2c83f7d1f9b8e5a6f7a25985d0d1759ccd2cd72abb1eee2db65d05e12b39"
dependencies = [
"curl",
"serde",
"serde_json",
]
[[package]]
name = "cstr-argument"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6bd9c8e659a473bce955ae5c35b116af38af11a7acb0b480e01f3ed348aeb40"
dependencies = [
"cfg-if",
"memchr",
]
[[package]]
name = "curl"
version = "0.4.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e2161dd6eba090ff1594084e95fd67aeccf04382ffea77999ea94ed42ec67b6"
dependencies = [
"curl-sys",
"libc",
"openssl-probe",
"openssl-sys",
"schannel",
"socket2",
"windows-sys 0.52.0",
]
[[package]]
name = "curl-sys"
version = "0.4.74+curl-8.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8af10b986114528fcdc4b63b6f5f021b7057618411046a4de2ba0f0149a097bf"
dependencies = [
"cc",
"libc",
"libz-sys",
"openssl-sys",
"pkg-config",
"vcpkg",
"windows-sys 0.52.0",
]
[[package]]
name = "deranged"
version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4"
dependencies = [
"powerfmt",
"serde",
]
[[package]]
name = "env_logger"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
dependencies = [
"humantime",
"is-terminal",
"log",
"regex",
"termcolor",
]
[[package]]
name = "foreign-types"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
dependencies = [
"foreign-types-shared 0.1.1",
]
[[package]]
name = "foreign-types"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d737d9aa519fb7b749cbc3b962edcf310a8dd1f4b67c91c4f83975dbdd17d965"
dependencies = [
"foreign-types-macros",
"foreign-types-shared 0.3.1",
]
[[package]]
name = "foreign-types-macros"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a5c6c585bc94aaf2c7b51dd4c2ba22680844aba4c687be581871a6f518c5742"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "foreign-types-shared"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "foreign-types-shared"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa9a19cbb55df58761df49b23516a86d432839add4af60fc256da840f66ed35b"
[[package]]
name = "hermit-abi"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc"
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "is-terminal"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "261f68e344040fbd0edea105bef17c66edf46f984ddb1115b775ce31be948f4b"
dependencies = [
"hermit-abi",
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "itoa"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
name = "journaldriver"
version = "5656.0.0"
dependencies = [
"anyhow",
"crimp",
"env_logger",
"lazy_static",
"log",
"medallion",
"pkg-config",
"serde",
"serde_json",
"systemd",
"time",
]
[[package]]
name = "lazy_static"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
[[package]]
name = "libc"
version = "0.2.158"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
[[package]]
name = "libsystemd-sys"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d28ad38d7bee81aabd41201ee7d36df8d7f76aa0a455c77d5c365c4669b4b4b6"
dependencies = [
"build-env",
"libc",
"pkg-config",
]
[[package]]
name = "libz-sys"
version = "1.1.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472"
dependencies = [
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "log"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "medallion"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35b83c0c3277d722b53a6eb24e3c1321172f85b715cc7405add8ffd1f2f06288"
dependencies = [
"anyhow",
"base64",
"openssl",
"serde",
"serde_json",
"time",
]
[[package]]
name = "memchr"
version = "2.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3"
[[package]]
name = "num-conv"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9"
[[package]]
name = "once_cell"
version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92"
[[package]]
name = "openssl"
version = "0.10.66"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1"
dependencies = [
"bitflags",
"cfg-if",
"foreign-types 0.3.2",
"libc",
"once_cell",
"openssl-macros",
"openssl-sys",
]
[[package]]
name = "openssl-macros"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "openssl-probe"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"
[[package]]
name = "openssl-sys"
version = "0.9.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6"
dependencies = [
"cc",
"libc",
"pkg-config",
"vcpkg",
]
[[package]]
name = "pkg-config"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec"
[[package]]
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "proc-macro2"
version = "1.0.86"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af"
dependencies = [
"proc-macro2",
]
[[package]]
name = "regex"
version = "1.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b"
[[package]]
name = "ryu"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f"
[[package]]
name = "schannel"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534"
dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "serde"
version = "1.0.209"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99fce0ffe7310761ca6bf9faf5115afbc19688edd00171d81b1bb1b116c63e09"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.209"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a5831b979fd7b5439637af1752d535ff49f4860c0f341d1baeb6faf0f4242170"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.127"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8043c06d9f82bd7271361ed64f415fe5e12a77fdb52e573e7f06a516dea329ad"
dependencies = [
"itoa",
"memchr",
"ryu",
"serde",
]
[[package]]
name = "shlex"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "socket2"
version = "0.5.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c"
dependencies = [
"libc",
"windows-sys 0.52.0",
]
[[package]]
name = "syn"
version = "2.0.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "systemd"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da95085b9c6eedbcf0b828302a3483a84bdbf772158e586b787092112008fd1f"
dependencies = [
"cstr-argument",
"foreign-types 0.5.0",
"libc",
"libsystemd-sys",
"log",
"utf8-cstr",
]
[[package]]
name = "termcolor"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "time"
version = "0.3.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885"
dependencies = [
"deranged",
"itoa",
"num-conv",
"powerfmt",
"serde",
"time-core",
"time-macros",
]
[[package]]
name = "time-core"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf"
dependencies = [
"num-conv",
"time-core",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "utf8-cstr"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55bcbb425141152b10d5693095950b51c3745d019363fc2929ffd8f61449b628"
[[package]]
name = "vcpkg"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426"
[[package]]
name = "winapi-util"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-sys"
version = "0.59.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"
[[package]]
name = "windows_aarch64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"
[[package]]
name = "windows_i686_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"
[[package]]
name = "windows_i686_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"
[[package]]
name = "windows_i686_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"
[[package]]
name = "windows_x86_64_gnu"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"
[[package]]
name = "windows_x86_64_msvc"
version = "0.52.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

@@ -1,21 +0,0 @@
[package]
name = "journaldriver"
version = "5656.0.0"
authors = ["Vincent Ambo <tazjin@tvl.su>"]
license = "GPL-3.0-or-later"
edition = "2021"
[dependencies]
anyhow = "1.0"
crimp = "4087.0"
env_logger = "0.10"
lazy_static = "1.4"
log = "0.4"
medallion = "2.5"
serde = { version = "1.0", features = [ "derive" ] }
serde_json = "1.0"
systemd = "0.5"
time = { version = "0.3", features = [ "serde-well-known", "macros" ]}
[build-dependencies]
pkg-config = "0.3"

@@ -1,152 +0,0 @@
journaldriver
=============
This is a small daemon used to forward logs from `journald` (systemd's
logging service) to [Stackdriver Logging][].
Many existing log services are written in inefficient dynamic
languages with error-prone "cover every possible use-case"
configuration. `journaldriver` instead aims to fit one specific
use-case very well, rather than covering every possible logging setup.
`journaldriver` can be run on GCP instances with no additional
configuration, as authentication tokens are retrieved from the
[metadata server][].
<!-- markdown-toc start - Don't edit this section. Run M-x markdown-toc-refresh-toc -->
**Table of Contents**
- [Features](#features)
- [Usage on Google Cloud Platform](#usage-on-google-cloud-platform)
- [Usage outside of Google Cloud Platform](#usage-outside-of-google-cloud-platform)
- [Log levels / severities / priorities](#log-levels--severities--priorities)
- [NixOS module](#nixos-module)
- [Stackdriver Error Reporting](#stackdriver-error-reporting)
<!-- markdown-toc end -->
# Features
* `journaldriver` persists the last forwarded position in the journal
and will resume forwarding at the same position after a restart
* `journaldriver` will recognise log entries in JSON format and
forward them appropriately to make structured log entries available
in Stackdriver
* `journaldriver` can be used outside of GCP by configuring static
credentials
* `journaldriver` will recognise journald's log priority levels and
convert them into equivalent Stackdriver log severity levels
# Usage on Google Cloud Platform
`journaldriver` does not require any configuration when running on GCP
instances.
1. Install `journaldriver` on the instance from which you wish to
forward logs.
2. Ensure that the instance has the appropriate permissions to write
   to Stackdriver. Google continuously changes how IAM is implemented
on GCP, so you will have to refer to [Google's documentation][].
By default instances have the required permissions if Stackdriver
Logging support is enabled in the project.
3. Start `journaldriver`, for example via `systemd`.
# Usage outside of Google Cloud Platform
When running outside of GCP, the following extra steps need to be
performed:
1. Create a Google Cloud Platform service account with the "Log
Writer" role and download its private key in JSON-format.
2. When starting `journaldriver`, configure the following environment
variables:
* `GOOGLE_CLOUD_PROJECT`: Name of the GCP project to which logs
should be written.
* `GOOGLE_APPLICATION_CREDENTIALS`: Filesystem path to the
JSON-file containing the service account's private key.
* `LOG_STREAM`: Name of the target log stream in Stackdriver Logging.
This will be automatically created if it does not yet exist.
* `LOG_NAME`: Name of the target log to write to. This defaults to
  `journaldriver` if unset, but it is recommended to set it to, for
  example, the machine hostname.
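A minimal launch outside of GCP could then look like this; the project
name, key path and stream name below are placeholders, not
recommendations:

```
$ export GOOGLE_CLOUD_PROJECT=example-project
$ export GOOGLE_APPLICATION_CREDENTIALS=/etc/journaldriver/key.json
$ export LOG_STREAM=prod-environment
$ export LOG_NAME=$(hostname)
$ journaldriver
```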
# Log levels / severities / priorities
`journaldriver` recognises [journald's priorities][] and converts them
into [equivalent severities][] in Stackdriver. Both sets of values
correspond to standard `syslog` priorities.
The easiest way to emit log messages with priorities from an
application is to use [priority prefixes][], which are compatible with
structured log messages.
For example, to emit a simple warning message (structured and
unstructured):
```
$ echo '<4>{"fnord":true, "msg":"structured log (warning)"}' | systemd-cat
$ echo '<4>unstructured log (warning)' | systemd-cat
```
# NixOS module
The NixOS package repository [contains a module][] for setting up
`journaldriver` on NixOS machines. NixOS by default uses `systemd` for
service management and `journald` for logging, which means that log
output from most services will be captured automatically.
On a GCP instance the only required option is this:
```nix
services.journaldriver.enable = true;
```
When running outside of GCP, the configuration looks as follows:
```nix
services.journaldriver = {
enable = true;
logStream = "prod-environment";
logName = "hostname";
googleCloudProject = "gcp-project-name";
applicationCredentials = keyFile;
};
```
**Note**: The `journaldriver` module has been included in stable
releases of NixOS since NixOS 18.09.
# Stackdriver Error Reporting
The [Stackdriver Error Reporting][] service of Google's monitoring
toolbox supports automatically detecting and correlating errors from
log entries.
To use this functionality log messages must be logged in the expected
[log format][].
*Note*: Reporting errors from non-GCP instances requires that the
`LOG_STREAM` environment variable is set to the special value
`global`.
This value changes the monitored resource descriptor from a log stream
to the project-global stream. Due to a limitation in Stackdriver Error
Reporting, this is the only way to correctly ingest errors from
non-GCP machines. Please see [issue #4][] for more information about
this.
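For example, a wrapper script or service definition could simply set
the special value before starting the daemon (illustrative):

```
$ LOG_STREAM=global journaldriver
```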
[Stackdriver Logging]: https://cloud.google.com/logging/
[metadata server]: https://cloud.google.com/compute/docs/storing-retrieving-metadata
[Google's documentation]: https://cloud.google.com/logging/docs/access-control
[NixOS]: https://nixos.org/
[contains a module]: https://github.com/NixOS/nixpkgs/pull/42134
[journald's priorities]: http://0pointer.de/public/systemd-man/sd-daemon.html
[equivalent severities]: https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#logseverity
[priority prefixes]: http://0pointer.de/public/systemd-man/sd-daemon.html
[Stackdriver Error Reporting]: https://cloud.google.com/error-reporting/
[log format]: https://cloud.google.com/error-reporting/docs/formatting-error-messages
[issue #4]: https://github.com/tazjin/journaldriver/issues/4

@@ -1,5 +0,0 @@
extern crate pkg_config;
fn main() {
pkg_config::probe_library("libsystemd").expect("Could not probe libsystemd");
}

@@ -1,11 +0,0 @@
{ depot, pkgs, ... }:
depot.third_party.naersk.buildPackage {
src = ./.;
buildInputs = with pkgs; [
pkg-config
openssl
systemd.dev
];
}

@@ -1,638 +0,0 @@
// Copyright (C) 2018 Vincent Ambo <mail@tazj.in>
//
// journaldriver is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//! This file implements journaldriver, a small application that
//! forwards logs from journald (systemd's log facility) to
//! Stackdriver Logging.
//!
//! Log entries are read continuously from journald and are forwarded
//! to Stackdriver in batches.
//!
//! Stackdriver Logging has a concept of monitored resources. In the
//! simplest case this monitored resource will be the GCE instance on
//! which journaldriver is running.
//!
//! Information about the instance, the project and required security
//! credentials are retrieved from Google's metadata server on GCP.
//!
//! To run journaldriver on non-GCP machines, users must specify the
//! `GOOGLE_APPLICATION_CREDENTIALS`, `GOOGLE_CLOUD_PROJECT` and
//! `LOG_NAME` environment variables.
use anyhow::{bail, Context, Result};
use lazy_static::lazy_static;
use log::{debug, error, info, trace};
use serde::{Deserialize, Serialize};
use serde_json::{from_str, json, Value};
use std::convert::TryInto;
use std::fs::{self, rename, File};
use std::io::{self, ErrorKind, Read, Write};
use std::path::PathBuf;
use std::time::{Duration, Instant};
use std::{env, mem, process};
use systemd::journal::{Journal, JournalFiles, JournalRecord, JournalSeek};
#[cfg(test)]
mod tests;
const LOGGING_SERVICE: &str = "https://logging.googleapis.com/google.logging.v2.LoggingServiceV2";
const ENTRIES_WRITE_URL: &str = "https://logging.googleapis.com/v2/entries:write";
const METADATA_TOKEN_URL: &str =
"http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token";
const METADATA_ID_URL: &str = "http://metadata.google.internal/computeMetadata/v1/instance/id";
const METADATA_ZONE_URL: &str = "http://metadata.google.internal/computeMetadata/v1/instance/zone";
const METADATA_PROJECT_URL: &str =
"http://metadata.google.internal/computeMetadata/v1/project/project-id";
/// Representation of static service account credentials for GCP.
#[derive(Debug, Deserialize)]
struct Credentials {
/// PEM encoded private key
private_key: String,
/// `kid` of this private key
private_key_id: String,
/// "email" address of the service account
client_email: String,
}
lazy_static! {
/// ID of the GCP project to which to send logs.
static ref PROJECT_ID: String = get_project_id();
/// Name of the log to write to (this should only be manually
/// configured if not running on GCP):
static ref LOG_NAME: String = env::var("LOG_NAME")
.unwrap_or("journaldriver".into());
/// Service account credentials (if configured)
static ref SERVICE_ACCOUNT_CREDENTIALS: Option<Credentials> =
env::var("GOOGLE_APPLICATION_CREDENTIALS").ok()
.and_then(|path| File::open(path).ok())
.and_then(|file| serde_json::from_reader(file).ok());
/// Descriptor of the currently monitored instance. Refer to the
/// documentation of `determine_monitored_resource` for more
/// information.
static ref MONITORED_RESOURCE: Value = determine_monitored_resource();
/// Path to the directory in which journaldriver should persist
/// its cursor state.
static ref CURSOR_DIR: PathBuf = env::var("CURSOR_POSITION_DIR")
.unwrap_or("/var/lib/journaldriver".into())
.into();
/// Path to the cursor position file itself.
static ref CURSOR_FILE: PathBuf = {
let mut path = CURSOR_DIR.clone();
path.push("cursor.pos");
path
};
/// Path to the temporary file used for cursor position writes.
static ref CURSOR_TMP_FILE: PathBuf = {
let mut path = CURSOR_DIR.clone();
path.push("cursor.tmp");
path
};
}
/// Convenience helper for retrieving values from the metadata server.
fn get_metadata(url: &str) -> Result<String> {
let response = crimp::Request::get(url)
.header("Metadata-Flavor", "Google")?
.timeout(std::time::Duration::from_secs(5))?
.send()?
.as_string()?;
if !response.is_success() {
bail!(
"Error response ({}) from metadata server: {}",
response.status,
response.body
);
}
Ok(response.body.trim().to_owned())
}
/// Convenience helper for determining the project ID.
fn get_project_id() -> String {
env::var("GOOGLE_CLOUD_PROJECT")
.or_else(|_| get_metadata(METADATA_PROJECT_URL))
.expect("Could not determine project ID")
}
/// Determines the monitored resource descriptor used in Stackdriver
/// logs. On GCP this will be set to the instance ID as returned by
/// the metadata server.
///
/// On non-GCP machines the value is determined by using the
/// `GOOGLE_CLOUD_PROJECT` and `LOG_STREAM` environment variables.
///
/// [issue #4]: https://github.com/tazjin/journaldriver/issues/4
fn determine_monitored_resource() -> Value {
if let Ok(log) = env::var("LOG_STREAM") {
// The special value `global` is recognised as a log stream name that
// results in a `global`-type resource descriptor. This is useful in
// cases where Stackdriver Error Reporting is intended to be used on
// a non-GCE instance. See [issue #4][] for details.
if log == "global" {
return json!({
"type": "global",
"labels": {
"project_id": PROJECT_ID.as_str(),
}
});
}
json!({
"type": "logging_log",
"labels": {
"project_id": PROJECT_ID.as_str(),
"name": log,
}
})
} else {
let instance_id = get_metadata(METADATA_ID_URL).expect("Could not determine instance ID");
let zone = get_metadata(METADATA_ZONE_URL).expect("Could not determine instance zone");
json!({
"type": "gce_instance",
"labels": {
"project_id": PROJECT_ID.as_str(),
"instance_id": instance_id,
"zone": zone,
}
})
}
}
/// Represents the response returned by the metadata server's token
/// endpoint. The token is normally valid for an hour.
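/// A representative response body looks like this (the values are
/// illustrative, not captured from a real metadata server):
///
/// `{"access_token": "ya29....", "expires_in": 3599}`
///
/// Any additional fields in the response are ignored during
/// deserialisation.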
#[derive(Deserialize)]
struct TokenResponse {
expires_in: u64,
access_token: String,
}
/// Struct used to store a token together with a sensible
/// representation of when it expires.
struct Token {
token: String,
fetched_at: Instant,
expires: Duration,
}
impl Token {
/// Does this token need to be renewed?
fn is_expired(&self) -> bool {
self.fetched_at.elapsed() > self.expires
}
}
/// Retrieves a token from the GCP metadata service. Retrieving these
/// tokens requires no additional authentication.
fn get_metadata_token() -> Result<Token> {
let body = get_metadata(METADATA_TOKEN_URL)?;
let token: TokenResponse = from_str(&body)?;
debug!("Fetched new token from metadata service");
Ok(Token {
fetched_at: Instant::now(),
expires: Duration::from_secs(token.expires_in / 2),
token: token.access_token,
})
}
/// Signs a token using static client credentials configured for a
/// service account. This service account must have been given the
/// `Log Writer` role in Google Cloud IAM.
///
/// The process for creating and signing these tokens is described
/// here:
///
/// https://developers.google.com/identity/protocols/OAuth2ServiceAccount#jwt-auth
fn sign_service_account_token(credentials: &Credentials) -> Result<Token> {
use medallion::{Algorithm, Header, Payload};
let iat = time::OffsetDateTime::now_utc();
let exp = iat + time::Duration::seconds(3600);
let header = Header {
alg: Algorithm::RS256,
headers: Some(json!({
"kid": credentials.private_key_id,
})),
};
let payload: Payload<()> = Payload {
iss: Some(credentials.client_email.clone()),
sub: Some(credentials.client_email.clone()),
aud: Some(LOGGING_SERVICE.to_string()),
iat: Some(iat.unix_timestamp().try_into().unwrap()),
exp: Some(exp.unix_timestamp().try_into().unwrap()),
..Default::default()
};
let token = medallion::Token::new(header, payload)
.sign(credentials.private_key.as_bytes())
.context("Signing service account token failed")?;
debug!("Signed new service account token");
Ok(Token {
token,
fetched_at: Instant::now(),
expires: Duration::from_secs(3000),
})
}
/// Retrieve the authentication token either by using static client
/// credentials, or by talking to the metadata server.
///
/// Which behaviour is used is controlled by the environment variable
/// `GOOGLE_APPLICATION_CREDENTIALS`, which should be configured to
/// point at a JSON private key file if service account authentication
/// is to be used.
fn get_token() -> Result<Token> {
if let Some(credentials) = SERVICE_ACCOUNT_CREDENTIALS.as_ref() {
sign_service_account_token(credentials)
} else {
get_metadata_token()
}
}
/// This structure represents the different types of payloads
/// supported by journaldriver.
///
/// Currently log entries can either contain plain text messages or
/// structured payloads in JSON-format.
#[derive(Debug, Serialize, PartialEq)]
#[serde(untagged)]
enum Payload {
TextPayload {
#[serde(rename = "textPayload")]
text_payload: String,
},
JsonPayload {
#[serde(rename = "jsonPayload")]
json_payload: Value,
},
}
/// Attempt to parse a log message as JSON and return it as a
/// structured payload. If parsing fails, return the entry in plain
/// text format.
fn message_to_payload(message: Option<String>) -> Payload {
match message {
None => Payload::TextPayload {
text_payload: "empty log entry".into(),
},
Some(text_payload) => {
// Attempt to deserialize the text payload as a generic
// JSON value.
if let Ok(json_payload) = serde_json::from_str::<Value>(&text_payload) {
// If JSON-parsing succeeded on the payload, check
// whether we parsed an object (Stackdriver does not
// expect other types of JSON payload) and return it
// in that case.
if json_payload.is_object() {
return Payload::JsonPayload { json_payload };
}
}
Payload::TextPayload { text_payload }
}
}
}
/// Attempt to parse journald's microsecond timestamps into a UTC
/// timestamp.
///
/// Parse errors are dismissed and returned as empty options: There
/// simply aren't any useful fallback mechanisms other than defaulting
/// to ingestion time for journaldriver's use-case.
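///
/// For example, the 16-character input "1529175149291187" parses to
/// 2018-06-16 18:52:29.291187 UTC (this is the case covered by
/// `test_parse_microseconds` in tests.rs).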
fn parse_microseconds(input: String) -> Option<time::OffsetDateTime> {
if input.len() != 16 {
return None;
}
let micros: i128 = input.parse().ok()?;
let nanos: i128 = micros * 1000;
time::OffsetDateTime::from_unix_timestamp_nanos(nanos).ok()
}
/// Converts a journald log message priority to a
/// Stackdriver-compatible severity number.
///
/// Both Stackdriver and journald specify equivalent
/// severities/priorities. Conveniently, the names are the same.
/// Inconveniently, the numbers are not.
///
/// For more information on the journald priorities, consult these
/// man-pages:
///
/// * systemd.journal-fields(7) (section 'PRIORITY')
/// * sd-daemon(3)
/// * systemd.exec(5) (section 'SyslogLevelPrefix')
///
/// Note that priorities can be logged by applications via the prefix
/// concept described in these man pages, without interfering with
/// structured JSON-payloads.
///
/// For more information on the Stackdriver severity levels, please
/// consult Google's documentation:
///
/// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#LogSeverity
///
/// Any unknown priority values result in no severity being set.
fn priority_to_severity(priority: String) -> Option<u32> {
match priority.as_ref() {
"0" => Some(800), // emerg
"1" => Some(700), // alert
"2" => Some(600), // crit
"3" => Some(500), // err
"4" => Some(400), // warning
"5" => Some(300), // notice
"6" => Some(200), // info
"7" => Some(100), // debug
_ => None,
}
}
/// This structure represents a log entry in the format expected by
/// the Stackdriver API.
#[derive(Debug, Serialize)]
#[serde(rename_all = "camelCase")]
struct LogEntry {
labels: Value,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "time::serde::rfc3339::option")]
timestamp: Option<time::OffsetDateTime>,
#[serde(flatten)]
payload: Payload,
// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry#LogSeverity
#[serde(skip_serializing_if = "Option::is_none")]
severity: Option<u32>,
}
impl From<JournalRecord> for LogEntry {
// Converts from the fields contained in a journald record to the
// representation required by Stackdriver Logging.
//
// The fields are documented in systemd.journal-fields(7).
fn from(mut record: JournalRecord) -> LogEntry {
// The message field is technically just a convention, but
// journald seems to default to it when ingesting unit
// output.
let payload = message_to_payload(record.remove("MESSAGE"));
// Presumably this is always set, but who can be sure
// about anything in this world.
let hostname = record.remove("_HOSTNAME");
// The unit is seemingly missing on kernel entries, but
// present on all others.
let unit = record.remove("_SYSTEMD_UNIT");
// The source timestamp (if present) is specified in
// microseconds since epoch.
//
// If it is not present or can not be parsed, journaldriver
// will not send a timestamp for the log entry and it will
// default to the ingestion time.
let timestamp = record
.remove("_SOURCE_REALTIME_TIMESTAMP")
.and_then(parse_microseconds);
// Journald uses syslogd's concept of priority. No idea if this is
// always present, but it's optional in the Stackdriver API, so we just
// omit it if we can't find or parse it.
let severity = record.remove("PRIORITY").and_then(priority_to_severity);
LogEntry {
payload,
timestamp,
labels: json!({
"host": hostname,
"unit": unit.unwrap_or_else(|| "syslog".into()),
}),
severity,
}
}
}
/// Attempt to read from the journal. If no new entry is present,
/// await the next one up to the specified timeout.
fn receive_next_record(timeout: Duration, journal: &mut Journal) -> Result<Option<JournalRecord>> {
let next_record = journal.next_record()?;
if next_record.is_some() {
return Ok(next_record);
}
Ok(journal.await_next_record(Some(timeout))?)
}
/// This function starts a double-looped, blocking receiver. It will
/// buffer messages for half a second before flushing them to
/// Stackdriver.
fn receiver_loop(mut journal: Journal) -> Result<()> {
let mut token = get_token()?;
let mut buf: Vec<LogEntry> = Vec::new();
let iteration = Duration::from_millis(500);
loop {
trace!("Beginning outer iteration");
let now = Instant::now();
loop {
if now.elapsed() > iteration {
break;
}
if let Ok(Some(entry)) = receive_next_record(iteration, &mut journal) {
trace!("Received a new entry");
buf.push(entry.into());
}
}
if !buf.is_empty() {
let to_flush = mem::replace(&mut buf, Vec::new());
flush(&mut token, to_flush, journal.cursor()?)?;
}
trace!("Done outer iteration");
}
}
/// Writes the current cursor into `/var/lib/journaldriver/cursor.pos`. To
/// avoid issues with journaldriver being terminated while the cursor
/// is still being written, this will first write the cursor into a
/// temporary file and then move it.
fn persist_cursor(cursor: String) -> Result<()> {
// This code exists to aid in tracking down if there are other
// causes of issue #2 than what has already been taken care of.
//
// One theory is that journald (or the Rust library to interface
// with it) may occasionally return empty cursor strings. If this
// is ever the case, we would like to know about it.
if cursor.is_empty() {
error!("Received empty journald cursor position, refusing to persist!");
error!("Please report this message at https://github.com/tazjin/journaldriver/issues/2");
return Ok(());
}
let mut file = File::create(&*CURSOR_TMP_FILE).context("Failed to create cursor file")?;
write!(file, "{}", cursor).context("Failed to write cursor file")?;
rename(&*CURSOR_TMP_FILE, &*CURSOR_FILE)
.context("Failed to move cursor file")
.map_err(Into::into)
}
/// Flushes all drained records to Stackdriver. Any Stackdriver
/// message can contain at most 1000 log entries, which means they are
/// chunked up here.
///
/// In some cases large payloads seem to cause errors in Stackdriver -
/// the chunks are therefore made smaller here.
///
/// If flushing is successful the last cursor position will be
/// persisted to disk.
fn flush(token: &mut Token, entries: Vec<LogEntry>, cursor: String) -> Result<()> {
if token.is_expired() {
debug!("Refreshing Google metadata access token");
let new_token = get_token()?;
*token = new_token;
}
for chunk in entries.chunks(750) {
let request = prepare_request(chunk);
if let Err(write_error) = write_entries(token, request) {
error!("Failed to write {} entries: {}", chunk.len(), write_error)
} else {
debug!("Wrote {} entries to Stackdriver", chunk.len())
}
}
persist_cursor(cursor)
}
/// Convert a slice of log entries into the format expected by
/// Stackdriver. This format is documented here:
///
/// https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
fn prepare_request(entries: &[LogEntry]) -> Value {
json!({
"logName": format!("projects/{}/logs/{}", PROJECT_ID.as_str(), LOG_NAME.as_str()),
"resource": &*MONITORED_RESOURCE,
"entries": entries,
"partialSuccess": true
})
}
/// Perform the log entry insertion in Stackdriver Logging.
fn write_entries(token: &Token, request: Value) -> Result<()> {
let response = crimp::Request::post(ENTRIES_WRITE_URL)
.json(&request)?
.header("Authorization", format!("Bearer {}", token.token).as_str())?
// The timeout values are set relatively high, not because of
// an expectation of Stackdriver being slow but just to
// eventually force an error in case of network troubles.
// Presumably no request in a functioning environment will
// ever hit these limits.
.timeout(std::time::Duration::from_secs(5))?
.send()?;
if !response.is_success() {
let status = response.status;
let body = response
.as_string()
.map(|r| r.body)
.unwrap_or_else(|_| "no valid response body".to_owned());
bail!("Writing to Stackdriver failed({}): {}", status, body);
}
Ok(())
}
/// Attempt to read the initial cursor position from the configured
/// file. If there is no initial cursor position set, read from the
/// tail of the log.
///
/// The only "acceptable" error when reading the cursor position is
/// the cursor position file not existing, other errors are fatal
/// because they indicate a misconfiguration of journaldriver.
fn initial_cursor() -> Result<JournalSeek> {
let read_result: io::Result<String> = (|| {
let mut contents = String::new();
let mut file = File::open(&*CURSOR_FILE)?;
file.read_to_string(&mut contents)?;
Ok(contents.trim().into())
})();
match read_result {
Ok(cursor) => Ok(JournalSeek::Cursor { cursor }),
Err(ref err) if err.kind() == ErrorKind::NotFound => {
info!("No previous cursor position, reading from journal tail");
Ok(JournalSeek::Tail)
}
Err(err) => (Err(err).context("Could not read cursor position"))?,
}
}
fn main() {
env_logger::init();
// The directory in which cursor positions are persisted should
// have been created:
if !CURSOR_DIR.exists() {
error!("Cursor directory at '{:?}' does not exist", *CURSOR_DIR);
process::exit(1);
}
let cursor_position_dir = CURSOR_FILE
.parent()
.expect("Invalid cursor position file path");
fs::create_dir_all(cursor_position_dir)
.expect("Could not create directory to store cursor position in");
let mut journal =
Journal::open(JournalFiles::All, false, true).expect("Failed to open systemd journal");
let seek_position = initial_cursor().expect("Failed to determine initial cursor position");
match journal.seek(seek_position) {
Ok(cursor) => info!("Opened journal at cursor '{}'", cursor),
Err(err) => {
error!("Failed to set initial journal position: {}", err);
process::exit(1)
}
}
receiver_loop(journal).expect("log receiver encountered an unexpected error");
}

@@ -1,131 +0,0 @@
use super::*;
use serde_json::to_string;
use time::macros::datetime;
#[test]
fn test_text_entry_serialization() {
let entry = LogEntry {
labels: Value::Null,
timestamp: None,
payload: Payload::TextPayload {
text_payload: "test entry".into(),
},
severity: None,
};
let expected = "{\"labels\":null,\"textPayload\":\"test entry\"}";
let result = to_string(&entry).expect("serialization failed");
assert_eq!(
expected, result,
"Plain text payload should serialize correctly"
)
}
#[test]
fn test_timestamped_entry_serialization() {
let entry = LogEntry {
labels: Value::Null,
timestamp: Some(datetime!(1952-10-07 12:00:00 UTC)),
payload: Payload::TextPayload {
text_payload: "test entry".into(),
},
severity: None,
};
let expected =
"{\"labels\":null,\"timestamp\":\"1952-10-07T12:00:00Z\",\"textPayload\":\"test entry\"}";
let result = to_string(&entry).expect("serialization failed");
assert_eq!(
expected, result,
"Plain text payload should serialize correctly"
)
}
#[test]
fn test_json_entry_serialization() {
let entry = LogEntry {
labels: Value::Null,
timestamp: None,
payload: Payload::JsonPayload {
json_payload: json!({
"message": "JSON test"
}),
},
severity: None,
};
let expected = "{\"labels\":null,\"jsonPayload\":{\"message\":\"JSON test\"}}";
let result = to_string(&entry).expect("serialization failed");
assert_eq!(expected, result, "JSON payload should serialize correctly")
}
#[test]
fn test_plain_text_payload() {
let message = "plain text payload".into();
let payload = message_to_payload(Some(message));
let expected = Payload::TextPayload {
text_payload: "plain text payload".into(),
};
assert_eq!(
expected, payload,
"Plain text payload should be detected correctly"
);
}
#[test]
fn test_empty_payload() {
let payload = message_to_payload(None);
let expected = Payload::TextPayload {
text_payload: "empty log entry".into(),
};
assert_eq!(
expected, payload,
"Empty payload should be handled correctly"
);
}
#[test]
fn test_json_payload() {
let message = "{\"someKey\":\"someValue\", \"otherKey\": 42}".into();
let payload = message_to_payload(Some(message));
let expected = Payload::JsonPayload {
json_payload: json!({
"someKey": "someValue",
"otherKey": 42
}),
};
assert_eq!(
expected, payload,
"JSON payload should be detected correctly"
);
}
#[test]
fn test_json_no_object() {
// This message can be parsed as valid JSON, but it is not an
// object - it should be returned as a plain-text payload.
let message = "42".into();
let payload = message_to_payload(Some(message));
let expected = Payload::TextPayload {
text_payload: "42".into(),
};
assert_eq!(
expected, payload,
"Non-object JSON payload should be plain text"
);
}
#[test]
fn test_parse_microseconds() {
let input: String = "1529175149291187".into();
let expected: time::OffsetDateTime = datetime!(2018-06-16 18:52:29.291187 UTC);
assert_eq!(Some(expected), parse_microseconds(input));
}
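// The function under test is defined elsewhere in this crate. As a
// hedged sketch of its assumed shape, it interprets the input string as
// microseconds since the Unix epoch:
//
//     fn parse_microseconds(input: String) -> Option<time::OffsetDateTime> {
//         let micros: i128 = input.parse().ok()?;
//         time::OffsetDateTime::from_unix_timestamp_nanos(micros * 1000).ok()
//     }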

View file

@ -1,28 +1,6 @@
{ depot, ... }:
(with depot.ops.machines; [
sanduny
bugry
nevsky
]) ++
(with depot.users.tazjin.nixos; [
camden
frog
tverskoy
zamalek
]) ++
(with depot.users.aspen.system.system; [
yeren
mugwump
ogopogo
lusca
]) ++
(with depot.users.wpcarro.nixos; [
ava
kyoko
marcus
tarasco
meta01
public01
])

View file

@ -1,205 +0,0 @@
{ depot, lib, pkgs, ... }: # readTree options
{ config, ... }: # passed by module system
let
mod = name: depot.path.origSrc + ("/ops/modules/" + name);
in
{
imports = [
(mod "atward.nix")
(mod "depot-replica.nix")
(mod "known-hosts.nix")
(mod "nixery.nix")
(mod "tvl-cache.nix")
(mod "tvl-users.nix")
(mod "www/atward.tvl.fyi.nix")
(mod "www/nixery.dev.nix")
(mod "www/self-redirect.nix")
(mod "www/signup.tvl.fyi.nix")
(mod "www/static.tvl.fyi.nix")
(mod "www/todo.tvl.fyi.nix")
(mod "www/tvix.dev.nix")
(mod "www/tvl.fyi.nix")
(mod "www/tvl.su.nix")
(mod "www/wigglydonke.rs.nix")
(depot.third_party.agenix.src + "/modules/age.nix")
];
hardware.cpu.intel.updateMicrocode = true;
boot = {
tmp.useTmpfs = true;
kernelModules = [ "kvm-intel" ];
supportedFilesystems = [ "zfs" ];
kernelParams = [
"ip=91.199.149.239::91.199.149.1:255.255.255.0:bugry:enp6s0:none"
];
initrd = {
availableKernelModules = [ "uhci_hcd" "ehci_pci" "ahci" "usbhid" "usb_storage" "sd_mod" "e1000e" ];
# initrd SSH for disk unlocking
network = {
enable = true;
ssh = {
enable = true;
port = 2222;
authorizedKeys =
depot.users.tazjin.keys.all
++ depot.users.lukegb.keys.all
++ depot.users.sterni.keys.all;
hostKeys = [
/etc/secrets/initrd_host_ed25519_key
];
};
# this will launch the zfs password prompt on login and kill the
# other prompt
postCommands = ''
echo "zfs load-key -a && killall zfs" >> /root/.profile
'';
};
};
kernel.sysctl = {
"net.ipv4.tcp_congestion_control" = "bbr";
};
loader.grub = {
enable = true;
device = "/dev/disk/by-id/wwn-0x5002538ec0ae4c93";
};
zfs.requestEncryptionCredentials = true;
};
fileSystems = {
"/" = {
device = "tank/root";
fsType = "zfs";
};
"/boot" = {
device = "/dev/disk/by-uuid/70AC-4B48";
fsType = "vfat";
};
"/nix" = {
device = "tank/nix";
fsType = "zfs";
};
"/home" = {
device = "tank/home";
fsType = "zfs";
};
};
age.secrets = {
wg-privkey.file = depot.ops.secrets."wg-bugry.age";
};
networking = {
hostName = "bugry";
domain = "tvl.fyi";
hostId = "8425e349";
useDHCP = false;
interfaces.enp6s0.ipv4.addresses = [{
address = "91.199.149.239";
prefixLength = 24;
}];
defaultGateway = "91.199.149.1";
wireguard.interfaces.wg-nevsky = {
ips = [ "2a03:6f00:2:514b:5bc7:95ef:0:2/96" ];
privateKeyFile = "/run/agenix/wg-privkey";
peers = [{
publicKey = "gLyIY+R/YG9S8W8jtqE6pEV6MTyzeUX/PalL6iyvu3g="; # nevsky
endpoint = "188.225.81.75:51820";
persistentKeepalive = 25;
allowedIPs = [ "::/0" ];
}];
allowedIPsAsRoutes = false; # used as default v6 gateway below
};
defaultGateway6.address = "2a03:6f00:2:514b:5bc7:95ef::1";
defaultGateway6.interface = "wg-nevsky";
nameservers = [
"8.8.8.8"
"8.8.4.4"
];
firewall.allowedTCPPorts = [ 22 80 443 ];
};
# Generate an immutable /etc/resolv.conf from the nameserver settings
# above (otherwise DHCP overwrites it):
environment.etc."resolv.conf" = with lib; {
source = pkgs.writeText "resolv.conf" ''
${concatStringsSep "\n" (map (ns: "nameserver ${ns}") config.networking.nameservers)}
options edns0
'';
};
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
};
};
services.fail2ban.enable = true;
programs.mtr.enable = true;
programs.mosh.enable = true;
time.timeZone = "UTC";
nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
# Join TVL Tailscale network at net.tvl.fyi
services.tailscale = {
enable = true;
useRoutingFeatures = "both";
};
security.sudo.extraRules = [
{
groups = [ "wheel" ];
commands = [{ command = "ALL"; options = [ "NOPASSWD" ]; }];
}
];
zramSwap.enable = true;
tvl.cache.enable = true;
tvl.cache.builderball = true;
services.depot =
{
nixery.enable = true;
# Allow Gerrit to replicate depot to /var/lib/depot
replica.enable = true;
# Run atward, the search engine redirection thing.
atward.enable = true;
automatic-gc = {
enable = true;
interval = "1 hour";
diskThreshold = 50; # GiB (10% of disk)
maxFreed = 150; # GiB
preserveGenerations = "14d";
};
};
system.stateVersion = "24.11";
}

View file

@ -1,560 +0,0 @@
{ depot, lib, pkgs, ... }: # readTree options
{ config, ... }: # passed by module system
let
mod = name: depot.path.origSrc + ("/ops/modules/" + name);
in
{
imports = [
(depot.third_party.agenix.src + "/modules/age.nix")
(mod "builderball.nix")
(mod "cgit.nix")
(mod "cheddar.nix")
(mod "clbot.nix")
(mod "gerrit-autosubmit.nix")
(mod "harmonia.nix")
(mod "irccat.nix")
(mod "josh.nix")
(mod "known-hosts.nix")
(mod "livegrep.nix")
(mod "monitoring.nix")
(mod "monorepo-gerrit.nix")
(mod "owothia.nix")
(mod "panettone.nix")
(mod "paroxysm.nix")
(mod "restic.nix")
(mod "smtprelay.nix")
(mod "teleirc.nix")
(mod "tvl-buildkite.nix")
(mod "tvl-slapd/default.nix")
(mod "tvl-users.nix")
(mod "www/auth.tvl.fyi.nix")
(mod "www/b.tvl.fyi.nix")
(mod "www/cache.tvl.fyi.nix")
(mod "www/cache.tvl.su.nix")
(mod "www/cl.tvl.fyi.nix")
(mod "www/code.tvl.fyi.nix")
(mod "www/cs.tvl.fyi.nix")
(mod "www/grep.tvl.fyi.nix")
(mod "www/self-cache.tvl.fyi.nix")
(mod "www/self-redirect.nix")
(mod "www/status.tvl.su.nix")
];
hardware.cpu.amd.updateMicrocode = true;
hardware.enableRedistributableFirmware = true;
powerManagement.cpuFreqGovernor = "performance";
boot = {
tmp.useTmpfs = true;
kernelModules = [ "kvm-amd" ];
supportedFilesystems = [ "zfs" ];
kernelParams = [
"ip=188.225.81.75::188.225.81.1:255.255.255.0:nevsky:enp1s0f0np0:none"
];
initrd = {
availableKernelModules = [ "nvme" "xhci_pci" "usbhid" "ice" ];
# initrd SSH for disk unlocking
network = {
enable = true;
ssh = {
enable = true;
port = 2222;
authorizedKeys =
depot.users.tazjin.keys.all
++ depot.users.lukegb.keys.all
++ depot.users.sterni.keys.all;
hostKeys = [
/etc/secrets/initrd_host_ed25519_key
];
};
# this will launch the zfs password prompt on login and kill the
# other prompt
postCommands = ''
echo "zfs load-key -a && killall zfs" >> /root/.profile
'';
};
};
kernel.sysctl = {
"net.ipv4.tcp_congestion_control" = "bbr";
};
loader.systemd-boot.enable = true;
loader.efi.canTouchEfiVariables = true;
zfs.requestEncryptionCredentials = true;
};
fileSystems = {
"/" = {
device = "tank/root";
fsType = "zfs";
};
"/boot" = {
device = "/dev/disk/by-uuid/CCB4-8821";
fsType = "vfat";
};
"/nix" = {
device = "tank/nix";
fsType = "zfs";
};
"/home" = {
device = "tank/home";
fsType = "zfs";
};
"/depot" = {
device = "tank/depot";
fsType = "zfs";
};
};
age.secrets =
let
secretFile = name: depot.ops.secrets."${name}.age";
in
{
clbot.file = secretFile "clbot";
gerrit-autosubmit.file = secretFile "gerrit-autosubmit";
grafana.file = secretFile "grafana";
irccat.file = secretFile "irccat";
keycloak-db.file = secretFile "keycloak-db";
owothia.file = secretFile "owothia";
panettone.file = secretFile "panettone";
smtprelay.file = secretFile "smtprelay";
teleirc.file = secretFile "teleirc";
wg-privkey.file = depot.ops.secrets."wg-nevsky.age";
nix-cache-priv = {
file = secretFile "nix-cache-priv";
mode = "0440";
group = "harmonia";
};
# Not actually a secret
nix-cache-pub = {
file = secretFile "nix-cache-pub";
mode = "0444";
};
buildkite-agent-token = {
file = secretFile "buildkite-agent-token";
mode = "0440";
group = "buildkite-agents";
};
buildkite-graphql-token = {
file = secretFile "buildkite-graphql-token";
mode = "0440";
group = "buildkite-agents";
};
buildkite-besadii-config = {
file = secretFile "besadii";
mode = "0440";
group = "buildkite-agents";
};
buildkite-private-key = {
file = secretFile "buildkite-ssh-private-key";
mode = "0440";
group = "buildkite-agents";
};
gerrit-besadii-config = {
file = secretFile "besadii";
owner = "git";
};
gerrit-secrets = {
file = secretFile "gerrit-secrets";
path = "/var/lib/gerrit/etc/secure.config";
owner = "git";
mode = "0400";
};
clbot-ssh = {
file = secretFile "clbot-ssh";
owner = "clbot";
};
depot-replica-key = {
file = secretFile "depot-replica-key";
mode = "0500";
owner = "git";
group = "git";
path = "/var/lib/git/.ssh/id_ed25519";
};
};
networking = {
hostName = "nevsky";
domain = "tvl.fyi";
hostId = "0117d088";
useDHCP = false;
interfaces.enp1s0f0np0.ipv4.addresses = [{
address = "188.225.81.75";
prefixLength = 24;
}];
defaultGateway = "188.225.81.1";
interfaces.enp1s0f0np0.ipv6.addresses = [{
address = "2a03:6f00:2:514b:0:feed:edef:beef";
prefixLength = 64;
}];
defaultGateway6 = {
address = "2a03:6f00:2:514b::1";
interface = "enp1s0f0np0";
};
wireguard.interfaces.wg-bugry = {
ips = [ "2a03:6f00:2:514b:5bc7:95ef::1/96" ];
privateKeyFile = "/run/agenix/wg-privkey";
listenPort = 51820;
postSetup = ''
${pkgs.iptables}/bin/ip6tables -t nat -A POSTROUTING -s '2a03:6f00:2:514b:5bc7:95ef::1/96' -o enp1s0f0np0 -j MASQUERADE
ip -6 neigh add proxy 2a03:6f00:2:514b:5bc7:95ef:0:2 dev enp1s0f0np0
'';
postShutdown = ''
${pkgs.iptables}/bin/ip6tables -t nat -D POSTROUTING -s '2a03:6f00:2:514b:5bc7:95ef::1/96' -o enp1s0f0np0 -j MASQUERADE
ip -6 neigh del proxy 2a03:6f00:2:514b:5bc7:95ef:0:2 dev enp1s0f0np0
'';
peers = [{
publicKey = "+vFeWLH99aaypitw7x1J8IypoTrva28LItb1v2VjOAg="; # bugry
allowedIPs = [ "2a03:6f00:2:514b:5bc7:95ef::/96" ];
}];
allowedIPsAsRoutes = true;
};
nameservers = [
"8.8.8.8"
"8.8.4.4"
];
firewall.allowedTCPPorts = [ 22 80 443 29418 ];
firewall.allowedUDPPorts = [ 51820 ];
};
# Generate an immutable /etc/resolv.conf from the nameserver settings
# above (otherwise DHCP overwrites it):
environment.etc."resolv.conf" = with lib; {
source = pkgs.writeText "resolv.conf" ''
${concatStringsSep "\n" (map (ns: "nameserver ${ns}") config.networking.nameservers)}
options edns0
'';
};
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
KbdInteractiveAuthentication = false;
};
};
services.fail2ban.enable = true;
programs.mtr.enable = true;
programs.mosh.enable = true;
time.timeZone = "UTC";
nixpkgs.hostPlatform = "x86_64-linux";
services.fwupd.enable = true;
services.postgresql = {
enable = true;
enableTCPIP = true;
package = pkgs.postgresql_16;
authentication = lib.mkForce ''
local all all trust
host all all 127.0.0.1/32 password
host all all ::1/128 password
hostnossl all all 127.0.0.1/32 password
hostnossl all all ::1/128 password
'';
ensureDatabases = [
"panettone"
];
ensureUsers = [{
name = "panettone";
ensureDBOwnership = true;
}];
};
# Join TVL Tailscale network at net.tvl.fyi
services.tailscale = {
enable = true;
useRoutingFeatures = "both";
};
services.depot = {
# Run a Harmonia binary cache.
#
# TODO(tazjin): switch to upstream module after fix for Nix 2.3
harmonia = {
enable = true;
signKeyPaths = [ (config.age.secretsDir + "/nix-cache-priv") ];
settings.bind = "127.0.0.1:6443";
settings.priority = 50;
};
builderball.enable = true;
# Run Markdown/code renderer
cheddar.enable = true;
# Run a livegrep code search instance
livegrep.enable = true;
# Automatically collect garbage from the Nix store.
automatic-gc = {
enable = true;
interval = "1 hour";
diskThreshold = 200; # GiB
maxFreed = 420; # GiB
preserveGenerations = "60d";
};
# Run cgit & josh to serve git
cgit = {
enable = true;
user = "git"; # run as the same user as gerrit
};
josh.enable = true;
# Run a handful of Buildkite agents to support parallel builds.
buildkite = {
enable = true;
agentCount = 24;
largeSlots = 6;
};
# Run the Panettone issue tracker
panettone = {
enable = true;
dbUser = "panettone";
dbName = "panettone";
irccatChannel = "#tvl";
};
# Run the first cursed bot (quote bot)
paroxysm.enable = true;
# make our channel more owo
owothia = {
enable = true;
ircServer = "localhost";
ircPort = config.services.znc.config.Listener.l.Port;
};
# Run irccat to forward messages to IRC
irccat = {
enable = true;
config = {
tcp.listen = ":4722"; # "ircc"
irc = {
server = "localhost:${toString config.services.znc.config.Listener.l.Port}";
tls = false;
nick = "tvlbot";
# Note: irccat means 'ident' where it says 'realname', so
# this is critical for connecting to ZNC.
realname = "tvlbot";
channels = [
"#tvl"
];
};
};
};
# Start the Gerrit->IRC bot
clbot = {
enable = true;
channels = {
"#tvl" = { };
"#tvix-dev" = {
only_display = "tvix,nix-compat,third_party,third-party,3p";
};
};
# See //fun/clbot for details.
flags = {
gerrit_host = "cl.tvl.fyi:29418";
gerrit_ssh_auth_username = "clbot";
gerrit_ssh_auth_key = config.age.secretsDir + "/clbot-ssh";
irc_server = "localhost:${toString config.services.znc.config.Listener.l.Port}";
irc_user = "tvlbot";
irc_nick = "tvlbot";
notify_branches = "canon,refs/meta/config";
notify_repo = "depot";
# This secret is read from an environment variable, which is
# populated by a systemd EnvironmentFile.
irc_pass = "$CLBOT_PASS";
};
};
# Start a local SMTP relay to Gmail (used by gerrit)
smtprelay = {
enable = true;
args = {
listen = ":2525";
remote_host = "smtp.gmail.com:587";
remote_auth = "plain";
remote_user = "tvlbot@tazj.in";
};
};
# Run the Telegram<>IRC bridge for Volga Sprint.
teleirc.enable = true;
# Configure backups to GleSYS
restic = {
enable = true;
paths = [
"/var/backup/postgresql"
"/var/lib/grafana"
"/var/lib/znc"
];
};
# Run autosubmit bot for Gerrit
gerrit-autosubmit.enable = true;
};
# Start a ZNC instance which bounces for tvlbot and owothia.
services.znc = {
enable = true;
useLegacyConfig = false;
config = {
LoadModule = [
"webadmin"
"adminlog"
];
User.admin = {
Admin = true;
Pass.password = {
Method = "sha256";
Hash = "bb00aa8239de484c2925b1c3f6a196fb7612633f001daa9b674f83abe7e1103f";
Salt = "TiB0Ochb1CrtpMTl;2;j";
};
};
Listener.l = {
Host = "localhost";
Port = 2627; # bncr
SSL = false;
};
Listener.tailscale = {
Host = "100.64.0.11";
Port = 2627; # bncr
SSL = false;
};
};
};
services.keycloak = {
enable = true;
settings = {
http-port = 5925; # kycl
hostname = "auth.tvl.fyi";
http-relative-path = "/auth";
proxy-headers = "xforwarded";
http-enabled = true;
};
database = {
type = "postgresql";
passwordFile = config.age.secretsDir + "/keycloak-db";
createLocally = false;
};
};
services.postgresqlBackup = {
enable = true;
databases = [
"keycloak"
"panettone"
"tvldb"
];
};
# Use the TVL cache locally through the proxy, for cross-builder substitution.
tvl.cache.enable = true;
tvl.cache.builderball = true;
# Disable background git gc system-wide, as it has a tendency to break CI.
environment.etc."gitconfig".source = pkgs.writeText "gitconfig" ''
[gc]
autoDetach = false
'';
security.sudo.extraRules = [{
groups = [ "wheel" ];
commands = [{ command = "ALL"; options = [ "NOPASSWD" ]; }];
}];
users = {
# Set up a user & group for git shenanigans
groups.git = { };
users.git = {
group = "git";
isSystemUser = true;
createHome = true;
home = "/var/lib/git";
};
};
zramSwap.enable = true;
environment.systemPackages = (with pkgs; [
bat
bb
curl
direnv
emacs-nox
fd
git
htop
hyperfine
jq
nano
nix-diff
nix-top
nvd
ripgrep
screen
tig
tree
unzip
vim
watchexec
zfs
zfstools
]);
system.stateVersion = "24.11";
}

View file

@ -1,138 +0,0 @@
# sanduny.tvl.su
#
# This is a VPS hosted with Bitfolk, intended to additionally serve
# some of our public services like cgit, josh and the websites.
#
# In case whitby goes down, sanduny will keep depot available.
_: # ignore readTree options
{ config, depot, lib, pkgs, ... }:
let
mod = name: depot.path.origSrc + ("/ops/modules/" + name);
in
{
imports = [
(mod "cgit.nix")
(mod "depot-inbox.nix")
(mod "depot-replica.nix")
(mod "journaldriver.nix")
(mod "known-hosts.nix")
(mod "tvl-cache.nix")
(mod "tvl-headscale.nix")
(mod "tvl-users.nix")
(mod "www/inbox.tvl.su.nix")
(mod "www/self-redirect.nix")
(mod "www/volgasprint.org.nix")
];
networking = {
hostName = "sanduny";
domain = "tvl.su";
useDHCP = false;
interfaces.eth0 = {
ipv4.addresses = lib.singleton {
address = "85.119.82.231";
prefixLength = 21;
};
ipv6.addresses = lib.singleton {
address = "2001:ba8:1f1:f109::feed:edef:beef";
prefixLength = 64;
};
};
defaultGateway = "85.119.80.1";
defaultGateway6.address = "2001:ba8:1f1:f109::1";
firewall.allowedTCPPorts = [ 22 80 443 ];
# https://bitfolk.com/customer_information.html#toc_2_DNS
nameservers = [
"85.119.80.232"
"85.119.80.233"
"2001:ba8:1f1:f205::53"
"2001:ba8:1f1:f206::53"
];
};
security.sudo.wheelNeedsPassword = false;
environment.systemPackages = with pkgs; [
emacs-nox
vim
curl
unzip
htop
];
programs.mtr.enable = true;
services.openssh.enable = true;
services.fail2ban.enable = true;
# Run tailscale for the TVL net.tvl.fyi network.
# tailscale up --login-server https://net.tvl.fyi --accept-dns=false --advertise-exit-node
services.tailscale = {
enable = true;
useRoutingFeatures = "server"; # for exit-node usage
};
# Automatically collect garbage from the Nix store.
services.depot.automatic-gc = {
enable = true;
interval = "1 hour";
diskThreshold = 2; # GiB
maxFreed = 5; # GiB
preserveGenerations = "90d";
};
# Allow Gerrit to replicate depot to /var/lib/depot
services.depot.replica.enable = true;
# Run git serving tools locally ...
services.depot.cgit = {
enable = true;
repo = "/var/lib/depot";
};
# Serve public-inbox ...
services.depot.inbox.enable = true;
time.timeZone = "UTC";
# GRUB does not actually need to be installed on disk; Bitfolk have
# their own way of booting systems as long as config is in place.
boot.loader.grub.device = "nodev";
boot.loader.grub.enable = true;
boot.initrd.availableKernelModules = [ "xen_blkfront" ];
hardware.cpu.intel.updateMicrocode = true;
fileSystems = {
"/" = {
device = "/dev/disk/by-uuid/aabc3638-43ca-45f3-af89-c451e8448e92";
fsType = "ext4";
};
"/boot" = {
device = "/dev/disk/by-uuid/75aa99d5-fed7-4c5c-8570-7745f6cff9f5";
fsType = "ext3";
};
"/nix" = {
device = "/dev/disk/by-uuid/d1721678-c294-482b-b72e-3b15f2c56c63";
fsType = "ext4";
};
};
tvl.cache.enable = true;
swapDevices = lib.singleton {
device = "/dev/disk/by-uuid/df4ad9da-0a06-4c27-93e5-5d44e4750e55";
};
system.stateVersion = "22.05"; # Did you read the comment?
}

View file

@ -1,55 +0,0 @@
# Configuration for running the TVL cgit instance using thttpd.
{ config, depot, lib, pkgs, ... }:
let
cfg = config.services.depot.cgit;
userConfig =
if builtins.isNull cfg.user then {
DynamicUser = true;
} else {
User = cfg.user;
Group = cfg.user;
};
in
{
options.services.depot.cgit = with lib; {
enable = mkEnableOption "Run cgit web interface for depot";
port = mkOption {
description = "Port on which cgit should listen";
type = types.int;
default = 2448;
};
repo = mkOption {
description = "Path to depot's .git folder on the machine";
type = types.str;
default = "/var/lib/gerrit/git/depot.git/";
};
user = mkOption {
description = ''
User to use for the cgit service. It is expected that this is
also the name of the user's primary group.
'';
type = with types; nullOr str;
default = null;
};
};
config = lib.mkIf cfg.enable {
systemd.services.cgit = {
wantedBy = [ "multi-user.target" ];
serviceConfig = {
Restart = "on-failure";
ExecStart = depot.web.cgit-tvl.override {
inherit (cfg) port repo;
};
} // userConfig;
};
};
}

View file

@ -1,29 +0,0 @@
{ depot, config, pkgs, lib, ... }:
let
cfg = config.services.depot.cheddar;
description = "cheddar - markdown/highlighting server";
in
{
options.services.depot.cheddar = with lib; {
enable = mkEnableOption description;
port = mkOption {
description = "Port on which cheddar should listen";
type = types.int;
default = 4238;
};
};
config = lib.mkIf cfg.enable {
systemd.services.cheddar-server = {
inherit description;
wantedBy = [ "multi-user.target" ];
script = "${depot.tools.cheddar}/bin/cheddar --listen 0.0.0.0:${toString cfg.port} --sourcegraph-server";
serviceConfig = {
DynamicUser = true;
Restart = "always";
};
};
};
}

View file

@ -9,6 +9,5 @@
imports = [
./automatic-gc.nix
./auto-deploy.nix
./tvl-cache.nix
];
}

View file

@ -1,148 +0,0 @@
# public-inbox configuration for depot@tvl.su
#
# The account itself is a Yandex 360 account in the tvl.su organisation, which
# is accessed via IMAP. Yandex takes care of spam filtering for us, so there is
# no particular SpamAssassin or other configuration.
{ config, depot, lib, pkgs, ... }:
let
cfg = config.services.depot.inbox;
imapConfig = pkgs.writeText "offlineimaprc" ''
[general]
accounts = depot
[Account depot]
localrepository = Local
remoterepository = Remote
[Repository Local]
type = Maildir
localfolders = /var/lib/public-inbox/depot-imap
[Repository Remote]
type = IMAP
ssl = yes
sslcacertfile = /etc/ssl/certs/ca-bundle.crt
remotehost = imap.yandex.ru
remoteuser = depot@tvl.su
remotepassfile = /var/run/agenix/depot-inbox-imap
'';
in
{
options.services.depot.inbox = with lib; {
enable = mkEnableOption "Enable public-inbox for depot@tvl.su";
depotPath = mkOption {
description = "path to local depot replica";
type = types.str;
default = "/var/lib/depot";
};
};
config = lib.mkIf cfg.enable {
# Having nginx *and* other services use ACME certificates for the
# same hostname is unsupported in NixOS without resorting to fully
# manual ACME configuration.
#
# To work around this, we duplicate the TLS certificate used by
# nginx to a location that is readable by public-inbox daemons.
systemd.services.inbox-cert-sync = {
startAt = "daily";
script = ''
${pkgs.coreutils}/bin/install -D -g ${config.users.groups."public-inbox".name} -m 0440 \
/var/lib/acme/inbox.tvl.su/fullchain.pem /var/lib/public-inbox/tls/fullchain.pem
${pkgs.coreutils}/bin/install -D -g ${config.users.groups."public-inbox".name} -m 0440 \
/var/lib/acme/inbox.tvl.su/key.pem /var/lib/public-inbox/tls/key.pem
'';
};
services.public-inbox = {
enable = true;
http.enable = true;
http.port = 8053;
imap = {
enable = true;
port = 993;
cert = "/var/lib/public-inbox/tls/fullchain.pem";
key = "/var/lib/public-inbox/tls/key.pem";
};
nntp = {
enable = true;
port = 563;
cert = "/var/lib/public-inbox/tls/fullchain.pem";
key = "/var/lib/public-inbox/tls/key.pem";
};
inboxes.depot = rec {
address = [
"depot@tvl.su" # primary address
"depot@tazj.in" # legacy address
];
description = "TVL depot development (mail to depot@tvl.su)";
coderepo = [ "depot" ];
url = "https://inbox.tvl.su/depot";
watch = [
"maildir:/var/lib/public-inbox/depot-imap/INBOX/"
];
newsgroup = "su.tvl.depot";
};
settings.coderepo.depot = {
dir = cfg.depotPath;
cgitUrl = "https://code.tvl.fyi";
};
settings.publicinbox = {
wwwlisting = "all";
nntpserver = [ "inbox.tvl.su" ];
imapserver = [ "inbox.tvl.su" ];
depot.obfuscate = true;
noObfuscate = [
"tvl.su"
"tvl.fyi"
];
};
};
networking.firewall.allowedTCPPorts = [
993 # imap
563 # nntp
];
age.secrets.depot-inbox-imap = {
file = depot.ops.secrets."depot-inbox-imap.age";
mode = "0440";
group = config.users.groups."public-inbox".name;
};
systemd.services.offlineimap-depot = {
description = "download mail for depot@tvl.su";
wantedBy = [ "multi-user.target" ];
startAt = "minutely";
script = ''
mkdir -p /var/lib/public-inbox/depot-imap
${pkgs.offlineimap}/bin/offlineimap -c ${imapConfig}
'';
serviceConfig = {
Type = "oneshot";
# Run in the same user context as public-inbox itself to avoid
# permissions trouble.
User = config.users.users."public-inbox".name;
Group = config.users.groups."public-inbox".name;
};
};
};
}

View file

@ -1,45 +0,0 @@
# Configuration for receiving a depot replica from Gerrit's
# replication plugin.
#
# This only prepares the user and folder for receiving the replica,
# but the Gerrit configuration still needs to be adjusted separately.
{ config, depot, lib, pkgs, ... }:
let
cfg = config.services.depot.replica;
in
{
options.services.depot.replica = with lib; {
enable = mkEnableOption "Receive depot git replica from Gerrit";
key = mkOption {
description = "Public key to use for replication";
type = types.str;
default = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFFab9O1xaQ1TCyn+CxmXHexdlLzURREG+UR3Qdi3BvH";
};
path = mkOption {
description = "Replication destination path (will be created)";
type = types.str;
default = "/var/lib/depot";
};
};
config = lib.mkIf cfg.enable {
users.groups.depot = { };
users.users.depot = {
group = "depot";
isSystemUser = true;
createHome = true;
home = cfg.path;
homeMode = "755"; # everyone can read depot
openssh.authorizedKeys.keys = lib.singleton cfg.key;
shell = pkgs.bashInteractive; # gerrit needs to run shell commands
};
environment.systemPackages = [
pkgs.git
];
};
}

View file

@ -1,26 +0,0 @@
# Configures journaldriver to forward to the tvl-fyi GCP project from
# TVL machines.
{ config, depot, lib, pkgs, ... }:
{
imports = [
(depot.third_party.agenix.src + "/modules/age.nix")
];
age.secrets.journaldriver.file = depot.ops.secrets."journaldriver.age";
services.journaldriver = {
enable = true;
googleCloudProject = "tvl-fyi";
logStream = config.networking.hostName;
};
# Override the systemd service defined in the nixpkgs module to use
# the credentials provided by agenix.
systemd.services.journaldriver = {
serviceConfig = {
LoadCredential = "journaldriver.json:/run/agenix/journaldriver";
ExecStart = lib.mkForce "${pkgs.coreutils}/bin/env GOOGLE_APPLICATION_CREDENTIALS=\"\${CREDENTIALS_DIRECTORY}/journaldriver.json\" ${depot.ops.journaldriver}/bin/journaldriver";
};
};
}

View file

@ -3,21 +3,6 @@
{
programs.ssh.knownHosts = {
sanduny = {
hostNames = [ "sanduny.tvl.su" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOag0XhylaTVhmT6HB8EN2Fv5Ymrc4ZfypOXONUkykTX";
};
bugry = {
hostNames = [ "bugry.tvl.fyi" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGqG6sITyJ/UsQ/RtYqmmMvTT4r4sppadoQIz5SvA+5J";
};
nevsky = {
hostNames = [ "nevsky.tvl.fyi" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHQe7M+G8Id3ZD7j+I07TCUV1o12q1vpsOXHRlcPSEfa";
};
github = {
hostNames = [ "github.com" ];
publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl";

View file

@ -1,44 +0,0 @@
# NixOS module to run Nixery, currently with local-storage as the
# backend for storing/serving image layers.
{ depot, config, lib, pkgs, ... }:
let
cfg = config.services.depot.nixery;
description = "Nixery - container images on-demand";
nixpkgsSrc = depot.third_party.sources.nixpkgs;
storagePath = "/var/lib/nixery/${nixpkgsSrc.rev}";
in
{
options.services.depot.nixery = {
enable = lib.mkEnableOption description;
port = lib.mkOption {
type = lib.types.int;
default = 45243; # "image"
description = "Port on which Nixery should listen";
};
};
config = lib.mkIf cfg.enable {
systemd.services.nixery = {
inherit description;
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
StateDirectory = "nixery";
Restart = "always";
ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p ${storagePath}";
ExecStart = "${depot.tools.nixery.nixery}/bin/server";
};
environment = {
PORT = toString cfg.port;
NIXERY_PKGS_PATH = nixpkgsSrc.outPath;
NIXERY_STORAGE_BACKEND = "filesystem";
NIX_TIMEOUT = "60"; # seconds
STORAGE_PATH = storagePath;
};
};
};
}

View file

@ -1,119 +0,0 @@
{ depot, config, lib, pkgs, ... }:
let
cfg = config.services.depot.panettone;
in
{
options.services.depot.panettone = with lib; {
enable = mkEnableOption "Panettone issue tracker";
port = mkOption {
description = "Port on which Panettone should listen";
type = types.int;
default = 7268;
};
dbHost = mkOption {
description = "Postgresql host to connect to for Panettone";
type = types.str;
default = "localhost";
};
dbName = mkOption {
description = "Name of the database for Panettone";
type = types.str;
default = "panettone";
};
dbUser = mkOption {
description = "Name of the database user for Panettone";
type = types.str;
default = "panettone";
};
secretsFile = mkOption {
description = ''
Path to a file containing secrets, in the format accepted
by systemd's EnvironmentFile
'';
type = types.str;
default = config.age.secretsDir + "/panettone";
};
irccatHost = mkOption {
description = "Hostname for the irccat instance";
type = types.str;
default = "localhost";
};
irccatPort = mkOption {
description = "Port for the irccat instance";
type = types.int;
default = 4722;
};
irccatChannel = mkOption {
description = "IRC channels to post to via irccat";
type = types.str;
};
};
config = lib.mkIf cfg.enable {
assertions = [{
assertion =
cfg.dbHost != "localhost" || config.services.postgresql.enable;
message = "Panettone requires a postgresql database";
}
{
assertion =
cfg.dbHost != "localhost" || config.services.postgresql.enableTCPIP;
message = "Panettone can only connect to the postgresql database over TCP";
}
{
assertion =
cfg.dbHost != "localhost" || (lib.any
(user: user.name == cfg.dbUser)
config.services.postgresql.ensureUsers);
message = "Panettone requires a database user";
}
{
assertion =
cfg.dbHost != "localhost" || (lib.any
(db: db == cfg.dbName)
config.services.postgresql.ensureDatabases);
message = "Panettone requires a database";
}];
systemd.services.panettone = {
wantedBy = [ "multi-user.target" ];
script = "${depot.web.panettone}/bin/panettone";
serviceConfig = {
DynamicUser = true;
Restart = "always";
EnvironmentFile = cfg.secretsFile;
};
environment = {
PANETTONE_PORT = toString cfg.port;
PGHOST = "localhost";
PGUSER = cfg.dbUser;
PGDATABASE = cfg.dbName;
IRCCATHOST = cfg.irccatHost;
IRCCATPORT = toString cfg.irccatPort;
ISSUECHANNEL = cfg.irccatChannel;
};
};
systemd.services.panettone-fixer = {
description = "Restart panettone regularly to work around b/225";
wantedBy = [ "multi-user.target" ];
script = "${pkgs.systemd}/bin/systemctl restart panettone";
serviceConfig.Type = "oneshot";
# We don't exactly know how frequently this occurs, but
# _probably_ not more than hourly.
startAt = "hourly";
};
};
}

View file

@ -1,81 +0,0 @@
# Configures an OpenLDAP instance for TVL
#
# TODO(tazjin): Configure ldaps://
{ depot, lib, pkgs, ... }:
with depot.nix.yants;
let
user = struct {
username = string;
email = string;
password = string;
displayName = option string;
};
toLdif = defun [ user string ] (u: ''
dn: cn=${u.username},ou=users,dc=tvl,dc=fyi
objectClass: organizationalPerson
objectClass: inetOrgPerson
sn: ${u.username}
cn: ${u.username}
displayName: ${u.displayName or u.username}
mail: ${u.email}
userPassword: ${u.password}
'');
inherit (depot.ops) users;
in
{
services.openldap = {
enable = true;
settings.children = {
"olcDatabase={1}mdb".attrs = {
objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
olcDatabase = "{1}mdb";
olcDbDirectory = "/var/lib/openldap/db";
olcSuffix = "dc=tvl,dc=fyi";
olcAccess = "to * by * read";
olcRootDN = "cn=admin,dc=tvl,dc=fyi";
olcRootPW = "{ARGON2}$argon2id$v=19$m=65536,t=2,p=1$OfcgkOQ96VQ3aJj7NfA9vQ$oS6HQOkYl/bUYg4SejpltQYy7kvqx/RUxvoR4zo1vXU";
};
"cn=module{0}".attrs = {
objectClass = "olcModuleList";
olcModuleLoad = "argon2";
};
"cn=schema".includes =
map (schema: "${pkgs.openldap}/etc/schema/${schema}.ldif")
[ "core" "cosine" "inetorgperson" "nis" ];
};
# Contents are immutable at runtime, and adding user accounts etc.
# is done statically in the LDIF-formatted contents in this folder.
declarativeContents."dc=tvl,dc=fyi" = ''
dn: dc=tvl,dc=fyi
dc: tvl
o: TVL LDAP server
description: Root entry for tvl.fyi
objectClass: top
objectClass: dcObject
objectClass: organization
dn: ou=users,dc=tvl,dc=fyi
ou: users
description: All users in TVL
objectClass: top
objectClass: organizationalUnit
dn: ou=groups,dc=tvl,dc=fyi
ou: groups
description: All groups in TVL
objectClass: top
objectClass: organizationalUnit
${lib.concatStringsSep "\n" (map toLdif users)}
'';
};
}

View file

@ -1,83 +0,0 @@
# Standard NixOS users for TVL machines, as well as configuration that
# should follow along when they are added to a machine.
{ depot, pkgs, ... }:
{
users = {
users.tazjin = {
isNormalUser = true;
extraGroups = [ "git" "wheel" ];
shell = pkgs.fish;
openssh.authorizedKeys.keys = depot.users.tazjin.keys.all;
};
users.lukegb = {
isNormalUser = true;
extraGroups = [ "git" "wheel" ];
openssh.authorizedKeys.keys = depot.users.lukegb.keys.all;
};
users.aspen = {
isNormalUser = true;
extraGroups = [ "git" "wheel" ];
openssh.authorizedKeys.keys = [ depot.users.aspen.keys.whitby ];
};
users.edef = {
isNormalUser = true;
extraGroups = [ "git" ];
openssh.authorizedKeys.keys = depot.users.edef.keys.all;
};
users.qyliss = {
isNormalUser = true;
description = "Alyssa Ross";
extraGroups = [ "git" ];
openssh.authorizedKeys.keys = depot.users.qyliss.keys.all;
};
users.eta = {
isNormalUser = true;
extraGroups = [ "git" ];
openssh.authorizedKeys.keys = depot.users.eta.keys.whitby;
};
users.cynthia = {
isNormalUser = true; # I'm normal OwO :3
extraGroups = [ "git" ];
openssh.authorizedKeys.keys = depot.users.cynthia.keys.all;
};
users.firefly = {
isNormalUser = true;
extraGroups = [ "git" ];
openssh.authorizedKeys.keys = depot.users.firefly.keys.whitby;
};
users.sterni = {
isNormalUser = true;
extraGroups = [ "git" "wheel" ];
openssh.authorizedKeys.keys = depot.users.sterni.keys.all;
};
users.flokli = {
isNormalUser = true;
extraGroups = [ "git" "wheel" ];
openssh.authorizedKeys.keys = depot.users.flokli.keys.all;
};
};
programs.fish.enable = true;
environment.systemPackages = with pkgs; [
alacritty.terminfo
foot.terminfo
rxvt-unicode-unwrapped.terminfo
kitty.terminfo
];
security.sudo.extraRules = [{
groups = [ "wheel" ];
commands = [{ command = "ALL"; options = [ "NOPASSWD" ]; }];
}];
}

View file

@ -1,33 +0,0 @@
# Serve atward, the query redirection ... thing.
{ config, ... }:
{
imports = [
./base.nix
];
config = {
# Short link support (i.e. plain http://at) for users with a
# configured tvl.fyi/tvl.su search domain.
services.nginx.virtualHosts."at-shortlink" = {
serverName = "at";
extraConfig = "return 302 https://atward.tvl.fyi$request_uri;";
};
services.nginx.virtualHosts."atward" = {
serverName = "atward.tvl.fyi";
enableACME = true;
forceSSL = true;
serverAliases = [
"atward.tvl.su"
"at.tvl.fyi"
"at.tvl.su"
];
locations."/" = {
proxyPass = "http://localhost:${toString config.services.depot.atward.port}";
};
};
};
}

View file

@ -1,28 +0,0 @@
{ config, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."auth.tvl.fyi" = {
serverName = "auth.tvl.fyi";
enableACME = true;
forceSSL = true;
extraConfig = ''
# increase buffer size for large headers
proxy_buffers 8 16k;
proxy_buffer_size 16k;
location / {
proxy_pass http://localhost:${toString config.services.keycloak.settings.http-port};
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto https;
proxy_set_header Host $host;
}
'';
};
};
}

View file

@ -1,32 +0,0 @@
{ config, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."b-shortlink" = {
serverName = "b";
extraConfig = "return 302 https://b.tvl.fyi$request_uri;";
};
services.nginx.virtualHosts."b.tvl.fyi" = {
serverName = "b.tvl.fyi";
serverAliases = [ "b.tvl.su" ];
enableACME = true;
forceSSL = true;
extraConfig = ''
# Forward short links to issues to the issue itself (b/32)
location ~ ^/(\d+)$ {
return 302 https://b.tvl.fyi/issues$request_uri;
}
location / {
proxy_pass http://localhost:${toString config.services.depot.panettone.port};
}
'';
};
};
}

View file

@ -1,55 +0,0 @@
# Publicly serve builderball cache. This is an experimental setup, and separate
# from the "normal" harmonia cache on cache.tvl.su.
{ config, ... }:
let
# This attrset forms a linked list of hosts, which delegate ACME fallbacks to
# each other. These *must* form a circle, otherwise we may end up walking only
# part of the ring.
#
# TODO: remove whitby from here, it is gone; leaving this code for now for
# easier discovery when reconfiguring this.
acmeFallback = host: ({
whitby = "nevsky.cache.tvl.fyi";
nevsky = "whitby.cache.tvl.fyi"; # GOTO 1
})."${host}";
in
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."cache.tvl.fyi" = {
serverName = "cache.tvl.fyi";
enableACME = true;
forceSSL = true;
# This enables fetching TLS certificates for the same domain on different
# hosts. This config is kind of messy; it would be nice to generate a
# correct ring from the depot fixpoint, but this may be impossible due to
# infinite recursion. Please read the comment on `acmeFallback` above.
#
# TODO: whitby is gone, this is not needed at the moment
# acmeFallbackHost = acmeFallback config.networking.hostName;
extraConfig = ''
location = /cache-key.pub {
alias /run/agenix/nix-cache-pub;
}
location = / {
proxy_pass http://${config.services.depot.harmonia.settings.bind};
}
location / {
proxy_pass http://localhost:${toString config.services.depot.builderball.port};
}
'';
};
# participating hosts should use their local cache, otherwise they might end
# up querying themselves from afar for data they don't have.
networking.extraHosts = "127.0.0.1 cache.tvl.fyi";
};
}

View file

@ -1,25 +0,0 @@
{ config, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."cache.tvl.su" = {
serverName = "cache.tvl.su";
enableACME = true;
forceSSL = true;
extraConfig = ''
location = /cache-key.pub {
alias /run/agenix/nix-cache-pub;
}
location / {
proxy_pass http://${config.services.depot.harmonia.settings.bind};
}
'';
};
};
}

View file

@ -1,66 +0,0 @@
# This configuration redirects from the previous Sourcegraph instance to
# livegrep/cgit where appropriate.
{ config, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."cs.tvl.fyi" = {
serverName = "cs.tvl.fyi";
serverAliases = [ "cs.tvl.su" ];
enableACME = true;
forceSSL = true;
extraConfig = ''
set $lineno "";
# depot root
location = /depot {
return 301 https://code.tvl.fyi/tree/;
}
# folder/file on canon
location ~ ^/depot/-/(blob|tree)/([^\s]*)$ {
set $path $2;
if ($args ~ ^L(\d+)(-\d+)?$) {
set $lineno "#n$1";
}
return 302 https://code.tvl.fyi/tree/$path$lineno;
}
# folder/file on specific commit
location ~ ^/depot@([a-f0-9]+)/-/(blob|tree)/([^\s]*)$ {
set $commit $1;
set $path $3;
if ($args ~ ^L(\d+)(-\d+)?$) {
set $lineno "#n$1";
}
return 302 https://code.tvl.fyi/tree/$path?id=$commit$lineno;
}
# commit info
location ~ ^/depot/-/commit/([a-f0-9]+)$ {
set $commit $1;
return 302 https://code.tvl.fyi/commit/?id=$commit;
}
# search handler
# This only redirects to the new search; it doesn't try to parse and
# rewrite the query.
location /search {
return 302 https://grep.tvl.fyi/search;
}
location / {
return 404 "TVL code search has moved to grep.tvl.fyi and we could not figure out how to rewrite your query. Sorry!";
}
'';
};
};
}

View file

@ -1,22 +0,0 @@
{ pkgs, ... }:
{
imports = [
./base.nix
];
config = {
# Ensure the directory for deployment diffs exists.
systemd.tmpfiles.rules = [
"d /var/html/deploys.tvl.fyi/diff 0755 nginx nginx -"
];
services.nginx.virtualHosts."deploys.tvl.fyi" = {
enableACME = true;
forceSSL = true;
root = "/var/html/deploys.tvl.fyi";
};
services.depot.restic.paths = [ "/var/html/deploys.tvl.fyi" ];
};
}

View file

@ -1,19 +0,0 @@
# Experimental configuration for manually running Livegrep.
{ config, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."grep.tvl.fyi" = {
enableACME = true;
forceSSL = true;
locations."/" = {
proxyPass = "http://127.0.0.1:${toString config.services.depot.livegrep.port}";
};
};
};
}

View file

@ -1,31 +0,0 @@
{ config, depot, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."inbox.tvl.su" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
# nginx is incapable of serving a single file at /, hence this hack:
location = / {
index /landing-page;
}
location = /landing-page {
types { } default_type "text/html; charset=utf-8";
alias ${depot.web.inbox};
}
# rest of requests is proxied to public-inbox-httpd
location / {
proxy_pass http://localhost:${toString config.services.public-inbox.http.port};
}
'';
};
};
}

View file

@ -1,26 +0,0 @@
# per-host addresses for publicly reachable caches, for use with builderball
# TODO(tazjin): merge with the public cache module, but that needs ACME fixes
{ config, lib, ... }:
{
imports = [
./base.nix
];
config = lib.mkIf config.services.depot.harmonia.enable {
services.nginx.virtualHosts."${config.networking.hostName}.cache.tvl.fyi" = {
enableACME = true;
forceSSL = true;
extraConfig = ''
location = /cache-key.pub {
alias /run/agenix/nix-cache-pub;
}
location / {
proxy_pass http://${config.services.depot.harmonia.settings.bind};
}
'';
};
};
}

View file

@ -1,19 +0,0 @@
{ depot, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."signup.tvl.fyi" = {
root = depot.web.pwcrypt;
enableACME = true;
forceSSL = true;
extraConfig = ''
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
'';
};
};
}

View file

@ -1,42 +0,0 @@
# Host the static assets at static.tvl.fyi
#
# All assets are served from $base/$drvhash/$file, but can also be
# included with `latest/`, which will return a (non-permanent!)
# redirect to the real location.
#
# For all purposes within depot, using the drvhash of web.static is
# recommended.
{ depot, pkgs, ... }:
let staticHash = depot.web.static.drvHash;
in {
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."static.tvl.fyi" = {
serverAliases = [ "static.tvl.su" ];
enableACME = true;
forceSSL = true;
extraConfig = ''
location = / {
add_header Content-Type text/plain;
return 200 "looking for tvl.fyi or tvl.su?";
}
location /latest {
rewrite ^/latest/(.*) /${staticHash}/$1 redirect;
}
location /${staticHash}/ {
alias ${depot.web.static}/;
expires max;
add_header Access-Control-Allow-Origin "*";
add_header Cache-Control "public";
}
'';
};
};
}

View file

@ -1,25 +0,0 @@
{ depot, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."todo.tvl.fyi" = {
serverName = "todo.tvl.fyi";
serverAliases = [ "todo.tvl.su" ];
root = depot.web.todolist;
enableACME = true;
forceSSL = true;
extraConfig = ''
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
location ~* \.(webp|woff2)$ {
add_header Cache-Control "public, max-age=31536000";
}
'';
};
};
}

View file

@ -1,46 +0,0 @@
{ depot, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."tvix.dev" = {
serverName = "tvix.dev";
enableACME = true;
forceSSL = true;
root = depot.tvix.website;
};
services.nginx.virtualHosts."bolt.tvix.dev" = {
root = depot.web.tvixbolt;
enableACME = true;
forceSSL = true;
};
# old domain, serve redirect
services.nginx.virtualHosts."tvixbolt.tvl.su" = {
enableACME = true;
forceSSL = true;
extraConfig = "return 301 https://bolt.tvix.dev$request_uri;";
};
services.nginx.virtualHosts."docs.tvix.dev" = {
serverName = "docs.tvix.dev";
enableACME = true;
forceSSL = true;
extraConfig = ''
location = / {
# until we have a better default page here
return 301 https://docs.tvix.dev/rust/tvix_eval/index.html;
}
location /rust/ {
alias ${depot.tvix.rust-docs}/;
}
'';
};
};
}

View file

@ -1,47 +0,0 @@
{ depot, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."tvl.fyi" = {
serverName = "tvl.fyi";
root = depot.web.tvl;
enableACME = true;
forceSSL = true;
extraConfig = ''
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
rewrite ^/builds/?$ https://buildkite.com/tvl/depot/ last;
rewrite ^/monorepo-doc/?$ https://docs.google.com/document/d/1nnyByXcH0F6GOmEezNOUa2RFelpeRpDToBLYD_CtjWE/edit?usp=sharing last;
rewrite ^/irc/?$ ircs://irc.hackint.org:6697/#tvl last;
rewrite ^/webchat/?$ https://webirc.hackint.org/#ircs://irc.hackint.org/#tvl last;
location ~* \.(webp|woff2)$ {
add_header Cache-Control "public, max-age=31536000";
}
location /blog {
if ($request_uri ~ ^/(.*)\.html$) {
return 302 /$1;
}
try_files $uri $uri.html $uri/ =404;
}
location = /blog {
return 302 /#blog;
}
location = /blog/ {
return 302 /#blog;
}
'';
};
};
}

View file

@ -1,20 +0,0 @@
{ depot, ... }:
{
imports = [
./base.nix
];
config = {
services.nginx.virtualHosts."tvl.su" = {
serverName = "tvl.su";
root = depot.corp.website;
enableACME = true;
forceSSL = true;
extraConfig = ''
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
'';
};
};
}

View file

@ -1,4 +0,0 @@
/target/
**/*.rs.bk
.idea/
*.iml

View file

@ -1,20 +0,0 @@
A SERMON ON ETHICS AND LOVE
===========================
One day Mal-2 asked the messenger spirit Saint Gulik to approach the Goddess and request Her presence for some desperate advice. Shortly afterwards the radio came on by itself, and an ethereal female Voice said **YES?**
"O! Eris! Blessed Mother of Man! Queen of Chaos! Daughter of Discord! Concubine of Confusion! O! Exquisite Lady, I beseech You to lift a heavy burden from my heart!"
**WHAT BOTHERS YOU, MAL? YOU DON'T SOUND WELL.**
"I am filled with fear and tormented with terrible visions of pain. Everywhere people are hurting one another, the planet is rampant with injustices, whole societies plunder groups of their own people, mothers imprison sons, children perish while brothers war. O, woe."
**WHAT IS THE MATTER WITH THAT, IF IT IS WHAT YOU WANT TO DO?**
"But nobody Wants it! Everybody hates it."
**OH. WELL, THEN *STOP*.**
At which moment She turned herself into an aspirin commercial and left The Polyfather stranded alone with his species.
SINISTER DEXTER HAS A BROKEN SPIROMETER.

ops/mq_cli/Cargo.lock generated
View file

@ -1,168 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "ansi_term"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
dependencies = [
"winapi",
]
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "cc"
version = "1.0.72"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "2.34.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0610544180c38b88101fecf2dd634b174a62eef6946f84dfc6a7127512b381c"
dependencies = [
"ansi_term",
"atty",
"bitflags",
"strsim",
"textwrap",
"unicode-width",
"vec_map",
]
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "libc"
version = "0.2.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "mq_cli"
version = "3773.0.0"
dependencies = [
"clap",
"libc",
"nix",
"posix_mq",
]
[[package]]
name = "nix"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6"
dependencies = [
"bitflags",
"cc",
"cfg-if",
"libc",
"memoffset",
]
[[package]]
name = "posix_mq"
version = "3771.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f462ad79a99ea13f3ef76d9c271956e924183f5aeb67a8649c8c2b6bdd079da8"
dependencies = [
"libc",
"nix",
]
[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width",
]
[[package]]
name = "unicode-width"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

View file

@ -1,14 +0,0 @@
[package]
name = "mq_cli"
description = "CLI tool for accessing POSIX message queues (mq_overview(7))"
license = "MIT"
version = "3773.0.0"
authors = ["Vincent Ambo <tazjin@tvl.su>"]
homepage = "https://code.tvl.fyi/tree/ops/mq_cli"
repository = "https://code.tvl.fyi/depot.git:/ops/mq_cli.git"
[dependencies]
clap = "2.34"
libc = "0.2"
nix = "0.23"
posix_mq = "3771.0.0"

View file

@ -1,22 +0,0 @@
MIT License
Copyright (c) 2017-2018 Langler AS
Copyright (c) 2019-2020 Vincent Ambo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,42 +0,0 @@
mq-cli
======
This project provides a very simple CLI interface to [POSIX message queues][].
It can be used to create and inspect queues, as well as send and
receive messages from them.
```
1.0.0
Administrate and inspect POSIX message queues
USAGE:
mq <SUBCOMMAND>
FLAGS:
-h, --help Prints help information
-V, --version Prints version information
SUBCOMMANDS:
create Create a new queue
help Prints this message or the help of the given subcommand(s)
inspect inspect details about a queue
ls list message queues
receive Receive a message from a queue
rlimit Get the message queue rlimit
send Send a message to a queue
```
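For illustration, a typical session could look like this (queue name and
limits chosen arbitrarily; output abridged):

```
$ mq create /demo --max-size 8 --max-pending 10
Queue created successfully
$ mq send /demo "hello"
$ mq receive /demo
hello
```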
## Development
Development happens in the [TVL
monorepo](https://code.tvl.fyi/tree/ops/mq_cli).
Starting from version `3773.0.0`, the version numbers correspond to
_revisions_ of the TVL repository, available as git refs (e.g.
`refs/r/3773`).
See the TVL documentation for more information about how to contribute
to the codebase.
[POSIX message queues]: https://linux.die.net/man/7/mq_overview

View file

@ -1,3 +0,0 @@
{ depot, ... }:
depot.third_party.naersk.buildPackage ./.

View file

@ -1,235 +0,0 @@
extern crate clap;
extern crate libc;
extern crate nix;
extern crate posix_mq;
use clap::{App, AppSettings, Arg, ArgMatches, SubCommand};
use posix_mq::{Message, Name, Queue};
use std::fs::{read_dir, File};
use std::io::{self, Read, Write};
use std::process::exit;
fn run_ls() {
let mqueues = read_dir("/dev/mqueue").expect("Could not read message queues");
for queue in mqueues {
let path = queue.unwrap().path();
let status = {
let mut file = File::open(&path).expect("Could not open queue file");
let mut content = String::new();
file.read_to_string(&mut content)
.expect("Could not read queue file");
content
};
let queue_name = path
.components()
.last()
.unwrap()
.as_os_str()
.to_string_lossy();
println!("/{}: {}", queue_name, status)
}
}
fn run_inspect(queue_name: &str) {
let name = Name::new(queue_name).expect("Invalid queue name");
let queue = Queue::open(name).expect("Could not open queue");
println!("Queue {}:\n", queue_name);
println!("Max. message size: {} bytes", queue.max_size());
println!("Max. # of pending messages: {}", queue.max_pending());
}
fn run_create(cmd: &ArgMatches) {
if let Some(rlimit) = cmd.value_of("rlimit") {
set_rlimit(rlimit.parse().expect("Invalid rlimit value"));
}
let name = Name::new(cmd.value_of("queue").unwrap()).expect("Invalid queue name");
let max_pending: i64 = cmd.value_of("max-pending").unwrap().parse().unwrap();
let max_size: i64 = cmd.value_of("max-size").unwrap().parse().unwrap();
let queue = Queue::create(name, max_pending, max_size * 1024);
match queue {
Ok(_) => println!("Queue created successfully"),
Err(e) => {
writeln!(io::stderr(), "Could not create queue: {}", e).ok();
exit(1);
}
};
}
fn run_receive(queue_name: &str) {
let name = Name::new(queue_name).expect("Invalid queue name");
let queue = Queue::open(name).expect("Could not open queue");
let message = match queue.receive() {
Ok(msg) => msg,
Err(e) => {
writeln!(io::stderr(), "Failed to receive message: {}", e).ok();
exit(1);
}
};
// Attempt to write the message out as a string, but write out raw bytes if it turns out to not
// be UTF-8 encoded data.
match String::from_utf8(message.data.clone()) {
Ok(string) => println!("{}", string),
Err(_) => {
writeln!(io::stderr(), "Message not UTF-8 encoded!").ok();
io::stdout().write_all(message.data.as_ref()).ok();
}
};
}
fn run_send(queue_name: &str, content: &str) {
let name = Name::new(queue_name).expect("Invalid queue name");
let queue = Queue::open(name).expect("Could not open queue");
let message = Message {
data: content.as_bytes().to_vec(),
priority: 0,
};
match queue.send(&message) {
Ok(_) => (),
Err(e) => {
writeln!(io::stderr(), "Could not send message: {}", e).ok();
exit(1);
}
}
}
fn run_rlimit() {
let mut rlimit = libc::rlimit {
rlim_cur: 0,
rlim_max: 0,
};
let mut errno = 0;
unsafe {
let res = libc::getrlimit(libc::RLIMIT_MSGQUEUE, &mut rlimit);
if res != 0 {
errno = nix::errno::errno();
}
};
if errno != 0 {
writeln!(
io::stderr(),
"Could not get message queue rlimit: {}",
errno
)
.ok();
} else {
println!("Message queue rlimit:");
println!("Current limit: {}", rlimit.rlim_cur);
println!("Maximum limit: {}", rlimit.rlim_max);
}
}
fn set_rlimit(new_limit: u64) {
let rlimit = libc::rlimit {
rlim_cur: new_limit,
rlim_max: new_limit,
};
let mut errno: i32 = 0;
unsafe {
let res = libc::setrlimit(libc::RLIMIT_MSGQUEUE, &rlimit);
if res != 0 {
errno = nix::errno::errno();
}
}
match errno {
0 => println!("Set RLIMIT_MSGQUEUE hard limit to {}", new_limit),
_ => {
// Not mapping these error codes to messages for now, the user can
// look up the meaning in setrlimit(2).
panic!("Could not set hard limit: {}", errno);
}
};
}
fn main() {
let ls = SubCommand::with_name("ls").about("list message queues");
let queue_arg = Arg::with_name("queue").required(true).takes_value(true);
let rlimit_arg = Arg::with_name("rlimit")
.help("RLIMIT_MSGQUEUE to set for this command")
.long("rlimit")
.takes_value(true);
let inspect = SubCommand::with_name("inspect")
.about("inspect details about a queue")
.arg(&queue_arg);
let create = SubCommand::with_name("create")
.about("Create a new queue")
.arg(&queue_arg)
.arg(&rlimit_arg)
.arg(
Arg::with_name("max-size")
.help("maximum message size (in kB)")
.long("max-size")
.required(true)
.takes_value(true),
)
.arg(
Arg::with_name("max-pending")
.help("maximum # of pending messages")
.long("max-pending")
.required(true)
.takes_value(true),
);
let receive = SubCommand::with_name("receive")
.about("Receive a message from a queue")
.arg(&queue_arg);
let send = SubCommand::with_name("send")
.about("Send a message to a queue")
.arg(&queue_arg)
.arg(
Arg::with_name("message")
.help("the message to send")
.required(true),
);
let rlimit = SubCommand::with_name("rlimit")
.about("Get the message queue rlimit")
.setting(AppSettings::SubcommandRequiredElseHelp);
let matches = App::new("mq")
.setting(AppSettings::SubcommandRequiredElseHelp)
.version("1.0.0")
.about("Administrate and inspect POSIX message queues")
.subcommand(ls)
.subcommand(inspect)
.subcommand(create)
.subcommand(receive)
.subcommand(send)
.subcommand(rlimit)
.get_matches();
match matches.subcommand() {
("ls", _) => run_ls(),
("inspect", Some(cmd)) => run_inspect(cmd.value_of("queue").unwrap()),
("create", Some(cmd)) => run_create(cmd),
("receive", Some(cmd)) => run_receive(cmd.value_of("queue").unwrap()),
("send", Some(cmd)) => run_send(
cmd.value_of("queue").unwrap(),
cmd.value_of("message").unwrap(),
),
("rlimit", _) => run_rlimit(),
_ => unimplemented!(),
}
}

View file

@ -60,8 +60,5 @@ in rec {
'';
# Systems that should be built in CI
sandunySystem = (nixosFor depot.ops.machines.sanduny).system;
bugrySystem = (nixosFor depot.ops.machines.bugry).system;
nevskySystem = (nixosFor depot.ops.machines.nevsky).system;
meta.ci.targets = [ "sandunySystem" "bugrySystem" "nevskySystem" ];
meta.ci.targets = [ ];
}

View file

@ -7,16 +7,6 @@
env:
BUILDKITE_TOKEN_PATH: /run/agenix/buildkite-graphql-token
steps:
# Run pipeline for tvl-kit when new commits arrive on canon. Since
# it is not part of the depot build tree, this is a useful
# verification to ensure we don't break external things (too much).
- trigger: "tvl-kit"
async: true
label: ":fork:"
branches: "refs/heads/canon"
build:
message: "Verification triggered by ${BUILDKITE_COMMIT}"
# Run pipeline for tvix when new commits arrive on canon. Since
# it is not part of the depot build tree, this is a useful
# verification to ensure we don't break external things (too much).

View file

@ -1,3 +0,0 @@
/target/
**/*.rs.bk
.idea/

View file

@ -1,20 +0,0 @@
A SERMON ON ETHICS AND LOVE
===========================
One day Mal-2 asked the messenger spirit Saint Gulik to approach the Goddess and request Her presence for some desperate advice. Shortly afterwards the radio came on by itself, and an ethereal female Voice said **YES?**
"O! Eris! Blessed Mother of Man! Queen of Chaos! Daughter of Discord! Concubine of Confusion! O! Exquisite Lady, I beseech You to lift a heavy burden from my heart!"
**WHAT BOTHERS YOU, MAL? YOU DON'T SOUND WELL.**
"I am filled with fear and tormented with terrible visions of pain. Everywhere people are hurting one another, the planet is rampant with injustices, whole societies plunder groups of their own people, mothers imprison sons, children perish while brothers war. O, woe."
**WHAT IS THE MATTER WITH THAT, IF IT IS WHAT YOU WANT TO DO?**
"But nobody Wants it! Everybody hates it."
**OH. WELL, THEN *STOP*.**
At which moment She turned herself into an aspirin commercial and left The Polyfather stranded alone with his species.
SINISTER DEXTER HAS A BROKEN SPIROMETER.

View file

@ -1,63 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "cc"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "libc"
version = "0.2.117"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e74d72e0f9b65b5b4ca49a346af3976df0f9c61d550727f349ecd559f251a26c"
[[package]]
name = "memoffset"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
dependencies = [
"autocfg",
]
[[package]]
name = "nix"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f866317acbd3a240710c63f065ffb1e4fd466259045ccb504130b7f668f35c6"
dependencies = [
"bitflags",
"cc",
"cfg-if",
"libc",
"memoffset",
]
[[package]]
name = "posix_mq"
version = "3771.0.0"
dependencies = [
"libc",
"nix",
]

View file

@ -1,12 +0,0 @@
[package]
name = "posix_mq"
version = "3771.0.0"
authors = ["Vincent Ambo <tazjin@tvl.su>"]
description = "(Higher-level) Rust bindings to POSIX message queues"
license = "MIT"
homepage = "https://code.tvl.fyi/tree/ops/posix_mq.rs"
repository = "https://code.tvl.fyi/depot.git:/ops/posix_mq.rs.git"
[dependencies]
nix = "0.23"
libc = "0.2"

View file

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2017-2020 Vincent Ambo
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,44 +0,0 @@
posix_mq
========
[![crates.io](https://img.shields.io/crates/v/posix_mq.svg)](https://crates.io/crates/posix_mq)
This is a simple, relatively high-level library for the POSIX [message queue API][]. It wraps the lower-level API in a
simpler interface with more robust error handling.
Check out this project's [sister library][] in Kotlin.
Usage example:
```rust
// Values that need to undergo validation are wrapped in safe types:
let name = Name::new("/test-queue").unwrap();
// Queue creation with system defaults is simple:
let queue = Queue::open_or_create(name).expect("Opening queue failed");
// Sending a message:
let message = Message {
data: "test-message".as_bytes().to_vec(),
priority: 0,
};
queue.send(&message).expect("message sending failed");
// ... and receiving it!
let result = queue.receive().expect("message receiving failed");
```
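Queues can also be created with explicit limits and cleaned up again. A
minimal sketch (the queue name and limits are arbitrary example values):
```rust
use posix_mq::{Name, Queue};

// Create a queue holding at most 10 pending messages of up to 1024 bytes
// each; this fails if the queue already exists.
let name = Name::new("/example-queue").unwrap();
let queue = Queue::create(name, 10, 1024).expect("Creating queue failed");

// The limits of an open queue can be inspected:
assert_eq!(queue.max_pending(), 10);

// Finally, the queue can be unlinked from the system again.
queue.delete().expect("Deleting queue failed");
```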
## Development
Development happens in the [TVL
monorepo](https://code.tvl.fyi/tree/ops/posix_mq.rs).
Starting from version `3771.0.0`, the version numbers correspond to
_revisions_ of the TVL repository, available as git refs (e.g.
`refs/r/3771`).
See the TVL documentation for more information about how to contribute
to the codebase.
[message queue API]: https://linux.die.net/man/7/mq_overview
[sister library]: https://github.com/aprilabank/posix_mq.kt

View file

@ -1,3 +0,0 @@
{ depot, ... }:
depot.third_party.naersk.buildPackage ./.

View file

@ -1,122 +0,0 @@
use nix;
use std::{error, fmt, io, num};
/// This module implements a simple error type to match the errors that can be thrown from the C
/// functions as well as some extra errors resulting from internal validations.
///
/// As this crate exposes an opinionated API to the POSIX queues, certain errors have been
/// ignored:
///
/// * ETIMEDOUT: The low-level timed functions are not exported and this error cannot occur.
/// * EAGAIN: Non-blocking queue calls are not supported.
/// * EINVAL: Same reason as ETIMEDOUT
/// * EMSGSIZE: The message size is immutable after queue creation and this crate checks it.
/// * ENAMETOOLONG: This crate performs name validation
///
/// If an unexpected error is encountered, it will be wrapped appropriately and should be reported
/// as a bug on https://b.tvl.fyi
#[derive(Debug)]
pub enum Error {
// These errors are raised inside of the library
InvalidQueueName(&'static str),
ValueReadingError(io::Error),
MessageSizeExceeded(),
MaximumMessageSizeExceeded(),
MaximumMessageCountExceeded(),
// These errors match what is described in the man pages (from mq_overview(7) onwards).
PermissionDenied(),
InvalidQueueDescriptor(),
QueueCallInterrupted(),
QueueAlreadyExists(),
QueueNotFound(),
InsufficientMemory(),
InsufficientSpace(),
// These two are (hopefully) unlikely in modern systems
ProcessFileDescriptorLimitReached(),
SystemFileDescriptorLimitReached(),
    // If an unhandled / unknown / unexpected error occurs, this error will be used.
    // In those cases bug reports would be welcome!
    UnknownForeignError(nix::errno::Errno),
    // Some other unexpected / unknown error occurred. This is probably an error from
// the nix crate. Bug reports also welcome for this!
UnknownInternalError(),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use Error::*;
f.write_str(match *self {
// This error contains more sensible description strings already
InvalidQueueName(e) => e,
ValueReadingError(_) => "error reading system configuration for message queues",
MessageSizeExceeded() => "message is larger than maximum size for specified queue",
MaximumMessageSizeExceeded() => "specified queue message size exceeds system maximum",
MaximumMessageCountExceeded() => "specified queue message count exceeds system maximum",
PermissionDenied() => "permission to the specified queue was denied",
InvalidQueueDescriptor() => "the internal queue descriptor was invalid",
QueueCallInterrupted() => "queue method interrupted by signal",
QueueAlreadyExists() => "the specified queue already exists",
QueueNotFound() => "the specified queue could not be found",
InsufficientMemory() => "insufficient memory to call queue method",
InsufficientSpace() => "insufficient space to call queue method",
ProcessFileDescriptorLimitReached() => {
"maximum number of process file descriptors reached"
}
SystemFileDescriptorLimitReached() => {
"maximum number of system file descriptors reached"
}
            UnknownForeignError(_) => "unknown foreign error occurred: please report a bug!",
            UnknownInternalError() => "unknown internal error occurred: please report a bug!",
})
}
}
impl error::Error for Error {
fn source(&self) -> Option<&(dyn error::Error + 'static)> {
match self {
Error::ValueReadingError(e) => Some(e),
Error::UnknownForeignError(e) => Some(e),
_ => None,
}
}
}
/// This `From` implementation is used to translate errors from the lower-level
/// C calls into sensible Rust errors.
impl From<nix::errno::Errno> for Error {
    fn from(err: nix::errno::Errno) -> Self {
use nix::errno::Errno::*;
match err {
EACCES => Error::PermissionDenied(),
EBADF => Error::InvalidQueueDescriptor(),
EINTR => Error::QueueCallInterrupted(),
EEXIST => Error::QueueAlreadyExists(),
EMFILE => Error::ProcessFileDescriptorLimitReached(),
ENFILE => Error::SystemFileDescriptorLimitReached(),
ENOENT => Error::QueueNotFound(),
ENOMEM => Error::InsufficientMemory(),
ENOSPC => Error::InsufficientSpace(),
_ => Error::UnknownForeignError(err),
}
}
}
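// Illustration: thanks to the conversion above, queue code can propagate
// nix errors with the `?` operator and have them mapped into this crate's
// Error type, e.g.:
//
//     let fd = mqueue::mq_open(&name.0, oflags, mode, None)?; // Errno -> Error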
// This implementation is used when reading system queue settings.
impl From<io::Error> for Error {
fn from(e: io::Error) -> Self {
Error::ValueReadingError(e)
}
}
// This implementation is used when parsing system queue settings. The unknown error is returned
// here because the system is probably seriously broken if those files don't contain numbers.
impl From<num::ParseIntError> for Error {
fn from(_: num::ParseIntError) -> Self {
Error::UnknownInternalError()
}
}

View file

@ -1,247 +0,0 @@
extern crate libc;
extern crate nix;
use error::Error;
use libc::mqd_t;
use nix::mqueue;
use nix::sys::stat;
use std::ffi::CString;
use std::fs::File;
use std::io::Read;
use std::ops::Drop;
use std::string::ToString;
pub mod error;
#[cfg(test)]
mod tests;
/// Wrapper type for queue names that performs basic validation of queue names before calling
/// out to C code.
#[derive(Debug, Clone, PartialEq)]
pub struct Name(CString);
impl Name {
pub fn new<S: ToString>(s: S) -> Result<Self, Error> {
let string = s.to_string();
if !string.starts_with('/') {
return Err(Error::InvalidQueueName("Queue name must start with '/'"));
}
// The C library has a special error return for this case, so I assume people must actually
// have tried just using '/' as a queue name.
if string.len() == 1 {
return Err(Error::InvalidQueueName(
"Queue name must be a slash followed by one or more characters",
));
}
if string.len() > 255 {
return Err(Error::InvalidQueueName(
"Queue name must not exceed 255 characters",
));
}
if string.matches('/').count() > 1 {
return Err(Error::InvalidQueueName(
"Queue name can not contain more than one slash",
));
}
        // CString::new fails only on interior NUL bytes, which are also
        // invalid in queue names.
        CString::new(string)
            .map(Name)
            .map_err(|_| Error::InvalidQueueName("Queue name must not contain NUL bytes"))
}
}
#[derive(Debug, Clone, PartialEq)]
pub struct Message {
pub data: Vec<u8>,
pub priority: u32,
}
/// Represents an open queue descriptor to a POSIX message queue. This carries information
/// about the queue's limitations (i.e. maximum message size and maximum message count).
#[derive(Debug)]
pub struct Queue {
name: Name,
/// Internal file/queue descriptor.
queue_descriptor: mqd_t,
/// Maximum number of pending messages in this queue.
max_pending: i64,
/// Maximum size of this queue.
max_size: usize,
}
impl Queue {
/// Creates a new queue and fails if it already exists.
/// By default the queue will be read/writable by the current user with no access for other
/// users.
/// Linux users can change this setting themselves by modifying the queue file in /dev/mqueue.
pub fn create(name: Name, max_pending: i64, max_size: i64) -> Result<Queue, Error> {
if max_pending > read_i64_from_file(MSG_MAX)? {
return Err(Error::MaximumMessageCountExceeded());
}
if max_size > read_i64_from_file(MSGSIZE_MAX)? {
return Err(Error::MaximumMessageSizeExceeded());
}
let oflags = {
let mut flags = mqueue::MQ_OFlag::empty();
// Put queue in r/w mode
flags.toggle(mqueue::MQ_OFlag::O_RDWR);
// Enable queue creation
flags.toggle(mqueue::MQ_OFlag::O_CREAT);
// Fail if queue exists already
flags.toggle(mqueue::MQ_OFlag::O_EXCL);
flags
};
let attr = mqueue::MqAttr::new(0, max_pending, max_size, 0);
let queue_descriptor = mqueue::mq_open(&name.0, oflags, default_mode(), Some(&attr))?;
Ok(Queue {
name,
queue_descriptor,
max_pending,
max_size: max_size as usize,
})
}
/// Opens an existing queue.
pub fn open(name: Name) -> Result<Queue, Error> {
// No extra flags need to be constructed as the default is to open and fail if the
// queue does not exist yet - which is what we want here.
let oflags = mqueue::MQ_OFlag::O_RDWR;
let queue_descriptor = mqueue::mq_open(&name.0, oflags, default_mode(), None)?;
let attr = mq_getattr(queue_descriptor)?;
Ok(Queue {
name,
queue_descriptor,
max_pending: attr.mq_maxmsg,
max_size: attr.mq_msgsize as usize,
})
}
/// Opens an existing queue or creates a new queue with the OS default settings.
pub fn open_or_create(name: Name) -> Result<Queue, Error> {
let oflags = {
let mut flags = mqueue::MQ_OFlag::empty();
// Put queue in r/w mode
flags.toggle(mqueue::MQ_OFlag::O_RDWR);
// Enable queue creation
flags.toggle(mqueue::MQ_OFlag::O_CREAT);
flags
};
let default_pending = read_i64_from_file(MSG_DEFAULT)?;
let default_size = read_i64_from_file(MSGSIZE_DEFAULT)?;
let attr = mqueue::MqAttr::new(0, default_pending, default_size, 0);
let queue_descriptor = mqueue::mq_open(&name.0, oflags, default_mode(), Some(&attr))?;
let actual_attr = mq_getattr(queue_descriptor)?;
Ok(Queue {
name,
queue_descriptor,
max_pending: actual_attr.mq_maxmsg,
max_size: actual_attr.mq_msgsize as usize,
})
}
/// Delete a message queue from the system. This method will make the queue unavailable for
/// other processes after their current queue descriptors have been closed.
pub fn delete(self) -> Result<(), Error> {
mqueue::mq_unlink(&self.name.0)?;
drop(self);
Ok(())
}
/// Send a message to the message queue.
/// If the queue is full this call will block until a message has been consumed.
pub fn send(&self, msg: &Message) -> Result<(), Error> {
        if msg.data.len() > self.max_size {
return Err(Error::MessageSizeExceeded());
}
mqueue::mq_send(self.queue_descriptor, msg.data.as_ref(), msg.priority)
.map_err(|e| e.into())
}
/// Receive a message from the message queue.
/// If the queue is empty this call will block until a message arrives.
pub fn receive(&self) -> Result<Message, Error> {
        let mut data: Vec<u8> = vec![0; self.max_size];
let mut priority: u32 = 0;
let msg_size = mqueue::mq_receive(self.queue_descriptor, data.as_mut(), &mut priority)?;
data.truncate(msg_size);
Ok(Message { data, priority })
}
pub fn max_pending(&self) -> i64 {
self.max_pending
}
pub fn max_size(&self) -> usize {
self.max_size
}
}
impl Drop for Queue {
fn drop(&mut self) {
// Attempt to close the queue descriptor and discard any possible errors.
// The only error thrown in the C-code is EINVAL, which would mean that the
// descriptor has already been closed.
mqueue::mq_close(self.queue_descriptor).ok();
}
}
// Creates the default queue mode (0600).
fn default_mode() -> stat::Mode {
let mut mode = stat::Mode::empty();
mode.toggle(stat::Mode::S_IRUSR);
mode.toggle(stat::Mode::S_IWUSR);
mode
}
/// This file defines the default number of maximum pending messages in a queue.
const MSG_DEFAULT: &str = "/proc/sys/fs/mqueue/msg_default";
/// This file defines the system maximum number of pending messages in a queue.
const MSG_MAX: &str = "/proc/sys/fs/mqueue/msg_max";
/// This file defines the default maximum size of messages in a queue.
const MSGSIZE_DEFAULT: &str = "/proc/sys/fs/mqueue/msgsize_default";
/// This file defines the system maximum size for messages in a queue.
const MSGSIZE_MAX: &str = "/proc/sys/fs/mqueue/msgsize_max";
/// This method is used in combination with the above constants to find system limits.
fn read_i64_from_file(name: &str) -> Result<i64, Error> {
    let mut file = File::open(name)?;
let mut content = String::new();
file.read_to_string(&mut content)?;
Ok(content.trim().parse()?)
}
/// The mq_getattr implementation in the nix crate hides the maximum message size and count, which
/// is very impractical.
/// To work around this, this function calls the C function directly.
fn mq_getattr(mqd: mqd_t) -> Result<libc::mq_attr, Error> {
use std::mem;
let mut attr = mem::MaybeUninit::<libc::mq_attr>::uninit();
let res = unsafe { libc::mq_getattr(mqd, attr.as_mut_ptr()) };
nix::errno::Errno::result(res)
.map(|_| unsafe { attr.assume_init() })
.map_err(|e| e.into())
}

View file

@ -1,21 +0,0 @@
use super::*;
#[test]
fn test_open_delete() {
// Simple test with default queue settings
let name = Name::new("/test-queue").unwrap();
let queue = Queue::open_or_create(name).expect("Opening queue failed");
let message = Message {
data: "test-message".as_bytes().to_vec(),
priority: 0,
};
queue.send(&message).expect("message sending failed");
let result = queue.receive().expect("message receiving failed");
assert_eq!(message, result);
queue.delete().expect("deleting queue failed");
}

Binary file not shown.

Binary file not shown.

View file

@ -1,17 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 wI5oAA TDjaldqySaCEFAPuoUBVMR342403nhkawwtXbsJJenQ
eoeL2v5mCLksay/24miqYkWLJLLhrUIny4p1/e3/iTY
-> ssh-ed25519 dcsaLw /KSRH3XUGU7P+Ckpk86PFl8oRfPP/dlLb6zLUW04iH8
e2nXDvjkd5lzmXflhd820XmGvo/agSxDtfejxM45nhY
-> ssh-ed25519 zcCuhA pIxJxTWsOyH6Zqunv427jdy1G7BV7Dpjpp2l+HEe/3s
rxyoDZTQmsGwNB0nCTXNHhc7VCsb11/ynManfsbf5Ss
-> ssh-ed25519 1SxhRA K8DkqAk5gPfsjTQiTjLGRNR+63uG9ogacsrH69/afGs
LAt97AuWaPEYG7IlFiIOll9s02cQ+qhrpr0GVXX+e/o
-> ssh-ed25519 ch/9tw 6eG9psphjjfW6TRTYxqozX9WLhQgC9u2ngR35rpXiH4
9dFgdqSDKRXAotKKR19l9gqWlTNxuv9r/IcaMZeO9Oc
-> ssh-ed25519 CpJBgQ /t2M57kxjKDq/UMoh9UnmQvlETqiwqClt0Lg6quqKRk
oDblqLXxyJ7gTtZzhbStHep62oZK0iXikJ6fi6EW0gM
-> ssh-ed25519 aXKGcg DVn6XZGaBHAeL4pGMaolO8cSOIKRIhmuDycV65pfxhs
M6XVDk1Z9BdCoCvK30NDJsKKnURk/XUcRP4t5pZ7JFk
--- H3D9jfLkrsDOgWfNhUJHZcVGa9dPqr4EuuBU40iJbJs
(binary ciphertext not shown)

View file

@ -1,17 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 xR+E/Q iVPpAC04Up8hrFvnh2RsAXgfrFv2QNk0kwydlGrVslg
2McWSNxxnPypGs9HKMNCjpHe+g2kxA86G6Drt2HWOeU
-> ssh-ed25519 dcsaLw Xuw5V21UrJcIwWEoRyI+h60RaoJUdBAJJPqN12TY93U
X4Wxo2qdBgtzQ8ct2UySnvXr9x0EzqBzSoHTBq9jQ9A
-> ssh-ed25519 zcCuhA GPMgbaoLctcsQxVtoIPvXUvpfTZEUA2QnvgTXDUiJRI
wnzHlJC8F45s/VzBjy9xfvDujtH+9uBmoX5OrzXapBA
-> ssh-ed25519 1SxhRA 3GuXyDOYy6tur+nThpBLKMCvk/2c44XeR+NrLKj3G1Q
LoVYepF0BiZ4E29ZMzXgOAyloK+UhX+FqiM9pPgngZ4
-> ssh-ed25519 ch/9tw ihNjsBfasDmfW3Kcg8vQ1tpym9HKIL/qEzwpO7ye0Cg
brnJzK7V4OV5jLfCFYqNue1oLSpJ88rRNN19KDcMdeM
-> ssh-ed25519 CpJBgQ SvLMx+bG59LPZNI4EctDnFutMXoh4Otk2yFd17dXVQ4
+LO4a5ARK1mpYaenS1P13ajhpNtxndFvin2Po1h0cwM
-> ssh-ed25519 aXKGcg VdaJIfDpeet7fzesJ3FGwZhQiAKNzEJMVUXNtbwm8UU
rMqLrCdx55rzkuqJ3/9SrY6BKdSgHCLgAx8E0rrYVfY
--- jNvJTcOvsKU6YkgEDBiOI27GBEzNlEg4JymqeKYa/fA
(binary ciphertext not shown)

Binary file not shown.

Binary file not shown.

View file

@ -1,17 +0,0 @@
age-encryption.org/v1
-> ssh-ed25519 xR+E/Q pAv2Rm0oj9HJu8LAIRbT+bvNOufRcDTLlLfanvM84y0
eTOICFyxEt3b6Kh9NbKMmt9i3+GFsgk4K+jBuR8LjUs
-> ssh-ed25519 dcsaLw cYYa3+xqONMvU4y4LsWZ5VZBrQoMdoRYbII70H0x7hw
w7XolSOTiX5WdrnURn/PtAGriMz4n0rBUAulOU4pP3E
-> ssh-ed25519 zcCuhA xwUsUilN81ZHuqE6gCzmUddjD+gLd6bkDclSb73K+08
H2uNajL/IDn+hXJyehDG22Zu5k7RZmjWyWtJnzCxAEw
-> ssh-ed25519 1SxhRA rkPyrom+2W0BWU6S9Vy+h5ggrdjEQdVcFd6onYENNmE
AuonnG8d2RwpLUR+FAgf4TrWCmB3dxnO3OFLM4+EVH8
-> ssh-ed25519 ch/9tw DJs3TcbTmAx0HqTWoyvugqfqxdZ13kqerOlaJKADljA
bxyxi0/J7MBm+Hlq6CUM4s9+j4M3E9f55lUgMKx+ABc
-> ssh-ed25519 CpJBgQ ralv1E55uSriSrM8X+rOr/h70dbEOVOfux7U20cky0k
lflA+jocbQn7dricKpH6iBQ3P2K+g8ZDvyuRkyCUL7c
-> ssh-ed25519 aXKGcg Fb5vBSvT0RaO5yXhd/b/75QdC6IPSDpntUHhtdhhFGw
UASa/ristknh342EyYO24qAT+rAaWI5ks00mSTjPOQY
--- e8mOir6/NeozcVmD17lJkrFVIffhOAXR3kKq5OgqbRw
(binary ciphertext not shown)

View file

@ -1,5 +0,0 @@
//ops/terraform
===============
This folder contains Terraform modules and other related
Terraform tooling by TVL.

View file

@ -1,56 +0,0 @@
<!--
SPDX-FileCopyrightText: 2023 The TVL Authors
SPDX-License-Identifier: MIT
-->
deploy-nixos
============
This is a Terraform module to deploy a NixOS system closure to a
remote machine.
The system closure must be accessible by Nix-importing the repository
root and building a specific attribute
(e.g. `nix-build -A ops.machines.machine-name`).
The target machine must be accessible normally over SSH, and an SSH
key must be used for access.
Notably, this module separates the evaluation of the system closure from
building and deploying it, and uses the closure's derivation hash to
determine whether a deploy is necessary.
## Usage example
```terraform
module "deploy_somehost" {
# Clone just this directory through josh. Add a `ref=` parameter to pin to a specific commit.
source = "git::https://code.tvl.fyi/depot.git:/ops/terraform/deploy-nixos.git"
# The attribute.path pointing to the expression to instantiate.
attrpath = "ops.nixos.somehost"
# The path to the Nix file to invoke. Optional.
# If omitted, will shell out to git to determine the repo root, and Nix will
# use `default.nix` in there.
entrypoint = "${path.module}/../../somewhere.nix"
target_host = "somehost.tvl.su"
target_user = "someone"
target_user_ssh_key = tls_private_key.somehost.private_key_pem
}
```
## Future work
Several things can be improved about this module, for example:
* The remote system closure could be discovered, making it possible to detect
  and restore remote system state after manual deploys on the target
  (i.e. "stomping" of changes).
More ideas and contributions are, of course, welcome.
## Acknowledgements
Development of this module was sponsored by [Resoptima](https://resoptima.com/).

View file

@ -1,113 +0,0 @@
# SPDX-FileCopyrightText: 2023 The TVL Authors
#
# SPDX-License-Identifier: MIT
# This module deploys a NixOS host by building a system closure
# located at the specified attribute in the current repository.
#
# The closure's derivation path is persisted in the Terraform state to
# determine after Nix evaluation whether the system closure has
# changed and needs to be built/deployed.
#
# The system configuration is then built (or substituted) on the
# machine that runs `terraform apply`, then copied and activated on
# the target machine using `nix-copy-closure`.
variable "attrpath" {
description = "attribute set path pointing to the NixOS system closure"
type = string
}
variable "target_host" {
description = "address (IP or hostname) at which the target is reachable"
type = string
}
variable "entrypoint" {
description = <<EOT
Path to a .nix file (or a directory containing a `default.nix` file)
that provides the attrset specified in `attrpath`.
If unset, asks git for the root of the repository.
EOT
type = string
default = ""
}
variable "target_user" {
description = "username on the target machine"
type = string
}
variable "target_user_ssh_key" {
description = "SSH key to use for connecting to the target"
type = string
default = ""
sensitive = true
}
variable "triggers" {
type = map(string)
description = "Triggers for deploy"
default = {}
}
# Fetch the derivation hash for the NixOS system.
data "external" "nixos_system" {
program = ["${path.module}/nix-eval.sh"]
query = {
attrpath = var.attrpath
entrypoint = var.entrypoint
}
}
# Deploy the NixOS configuration if anything changed.
resource "null_resource" "nixos_deploy" {
connection {
type = "ssh"
host = var.target_host
user = var.target_user
private_key = var.target_user_ssh_key
}
# 1. Wait for SSH to become available.
provisioner "remote-exec" {
inline = ["true"]
}
# 2. Build NixOS system.
provisioner "local-exec" {
command = "nix-build ${data.external.nixos_system.result.drv} --no-out-link"
}
# 3. Copy closure to the target.
provisioner "local-exec" {
command = "${path.module}/nixos-copy.sh"
environment = {
SYSTEM_DRV = data.external.nixos_system.result.drv
TARGET_HOST = var.target_host
DEPLOY_KEY = var.target_user_ssh_key
TARGET_USER = var.target_user
}
}
# 4. Activate closure on the target.
provisioner "remote-exec" {
inline = [
"set -eu",
"SYSTEM=$(nix-build ${data.external.nixos_system.result.drv} --no-out-link)",
"sudo nix-env --profile /nix/var/nix/profiles/system --set $SYSTEM",
"sudo $SYSTEM/bin/switch-to-configuration switch",
]
}
triggers = merge({
nixos_drv = data.external.nixos_system.result.drv
target_host = var.target_host
}, var.triggers)
}
output "nixos_drv" {
value = data.external.nixos_system.result
}

View file

@ -1,47 +0,0 @@
#!/usr/bin/env bash
# SPDX-FileCopyrightText: 2023 The TVL Authors
#
# SPDX-License-Identifier: MIT
set -ueo pipefail
# Evaluates a Nix expression.
#
# Receives input parameters as JSON from stdin.
# It expects a dict with the following keys:
#
# - `attrpath`: the attribute.path pointing to the expression to instantiate.
# Required.
# - `entrypoint`: the path to the Nix file to invoke.
# Optional. If omitted, will shell out to git to determine the repo root,
# and Nix will use `default.nix` in there.
# - `argstr_json`: A string JSON-encoding a map of string keys and values
#   which should be passed to Nix as `--argstr $key $value` command line
#   args. Optional.
# - `build`: A boolean (or the string "true" or "false") stating whether the
#   expression should also be built/substituted on the machine executing this script.
#
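# An example input document (all values illustrative):
#
#   {"attrpath": "ops.nixos.somehost", "entrypoint": "", "build": "true"}
#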
# jq's @sh format takes care of escaping.
eval "$(jq -r '@sh "attrpath=\(.attrpath) && entrypoint=\(.entrypoint) && argstr=\((.argstr_json // "{}"|fromjson) | to_entries | map ("--argstr", .key, .value) | join(" ")) build=\(.build)"')"
# Evaluate the expression.
[[ -z "$entrypoint" ]] && entrypoint=$(git rev-parse --show-toplevel)
# shellcheck disable=SC2086,SC2154
drv=$(nix-instantiate -A "${attrpath}" "${entrypoint}" ${argstr})
# If `build` is set to true, invoke nix-build on the .drv.
# We need to swallow all stdout, to not garble the JSON printed later.
# shellcheck disable=SC2154
if [ "${build}" == "true" ]; then
nix-build --no-out-link "${drv}" > /dev/null
fi
# Determine the output path.
outPath=$(nix show-derivation "${drv}" | jq -r ".\"${drv}\".outputs.out.path")
# Return a JSON back to stdout.
# It contains the following keys:
#
# - `drv`: the store path of the Derivation that has been instantiated.
# - `outPath`: the output store path.
jq -n --arg drv "$drv" --arg outPath "$outPath" '{"drv":$drv, "outPath":$outPath}'

View file

@ -1,32 +0,0 @@
#!/usr/bin/env bash
# SPDX-FileCopyrightText: 2023 The TVL Authors
#
# SPDX-License-Identifier: MIT
#
# Copies a NixOS system to a target host, using the provided key,
# or whatever ambient key is configured if the key is not set.
set -ueo pipefail
export NIX_SSHOPTS="\
-o StrictHostKeyChecking=no\
-o UserKnownHostsFile=/dev/null\
-o GlobalKnownHostsFile=/dev/null"
# If DEPLOY_KEY was passed, write it to $scratch/id_deploy
if [ -n "${DEPLOY_KEY-}" ]; then
scratch="$(mktemp -d)"
trap 'rm -rf -- "${scratch}"' EXIT
  echo -n "$DEPLOY_KEY" > "$scratch/id_deploy"
  chmod 0600 "$scratch/id_deploy"
export NIX_SSHOPTS="$NIX_SSHOPTS -o IdentityFile=$scratch/id_deploy"
fi
nix-copy-closure \
  --to "${TARGET_USER}@${TARGET_HOST}" \
  "${SYSTEM_DRV}" \
  --gzip \
  --include-outputs \
  --use-substitutes

View file

@ -1,9 +0,0 @@
# Base image for Yandex Cloud VMs.
{ depot, ... }:
(depot.ops.nixos.nixosFor {
imports = [
(depot.path.origSrc + ("/ops/modules/yandex-cloud.nix"))
(depot.path.origSrc + ("/ops/modules/tvl-users.nix"))
];
}).config.system.build.yandexCloudImage

View file

@ -1,5 +0,0 @@
target/
result/
# Ignore everything under src (except for lib.rs)
src/*
!src/lib.rs

File diff suppressed because it is too large

View file

@ -1,24 +0,0 @@
[package]
name = "yandex-cloud"
description = "Generated gRPC clients for the Yandex Cloud API"
license = "MIT"
version = "2023.9.4"
edition = "2021"
homepage = "https://code.tvl.fyi/tree/ops/yandex-cloud-rs"
repository = "https://code.tvl.fyi/depot.git:/ops/yandex-cloud-rs.git"
include = [ "/src", "README.md" ]
[dependencies]
prost = "0.11"
prost-types = "0.11"
[dependencies.tonic]
version = "0.9"
features = [ "tls", "tls-roots", "gzip" ]
[build-dependencies]
tonic-build = "0.9"
walkdir = "2.3.3"
[dev-dependencies]
tokio = "1.28" # check when updating tonic

View file

@ -1,49 +0,0 @@
yandex-cloud-rs
===============
Client library for Yandex Cloud gRPC APIs, as published in their
[GitHub repository][repo].
Please see the [online documentation][docs] for user-facing
information; this README is intended for library developers.
The source code of the library lives [in the TVL repository][code].
-------------
In order to build this library, the gRPC API definitions need to be
fetched from GitHub. By default this is done by Nix (see
`default.nix`), which then injects the location of the API definitions
through the `YANDEX_CLOUD_PROTOS` environment variable.
The actual code generation happens through the calls in `build.rs`.
Releases of this library are done from *dirty* trees, meaning that the
version on crates.io should already contain all the generated code. In
order to do this, after bumping the version in `Cargo.toml` and the
API commit in `default.nix`, the following release procedure should be
used:
```
# Get rid of all generated source files
find src | grep '.rs$' | grep -v '^src/lib.rs$' | xargs rm
# Get rid of all old artefacts
cargo clean
# Verify that a clean build works as intended
cargo build
# Verify that all documentation builds, and verify that it looks fine:
#
# - Is the version correct (current date)?
# - Are all the services included (i.e. not an accidental empty build)?
cargo doc --open
# If everything looks fine, release:
cargo publish --allow-dirty
```
[repo]: https://github.com/yandex-cloud/cloudapi
[docs]: https://docs.rs/yandex-cloud/latest/yandex_cloud/
[code]: https://code.tvl.fyi/tree/ops/yandex-cloud-rs

View file

@ -1,43 +0,0 @@
use std::path::PathBuf;
use walkdir::{DirEntry, WalkDir};
fn proto_files(proto_dir: &str) -> Vec<PathBuf> {
let mut out = vec![];
fn is_proto(entry: &DirEntry) -> bool {
entry.file_type().is_file()
&& entry
.path()
.extension()
.map(|e| e.to_string_lossy() == "proto")
.unwrap_or(false)
}
    for entry in WalkDir::new(format!("{}/yandex", proto_dir)) {
let entry = entry.expect("failed to list proto files");
if is_proto(&entry) {
out.push(entry.into_path())
}
}
out
}
fn main() {
if let Some(proto_dir) = option_env!("YANDEX_CLOUD_PROTOS") {
tonic_build::configure()
.build_client(true)
.build_server(false)
.out_dir("src/")
.include_file("includes.rs")
.compile(
&proto_files(proto_dir),
&[
format!("{}", proto_dir),
format!("{}/third_party/googleapis", proto_dir),
],
)
.expect("failed to generate gRPC clients for Yandex Cloud")
}
}

View file

@ -1,22 +0,0 @@
{ depot, lib, pkgs, ... }:
let
protoSrc = pkgs.fetchFromGitHub {
owner = "yandex-cloud";
repo = "cloudapi";
rev = "b4383be5ebe360bd946e49c8eaf647a73e9c44c0";
sha256 = "0z4jyw2cylvyrq5ja8pcaqnlf6lf6ximj85hgjag6ckawayk1rzx";
};
in
pkgs.rustPlatform.buildRustPackage rec {
name = "yandex-cloud-rs";
src = depot.third_party.gitignoreSource ./.;
cargoLock.lockFile = ./Cargo.lock;
YANDEX_CLOUD_PROTOS = "${protoSrc}";
nativeBuildInputs = [ pkgs.protobuf ];
# The generated doc comments contain lots of things that rustc
# *thinks* are doctests, but are actually just garbage leading to
# compiler errors.
doCheck = false;
}

View file

@ -1,37 +0,0 @@
//! This example uses the Yandex Cloud Logging API to write a log entry.
use prost_types::Timestamp;
use tonic::transport::channel::Endpoint;
use yandex_cloud::yandex::cloud::logging::v1::destination::Destination;
use yandex_cloud::yandex::cloud::logging::v1::log_ingestion_service_client::LogIngestionServiceClient;
use yandex_cloud::yandex::cloud::logging::v1::Destination as OuterDestination;
use yandex_cloud::yandex::cloud::logging::v1::IncomingLogEntry;
use yandex_cloud::yandex::cloud::logging::v1::WriteRequest;
use yandex_cloud::AuthInterceptor;
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let channel = Endpoint::from_static("https://ingester.logging.yandexcloud.net")
.connect()
.await?;
let mut client = LogIngestionServiceClient::with_interceptor(
channel,
AuthInterceptor::new("YOUR_TOKEN_HERE"),
);
let request = WriteRequest {
destination: Some(OuterDestination {
destination: Some(Destination::LogGroupId("YOUR_LOG_GROUP_ID".into())),
}),
entries: vec![IncomingLogEntry {
timestamp: Some(Timestamp::date_time(2023, 04, 24, 23, 44, 30).unwrap()),
message: "test log message".into(),
..Default::default()
}],
..Default::default()
};
client.write(request).await.unwrap();
Ok(())
}

View file

@ -1,108 +0,0 @@
//! This module provides low-level generated gRPC clients for the
//! Yandex Cloud APIs.
//!
//! The clients are generated using the [tonic][] and [prost][]
//! crates and have default configuration.
//!
//! Documentation present in the protos is retained into the generated
//! Rust types, but for detailed API information you should visit the
//! official Yandex Cloud Documentation pages:
//!
//! * [in English](https://cloud.yandex.com/en-ru/docs/overview/api)
//! * [in Russian](https://cloud.yandex.ru/docs/overview/api)
//!
//! The proto sources are available on the [Yandex Cloud GitHub][protos].
//!
//! [tonic]: https://docs.rs/tonic/latest/tonic/
//! [prost]: https://docs.rs/prost/latest/prost/
//! [protos]: https://github.com/yandex-cloud/cloudapi
//!
//! The majority of user-facing structures can be found in the
//! [`yandex::cloud`] module.
//!
//! ## Usage
//!
//! Typically to use these APIs, you need to provide an authentication
//! credential and an endpoint to connect to. The full list of
//! Yandex's endpoints is [available online][endpoints] and you should
//! look up the service you plan to use and pick the correct endpoint
//! from the list.
//!
//! Authentication is done via an HTTP header using an IAM token,
//! which can be done in Tonic using [interceptors][]. The
//! [`AuthInterceptor`] provided by this crate can be used for that
//! purpose.
//!
//! Full usage examples are [available here][examples].
//!
//! [endpoints]: https://cloud.yandex.com/en/docs/api-design-guide/concepts/endpoints
//! [interceptors]: https://docs.rs/tonic/latest/tonic/service/trait.Interceptor.html
//! [examples]: https://code.tvl.fyi/tree/ops/yandex-cloud-rs/examples
use tonic::metadata::{Ascii, MetadataValue};
use tonic::service::Interceptor;
/// Publicly re-export some types from tonic which users might need
/// for implementing traits, or for naming concrete client types.
pub mod tonic_exports {
pub use tonic::service::interceptor::InterceptedService;
pub use tonic::transport::Channel;
pub use tonic::transport::Endpoint;
pub use tonic::Status;
}
/// Helper trait for types or closures that can provide authentication
/// tokens for Yandex Cloud.
pub trait TokenProvider {
/// Fetch a currently valid authentication token for Yandex Cloud.
fn get_token<'a>(&'a mut self) -> Result<&'a str, tonic::Status>;
}
impl TokenProvider for String {
fn get_token<'a>(&'a mut self) -> Result<&'a str, tonic::Status> {
Ok(self.as_str())
}
}
impl TokenProvider for &'static str {
fn get_token(&mut self) -> Result<&'static str, tonic::Status> {
Ok(*self)
}
}
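// Illustrative sketch of a custom provider: a type owning a token read from
// the environment at startup (the type and variable names are arbitrary
// examples, not part of this crate's API):
//
//     struct EnvTokenProvider {
//         token: String,
//     }
//
//     impl TokenProvider for EnvTokenProvider {
//         fn get_token<'a>(&'a mut self) -> Result<&'a str, tonic::Status> {
//             Ok(self.token.as_str())
//         }
//     }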
/// Interceptor for adding authentication headers to gRPC requests.
/// This is constructed with a callable that returns authentication
/// tokens.
///
/// This callable is responsible for ensuring that the returned tokens
/// are valid at the given time, i.e. it should take care of
/// refreshing and so on.
pub struct AuthInterceptor<T: TokenProvider> {
token_provider: T,
}
impl<T: TokenProvider> AuthInterceptor<T> {
pub fn new(token_provider: T) -> Self {
Self { token_provider }
}
}
impl<T: TokenProvider> Interceptor for AuthInterceptor<T> {
fn call(
&mut self,
mut request: tonic::Request<()>,
) -> Result<tonic::Request<()>, tonic::Status> {
let token: MetadataValue<Ascii> = format!("Bearer {}", self.token_provider.get_token()?)
.try_into()
.map_err(|_| {
tonic::Status::invalid_argument("authorization token contained invalid characters")
})?;
request.metadata_mut().insert("authorization", token);
Ok(request)
}
}
// The rest of this file is generated by the build script at ../build.rs.
include!("includes.rs");