chore(*): drop everything that is not required for Tvix

Co-Authored-By: edef <edef@edef.eu>
Co-Authored-By: Ryan Lahfa <raito@lix.systems>
Change-Id: I9817214c3122e49d694c5e41818622a08d9dfe45
Florian Klink 2025-01-05 17:12:30 +01:00
parent bd91cac1f3
commit df4500ea2b
2905 changed files with 34 additions and 493328 deletions


@@ -1,5 +0,0 @@
if pass apps/declib/mastodon_access_token >/dev/null; then
export DECLIB_MASTODON_ACCESS_TOKEN=$(pass apps/declib/mastodon_access_token)
fi
eval "$(lorri direnv)"


@@ -1,6 +0,0 @@
dist-newstyle/
result-*
/node-modules/
# ignore all package-locks (for now?)
package-lock.json


@@ -1,359 +0,0 @@
# HLint configuration file
# https://github.com/ndmitchell/hlint
# Run `hlint --default` to see the example configuration file.
##########################
# WARNING: These need to be synced with the default-extensions field
# in the cabal file.
- arguments: [-XGHC2021, -XOverloadedRecordDot]
# Ignore some builtin hints
# often functions are more readable with explicit arguments
- ignore: { name: Eta reduce }
# these redundancy warnings are just completely irrelevant
- ignore: { name: Redundant bracket }
- ignore: { name: Move brackets to avoid $ }
- ignore: { name: Redundant $ }
- ignore: { name: Redundant do }
- ignore: { name: Redundant multi-way if }
# allow case-matching on bool, because why not
- ignore: { name: Use if }
# hlint cannot distinguish actual newtypes from data types
# that accidentally have only one field
# (but might have more in the future).
# Since it's a mostly irrelevant runtime optimization, we don't care.
- ignore: { name: Use newtype instead of data }
# these lead to harder-to-read/more implicit code
- ignore: { name: Use fmap }
- ignore: { name: Use <$> }
- ignore: { name: Use tuple-section }
- ignore: { name: Use forM_ }
- ignore: { name: Functor law }
- ignore: { name: Use maybe }
# fst and snd are usually a code smell and should be explicit matches, _naming the ignored side.
- ignore: { name: Use fst }
- ignore: { name: Use snd }
- ignore: { name: Use fromMaybe }
- ignore: { name: Use const }
- ignore: { name: Replace case with maybe }
- ignore: { name: Replace case with fromMaybe }
- ignore: { name: Avoid lambda }
- ignore: { name: Avoid lambda using `infix` }
- ignore: { name: Use curry }
- ignore: { name: Use uncurry }
- ignore: { name: Use first }
- ignore: { name: Redundant first }
- ignore: { name: Use second }
- ignore: { name: Use bimap }
# just use `not x`
- ignore: { name: Use unless }
- ignore: { name: Redundant <&> }
# list comprehensions are a seldom-used part of the Haskell language
# and they introduce syntactic overhead that is usually not worth the conciseness
- ignore: { name: Use list comprehension }
# Seems to be buggy in cases
- ignore: { name: Use section }
# multiple maps in a row are usually used for clarity,
# and the compiler will optimize them away, thank you very much.
- ignore: { name: Use map once }
- ignore: { name: Fuse foldr/map }
- ignore: { name: Fuse traverse/map }
- ignore: { name: Fuse traverse_/map }
- ignore: { name: Fuse traverse/<$> }
# this is silly, why would I use a special function if I can just (heh) `== Nothing`
- ignore: { name: Use isNothing }
# The duplication heuristic is not very smart
# and more annoying than helpful.
# see https://github.com/ndmitchell/hlint/issues/1009
- ignore: { name: Reduce duplication }
# Stops the pattern match trick
- ignore: { name: Use record patterns }
- ignore: { name: Use null }
- ignore: { name: Use uncurry }
# we don't want void, see below
- ignore: { name: Use void }
- functions:
# disallow Enum instance functions, they are partial
- name: Prelude.succ
within: [Relude.Extra.Enum]
message: "Dangerous, will fail for highest element"
- name: Prelude.pred
within: [Relude.Extra.Enum]
message: "Dangerous, will fail for lowest element"
- name: Prelude.toEnum
within: []
message: "Extremely partial"
- name: Prelude.fromEnum
within: []
message: "Dangerous for most uses"
- name: Prelude.enumFrom
within: []
- name: Prelude.enumFromThen
within: []
- name: Prelude.enumFromThenTo
within: []
- name: Prelude.boundedEnumFrom
within: []
- name: Prelude.boundedEnumFromThen
within: []
- name: Text.Read.readMaybe
within:
# The BSON ObjectId depends on Read for parsing
- Milkmap.Milkmap
- Milkmap.FieldData.Value
message: "`readMaybe` is probably not what you want for parsing values, please use the `FieldParser` module."
# `void` discards its argument and is polymorphic,
# thus making it brittle in the face of code changes.
# (see https://tech.freckle.com/2020/09/23/void-is-a-smell/)
# Use an explicit `_ <- …` instead.
- name: Data.Functor.void
within: []
message: "`void` leads to bugs. Use an explicit `_ <- …` instead"
- name: Data.Foldable.length
within: ["MyPrelude"]
message: "`Data.Foldable.length` is dangerous to use, because it also works on types you wouldnt expect, like `length (3,4) == 1` and `length (Just 2) == 1`. Use the `length` function for your specific type instead, for example `List.length` or `Map.length`."
- name: Prelude.length
within: ["MyPrelude"]
message: "`Prelude.length` is dangerous to use, because it also works on types you wouldnt expect, like `length (3,4) == 1` and `length (Just 2) == 1`. Use the `length` function for your specific type instead, for example `List.length` or `Map.length`."
# Using an explicit lambda with its argument “underscored”
# is more clear in every case.
# e.g. `const True` => `\_request -> True`
# shows the reader that the ignored argument was a request.
- name: Prelude.const
within: []
message: "Replace `const` with an explicit lambda with type annotation for code clarity and type safety, e.g.: `const True` => `\\(_ :: Request) -> True`. If you really dont want to spell out the type (which might lead to bugs!), you can also use something like `\_request -> True`."
- name: Data.List.nub
within: []
message: "O(n²), use `Data.Containers.ListUtils.nubOrd"
- name: Prelude.maximum
within: []
message: "`maximum` crashes on empty list; use non-empty lists and `maximum1`"
- name: Data.List.maximum
within: []
message: "`maximum` crashes on empty list; use non-empty lists and `maximum1`"
- name: Prelude.minimum
within: []
message: "`minimum` crashes on empty list; use non-empty lists and `minimum1`"
- name: Data.List.minimum
within: []
message: "`minimum` crashes on empty list; use non-empty lists and `minimum1`"
- name: Data.Foldable.maximum
within: []
message: "`maximum` crashes on empty foldable stucture; use Foldable1 and `maximum1`."
- name: Data.Foldable.minimum
within: []
message: "`minimum` crashes on empty foldable stucture; use Foldable1 and `minimum1`."
# Using prelude functions instead of stdlib functions
- name: "Data.Text.Encoding.encodeUtf8"
within: ["MyPrelude"]
message: "Use `textToBytesUtf8`"
- name: "Data.Text.Lazy.Encoding.encodeUtf8"
within: ["MyPrelude"]
message: "Use `textToBytesUtf8Lazy`"
- name: "Data.Text.Encoding.decodeUtf8'"
within: ["MyPrelude"]
message: "Use `bytesToTextUtf8`"
- name: "Data.Text.Encoding.Lazy.decodeUtf8'"
within: ["MyPrelude"]
message: "Use `bytesToTextUtf8Lazy`"
- name: "Data.Text.Encoding.decodeUtf8"
within: ["MyPrelude"]
message: "Either check for errors with `bytesToTextUtf8`, decode leniently with unicode replacement characters with `bytesToTextUtf8Lenient` or use the crashing version `bytesToTextUtf8Unsafe` (discouraged)."
- name: "Data.Text.Encoding.Lazy.decodeUtf8"
within: ["MyPrelude"]
message: "Either check for errors with `bytesToTextUtf8Lazy`, decode leniently with unicode replacement characters with `bytesToTextUtf8LenientLazy` or use the crashing version `bytesToTextUtf8UnsafeLazy` (discouraged)."
- name: "Data.Text.Lazy.toStrict"
within: ["MyPrelude"]
message: "Use `toStrict`"
- name: "Data.Text.Lazy.fromStrict"
within: ["MyPrelude"]
message: "Use `toLazy`"
- name: "Data.ByteString.Lazy.toStrict"
within: ["MyPrelude"]
message: "Use `toStrictBytes`"
- name: "Data.ByteString.Lazy.fromStrict"
within: ["MyPrelude"]
message: "Use `toLazyBytes`"
- name: "Data.Text.unpack"
within: ["MyPrelude"]
message: "Use `textToString`"
- name: "Data.Text.pack"
within: ["MyPrelude"]
message: "Use `stringToText`"
- name: "Data.Maybe.listToMaybe"
within: []
message: |
`listToMaybe` throws away everything but the first element of a list (it is essentially `safeHead`).
If that is what you want, please use a pattern match like
```
case xs of
[] -> …
(x:_) -> …
```
- name: "Data.List.head"
within: []
message: |
`List.head` fails on an empty list. I didn't think I'd have to say this, but please use a pattern match on the list, like:
```
case xs of
[] -> … error handling …
(x:_) -> …
```
Also think about why the rest of the list should be ignored.
- name: "Prelude.head"
within: []
message: |
`List.head` fails on an empty list. I didn't think I'd have to say this, but please use a pattern match on the list, like:
```
case xs of
[] -> … error handling …
(x:_) -> …
```
Also think about why the rest of the list should be ignored.
- name: "Data.Maybe.fromJust"
within: []
message: |
`Maybe.fromJust` is obviously partial. Please use a pattern match.
In case you actually want to throw an error on a `Nothing`,
please add an error message, like so:
```
myMaybe & annotate "my error message" & unwrapError
```
If you are in `IO`, use `unwrapIOError` instead,
or throw a monad-specific error.
- name: "Data.Either.fromLeft"
within: []
message: |
`Either.fromLeft` is obviously partial. Please use a pattern match.
- name: "Data.Either.fromRight"
within: []
message: |
`Either.fromRight` is obviously partial. Please use a pattern match.
# Make restricted functions into an error if found
- error: { name: "Avoid restricted function, see comment in .hlint.yaml" }
# Some functions that have (more modern) aliases.
# They are not dangerous per se,
# but we want to make it easier to read our code so we should
make sure we don't use too many things that are renames.
- hint:
lhs: "undefined"
rhs: "todo"
note: "`undefined` is a silent error, `todo` will display a warning as long as it exists in the code."
- hint:
lhs: "return"
rhs: "pure"
note: "Use `pure` from `Applicative` instead, its the exact same function."
- hint:
lhs: "mapM"
rhs: "traverse"
note: "Use `traverse` from `Traversable` instead. Its the exact same function."
- hint:
lhs: "mapM_"
rhs: "traverse_"
note: "Use `traverse_` from `Traversable` instead. Its the exact same function."
- hint:
lhs: "forM"
rhs: "for"
note: "Use `for` from `Traversable` instead. Its the exact same function."
- hint:
lhs: "forM_"
rhs: "for_"
note: "Use `for_` from `Traversable` instead. Its the exact same function."
- hint:
lhs: "stringToText (show x)"
rhs: "showToText x"
- hint:
lhs: "Data.Set.toList (Data.Set.fromList x)"
rhs: "List.nubOrd x"
note: "`nubOrd` removes duplicate elements from a list."
- modules:
# Disallowed Modules
- name: Data.Map
within: []
message: "Lazy maps leak space, use `import Data.Map.Strict as Map` instead"
- name: Control.Monad.Writer
within: []
message: "Lazy writers leak space, use `Control.Monad.Trans.Writer.CPS` instead"
- name: Control.Monad.Trans.Writer.Lazy
within: []
message: "Lazy writers leak space, use `Control.Monad.Trans.Writer.CPS` instead"
- name: Control.Monad.Trans.Writer.Strict
within: []
message: "Even strict writers leak space, use `Control.Monad.Trans.Writer.CPS` instead"
# Qualified module imports
- { name: Data.Map.Strict, as: Map }
- { name: Data.HashMap.Strict, as: HashMap }
- { name: Data.Set, as: Set }
- { name: Data.ByteString.Char8, as: Char8 }
- { name: Data.ByteString.Lazy.Char8, as: Char8.Lazy }
- { name: Data.Text, as: Text }
- { name: Data.Vector, as: Vector }
- { name: Data.Vault.Lazy, as: Vault }
- { name: Data.Aeson, as: Json }
- { name: Data.Aeson.Types, as: Json }
- { name: Data.Aeson.BetterErrors, as: Json }


@@ -1,8 +0,0 @@
{
"trailingComma": "all",
"tabWidth": 2,
"semi": true,
"singleQuote": true,
"printWidth": 90,
"arrowParens": "avoid"
}


@@ -1,53 +0,0 @@
{
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
"version": "0.2.0",
"configurations": [
{
"name": "node: Attach to running process",
"port": 9229,
"request": "attach",
"skipFiles": [
"<node_internals>/**"
],
"type": "node"
},
{
"name": "run declib",
"type": "node",
"cwd": "${workspaceFolder}/declib",
"request": "launch",
"runtimeExecutable": "ninja",
"runtimeArgs": [
"run",
],
},
{
"type": "node",
"name": "Run tap-bpm",
"skipFiles": [
"<node_internals>/**"
],
"request": "launch",
"program": "${workspaceFolder}/lyric/dist/index.js",
"preLaunchTask": "ninja build lyric",
"outFiles": [
"${workspaceFolder}/lyric/dist/**/*.js"
],
"args": [
"tap-bpm"
]
},
{
"preLaunchTask": "npm build lyric extension",
"name": "Launch lyric vscode Extension",
"type": "extensionHost",
"request": "launch",
"args": [
"--extensionDevelopmentPath=${workspaceFolder}/lyric/extension",
"${workspaceFolder}/lyric/extension"
]
}
]
}


@@ -1,25 +0,0 @@
{
"sqltools.connections": [
{
"previewLimit": 50,
"driver": "SQLite",
"name": "cas-serve",
"database": "${workspaceFolder:Profpatsch}/cas-serve/data.sqlite"
}
],
"sqltools.useNodeRuntime": true,
"editor.formatOnSave": true,
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[javascript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[json]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"purescript.codegenTargets": [
"corefn"
],
"purescript.foreignExt": "nix"
}


@@ -1,4 +0,0 @@
set noparent
Profpatsch
sterni


@@ -1,10 +0,0 @@
# Profpatsch's assemblage of peculiarities and curiosities
Welcome, Welcome.
Welcome to my user dir, where we optimize f\*\*\*ing around, in order to optimize finding out.
![fafo graph](./fafo.jpg)
DISCLAIMER: All of this code is of the “do not try at work” sort, unless noted otherwise.
You might try at home, however. Get inspired or get grossed out, whichever you like.


@@ -1,2 +0,0 @@
/node_modules/
/result


@@ -1,15 +0,0 @@
ayu_mirage
catppuccin
gruvbox_dark
inferno
monokai_pro
night_owlish_light
papercolor_light
everforest_light
github_light
gruvbox_light
one_light
papertheme
rose_pine_dawn


@@ -1 +0,0 @@
declare module 'dbus-native';


@@ -1,3 +0,0 @@
{ depot, pkgs, ... }:
depot.users.Profpatsch.napalm.buildPackage ./. { }


@@ -1,546 +0,0 @@
{
"name": "alacritty-change-color-scheme",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "alacritty-change-color-scheme",
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/context-async-hooks": "^1.29.0",
"@opentelemetry/core": "^1.29.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.56.0",
"@opentelemetry/sdk-trace-base": "^1.29.0",
"dbus-native": "^0.4.0"
},
"bin": {
"alacritty-change-color-scheme": "alacritty-change-color-scheme.js"
}
},
"node_modules/@opentelemetry/api": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
"integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
"license": "Apache-2.0",
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/@opentelemetry/api-logs": {
"version": "0.56.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/api-logs/-/api-logs-0.56.0.tgz",
"integrity": "sha512-Wr39+94UNNG3Ei9nv3pHd4AJ63gq5nSemMRpCd8fPwDL9rN3vK26lzxfH27mw16XzOSO+TpyQwBAMaLxaPWG0g==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/api": "^1.3.0"
},
"engines": {
"node": ">=14"
}
},
"node_modules/@opentelemetry/context-async-hooks": {
"version": "1.29.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/context-async-hooks/-/context-async-hooks-1.29.0.tgz",
"integrity": "sha512-TKT91jcFXgHyIDF1lgJF3BHGIakn6x0Xp7Tq3zoS3TMPzT9IlP0xEavWP8C1zGjU9UmZP2VR1tJhW9Az1A3w8Q==",
"license": "Apache-2.0",
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": ">=1.0.0 <1.10.0"
}
},
"node_modules/@opentelemetry/core": {
"version": "1.29.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/core/-/core-1.29.0.tgz",
"integrity": "sha512-gmT7vAreXl0DTHD2rVZcw3+l2g84+5XiHIqdBUxXbExymPCvSsGOpiwMmn8nkiJur28STV31wnhIDrzWDPzjfA==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/semantic-conventions": "1.28.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": ">=1.0.0 <1.10.0"
}
},
"node_modules/@opentelemetry/exporter-trace-otlp-http": {
"version": "0.56.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/exporter-trace-otlp-http/-/exporter-trace-otlp-http-0.56.0.tgz",
"integrity": "sha512-vqVuJvcwameA0r0cNrRzrZqPLB0otS+95g0XkZdiKOXUo81wYdY6r4kyrwz4nSChqTBEFm0lqi/H2OWGboOa6g==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/core": "1.29.0",
"@opentelemetry/otlp-exporter-base": "0.56.0",
"@opentelemetry/otlp-transformer": "0.56.0",
"@opentelemetry/resources": "1.29.0",
"@opentelemetry/sdk-trace-base": "1.29.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": "^1.3.0"
}
},
"node_modules/@opentelemetry/otlp-exporter-base": {
"version": "0.56.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/otlp-exporter-base/-/otlp-exporter-base-0.56.0.tgz",
"integrity": "sha512-eURvv0fcmBE+KE1McUeRo+u0n18ZnUeSc7lDlW/dzlqFYasEbsztTK4v0Qf8C4vEY+aMTjPKUxBG0NX2Te3Pmw==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/core": "1.29.0",
"@opentelemetry/otlp-transformer": "0.56.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": "^1.3.0"
}
},
"node_modules/@opentelemetry/otlp-transformer": {
"version": "0.56.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/otlp-transformer/-/otlp-transformer-0.56.0.tgz",
"integrity": "sha512-kVkH/W2W7EpgWWpyU5VnnjIdSD7Y7FljQYObAQSKdRcejiwMj2glypZtUdfq1LTJcv4ht0jyTrw1D3CCxssNtQ==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/api-logs": "0.56.0",
"@opentelemetry/core": "1.29.0",
"@opentelemetry/resources": "1.29.0",
"@opentelemetry/sdk-logs": "0.56.0",
"@opentelemetry/sdk-metrics": "1.29.0",
"@opentelemetry/sdk-trace-base": "1.29.0",
"protobufjs": "^7.3.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": "^1.3.0"
}
},
"node_modules/@opentelemetry/resources": {
"version": "1.29.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/resources/-/resources-1.29.0.tgz",
"integrity": "sha512-s7mLXuHZE7RQr1wwweGcaRp3Q4UJJ0wazeGlc/N5/XSe6UyXfsh1UQGMADYeg7YwD+cEdMtU1yJAUXdnFzYzyQ==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/core": "1.29.0",
"@opentelemetry/semantic-conventions": "1.28.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": ">=1.0.0 <1.10.0"
}
},
"node_modules/@opentelemetry/sdk-logs": {
"version": "0.56.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/sdk-logs/-/sdk-logs-0.56.0.tgz",
"integrity": "sha512-OS0WPBJF++R/cSl+terUjQH5PebloidB1Jbbecgg2rnCmQbTST9xsRes23bLfDQVRvmegmHqDh884h0aRdJyLw==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/api-logs": "0.56.0",
"@opentelemetry/core": "1.29.0",
"@opentelemetry/resources": "1.29.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": ">=1.4.0 <1.10.0"
}
},
"node_modules/@opentelemetry/sdk-metrics": {
"version": "1.29.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/sdk-metrics/-/sdk-metrics-1.29.0.tgz",
"integrity": "sha512-MkVtuzDjXZaUJSuJlHn6BSXjcQlMvHcsDV7LjY4P6AJeffMa4+kIGDjzsCf6DkAh6Vqlwag5EWEam3KZOX5Drw==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/core": "1.29.0",
"@opentelemetry/resources": "1.29.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": ">=1.3.0 <1.10.0"
}
},
"node_modules/@opentelemetry/sdk-trace-base": {
"version": "1.29.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/sdk-trace-base/-/sdk-trace-base-1.29.0.tgz",
"integrity": "sha512-hEOpAYLKXF3wGJpXOtWsxEtqBgde0SCv+w+jvr3/UusR4ll3QrENEGnSl1WDCyRrpqOQ5NCNOvZch9UFVa7MnQ==",
"license": "Apache-2.0",
"dependencies": {
"@opentelemetry/core": "1.29.0",
"@opentelemetry/resources": "1.29.0",
"@opentelemetry/semantic-conventions": "1.28.0"
},
"engines": {
"node": ">=14"
},
"peerDependencies": {
"@opentelemetry/api": ">=1.0.0 <1.10.0"
}
},
"node_modules/@opentelemetry/semantic-conventions": {
"version": "1.28.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/semantic-conventions/-/semantic-conventions-1.28.0.tgz",
"integrity": "sha512-lp4qAiMTD4sNWW4DbKLBkfiMZ4jbAboJIGOQr5DvciMRI494OapieI9qiODpOt0XBr1LjIDy1xAGAnVs5supTA==",
"license": "Apache-2.0",
"engines": {
"node": ">=14"
}
},
"node_modules/@protobufjs/aspromise": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
"integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/base64": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
"integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/codegen": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
"integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/eventemitter": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
"integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/fetch": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
"integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==",
"license": "BSD-3-Clause",
"dependencies": {
"@protobufjs/aspromise": "^1.1.1",
"@protobufjs/inquire": "^1.1.0"
}
},
"node_modules/@protobufjs/float": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
"integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/inquire": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
"integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/path": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
"integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/pool": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
"integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==",
"license": "BSD-3-Clause"
},
"node_modules/@protobufjs/utf8": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
"integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==",
"license": "BSD-3-Clause"
},
"node_modules/@types/node": {
"version": "22.10.1",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.10.1.tgz",
"integrity": "sha512-qKgsUwfHZV2WCWLAnVP1JqnpE6Im6h3Y0+fYgMTasNQ7V++CBX5OT1as0g0f+OyubbFqhf6XVNIsmN4IIhEgGQ==",
"license": "MIT",
"dependencies": {
"undici-types": "~6.20.0"
}
},
"node_modules/abstract-socket": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/abstract-socket/-/abstract-socket-2.1.1.tgz",
"integrity": "sha512-YZJizsvS1aBua5Gd01woe4zuyYBGgSMeqDOB6/ChwdTI904KP6QGtJswXl4hcqWxbz86hQBe++HWV0hF1aGUtA==",
"deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.",
"hasInstallScript": true,
"license": "MIT",
"optional": true,
"os": [
"linux"
],
"dependencies": {
"bindings": "^1.2.1",
"nan": "^2.12.1"
},
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/bindings": {
"version": "1.5.0",
"resolved": "https://registry.npmjs.org/bindings/-/bindings-1.5.0.tgz",
"integrity": "sha512-p2q/t/mhvuOj/UeLlV6566GD/guowlr0hHxClI0W9m7MWYkL1F0hLo+0Aexs9HSPCtR1SXQ0TD3MMKrXZajbiQ==",
"license": "MIT",
"optional": true,
"dependencies": {
"file-uri-to-path": "1.0.0"
}
},
"node_modules/dbus-native": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/dbus-native/-/dbus-native-0.4.0.tgz",
"integrity": "sha512-i3zvY3tdPEOaMgmK4riwupjDYRJ53rcE1Kj8rAgnLOFmBd0DekUih59qv8v+Oyils/U9p+s4sSsaBzHWLztI+Q==",
"license": "MIT",
"dependencies": {
"event-stream": "^4.0.0",
"hexy": "^0.2.10",
"long": "^4.0.0",
"optimist": "^0.6.1",
"put": "0.0.6",
"safe-buffer": "^5.1.1",
"xml2js": "^0.4.17"
},
"bin": {
"dbus2js": "bin/dbus2js.js"
},
"optionalDependencies": {
"abstract-socket": "^2.0.0"
}
},
"node_modules/duplexer": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz",
"integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg==",
"license": "MIT"
},
"node_modules/event-stream": {
"version": "4.0.1",
"resolved": "https://registry.npmjs.org/event-stream/-/event-stream-4.0.1.tgz",
"integrity": "sha512-qACXdu/9VHPBzcyhdOWR5/IahhGMf0roTeZJfzz077GwylcDd90yOHLouhmv7GJ5XzPi6ekaQWd8AvPP2nOvpA==",
"license": "MIT",
"dependencies": {
"duplexer": "^0.1.1",
"from": "^0.1.7",
"map-stream": "0.0.7",
"pause-stream": "^0.0.11",
"split": "^1.0.1",
"stream-combiner": "^0.2.2",
"through": "^2.3.8"
}
},
"node_modules/file-uri-to-path": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/file-uri-to-path/-/file-uri-to-path-1.0.0.tgz",
"integrity": "sha512-0Zt+s3L7Vf1biwWZ29aARiVYLx7iMGnEUl9x33fbB/j3jR81u/O2LbqK+Bm1CDSNDKVtJ/YjwY7TUd5SkeLQLw==",
"license": "MIT",
"optional": true
},
"node_modules/from": {
"version": "0.1.7",
"resolved": "https://registry.npmjs.org/from/-/from-0.1.7.tgz",
"integrity": "sha512-twe20eF1OxVxp/ML/kq2p1uc6KvFK/+vs8WjEbeKmV2He22MKm7YF2ANIt+EOqhJ5L3K/SuuPhk0hWQDjOM23g==",
"license": "MIT"
},
"node_modules/hexy": {
"version": "0.2.11",
"resolved": "https://registry.npmjs.org/hexy/-/hexy-0.2.11.tgz",
"integrity": "sha512-ciq6hFsSG/Bpt2DmrZJtv+56zpPdnq+NQ4ijEFrveKN0ZG1mhl/LdT1NQZ9se6ty1fACcI4d4vYqC9v8EYpH2A==",
"license": "MIT",
"bin": {
"hexy": "bin/hexy_cmd.js"
}
},
"node_modules/long": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
"integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
"license": "Apache-2.0"
},
"node_modules/map-stream": {
"version": "0.0.7",
"resolved": "https://registry.npmjs.org/map-stream/-/map-stream-0.0.7.tgz",
"integrity": "sha512-C0X0KQmGm3N2ftbTGBhSyuydQ+vV1LC3f3zPvT3RXHXNZrvfPZcoXp/N5DOa8vedX/rTMm2CjTtivFg2STJMRQ==",
"license": "MIT"
},
"node_modules/minimist": {
"version": "0.0.10",
"resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.10.tgz",
"integrity": "sha512-iotkTvxc+TwOm5Ieim8VnSNvCDjCK9S8G3scJ50ZthspSxa7jx50jkhYduuAtAjvfDUwSgOwf8+If99AlOEhyw==",
"license": "MIT"
},
"node_modules/nan": {
"version": "2.22.0",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.22.0.tgz",
"integrity": "sha512-nbajikzWTMwsW+eSsNm3QwlOs7het9gGJU5dDZzRTQGk03vyBOauxgI4VakDzE0PtsGTmXPsXTbbjVhRwR5mpw==",
"license": "MIT",
"optional": true
},
"node_modules/optimist": {
"version": "0.6.1",
"resolved": "https://registry.npmjs.org/optimist/-/optimist-0.6.1.tgz",
"integrity": "sha512-snN4O4TkigujZphWLN0E//nQmm7790RYaE53DdL7ZYwee2D8DDo9/EyYiKUfN3rneWUjhJnueija3G9I2i0h3g==",
"license": "MIT/X11",
"dependencies": {
"minimist": "~0.0.1",
"wordwrap": "~0.0.2"
}
},
"node_modules/pause-stream": {
"version": "0.0.11",
"resolved": "https://registry.npmjs.org/pause-stream/-/pause-stream-0.0.11.tgz",
"integrity": "sha512-e3FBlXLmN/D1S+zHzanP4E/4Z60oFAa3O051qt1pxa7DEJWKAyil6upYVXCWadEnuoqa4Pkc9oUx9zsxYeRv8A==",
"license": [
"MIT",
"Apache2"
],
"dependencies": {
"through": "~2.3"
}
},
"node_modules/protobufjs": {
"version": "7.4.0",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.4.0.tgz",
"integrity": "sha512-mRUWCc3KUU4w1jU8sGxICXH/gNS94DvI1gxqDvBzhj1JpcsimQkYiOJfwsPUykUI5ZaspFbSgmBLER8IrQ3tqw==",
"hasInstallScript": true,
"license": "BSD-3-Clause",
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
"@protobufjs/base64": "^1.1.2",
"@protobufjs/codegen": "^2.0.4",
"@protobufjs/eventemitter": "^1.1.0",
"@protobufjs/fetch": "^1.1.0",
"@protobufjs/float": "^1.0.2",
"@protobufjs/inquire": "^1.1.0",
"@protobufjs/path": "^1.1.2",
"@protobufjs/pool": "^1.1.0",
"@protobufjs/utf8": "^1.1.0",
"@types/node": ">=13.7.0",
"long": "^5.0.0"
},
"engines": {
"node": ">=12.0.0"
}
},
"node_modules/protobufjs/node_modules/long": {
"version": "5.2.3",
"resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz",
"integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==",
"license": "Apache-2.0"
},
"node_modules/put": {
"version": "0.0.6",
"resolved": "https://registry.npmjs.org/put/-/put-0.0.6.tgz",
"integrity": "sha512-w0szIZ2NkqznMFqxYPRETCIi+q/S8UKis9F4yOl6/N9NDCZmbjZZT85aI4FgJf3vIPrzMPX60+odCLOaYxNWWw==",
"license": "MIT/X11",
"engines": {
"node": ">=0.3.0"
}
},
"node_modules/safe-buffer": {
"version": "5.2.1",
"resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
"integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/feross"
},
{
"type": "patreon",
"url": "https://www.patreon.com/feross"
},
{
"type": "consulting",
"url": "https://feross.org/support"
}
],
"license": "MIT"
},
"node_modules/sax": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz",
"integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==",
"license": "ISC"
},
"node_modules/split": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/split/-/split-1.0.1.tgz",
"integrity": "sha512-mTyOoPbrivtXnwnIxZRFYRrPNtEFKlpB2fvjSnCQUiAA6qAZzqwna5envK4uk6OIeP17CsdF3rSBGYVBsU0Tkg==",
"license": "MIT",
"dependencies": {
"through": "2"
},
"engines": {
"node": "*"
}
},
"node_modules/stream-combiner": {
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/stream-combiner/-/stream-combiner-0.2.2.tgz",
"integrity": "sha512-6yHMqgLYDzQDcAkL+tjJDC5nSNuNIx0vZtRZeiPh7Saef7VHX9H5Ijn9l2VIol2zaNYlYEX6KyuT/237A58qEQ==",
"license": "MIT",
"dependencies": {
"duplexer": "~0.1.1",
"through": "~2.3.4"
}
},
"node_modules/through": {
"version": "2.3.8",
"resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
"integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==",
"license": "MIT"
},
"node_modules/undici-types": {
"version": "6.20.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.20.0.tgz",
"integrity": "sha512-Ny6QZ2Nju20vw1SRHe3d9jVu6gJ+4e3+MMpqu7pqE5HT6WsTSlce++GQmK5UXS8mzV8DSYHrQH+Xrf2jVcuKNg==",
"license": "MIT"
},
"node_modules/wordwrap": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-0.0.3.tgz",
"integrity": "sha512-1tMA907+V4QmxV7dbRvb4/8MaRALK6q9Abid3ndMYnbyo8piisCmeONVqVSXqQA3KaP4SLt5b7ud6E2sqP8TFw==",
"license": "MIT",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/xml2js": {
"version": "0.4.23",
"resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz",
"integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==",
"license": "MIT",
"dependencies": {
"sax": ">=0.6.0",
"xmlbuilder": "~11.0.0"
},
"engines": {
"node": ">=4.0.0"
}
},
"node_modules/xmlbuilder": {
"version": "11.0.1",
"resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz",
"integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==",
"license": "MIT",
"engines": {
"node": ">=4.0"
}
}
}
}


@@ -1,20 +0,0 @@
{
"name": "alacritty-change-color-scheme",
"version": "1.0.0",
"main": "alacritty-change-color-scheme.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"bin": "alacritty-change-color-scheme.js",
"author": "",
"license": "ISC",
"description": "",
"dependencies": {
"@opentelemetry/api": "^1.9.0",
"@opentelemetry/context-async-hooks": "^1.29.0",
"@opentelemetry/core": "^1.29.0",
"@opentelemetry/exporter-trace-otlp-http": "^0.56.0",
"@opentelemetry/sdk-trace-base": "^1.29.0",
"dbus-native": "^0.4.0"
}
}


@@ -1,17 +0,0 @@
{
"compilerOptions": {
"noEmit": true,
"target": "ES2022",
"lib": [
"ES2022"
],
"module": "ES2022",
"strict": true,
"checkJs": true,
"moduleResolution": "node"
},
"include": [
"*.ts",
"*.js"
],
}


@@ -1,93 +0,0 @@
{ depot, pkgs, lib, ... }:
let
bins = depot.nix.getBins pkgs.alacritty [ "alacritty" ]
// depot.nix.getBins pkgs.coreutils [ "mkdir" "cp" "install" ];
# https://github.com/alacritty/alacritty-theme
themes = {
# dark = "alacritty_0_12";
dark = "google";
light = "dayfox";
};
config =
{
config = {
# sets the theme for this config (autogenerated file)
general.import = [ "~/.config/alacritty/alacritty-colors-autogen.toml" ];
font.size = 18;
scrolling.history = 100000;
};
# This disables the dpi-sensitive scaling (cause otherwise the font will be humongous on my laptop screen)
alacritty-env.WINIT_X11_SCALE_FACTOR = 1;
};
alacritty = depot.nix.writeExecline "alacritty" { } (
(lib.concatLists (lib.mapAttrsToList (k: v: [ "export" k (toString v) ]) config.alacritty-env))
++ [
"backtick"
"-E"
"config"
[ depot.users.Profpatsch.xdg-config-home ]
bins.alacritty
"--config-file"
((pkgs.formats.toml { }).generate "alacritty.conf" config.config)
"$@"
]
);
alacritty-themes-upstream = pkgs.fetchFromGitHub {
owner = "alacritty";
repo = "alacritty-theme";
rev = "95a7d695605863ede5b7430eb80d9e80f5f504bc";
sha256 = "sha256-D37MQtNS20ESny5UhW1u6ELo9czP4l+q0S8neH7Wdbc=";
};
alacritty-themes-modes-ef = pkgs.fetchFromGitHub {
owner = "anhsirk0";
repo = "alacritty-themes";
rev = "5a2c194a682ec75d46553f9a9d6c43fbf39c689d";
sha256 = "sha256-x5QrtSXNc05DNexM+ZtRzd8T9FdthZUzjW/2uEBdRCk=";
};
alacritty-themes = depot.nix.runExecline "alacritty-themes-merged" { } [
"importas"
"out"
"out"
"if"
[ bins.mkdir "-p" "$\{out}/themes" ]
"if"
[
"elglob"
"-0"
"themes"
"${alacritty-themes-upstream}/themes/*"
bins.install
"-m644"
"-t"
"\${out}/themes"
"$themes"
]
"if"
[
"elglob"
"-0"
"themes"
"${alacritty-themes-modes-ef}/themes/*"
bins.install
"-m644"
"-t"
"\${out}/themes"
"$themes"
]
];
in
{
inherit
alacritty
alacritty-themes
themes;
}


@@ -1,88 +0,0 @@
{ depot, pkgs, lib, ... }:
let
bins = depot.nix.getBins pkgs.findutils [ "find" ];
in
depot.nix.readTree.drvTargets {
findia = depot.nix.writeExecline "findia"
{
readNArgs = 1;
# TODO: comment out, thanks to sterni blocking the runExecline change
# meta.description = ''
# Find case-insensitive anywhere (globbing)
# Usage: findia <pattern> <more find(1) arguments>
# '';
} [
bins.find
"-iname"
"*\${1}*"
"$@"
];
findial = depot.nix.writeExecline "findial"
{
readNArgs = 1;
# TODO: comment out, thanks to sterni blocking the runExecline change
# meta.description = ''
# Find case-insensitive anywhere (globbing), follow symlinks";
# Usage: findial <pattern> <more find(1) arguments>
# '';
} [
bins.find
"-L"
"-iname"
"*\${1}*"
"$@"
];
findian = depot.nix.writeExecline "findian"
{
readNArgs = 2;
# TODO: comment out, thanks to sterni blocking the runExecline change
# meta.description = ''
# Find case-insensitive anywhere (globbing) in directory
# Usage: findian <directory> <pattern> <more find(1) arguments>
# '';
} [
bins.find
"$1"
"-iname"
"*\${2}*"
"$@"
];
findiap = depot.nix.writeExecline "findiap"
{
readNArgs = 2;
# TODO: comment out, thanks to sterni blocking the runExecline change
# meta.description = ''
# Find case-insensitive anywhere (globbing) in directory, the pattern allows for paths.
# Usage: findiap <directory> <pattern> <more find(1) arguments>
# '';
} [
bins.find
"$1"
"-ipath"
"*\${2}*"
"$@"
];
bell = depot.nix.writeExecline "bell" { } [
"if"
[
"pactl"
"upload-sample"
"${pkgs.sound-theme-freedesktop}/share/sounds/freedesktop/stereo/complete.oga"
"bell-window-system"
]
"pactl"
"play-sample"
"bell-window-system"
];
}


@@ -1,22 +0,0 @@
{-# LANGUAGE QuasiQuotes #-}
module ArglibNetencode where
import Data.Attoparsec.ByteString qualified as Atto
import ExecHelpers
import Label
import Netencode qualified
import PossehlAnalyticsPrelude
import System.Posix.Env.ByteString qualified as ByteEnv
arglibNetencode :: CurrentProgramName -> Maybe (Label "arglibEnvvar" Text) -> IO Netencode.T
arglibNetencode progName mEnvvar = do
let envvar = mEnvvar <&> (.arglibEnvvar) & fromMaybe "ARGLIB_NETENCODE" & textToBytesUtf8
ByteEnv.getEnv envvar >>= \case
Nothing -> dieUserError progName [fmt|could not read args, envvar {envvar} not set|]
Just bytes ->
case Atto.parseOnly (Netencode.netencodeParser <* Atto.endOfInput) bytes of
Left err -> dieEnvironmentProblem progName [fmt|arglib parsing error: {err}|]
Right t -> do
ByteEnv.unsetEnv envvar
pure t


@@ -1,65 +0,0 @@
cabal-version: 3.0
name: arglib-netencode
version: 0.1.0.0
author: Profpatsch
maintainer: mail@profpatsch.de
common common-options
ghc-options:
-Wall
-Wno-type-defaults
-Wunused-packages
-Wredundant-constraints
-fwarn-missing-deriving-strategies
-- See https://downloads.haskell.org/ghc/latest/docs/users_guide/exts.html
-- for a description of all these extensions
default-extensions:
-- Infer Applicative instead of Monad where possible
ApplicativeDo
-- Allow literal strings to be Text
OverloadedStrings
-- Syntactic sugar improvements
LambdaCase
MultiWayIf
-- Makes the (deprecated) usage of * instead of Data.Kind.Type an error
NoStarIsType
-- Convenient and crucial to deal with ambiguous field names, commonly
-- known as RecordDotSyntax
OverloadedRecordDot
-- does not export record fields as functions, use OverloadedRecordDot to access instead
NoFieldSelectors
-- Record punning
RecordWildCards
-- Improved Deriving
DerivingStrategies
DerivingVia
-- Type-level strings
DataKinds
-- to enable the `type` keyword in import lists (ormolu uses this automatically)
ExplicitNamespaces
default-language: GHC2021
library
import: common-options
exposed-modules: ArglibNetencode
build-depends:
base >=4.15 && <5,
pa-prelude,
pa-label,
netencode,
exec-helpers,
attoparsec,
unix


@@ -1,81 +0,0 @@
{ depot, pkgs, lib, ... }:
let
# Add the given nix arguments to the program as ARGLIB_NETENCODE envvar
#
# Calls `netencode.gen.dwim` on the provided nix args value.
with-args = name: args: prog: depot.nix.writeExecline "${name}-with-args" { } [
"export"
"ARGLIB_NETENCODE"
(depot.users.Profpatsch.netencode.gen.dwim args)
prog
];
rust = depot.nix.writers.rustSimpleLib
{
name = "arglib-netencode";
dependencies = [
depot.users.Profpatsch.execline.exec-helpers
depot.users.Profpatsch.netencode.netencode-rs
];
} ''
extern crate netencode;
extern crate exec_helpers;
use netencode::{T};
use std::os::unix::ffi::OsStrExt;
pub fn arglib_netencode(prog_name: &str, env: Option<&std::ffi::OsStr>) -> T {
let env = match env {
None => std::ffi::OsStr::from_bytes("ARGLIB_NETENCODE".as_bytes()),
Some(a) => a
};
let t = match std::env::var_os(env) {
None => exec_helpers::die_user_error(prog_name, format!("could not read args, envvar {} not set", env.to_string_lossy())),
// TODO: good error handling for the different parser errors
Some(soup) => match netencode::parse::t_t(soup.as_bytes()) {
Ok((remainder, t)) => match remainder.is_empty() {
true => t,
false => exec_helpers::die_environment_problem(prog_name, format!("arglib: there was some unparsed bytes remaining: {:?}", remainder))
},
Err(err) => exec_helpers::die_environment_problem(prog_name, format!("arglib parsing error: {:?}", err))
}
};
std::env::remove_var(env);
t
}
'';
haskell = pkgs.haskellPackages.mkDerivation {
pname = "arglib-netencode";
version = "0.1.0";
src = depot.users.Profpatsch.exactSource ./. [
./arglib-netencode.cabal
./ArglibNetencode.hs
];
libraryHaskellDepends = [
pkgs.haskellPackages.pa-prelude
pkgs.haskellPackages.pa-label
pkgs.haskellPackages.pa-error-tree
depot.users.Profpatsch.netencode.netencode-hs
depot.users.Profpatsch.execline.exec-helpers-hs
];
isLibrary = true;
license = lib.licenses.mit;
};
in
depot.nix.readTree.drvTargets {
inherit
with-args
rust
haskell
;
}


@@ -1,29 +0,0 @@
{ depot, pkgs, ... }:
# Atomically write a file (just `>` redirection in bash
# empties a file even if the command crashes).
#
# Maybe there is an existing tool for that?
# But it's easy enough to implement.
#
# Example:
# atomically-write
# ./to
# echo "foo"
#
# will atomically write the string "foo" into ./to
let
atomically-write = pkgs.writers.writeDash "atomically-write" ''
set -e
to=$1
shift
# assumes that the tempfile is on the same file system, (or in memory)
# for the `mv` at the end to be more-or-less atomic.
tmp=$(${pkgs.coreutils}/bin/mktemp -d)
trap 'rm -r "$tmp"' EXIT
"$@" \
> "$tmp/out"
mv "$tmp/out" "$to"
'';
in
atomically-write


@@ -1,7 +0,0 @@
# (Parts of) my website
This is a part of https://profpatsch.de/, notably the blog posts.
The other parts can be found in [vuizvui](https://github.com/openlab-aux/vuizvui/tree/master/pkgs/profpatsch/profpatsch.de). It's a mess.
And yes, this implements a webserver & routing engine with nix, execline & s6 utils. “Bis einer weint” (“until someone cries”), as we say in German.


@@ -1,481 +0,0 @@
{ depot, pkgs, lib, ... }:
let
bins = depot.nix.getBins pkgs.lowdown [ "lowdown" ]
// depot.nix.getBins pkgs.cdb [ "cdbget" "cdbmake" "cdbdump" ]
// depot.nix.getBins pkgs.coreutils [ "mv" "cat" "printf" "test" ]
// depot.nix.getBins pkgs.s6-networking [ "s6-tcpserver" ]
// depot.nix.getBins pkgs.time [ "time" ]
;
# /
# TODO: use
toplevel = [
{
route = [ "notes" ];
name = "Notes";
page = { cssFile }: router cssFile;
}
{
route = [ "projects" ];
name = "Projects";
# page = projects;
}
];
# /notes/*
notes = [
{
route = [ "notes" "private-trackers-are-markets" ];
name = "Private bittorrent trackers are markets";
page = { cssFile }: markdownToHtml {
name = "private-trackers-are-markets";
markdown = ./notes/private-trackers-are-markets.md;
inherit cssFile;
};
}
{
route = [ "notes" "an-idealized-conflang" ];
name = "An Idealized Configuration Language";
page = { cssFile }: markdownToHtml {
name = "an-idealized-conflang";
markdown = ./notes/an-idealized-conflang.md;
inherit cssFile;
};
}
{
route = [ "notes" "rust-string-conversions" ];
name = "Converting between different String types in Rust";
page = { cssFile }: markdownToHtml {
name = "rust-string-conversions";
markdown = ./notes/rust-string-conversions.md;
inherit cssFile;
};
}
{
route = [ "notes" "preventing-oom" ];
name = "Preventing out-of-memory (OOM) errors on Linux";
page = { cssFile }: markdownToHtml {
name = "preventing-oom";
markdown = ./notes/preventing-oom.md;
inherit cssFile;
};
}
];
projects = [
{
name = "lorri";
description = "<code>nix-shell</code> replacement for projects";
link = "https://github.com/nix-community/lorri";
}
{
name = "netencode";
description = ''A human-readable nested data exchange format inspired by <a href="https://en.wikipedia.org/wiki/Netstring">netstrings</a> and <a href="https://en.wikipedia.org/wiki/Bencode">bencode</a>.'';
link = depotCgitLink { relativePath = "users/Profpatsch/netencode/README.md"; };
}
{
name = "yarn2nix";
description = ''nix dependency generator for the <a href="https://yarnpkg.com/"><code>yarn</code> Javascript package manager</a>'';
link = "https://github.com/Profpatsch/yarn2nix";
}
];
posts = [
{
date = "2017-05-04";
title = "Ligature Emulation in Emacs";
subtitle = "Its not pretty, but the results are";
description = "How to set up ligatures using <code>prettify-symbols-mode</code> and the Hasklig/FiraCode fonts.";
page = { cssFile }: markdownToHtml {
name = "2017-05-04-ligature-emluation-in-emacs";
markdown = ./posts/2017-05-04-ligature-emulation-in-emacs.md;
inherit cssFile;
};
route = [ "posts" "2017-05-04-ligature-emluation-in-emacs" ];
tags = [ "emacs" ];
}
];
# convert a markdown file to html via lowdown
markdownToHtml =
{ name
, # the file to convert
markdown
, # css file to add to the final result, as { route }
cssFile
}:
depot.nix.runExecline "${name}.html" { } ([
"importas"
"out"
"out"
(depot.users.Profpatsch.lib.debugExec "")
bins.lowdown
"-s"
"-Thtml"
] ++
(lib.optional (cssFile != null) ([ "-M" "css=${mkRoute cssFile.route}" ]))
++ [
"-o"
"$out"
markdown
]);
# takes a { route … } attrset and converts the route lists to an absolute path
fullRoute = attrs: lib.pipe attrs [
(map (x@{ route, ... }: x // { route = mkRoute route; }))
];
# a cdb from route to a netencoded version of data for each route
router = cssFile: lib.pipe (notes ++ posts) [
(map (r: with depot.users.Profpatsch.lens;
lib.pipe r [
(over (field "route") mkRoute)
(over (field "page") (_ { inherit cssFile; }))
]))
(map (x: {
name = x.route;
value = depot.users.Profpatsch.netencode.gen.dwim x;
}))
lib.listToAttrs
(cdbMake "router")
];
# Create a link to the given source file/directory, given the relative path in the depot repo.
# Checks that the file exists at evaluation time.
depotCgitLink =
{
# relative path from the depot root (without leading /).
relativePath
}:
assert
(lib.assertMsg
(builtins.pathExists (depot.path.origSrc + ("/" + relativePath)))
"depotCgitLink: path /${relativePath} does not exist in depot, and depot.path was ${toString depot.path}");
"https://code.tvl.fyi/tree/${relativePath}";
# look up a route by path ($1)
router-lookup = cssFile: depot.nix.writeExecline "router-lookup" { readNArgs = 1; } [
cdbLookup
(router cssFile)
"$1"
];
runExeclineStdout = name: args: cmd: depot.nix.runExecline name args ([
"importas"
"-ui"
"out"
"out"
"redirfd"
"-w"
"1"
"$out"
] ++ cmd);
notes-index-html =
let o = fullRoute notes;
in ''
<ul>
${scope o (o: ''
<li><a href="${str o.route}">${esc o.name}</a></li>
'')}
</ul>
'';
notes-index = pkgs.writeText "notes-index.html" notes-index-html;
# A simple mustache-inspired string interpolation combinator
# that takes an object and a template (a function from o to string)
# and returns a string.
scope = o: tpl:
if builtins.typeOf o == "list" then
lib.concatMapStringsSep "\n" tpl o
else if builtins.typeOf o == "set" then
tpl o
else throw "${lib.generators.toPretty {} o} not allowed in template";
# string-escape html (TODO)
str = s: s;
# html-escape (TODO)
esc = s: s;
html = s: s;
projects-index-html =
let o = projects;
in ''
<dl>
${scope o (o: ''
<dt><a href="${str o.link}">${esc o.name}</a></dt>
<dd>${html o.description}</dd>
'')}
</dl>
'';
projects-index = pkgs.writeText "projects-index.html" projects-index-html;
posts-index-html =
let o = fullRoute posts;
in ''
<dl>
${scope o (o: ''
<dt>${str o.date} <a href="${str o.route}">${esc o.title}</a></dt>
<dd>${html o.description}</dd>
'')}
</dl>
'';
posts-index = pkgs.writeText "projects-index.html" posts-index-html;
arglibNetencode = val: depot.nix.writeExecline "arglib-netencode" { } [
"export"
"ARGLIB_NETENCODE"
(depot.users.Profpatsch.netencode.gen.dwim val)
"$@"
];
# A simple http server that serves the site. Yes, it's horrible.
site-server = { cssFile, port }: depot.nix.writeExecline "blog-server" { } [
(depot.users.Profpatsch.lib.runInEmptyEnv [ "PATH" ])
bins.s6-tcpserver
"127.0.0.1"
port
bins.time
"--format=time: %es"
"--"
runOr
return400
"pipeline"
[
(arglibNetencode {
what = "request";
})
depot.users.Profpatsch.read-http
]
depot.users.Profpatsch.netencode.record-splice-env
runOr
return500
"importas"
"-i"
"path"
"path"
"if"
[ depot.tools.eprintf "GET \${path}\n" ]
runOr
return404
"backtick"
"-ni"
"TEMPLATE_DATA"
[
# TODO: factor this out of here, this is routing not serving
"ifelse"
[ bins.test "$path" "=" "/notes" ]
[
"export"
"content-type"
"text/html"
"export"
"serve-file"
notes-index
depot.users.Profpatsch.netencode.env-splice-record
]
"ifelse"
[ bins.test "$path" "=" "/projects" ]
[
"export"
"content-type"
"text/html"
"export"
"serve-file"
projects-index
depot.users.Profpatsch.netencode.env-splice-record
]
"ifelse"
[ bins.test "$path" "=" "/posts" ]
[
"export"
"content-type"
"text/html"
"export"
"serve-file"
posts-index
depot.users.Profpatsch.netencode.env-splice-record
]
# TODO: ignore potential query arguments. See 404 message
"pipeline"
[ (router-lookup cssFile) "$path" ]
depot.users.Profpatsch.netencode.record-splice-env
"importas"
"-ui"
"page"
"page"
"export"
"content-type"
"text/html"
"export"
"serve-file"
"$page"
depot.users.Profpatsch.netencode.env-splice-record
]
runOr
return500
"if"
[
"pipeline"
[
bins.printf
''
HTTP/1.1 200 OK
Content-Type: {{{content-type}}}; charset=UTF-8
Connection: close
''
]
depot.users.Profpatsch.netencode.netencode-mustache
]
"pipeline"
[ "importas" "t" "TEMPLATE_DATA" bins.printf "%s" "$t" ]
depot.users.Profpatsch.netencode.record-splice-env
"importas"
"-ui"
"serve-file"
"serve-file"
bins.cat
"$serve-file"
];
# run argv or $1 if argv returns a failure status code.
runOr = depot.nix.writeExecline "run-or" { readNArgs = 1; } [
"foreground"
[ "$@" ]
"importas"
"?"
"?"
"ifelse"
[ bins.test "$?" "-eq" "0" ]
[ ]
"if"
[ depot.tools.eprintf "runOr: exited \${?}, running \${1}\n" ]
"$1"
];
return400 = depot.nix.writeExecline "return400" { } [
bins.printf
"%s"
''
HTTP/1.1 400 Bad Request
Content-Type: text/plain; charset=UTF-8
Connection: close
''
];
return404 = depot.nix.writeExecline "return404" { } [
bins.printf
"%s"
''
HTTP/1.1 404 Not Found
Content-Type: text/plain; charset=UTF-8
Connection: close
This page doesn't exist! Query arguments are not handled at the moment.
''
];
return500 = depot.nix.writeExecline "return500" { } [
bins.printf
"%s"
''
HTTP/1.1 500 Internal Server Error
Content-Type: text/plain; charset=UTF-8
Connection: close
Encountered an internal server error. Please try again.
''
];
capture-stdin = depot.nix.writers.rustSimple
{
name = "capture-stdin";
dependencies = [ depot.users.Profpatsch.execline.exec-helpers ];
} ''
extern crate exec_helpers;
use std::io::Read;
fn main() {
let (args, prog) = exec_helpers::args_for_exec("capture-stdin", 1);
let valname = &args[1];
let mut v : Vec<u8> = vec![];
std::io::stdin().lock().read_to_end(&mut v).unwrap();
exec_helpers::exec_into_args("capture-stdin", prog, vec![(valname, v)]);
}
'';
# go from a list of path elements to an absolute route string
mkRoute = route: "/" + lib.concatMapStringsSep "/" urlencodeAscii route;
# urlencodes, but only ASCII characters
# https://en.wikipedia.org/wiki/Percent-encoding
urlencodeAscii = urlPiece:
let
raw = [ "!" "#" "$" "%" "&" "'" "(" ")" "*" "+" "," "/" ":" ";" "=" "?" "@" "[" "]" ];
enc = [ "%21" "%23" "%24" "%25" "%26" "%27" "%28" "%29" "%2A" "%2B" "%2C" "%2F" "%3A" "%3B" "%3D" "%3F" "%40" "%5B" "%5D" ];
rest = [ "A" "B" "C" "D" "E" "F" "G" "H" "I" "J" "K" "L" "M" "N" "O" "P" "Q" "R" "S" "T" "U" "V" "W" "X" "Y" "Z" "a" "b" "c" "d" "e" "f" "g" "h" "i" "j" "k" "l" "m" "n" "o" "p" "q" "r" "s" "t" "u" "v" "w" "x" "y" "z" "0" "1" "2" "3" "4" "5" "6" "7" "8" "9" "-" "_" "." "~" ];
in
assert lib.assertMsg (lib.all (c: builtins.elem c (raw ++ rest)) (lib.stringToCharacters urlPiece))
"urlencodeAscii: the urlPiece must only contain valid url ASCII characters, was: ${urlPiece}";
builtins.replaceStrings raw enc urlPiece;
# create a cdb record entry, as required by the cdbmake tool
cdbRecord = key: val:
"+${toString (builtins.stringLength key)},${toString (builtins.stringLength val)}:"
+ "${key}->${val}\n";
# create a full cdbmake input from an attribute set of keys to values (strings)
cdbRecords =
with depot.nix.yants;
defun [ (attrs (either drv string)) string ]
(attrs:
(lib.concatStrings (lib.mapAttrsToList cdbRecord attrs)) + "\n");
# run cdbmake on a list of key/value pairs (strings)
cdbMake = name: attrs: depot.nix.runExecline "${name}.cdb"
{
stdin = cdbRecords attrs;
} [
"importas"
"out"
"out"
depot.users.Profpatsch.lib.eprint-stdin
"if"
[ bins.cdbmake "db" "tmp" ]
bins.mv
"db"
"$out"
];
# look up a key ($2) in the given cdb ($1)
cdbLookup = depot.nix.writeExecline "cdb-lookup" { readNArgs = 2; } [
# cdb ($1) on stdin
"redirfd"
"-r"
"0"
"$1"
# key ($2) lookup
bins.cdbget
"$2"
];
in
depot.nix.readTree.drvTargets {
inherit
router
depotCgitLink
site-server
notes-index
notes-index-html
projects-index
projects-index-html
posts-index-html
;
}


@@ -1,298 +0,0 @@
tags: netencode, json
date: 2022-03-31
certainty: likely
status: initial
title: An idealized Configuration Language
# An Idealized Configuration Language
JSON brought us one step closer to what an idealized configuration language is,
which I define as “data, stripped of all externalities of the system it is working in”.
Specifically, JSON is very close to what I consider the minimal properties to represent structured data.
## A short history, according to me
In the beginning, Lisp defined s-expressions as a stand-in for an actual syntax.
Then, people figured out that it's also a way to represent structured data.
It has scalars, which can be nested into lists, recursively.
```
(this is (a (list) (of lists)))
```
This provides the first three rules of our idealized language:
1. A **scalar** is a primitive value that is domain-specific.
We can assume a bunch of bytes here, or a text or an integer.
2. A **list** gives an ordering to `0..n` (or `1..n`) values
3. Both a scalar and a list are the *same kind* of “thing” (from here on called **value**),
lists can be created from arbitrary values *recursively*
(for example scalars, or lists of scalars and other lists)
Later, ASN.1 came along and had the important insight that the same idealized data structure
can be represented in different fashions,
for example as a binary-efficient version and a human-readable format.
Then, XML “graced” the world for a decade or two, and the main lesson from it was
that you don't want to mix markup languages and configuration languages,
and that you don't want a committee to design these things.
---
In the meantime, Brendan Eich designed Javascript. Its prototype-based object system
arguably stripped down the rituals of existing OO-systems.
Douglas Crockford later extracted the object format (minus functions) into a syntax, and we got JSON.
```
{
"foo": [
{ "nested": "attrs" },
"some text"
],
"bar": 42
}
```
JSON adds another fundamental idea into the mix:
4. **Records** are unordered collections of `name`/`value` pairs.
A `name` is defined to be a unicode string, i.e. a semantic descriptor of the nested `value`.
Unfortunately, the JSON syntax does not actually specify any semantics of records (`objects` in JSON lingo),
in particular it does not mention what the meaning is if a `name` appears twice in one record.
If records can have multiple entries with the same `name`, suddenly ordering becomes important!
But wait, remember earlier we defined *lists* to impose ordering on two values.
So in order to rectify that problem, we say that
5. A `name` can only appear in a record *once*, names must be unique.
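As an aside, existing parsers resolve duplicate names in incompatible and mostly silent ways; as a quick illustration (my addition, not from the original post, assuming the `serde_json` crate), Rust's `serde_json` simply lets the last occurrence win:
```
use serde_json::Value;

fn main() {
    // Duplicate names: serde_json keeps the last occurrence silently.
    let v: Value = serde_json::from_str(r#"{ "name": 1, "name": 2 }"#).unwrap();
    assert_eq!(v["name"], 2);
    println!("{v}");
}
```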
This is the current state of the programming community at large,
where most “modern” configuration languages basically use a version of the JSON model
as their underlying data structure. (However not all of them use the same version.)
## Improving JSONs data model
We are not yet at the final “idealized” configuration language, though.
Modern languages like Standard ML define their data types as a mixture of
* *records* (“structs” in the C lingo)
* and *sums* (which you can think of as enums that can hold `value`s inside them)
This allows expressing the common pattern where some fields in a record are only meaningful
if another field—the so-called `tag`-field—is set to a specific value.
An easy example: if a request can fail with an error message or succeed with a result.
You could model that as
```
{
"was_error": true,
"error_message": "there was an error"
}
```
or
```
{
"was_error": false,
"result": 42
}
```
in your JSON representation.
But in a ML-like language (like, for example, Rust), you would instead model it as
```
type RequestResult
= Error { error_message: String }
| Success { result: i64 }
```
where the distinction in `Error` or `Success` makes it clear that `error_message` and `result`
only exist in one of these cases, not the other.
We *can* encode exactly that idea into JSON in multiple ways, but there is no single “blessed” way.
For example, another way to encode the above would be
```
{
"Error": {
"error_message": "there was an error"
}
}
```
and
```
{
"Success": {
"result": 42
}
}
```
Particularly notice the difference between the language representation, where the type is “closed”—only `Success` or `Error` can happen—
and the data representation where the type is “open”, more cases could potentially exist.
This is an important difference from a type system:
our idealized configuration language just gives more structure to a bag of data;
it does not restrict which value can be where.
Think of a value in an unityped language, like Python.
So far we have the notion of
1. a scalar (a primitive)
2. a list (ordering on values)
3. a record (unordered collection of named values)
and in order to get the “open” `tag`ged enumeration values, we introduce
4. a `tag`, which gives a name to a value
We can then redefine `record` to mean “an unordered collection of `tag`ged values”,
which further reduces the number of concepts needed.
And thats it, this is the full idealized configuration language.
## Some examples of data modelling with tags
This is all well and good, but what does it look like in practice?
For these examples I will be using JSON with a new `< "tag": value >` syntax
to represent `tag`s.
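For example, the `Success` case from above would be written as:
```
< "Success": { "result": 42 } >
```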
From a compatibility standpoint, `tag`s (or sum types) have dual properties to record types.
With a record, when you have a producer that *adds* a field to it, the consumer will still be able to handle the record (provided the semantics of the existing fields is not changed by the new field).
With a tag, when the producer *removes* a tag, the consumer will still be able to handle the data. It might do one “dead” check on the removed `tag`, but can still handle the remaining ones just fine.
<!-- TODO: some illustration here -->
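As a rough sketch of the tag side of this duality, in Rust terms (the type and names here are invented for illustration):
```rust
// A tagged value, as a Rust enum. Producer v1 emits both cases;
// a later producer might stop emitting `Error` altogether.
enum Response {
    Error { error_message: String },
    Success { result: i64 },
}

fn consume(r: Response) -> String {
    match r {
        // Once the producer stops emitting `Error`, this arm is merely
        // a “dead” check; the consumer keeps working unchanged.
        Response::Error { error_message } => format!("failed: {}", error_message),
        Response::Success { result } => format!("ok: {}", result),
    }
}

fn main() {
    println!("{}", consume(Response::Success { result: 42 }));
}
```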
An example of how that is applied in practice: in `protobuf3`, the fields of a record are *always* optional.
We can model optional fields by wrapping them in `< "Some": value >` or `< "None": {} >` (where the actual value of the `None` is ignored or always an empty record).
So a protobuf with the fields `foo: int` and `bar: string` has to be parsed by the receiver as containing *four* possibilities:
|№|foo|bar|
|--:|---|---|
|1|`<"None":{}>`|`<"None":{}>`|
|2|`<"Some":42>`|`<"None":{}>`|
|3|`<"None":{}>`|`<"Some":"x">`|
|4|`<"Some":42>`|`<"Some":"x">`|
Now, iff the receiver actually handles all four possibilities
(and doesnt just crash if a field is not set, as customary in million-dollar-mistake languages),
its easy to see how removing a field from the producer is semantically equal to always setting it to `<"None":{}>`.
Since all receivers should be ready to receive `None` for every field, this provides a simple forward-compatibility scheme.
We can abstract this to any kind of tag value:
If you start with “more” tags, you give yourself space to remove them later without breaking compatibility, typically called “forward compatibility”.
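A minimal Rust sketch of a receiver that handles all four rows (`Option` plays the role of the `Some`/`None` tags; the `Msg` struct is invented for illustration, not generated protobuf code):
```rust
// A decoded protobuf3-style record: every field is effectively optional.
struct Msg {
    foo: Option<i64>,
    bar: Option<String>,
}

fn handle(msg: &Msg) -> String {
    // All four combinations from the table have to be handled;
    // a producer that drops `foo` simply always lands in the `None` rows.
    match (&msg.foo, &msg.bar) {
        (None, None) => "neither set".to_string(),
        (Some(foo), None) => format!("only foo: {}", foo),
        (None, Some(bar)) => format!("only bar: {}", bar),
        (Some(foo), Some(bar)) => format!("foo: {}, bar: {}", foo, bar),
    }
}

fn main() {
    println!("{}", handle(&Msg { foo: Some(42), bar: None }));
}
```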
## To empty list/record or not to
Something to think about is whether records and lists should be defined
to always contain at least one element.
As it stands, JSON has multiple ways of expressing the “empty value”:
* `null`
* `[]`
* `{}`
* `""`
* *leave out the field*
and two of those come from the possibility of having empty structured values.
## Representations of this language
This line of thought originally fell out of me designing [`netencode`](https://code.tvl.fyi/tree/users/Profpatsch/netencode/README.md)
as a small human-debuggable format for pipeline serialization.
In addition to the concepts mentioned here (especially tags),
it provides a better set of scalars than JSON (specifically arbitrary bytestrings),
but it cannot practically be written or modified by hand,
which might be a good thing depending on how you look at it.
---
The way that is compatible with the rest of the ecosystem is probably to use a subset of JSON
to represent our idealized language.
There are multiple ways of encoding tags in JSON, each with their own pros and cons.
The most common is probably the “tag field” variant, where the tag is pulled into the nested record:
```
{
"_tag": "Success",
"result": 42
}
```
This has the advantage that people know how to deal with it and that it’s easy to “just add another field”,
plus it is backward-compatible when you had a record in the first place.
It has multiple disadvantages however:
* If your value wasnt a record (e.g. an int) before, you have to put it in a record and assign an arbitrary name to its field
* People are not forced to “unwrap” the tag first, so they are going to forget to check it
* The magic “_tag” name cannot be used by any of the records fields
An in-between version of this with fewer downsides is to always push a JSON record onto the stack:
```
{
"tag": "Success",
"value": {
"result": 42
}
}
```
This makes it harder for people to miss checking the `tag`, but still possible of course.
It also makes it easily possible to inspect the contents of `value` without knowing the
exhaustive list of `tag`s, which can be useful in practice (though often not sound!).
It also gets rid of the “_tag” field name clash problem.
Disadvantages:
* Breaks the backwards-compatibility with an existing record-based approach if you want to introduce `tag`s
* Verbosity of representation
* hard to distinguish a record with the `tag` and `value` fields from a `tag`ed value (though you know the type layout of your data on a higher level, don’t you? ;) )
The final, “most pure” representation is the one I gave in the original introduction:
```
{
"Success": {
"result": 42
}
}
```
Now you *have* to match on the `tag` name first, before you can actually access your data,
and its less verbose than the above representation.
Disadvantages:
* You also have to *know* what `tag`s to expect; it’s harder to query, because you need to extract the keys and values from the dict and then take the first one.
* Doing a “tag backwards compat” check is harder,
because you cant just check whether `_tag` or `tag`/`value` are the keys in the dict.
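All three representations exist in the wild; Rust’s serde, for example, calls them “externally”, “internally”, and “adjacently” tagged enums. A small sketch of the mapping (assuming `serde` with the derive feature and `serde_json` as dependencies):
```rust
use serde::Serialize;

// Externally tagged (serde’s default) = the “most pure” representation:
//   {"Success":{"result":42}}
#[derive(Serialize)]
enum External {
    Error { error_message: String },
    Success { result: i64 },
}

// Internally tagged = the “tag field” variant:
//   {"_tag":"Success","result":42}
#[derive(Serialize)]
#[serde(tag = "_tag")]
enum Internal {
    Error { error_message: String },
    Success { result: i64 },
}

// Adjacently tagged = the in-between “tag”/“value” variant:
//   {"tag":"Success","value":{"result":42}}
#[derive(Serialize)]
#[serde(tag = "tag", content = "value")]
enum Adjacent {
    Error { error_message: String },
    Success { result: i64 },
}

fn main() {
    println!("{}", serde_json::to_string(&External::Success { result: 42 }).unwrap());
    println!("{}", serde_json::to_string(&Internal::Success { result: 42 }).unwrap());
    println!("{}", serde_json::to_string(&Adjacent::Success { result: 42 }).unwrap());
}
```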

View file

@ -1,33 +0,0 @@
tags: linux
date: 2020-01-25
certainty: likely
status: initial
title: Preventing out-of-memory (OOM) errors on Linux
# Preventing out-of-memory (OOM) errors on Linux
I’ve been running out of memory more and more often lately. I don’t use any swap space because I am of the opinion that 16GB of memory should be sufficient for most daily and professional tasks. That is generally true, but sometimes a runaway process fills my memory. Emacs is very good at doing this, for example: it is prone to filling your RAM when you open JSON files with very long lines.
In theory, the kernel OOM killer should come in and save the day, but the Linux OOM killer is notorious for being extremely … conservative. It will try to free every internal structure it can before even thinking about touching any userspace processes. By that point, the desktop has usually been unresponsive for minutes.
Luckily the kernel provides memory statistics for the whole system, as well as for single processes, and the [`earlyoom`](https://github.com/rfjakob/earlyoom) tool uses those to keep memory usage under a certain limit. It will start killing processes, “heaviest” first, until the given upper memory limit is satisfied again.
On NixOS, I set:
```nix
{
services.earlyoom = {
enable = true;
freeMemThreshold = 5; # <5% free
};
}
```
and after activation, this simple test shows whether the daemon is working:
```shell
$ tail /dev/zero
fish: “tail /dev/zero” terminated by signal SIGTERM (Polite quit request)
```
`tail /dev/zero` searches for the last line of the file `/dev/zero`, and since it cannot know that there is no next line and no end to the stream of `\0` this file produces, it will fill the RAM as quickly as physically possible. Before it can fill it completely, `earlyoom` recognizes that the limit was breached, singles out the `tail` command as the process using the most memory, and sends it a `SIGTERM`.

View file

@ -1,46 +0,0 @@
# Private bittorrent trackers are markets
Private bittorrent trackers have a currency called ratio,
which is the bits you upload divided by the bits you download.
You have to keep the ratio above a certain lower limit,
otherwise you get banned from the market or have to cut a deal with the moderators → bankruptcy
New liquidity (?) is introduced to the market by so-called “freeleech” events or tokens,
which essentially allow you to exchange a token (or some time in the case of time-restricted freeleech)
for some data, which can then be seeded to generate future profits without spending ratio.
Sometimes, ratio is pulled from the market by allowing users to exchange it for website perks,
like forum titles or other benefits like chat-memberships. This has a deflationary effect.
It could be compared to “vanity items” in MMOs, which dont grant a mechanical advantage in the market.
Is there a real-world equivalent? i.e. allowing rich people to exchange some of their worth
for vanity items instead of investing it for future gain?
Sometimes, ratio can be traded for more than just transferred bits,
for example by requesting a torrent for a certain album or movie,
paying some ratio for the fulfillment of the request.
---
Based on how bittorrent works, usually multiple people “seed” a torrent.
This means multiple people can answer a request for trading ratio.
Part of the request (e.g. the first 30% of a movie)
can be fulfilled by one party, part of it by a second or even more parties.
For small requests (e.g. albums), often the time between announcing the trade
and filling the trade is important for who is able to fill it.
Getting a 1 second head-start vastly increases your chance of a handshake
and starting the transmission, so on average you get a vastly higher ratio gain from that torrent.
Meaning that using a bittorrent client which is fast to answer as a seeder will lead to better outcomes.
This could be compared to mechanisms seen in high-speed trading.
---
Of course these market-mechanisms are in service of a wider policy goal,
which is to ensure the constant availability of as much high-quality data as possible.
There are more mechanisms at play on these trackers that all contribute to this goal
(possible keywords to research: trumping, freeleech for underseeded torrents).
In general, it is important to remember that markets are only a tool,
never an end in themselves, as neoliberalists would like us to believe.
They always are in service of a wider goal or policy. We live in a society.

View file

@ -1,53 +0,0 @@
# Converting between different String types in Rust
```
let s: String = ...
let st: &str = ...
let u: &[u8] = ...
let b: [u8; 3] = *b"foo"
let v: Vec<u8> = ...
let os: OsString = ...
let ost: OsStr = ...
From To Use Comment
---- -- --- -------
&str -> String String::from(st)
&str -> &[u8] st.as_bytes()
&str -> Vec<u8> st.as_bytes().to_owned() via &[u8]
&str -> &OsStr OsStr::new(st)
String -> &str &s alt. s.as_str()
String -> &[u8] s.as_bytes()
String -> Vec<u8> s.into_bytes()
String -> OsString OsString::from(s)
&[u8] -> &str str::from_utf8(u).unwrap()
&[u8] -> String String::from_utf8(u.to_vec()).unwrap() via Vec<u8>
&[u8] -> Vec<u8> u.to_owned()
&[u8] -> &OsStr OsStr::from_bytes(u) use std::os::unix::ffi::OsStrExt;
[u8; 3] -> &[u8] &b[..] byte literal
[u8; 3] -> &[u8] "foo".as_bytes() alternative via utf8 literal
Vec<u8> -> &str str::from_utf8(&v).unwrap() via &[u8]
Vec<u8> -> String String::from_utf8(v).unwrap()
Vec<u8> -> &[u8] &v
Vec<u8> -> OsString OsString::from_vec(v) use std::os::unix::ffi::OsStringExt;
&OsStr -> &str ost.to_str().unwrap()
&OsStr -> String ost.to_os_string().into_string() via OsString
.unwrap()
&OsStr -> Cow<str> ost.to_string_lossy() Unicode replacement characters
&OsStr -> OsString ost.to_os_string()
&OsStr -> &[u8] ost.as_bytes() use std::os::unix::ffi::OsStrExt;
OsString -> String os.into_string().unwrap() returns original OsString on failure
OsString -> &str os.to_str().unwrap()
OsString -> &OsStr os.as_os_str()
OsString -> Vec<u8> os.into_vec() use std::os::unix::ffi::OsStringExt;
```
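A few of the rows above as a self-contained sanity check (Unix-only, because of the `OsStringExt` conversions):
```rust
use std::ffi::OsString;
use std::os::unix::ffi::OsStringExt;

fn main() {
    let st: &str = "foo";
    let s: String = String::from(st); // &str -> String
    let u: &[u8] = st.as_bytes(); // &str -> &[u8]
    let v: Vec<u8> = u.to_owned(); // &[u8] -> Vec<u8>
    assert_eq!(std::str::from_utf8(u).unwrap(), st); // &[u8] -> &str
    assert_eq!(String::from_utf8(v.clone()).unwrap(), s); // Vec<u8> -> String
    let os: OsString = OsString::from_vec(v); // Vec<u8> -> OsString
    assert_eq!(os.into_string().unwrap(), "foo"); // OsString -> String
}
```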
## Source
Original source is [this document on Pastebin](https://web.archive.org/web/20190710121935/https://pastebin.com/Mhfc6b9i)

View file

@ -1,123 +0,0 @@
title: Ligature Emulation in Emacs
date: 2017-05-04
Monday was (yet another)
[NixOS hackathon][hackathon] at [OpenLab Augsburg][ola].
[Maximilian][mhuber] was there and to my amazement
he got working ligatures in his Haskell files in Emacs! Ever since Hasklig
updated its format to use ligatures and private Unicode code points a while ago,
the hack I had used in my config stopped working.
Encouraged by that I decided to take a look on Tuesday. Long story short, I was
able to [get it working in a pretty satisfying way][done].
[hackathon]: https://www.meetup.com/Munich-NixOS-Meetup/events/239077247/
[mhuber]: https://github.com/maximilianhuber
[ola]: https://openlab-augsburg.de
[done]: https://github.com/i-tu/Hasklig/issues/84#issuecomment-298803495
What’s left to do is to package it into a module and push it to MELPA.
### elisp still sucks, but it’s bearable, sometimes
Im the kind of person who, when trying to fix something elisp related, normally
gives up two hours later and three macro calls deep. Yes, homoiconic,
non-lexically-scoped, self-rewriting code is not exactly my fetish.
This time the task and the library (`prettify-symbols-mode`) were simple enough
for that to not happen.
Some interesting technical trivia:
- elisp literal character syntax is `?c`. `?\t` is the tab character
- You join characters by `(string c1 c2 c3 ...)`
- [dash.el][dash] is pretty awesome and does what a functional programmer
expects. Also, Rainbow Dash.
- Hasklig and FiraCode multi-column symbols actually [only occupy one column, on
the far right of the glyph][glyph]. `my-correct-symbol-bounds` fixes emacs
rendering in that case.
[dash]: https://github.com/magnars/dash.el
[glyph]: https://github.com/tonsky/FiraCode/issues/211#issuecomment-239082368
## Appendix A
For reference, here’s the complete code as it stands now. Feel free to paste
it into your config; let’s make it [MIT][mit]. Maybe link to this site, in case there are
updates.
[mit]: https://opensource.org/licenses/MIT
```elisp
(defun my-correct-symbol-bounds (pretty-alist)
"Prepend a TAB character to each symbol in this alist,
this way compose-region called by prettify-symbols-mode
will use the correct width of the symbols
instead of the width measured by char-width."
(mapcar (lambda (el)
(setcdr el (string ?\t (cdr el)))
el)
pretty-alist))
(defun my-ligature-list (ligatures codepoint-start)
"Create an alist of strings to replace with
codepoints starting from codepoint-start."
(let ((codepoints (-iterate '1+ codepoint-start (length ligatures))))
(-zip-pair ligatures codepoints)))
; list can be found at https://github.com/i-tu/Hasklig/blob/master/GlyphOrderAndAliasDB#L1588
(setq my-hasklig-ligatures
(let* ((ligs '("&&" "***" "*>" "\\\\" "||" "|>" "::"
"==" "===" "==>" "=>" "=<<" "!!" ">>"
">>=" ">>>" ">>-" ">-" "->" "-<" "-<<"
"<*" "<*>" "<|" "<|>" "<$>" "<>" "<-"
"<<" "<<<" "<+>" ".." "..." "++" "+++"
"/=" ":::" ">=>" "->>" "<=>" "<=<" "<->")))
(my-correct-symbol-bounds (my-ligature-list ligs #Xe100))))
;; nice glyphs for haskell with hasklig
(defun my-set-hasklig-ligatures ()
"Add hasklig ligatures for use with prettify-symbols-mode."
(setq prettify-symbols-alist
(append my-hasklig-ligatures prettify-symbols-alist))
(prettify-symbols-mode))
(add-hook 'haskell-mode-hook 'my-set-hasklig-ligatures)
```
## Appendix B (Update 1): FiraCode integration
I also created a mapping for [FiraCode][fira]. You need to grab the [additional
symbol font][symbol] that adds (most) ligatures to the unicode private use area.
Consult your system documentation on how to add it to your font cache.
Next add `"Fira Code"` and `"Fira Code Symbol"` to your font preferences. Symbol
only contains the additional characters, so you need both.
If you are on NixOS, the font package should be on the main branch shortly, [I
added a package][symbol-pkg].
[fira]: https://github.com/tonsky/FiraCode/
[symbol]: https://github.com/tonsky/FiraCode/issues/211#issuecomment-239058632
[symbol-pkg]: https://github.com/NixOS/nixpkgs/pull/25517
Heres the mapping adjusted for FiraCode:
```elisp
(setq my-fira-code-ligatures
(let* ((ligs '("www" "**" "***" "**/" "*>" "*/" "\\\\" "\\\\\\"
"{-" "[]" "::" ":::" ":=" "!!" "!=" "!==" "-}"
"--" "---" "-->" "->" "->>" "-<" "-<<" "-~"
"#{" "#[" "##" "###" "####" "#(" "#?" "#_" "#_("
".-" ".=" ".." "..<" "..." "?=" "??" ";;" "/*"
"/**" "/=" "/==" "/>" "//" "///" "&&" "||" "||="
"|=" "|>" "^=" "$>" "++" "+++" "+>" "=:=" "=="
"===" "==>" "=>" "=>>" "<=" "=<<" "=/=" ">-" ">="
">=>" ">>" ">>-" ">>=" ">>>" "<*" "<*>" "<|" "<|>"
"<$" "<$>" "<!--" "<-" "<--" "<->" "<+" "<+>" "<="
"<==" "<=>" "<=<" "<>" "<<" "<<-" "<<=" "<<<" "<~"
"<~~" "</" "</>" "~@" "~-" "~=" "~>" "~~" "~~>" "%%"
"x" ":" "+" "+" "*")))
(my-correct-symbol-bounds (my-ligature-list ligs #Xe100))))
```

View file

@ -1,14 +0,0 @@
packages:
./my-prelude/my-prelude.cabal
./my-webstuff/my-webstuff.cabal
./netencode/netencode.cabal
./arglib/arglib-netencode.cabal
./execline/exec-helpers.cabal
./htmx-experiment/htmx-experiment.cabal
./cas-serve/cas-serve.cabal
./jbovlaste-sqlite/jbovlaste-sqlite.cabal
./whatcd-resolver/whatcd-resolver.cabal
./openlab-tools/openlab-tools.cabal
./httzip/httzip.cabal
./my-xmonad/my-xmonad.cabal
./my-tools/my-tools.cabal

View file

@ -1,247 +0,0 @@
{-# LANGUAGE QuasiQuotes #-}
{-# OPTIONS_GHC -Wno-orphans #-}
module Main where
import ArglibNetencode (arglibNetencode)
import Control.Applicative
import Control.Monad.Reader
import Crypto.Hash qualified as Crypto
import Data.ByteArray qualified as ByteArray
import Data.ByteString.Lazy qualified as ByteString.Lazy
import Data.ByteString.Lazy qualified as Lazy
import Data.Functor.Compose
import Data.Int (Int64)
import Data.List qualified as List
import Data.Text qualified as Text
import Data.Text.IO qualified as Text
import Database.SQLite.Simple (NamedParam ((:=)))
import Database.SQLite.Simple qualified as Sqlite
import Database.SQLite.Simple.FromField qualified as Sqlite
import Database.SQLite.Simple.QQ qualified as Sqlite
import Label
import Netencode.Parse qualified as Net
import Network.HTTP.Types qualified as Http
import Network.Wai qualified as Wai
import Network.Wai.Handler.Warp qualified as Warp
import PossehlAnalyticsPrelude
import System.IO (stderr)
parseArglib = do
let env = label @"arglibEnvvar" "CAS_SERVE_ARGS"
let asApi =
Net.asRecord >>> do
address <- label @"bindToAddress" <$> (Net.key "bindToAddress" >>> Net.asText)
port <- label @"port" <$> (Net.key "port" >>> Net.asText)
pure (T2 address port)
arglibNetencode "cas-serve" (Just env)
<&> Net.runParse
[fmt|Cannot parse arguments in "{env.arglibEnvvar}"|]
( Net.asRecord >>> do
publicApi <- label @"publicApi" <$> (Net.key "publicApi" >>> asApi)
privateApi <- label @"privateApi" <$> (Net.key "privateApi" >>> asApi)
pure $ T2 publicApi privateApi
)
main :: IO ()
main = do
withEnv $ \env ->
Warp.runSettings
(Warp.defaultSettings & Warp.setPort 7070)
(api env)
withEnv :: (Env -> IO a) -> IO a
withEnv inner = do
withSqlite "./data.sqlite" $ \envData -> do
withSqlite "./wordlist.sqlite" $ \envWordlist -> inner Env {..}
withSqlite :: String -> (Sqlite.Connection -> IO a) -> IO a
withSqlite fileName inner = Sqlite.withConnection fileName $ \conn -> do
Sqlite.setTrace conn (Just (\msg -> Text.hPutStrLn stderr [fmt|{fileName}: {msg}|]))
Sqlite.execute conn [Sqlite.sql|PRAGMA foreign_keys = ON|] ()
inner conn
api :: Env -> Wai.Application
api env req respond = do
case runHandler (getById <|> insertById) req env of
Nothing -> respond $ Wai.responseLBS Http.status404 [] "endpoint does not exist."
Just handler' -> do
handler' >>= \case
Left (status, err) -> respond $ Wai.responseLBS status [] (err & toLazyBytes)
Right (headers, body) ->
respond $
Wai.responseLBS
Http.status200
headers
(body & toLazyBytes)
data Env = Env
{ envWordlist :: Sqlite.Connection,
envData :: Sqlite.Connection
}
-- | I dont need any fancy routing in this, so a handler is just something that returns a @Just (IO a)@ if it wants to handle the request.
newtype Handler a
= Handler (ReaderT (Wai.Request, Env) (Compose Maybe IO) a)
deriving newtype (Functor, Applicative, Alternative)
handler :: ((Wai.Request, Env) -> Maybe (IO a)) -> Handler a
handler f = Handler (ReaderT (Compose . f))
runHandler :: Handler a -> Wai.Request -> Env -> Maybe (IO a)
runHandler (Handler handler') req env = getCompose $ handler' & (\readerT -> runReaderT readerT (req, env))
getById ::
Handler
( Either
(Http.Status, ByteString)
([(Http.HeaderName, ByteString)], ByteString)
)
getById = handler $ \(req, env) -> do
guard ((req & Wai.requestMethod) == Http.methodGet)
case req & Wai.pathInfo of
["v0", "by-id", filename] -> Just $ do
Sqlite.queryNamed
@( T3
"mimetype"
Text
"content"
ByteString
"size"
Int
)
(env.envData)
[Sqlite.sql|
SELECT
mimetype,
cast (content AS blob) as content,
size
FROM file_content
JOIN file_references
ON file_references.file_content = file_content.hash_sha256
WHERE
file_references.reference_type = 'by-id'
AND (file_references.name || file_references.extension) = :filename
|]
[":filename" Sqlite.:= filename]
<&> \case
[] -> Left (Http.status404, "File not found.")
[res] ->
Right
( [ ("Content-Type", res.mimetype & textToBytesUtf8),
("Content-Length", res.size & showToText & textToBytesUtf8)
],
-- TODO: should this be lazy/streamed?
res.content
)
_more -> Left "file_references must be unique (in type and name)" & unwrapError
_ -> Nothing
insertById :: Handler (Either a ([(Http.HeaderName, ByteString)], ByteString))
insertById = handler $ \(req, env) -> do
guard ((req & Wai.requestMethod) == Http.methodPost)
case req & Wai.pathInfo of
["v0", "by-id"] -> Just $ do
let maybeText bytes = case bytesToTextUtf8 bytes of
Left _err -> Nothing
Right t -> Just t
let mimeType =
( (req & Wai.requestHeaders & List.lookup "X-Cas-Serve-Mimetype" >>= maybeText)
<|> (req & Wai.requestHeaders & List.lookup "Content-Type" >>= maybeText)
)
& fromMaybe "application/octet-stream"
let magicFileEnding mimeType' = case Text.split (== '/') mimeType' of
[_, ""] -> Nothing
["", _] -> Nothing
[_, "any"] -> Nothing
["image", ty] -> Just (Text.cons '.' ty)
["video", ty] -> Just (Text.cons '.' ty)
["text", "plain"] -> Just ".txt"
["text", "html"] -> Just ".html"
["application", "pdf"] -> Just ".pdf"
["application", "json"] -> Just ".json"
_ -> Nothing
let extension =
( (req & Wai.requestHeaders & List.lookup "X-Cas-Serve-FileExtension" >>= maybeText)
<|> ( (req & Wai.requestHeaders & List.lookup "Content-Type")
>>= maybeText
>>= magicFileEnding
)
)
-- Just the empty extension if we cant figure it out.
& fromMaybe ""
body <- Wai.consumeRequestBodyStrict req
let hash :: Crypto.Digest Crypto.SHA256 = Crypto.hashlazy body
let hashBytes = hash & ByteArray.convert @(Crypto.Digest Crypto.SHA256) @ByteString
let len = ByteString.Lazy.length body
name <- getNameFromWordlist env
let fullname = name <> extension
let conn = env.envData
Sqlite.withTransaction conn $ do
Sqlite.executeNamed
conn
[Sqlite.sql|
INSERT INTO file_content
(content, hash_sha256, size)
VALUES
(:content, :hash_sha256, :size)
ON CONFLICT (hash_sha256) DO NOTHING
|]
[ ":content" := (body :: Lazy.ByteString),
":hash_sha256" := (hashBytes :: ByteString),
":size" := (len :: Int64)
]
-- TODO: we are not checking if the name already exists,
-- we just assume that 1633^3 is enough to not get any collisions for now.
-- If the name exists, the user gets a 500.
Sqlite.executeNamed
conn
[Sqlite.sql|
INSERT INTO file_references
(file_content, reference_type, name, extension, mimetype)
VALUES
(:file_content, :reference_type, :name, :extension, :mimetype)
|]
[ ":file_content" := (hashBytes :: ByteString),
":reference_type" := ("by-id" :: Text),
":name" := name,
":extension" := (extension :: Text),
":mimetype" := (mimeType :: Text)
]
pure $
Right
( [("Content-Type", "text/plain")],
[fmt|/v0/by-id/{fullname}|]
)
_ -> Nothing
-- | Get a random name from the wordlist, i.e. three words connected by @-@.
getNameFromWordlist :: Env -> IO Text
getNameFromWordlist env =
do
let numberOfWords = 3 :: Int
Sqlite.queryNamed @(Sqlite.Only Text)
(env.envWordlist)
[Sqlite.sql|SELECT word FROM wordlist ORDER BY RANDOM() LIMIT :words|]
[":words" Sqlite.:= numberOfWords]
<&> map Sqlite.fromOnly
<&> Text.intercalate "-"
-- | We can use a Rec with a named list of types to parse a returned row from sqlite!!
instance
( Sqlite.FromField t1,
Sqlite.FromField t2,
Sqlite.FromField t3
) =>
Sqlite.FromRow (T3 l1 t1 l2 t2 l3 t3)
where
fromRow = do
T3
<$> (label @l1 <$> Sqlite.field)
<*> (label @l2 <$> Sqlite.field)
<*> (label @l3 <$> Sqlite.field)

View file

@ -1,73 +0,0 @@
cabal-version: 3.0
name: cas-serve
version: 0.1.0.0
author: Profpatsch
maintainer: mail@profpatsch.de
common common-options
ghc-options:
-Wall
-Wno-type-defaults
-Wunused-packages
-Wredundant-constraints
-fwarn-missing-deriving-strategies
-- See https://downloads.haskell.org/ghc/latest/docs/users_guide/exts.html
-- for a description of all these extensions
default-extensions:
-- Infer Applicative instead of Monad where possible
ApplicativeDo
-- Allow literal strings to be Text
OverloadedStrings
-- Syntactic sugar improvements
LambdaCase
MultiWayIf
-- Makes the (deprecated) usage of * instead of Data.Kind.Type an error
NoStarIsType
-- Convenient and crucial to deal with ambiguous field names, commonly
-- known as RecordDotSyntax
OverloadedRecordDot
-- does not export record fields as functions, use OverloadedRecordDot to access instead
NoFieldSelectors
-- Record punning
RecordWildCards
-- Improved Deriving
DerivingStrategies
DerivingVia
-- Type-level strings
DataKinds
-- to enable the `type` keyword in import lists (ormolu uses this automatically)
ExplicitNamespaces
default-language: GHC2021
executable cas-serve
import: common-options
main-is: CasServe.hs
build-depends:
base >=4.15 && <5,
pa-prelude,
pa-label,
arglib-netencode,
netencode,
text,
sqlite-simple,
http-types,
wai,
warp,
mtl,
bytestring,
memory,
crypton,

View file

@ -1,38 +0,0 @@
{ depot, pkgs, lib, ... }:
let
bins = depot.nix.getBins pkgs.sqlite [ "sqlite3" ];
cas-serve = pkgs.haskellPackages.mkDerivation {
pname = "cas-serve";
version = "0.1.0";
src = depot.users.Profpatsch.exactSource ./. [
./cas-serve.cabal
./CasServe.hs
];
libraryHaskellDepends = [
pkgs.haskellPackages.pa-prelude
pkgs.haskellPackages.pa-label
pkgs.haskellPackages.crypton
pkgs.haskellPackages.wai
pkgs.haskellPackages.warp
pkgs.haskellPackages.sqlite-simple
depot.users.Profpatsch.arglib.netencode.haskell
depot.users.Profpatsch.netencode.netencode-hs
];
isExecutable = true;
isLibrary = false;
license = lib.licenses.mit;
};
create-cas-database = depot.nix.writeExecline "create-cas-database" { readNArgs = 1; } [
bins.sqlite3
"$1"
"-init"
./schema.sql
];
in
cas-serve

View file

@ -1,38 +0,0 @@
-- SQLite
.dump
PRAGMA foreign_keys = ON;
BEGIN transaction;
create table if not exists file_content (
content blob NOT NULL,
hash_sha256 blob PRIMARY KEY,
size integer NOT NULL
) WITHOUT ROWID;
create table if not exists file_references (
rowid integer PRIMARY KEY,
file_content NOT NULL REFERENCES file_content ON DELETE CASCADE,
reference_type text NOT NULL,
name text NOT NULL,
extension text NOT NULL,
mimetype text NOT NULL
);
create unique index if not exists file_references_type_name_unique on file_references (reference_type, name);
-- insert into file_content values ('mycontent', 'myhash', 9);
-- insert into file_references values (NULL, 'myhash', 'by-id', 'myschranz', '.txt', 'text/plain');
-- insert into file_content values (readfile('/home/philip/Pictures/screenshot.png'), 'anotherhash', 999);
-- insert into file_references values (NULL, 'anotherhash', 'by-id', 'img', '.png', 'image/png');
select * from file_content;
select * from file_references;
COMMIT;
-- drop table file_content;
-- drop table file_references;

File diff suppressed because one or more lines are too long

View file

@ -1,93 +0,0 @@
{ depot, pkgs, ... }:
let
cdbListToNetencode = depot.nix.writers.rustSimple
{
name = "cdb-list-to-netencode";
dependencies = [
depot.third_party.rust-crates.nom
depot.users.Profpatsch.execline.exec-helpers
depot.users.Profpatsch.netencode.netencode-rs
];
} ''
extern crate nom;
extern crate exec_helpers;
extern crate netencode;
use std::collections::HashMap;
use std::io::BufRead;
use nom::{IResult};
use nom::sequence::{tuple};
use nom::bytes::complete::{tag, take};
use nom::character::complete::{digit1, char};
use nom::error::{context, ErrorKind, ParseError};
use nom::combinator::{map_res};
use netencode::{T, Tag};
fn usize_t(s: &[u8]) -> IResult<&[u8], usize> {
context(
"usize",
map_res(
map_res(digit1, |n| std::str::from_utf8(n)),
|s| s.parse::<usize>())
)(s)
}
fn parse_cdb_record(s: &[u8]) -> IResult<&[u8], (&[u8], &[u8])> {
let (s, (_, klen, _, vlen, _)) = tuple((
char('+'),
usize_t,
char(','),
usize_t,
char(':')
))(s)?;
let (s, (key, _, val)) = tuple((
take(klen),
tag("->"),
take(vlen),
))(s)?;
Ok((s, (key, val)))
}
fn main() {
let mut res = vec![];
let stdin = std::io::stdin();
let mut lines = stdin.lock().split(b'\n');
loop {
match lines.next() {
None => exec_helpers::die_user_error("cdb-list-to-netencode", "stdin ended but we didnt receive the empty line to signify the end of the cdbdump input!"),
Some(Err(err)) => exec_helpers::die_temporary("cdb-list-to-netencode", format!("could not read from stdin: {}", err)),
Some(Ok(line)) =>
if &line == b"" {
// the cdbdump input ends after an empty line (double \n)
break;
} else {
match parse_cdb_record(&line) {
Ok((b"", (key, val))) => {
let (key, val) = match
std::str::from_utf8(key)
.and_then(|k| std::str::from_utf8(val).map(|v| (k, v))) {
Ok((key, val)) => (key.to_owned(), val.to_owned()),
Err(err) => exec_helpers::die_user_error("cdb-list-to-netencode", format!("cannot decode line {:?}, we only support utf8-encoded key/values pairs for now: {}", String::from_utf8_lossy(&line), err)),
};
let _ = res.push((key, val));
},
Ok((_rest, _)) => exec_helpers::die_user_error("cdb-list-to-netencode", format!("could not decode record line {:?}, had some trailing bytes", String::from_utf8_lossy(&line))),
Err(err) => exec_helpers::die_user_error("cdb-list-to-netencode", format!("could not decode record line {:?}: {:?}", String::from_utf8_lossy(&line), err)),
}
}
}
}
let list = T::List(res.into_iter().map(
|(k, v)| T::Record(vec![(String::from("key"), T::Text(k)), (String::from("val"), T::Text(v))].into_iter().collect())
).collect());
netencode::encode(&mut std::io::stdout(), &list.to_u());
}
'';
in
{
inherit
cdbListToNetencode
;
}

View file

@ -1,14 +0,0 @@
{
"extends": ["eslint:recommended", "plugin:@typescript-eslint/strict-type-checked"],
"parser": "@typescript-eslint/parser",
"plugins": ["@typescript-eslint"],
"parserOptions": {
"project": true
},
"root": true,
"rules": {
"no-unused-vars": "warn",
"prefer-const": "warn",
"@typescript-eslint/no-unused-vars": "warn"
}
}

View file

@ -1,6 +0,0 @@
/node_modules/
/.ninja/
/output/
# ignore for now
/package.lock.json

View file

@ -1,8 +0,0 @@
{
"trailingComma": "all",
"tabWidth": 2,
"semi": true,
"singleQuote": true,
"printWidth": 100,
"arrowParens": "avoid"
}

View file

@ -1,4 +0,0 @@
# Decentralized Library
https://en.wikipedia.org/wiki/Distributed_library
https://faculty.ist.psu.edu/jjansen/academic/pubs/ride98/ride98.html

View file

@ -1,16 +0,0 @@
builddir = .ninja
outdir = ./output
jsdir = $outdir/js
rule tsc
command = node_modules/.bin/tsc
build $outdir/index.js: tsc | index.ts tsconfig.json
rule run
command = node $in
build run: run $outdir/index.js
pool = console

View file

@ -1,245 +0,0 @@
import generator, { MegalodonInterface } from 'megalodon';
import { Account } from 'megalodon/lib/src/entities/account';
import * as masto from 'megalodon/lib/src/entities/notification';
import { Status } from 'megalodon/lib/src/entities/status';
import * as rxjs from 'rxjs';
import { Observable } from 'rxjs';
import { NodeEventHandler } from 'rxjs/internal/observable/fromEvent';
import * as sqlite from 'sqlite';
import sqlite3 from 'sqlite3';
import * as parse5 from 'parse5';
import { mergeMap } from 'rxjs/operators';
type Events =
| { type: 'connect'; event: [] }
| { type: 'update'; event: Status }
| { type: 'notification'; event: Notification }
| { type: 'delete'; event: number }
| { type: 'error'; event: Error }
| { type: 'heartbeat'; event: [] }
| { type: 'close'; event: [] }
| { type: 'parser-error'; event: Error };
type Notification = masto.Notification & {
type: 'favourite' | 'reblog' | 'status' | 'mention' | 'poll' | 'update';
status: NonNullable<masto.Notification['status']>;
account: NonNullable<masto.Notification['account']>;
};
class Main {
private client: MegalodonInterface;
private socket: Observable<Events>;
private state!: State;
private config: {
databaseFile?: string;
baseServer: string;
};
private constructor() {
this.config = {
databaseFile: process.env['DECLIB_DATABASE_FILE'],
baseServer: process.env['DECLIB_MASTODON_SERVER'] ?? 'mastodon.xyz',
};
const ACCESS_TOKEN = process.env['DECLIB_MASTODON_ACCESS_TOKEN'];
if (!ACCESS_TOKEN) {
console.error('Please set DECLIB_MASTODON_ACCESS_TOKEN');
process.exit(1);
}
this.client = generator('mastodon', `https://${this.config.baseServer}`, ACCESS_TOKEN);
const websocket = this.client.publicSocket();
function mk<Name extends string, Type>(name: Name): Observable<{ type: Name; event: Type }> {
const wrap =
(h: NodeEventHandler) =>
(event: Type): void => {
h({ type: name, event });
};
return rxjs.fromEventPattern<{ type: Name; event: Type }>(
hdl => websocket.on(name, wrap(hdl)),
hdl => websocket.removeListener(name, wrap(hdl)),
);
}
this.socket = rxjs.merge(
mk<'connect', []>('connect'),
mk<'update', Status>('update'),
mk<'notification', Notification>('notification'),
mk<'delete', number>('delete'),
mk<'error', Error>('error'),
mk<'heartbeat', []>('heartbeat'),
mk<'close', []>('close'),
mk<'parser-error', Error>('parser-error'),
);
}
static async init(): Promise<Main> {
const self = new Main();
self.state = await State.init(self.config);
return self;
}
public main() {
// const res = await this.getAcc({ username: 'grindhold', server: 'chaos.social' });
// const res = await this.getAcc({ username: 'Profpatsch', server: 'mastodon.xyz' });
// const res = await this.getStatus('111862170899069698');
this.socket
.pipe(
mergeMap(async event => {
switch (event.type) {
case 'update': {
await this.state.addStatus(event.event);
console.log(`${event.event.account.acct}: ${event.event.content}`);
console.log(await this.state.databaseInternal.all(`SELECT * from status`));
break;
}
case 'notification': {
console.log(`NOTIFICATION (${event.event.type}):`);
console.log(event.event);
console.log(event.event.status.content);
const content = parseContent(event.event.status.content);
if (content) {
switch (content.command) {
case 'addbook': {
if (content.content[0]) {
const book = {
$owner: event.event.account.acct,
$bookid: content.content[0],
};
console.log('adding book', book);
await this.state.addBook(book);
await this.client.postStatus(
`@${event.event.account.acct} I have inserted book "${book.$bookid}" for you.`,
{
in_reply_to_id: event.event.status.id,
visibility: 'direct',
},
);
}
}
}
}
break;
}
default: {
console.log(event);
}
}
}),
)
.subscribe();
}
private async getStatus(id: string): Promise<Status | null> {
return (await this.client.getStatus(id)).data;
}
private async getAcc(user: { username: string; server: string }): Promise<Account | null> {
const fullAccount = `${user.username}@${user.server}`;
const res = await this.client.searchAccount(fullAccount, {
limit: 10,
});
const accs = res.data.filter(acc =>
this.config.baseServer === user.server
? acc.acct === user.username
: acc.acct === fullAccount,
);
return accs[0] ?? null;
}
}
type Interaction = {
originalStatus: { id: string };
lastStatus: { id: string };
};
class State {
db!: sqlite.Database;
private constructor() {}
static async init(config: { databaseFile?: string }): Promise<State> {
const s = new State();
s.db = await sqlite.open({
filename: config.databaseFile ?? ':memory:',
driver: sqlite3.Database,
});
await s.db.run('CREATE TABLE books (owner text, bookid text)');
await s.db.run('CREATE TABLE status (id text primary key, content json)');
return s;
}
async addBook(opts: { $owner: string; $bookid: string }) {
return await this.db.run('INSERT INTO books (owner, bookid) VALUES ($owner, $bookid)', opts);
}
async addStatus($status: Status) {
return await this.db.run(
`
INSERT INTO status (id, content) VALUES ($id, $status)
ON CONFLICT (id) DO UPDATE SET id = $id, content = $status
`,
{
$id: $status.id,
$status: JSON.stringify($status),
},
);
}
get databaseInternal() {
return this.db;
}
}
/** Parse the message; take the plain text, the first line is the command and any successive lines are content */
function parseContent(html: string): { command: string; content: string[] } | null {
const plain = contentToPlainText(html).split('\n');
if (plain[0]) {
return { command: plain[0].replace(' ', '').trim(), content: plain.slice(1) };
} else {
return null;
}
}
/** Convert the HTML content to plain text (best effort), keeping line breaks */
function contentToPlainText(html: string): string {
const queue: parse5.DefaultTreeAdapterMap['childNode'][] = [];
queue.push(...parse5.parseFragment(html).childNodes);
let res = '';
let endOfP = false;
for (const el of queue) {
switch (el.nodeName) {
case '#text': {
res += (el as parse5.DefaultTreeAdapterMap['textNode']).value;
break;
}
case 'br': {
res += '\n';
break;
}
case 'p': {
if (endOfP) {
res += '\n';
endOfP = false;
}
queue.push(...el.childNodes);
endOfP = true;
break;
}
case 'span': {
break;
}
default: {
console.warn('unknown element in message: ', el);
break;
}
}
}
return res.trim();
}
Main.init().then(
m => {
m.main();
},
rej => {
throw rej;
},
);

View file

@ -1,25 +0,0 @@
{
"name": "declib",
"version": "1.0.0",
"description": "",
"main": "index.ts",
"type": "commonjs",
"scripts": {
"run": "ninja run"
},
"author": "",
"license": "MIT",
"dependencies": {
"megalodon": "^9.2.2",
"parse5": "^7.1.2",
"rxjs": "^7.8.1",
"sqlite": "^5.1.1",
"sqlite3": "^5.1.7"
},
"devDependencies": {
"@typescript-eslint/eslint-plugin": "^6.21.0",
"@typescript-eslint/parser": "^6.21.0",
"eslint": "^8.56.0",
"typescript": "^5.3.3"
}
}

View file

@ -1,25 +0,0 @@
{
"compilerOptions": {
"strict": true,
"module": "NodeNext",
"sourceMap": true,
"outDir": "output",
"target": "ES6",
"lib": [],
"typeRoots": ["node_modules/@types", "shims/@types"],
"moduleResolution": "NodeNext",
// importHelpers & downlevelIteration will reduce the generated javascript for new language features.
// `importHelpers` requires the `tslib` dependency.
// "downlevelIteration": true,
// "importHelpers": true
"noFallthroughCasesInSwitch": true,
"noImplicitOverride": true,
"noImplicitReturns": true,
"noPropertyAccessFromIndexSignature": true,
"noUncheckedIndexedAccess": true,
},
"files": ["index.ts"]
}

View file

@ -1,84 +0,0 @@
let List/map
: ∀(a : Type) → ∀(b : Type) → (a → b) → List a → List b
= λ(a : Type) →
λ(b : Type) →
λ(f : a → b) →
λ(xs : List a) →
List/build
b
( λ(list : Type) →
λ(cons : b → list → list) →
List/fold a xs list (λ(x : a) → cons (f x))
)
let
--| Concatenate a `List` of `List`s into a single `List`
List/concat
: ∀(a : Type) → List (List a) → List a
= λ(a : Type) →
λ(xss : List (List a)) →
List/build
a
( λ(list : Type) →
λ(cons : a → list → list) →
λ(nil : list) →
List/fold
(List a)
xss
list
(λ(xs : List a) → λ(ys : list) → List/fold a xs list cons ys)
nil
)
let
-- Transform a list by applying a function to each element and flattening the results
List/concatMap
: ∀(a : Type) → ∀(b : Type) → (a → List b) → List a → List b
= λ(a : Type) →
λ(b : Type) →
λ(f : a → List b) →
λ(xs : List a) →
List/build
b
( λ(list : Type) →
λ(cons : b → list → list) →
List/fold a xs list (λ(x : a) → List/fold b (f x) list cons)
)
let Status = < Empty | NonEmpty : Text >
let
{-|
Transform each value in a `List` to `Text` and then concatenate them with a
separator in between each value
-}
Text/concatMapSep
: ∀(separator : Text) → ∀(a : Type) → (a → Text) → List a → Text
= λ(separator : Text) →
λ(a : Type) →
λ(f : a → Text) →
λ(elements : List a) →
let status =
List/fold
a
elements
Status
( λ(x : a) →
λ(status : Status) →
merge
{ Empty = Status.NonEmpty (f x)
, NonEmpty =
λ(result : Text) →
Status.NonEmpty (f x ++ separator ++ result)
}
status
)
Status.Empty
in merge { Empty = "", NonEmpty = λ(result : Text) → result } status
in { List/map, List/concat, List/concatMap, Text/concatMapSep }

View file

@ -1,5 +0,0 @@
# emacs-tree-sitter-move
An experiment in whether we can implement structural editing in emacs using the tree-sitter parser.
What currently works: loading a tree-sitter grammar, navigating the AST left/right/up/down.

View file

@ -1,3 +0,0 @@
# nothing yet (TODO: expose shell & tool)
{ ... }:
{ }

View file

@ -1,17 +0,0 @@
{ pkgs ? import ../../../third_party { }, ... }:
let
inherit (pkgs) lib;
treeSitterGrammars = pkgs.runCommandLocal "grammars" { } ''
mkdir -p $out/bin
${lib.concatStringsSep "\n"
(lib.mapAttrsToList (name: src: "ln -s ${src}/parser $out/bin/${name}.so") pkgs.tree-sitter.builtGrammars)};
'';
in
pkgs.mkShell {
buildInputs = [
pkgs.tree-sitter.builtGrammars.python
];
TREE_SITTER_GRAMMAR_DIR = treeSitterGrammars;
}

View file

@ -1,14 +0,0 @@
{
"foo": {
"x": [ 1, 2, 3, 4 ],
"bar": "test"
},
"foo": {
"x": [ 1, 2, 3, 4 ],
"bar": "test"
},
"foo": {
"x": [ 1, 2, 3, 4 ],
"bar": "test"
}
}

View file

@ -1,13 +0,0 @@
(4 + 5 + 5)
def foo(a, b, c)
def bar(a, b):
4
4
4
[1, 4, 5, 10]
def foo():
pass

View file

@ -1,14 +0,0 @@
function foo () {
local x=123
}
function bar () {
local x=123
}
echo abc def \
gef gef
printf \
"%s\n" \
haha

View file

@ -1,28 +0,0 @@
(defun tree-sitter-load-from-grammar-dir (grammar-dir sym lang-name)
(tree-sitter-load
sym
(format "%s/bin/%s"
(getenv grammar-dir)
lang-name)))
(defun tree-sitter-init-tmp-langs (alist)
(mapcar
(lambda (lang)
(pcase-let ((`(,name ,sym ,mode) lang))
(tree-sitter-load-from-grammar-dir "TREE_SITTER_GRAMMAR_DIR" sym name)
(cons mode sym)))
alist))
(setq tree-sitter-major-mode-language-alist
(tree-sitter-init-tmp-langs
'(("python" python python-mode)
("json" json js-mode)
("bash" bash sh-mode)
)))
(define-key evil-normal-state-map (kbd "C-.") #'tree-sitter-move-reset)
(define-key evil-normal-state-map (kbd "C-<right>") #'tree-sitter-move-right)
(define-key evil-normal-state-map (kbd "C-<left>") #'tree-sitter-move-left)
(define-key evil-normal-state-map (kbd "C-<up>") #'tree-sitter-move-up)
(define-key evil-normal-state-map (kbd "C-<down>") #'tree-sitter-move-down)

View file

@ -1,139 +0,0 @@
;; this is not an actual cursor, just a node.
;; It’s not super efficient, but cursors can’t be *set* to an arbitrary
;; subnode, because they can’t access the parent otherwise.
;; We’d need a way to reset the cursor and walk down to the node?!
(defvar-local tree-sitter-move--cursor nil
"the buffer-local cursor used for movement")
(defvar-local tree-sitter-move--debug-overlay nil
"an overlay used to visually display the region currently marked by the cursor")
;;;;; TODO: should everything use named nodes? Only some things?
;;;;; maybe there should be a pair of functions for everything?
;;;;; For now restrict to named nodes.
(defun tree-sitter-move--setup ()
;; TODO
(progn
;; TODO: if tree-sitter-mode fails to load, display a better error
(tree-sitter-mode t)
(setq tree-sitter-move--cursor (tsc-root-node tree-sitter-tree))
(add-variable-watcher
'tree-sitter-move--cursor
#'tree-sitter-move--debug-overlay-update)))
(defun tree-sitter-move--debug-overlay-update (sym newval &rest _args)
"variable-watcher to update the debug overlay when the cursor changes"
(let ((start (tsc-node-start-position newval))
(end (tsc-node-end-position newval)))
(symbol-macrolet ((o tree-sitter-move--debug-overlay))
(if o
(move-overlay o start end)
(setq o (make-overlay start end))
(overlay-put o 'face 'highlight)
))))
(defun tree-sitter-move--debug-overlay-teardown ()
"Turn of the overlay visibility and delete the overlay object"
(when tree-sitter-move--debug-overlay
(delete-overlay tree-sitter-move--debug-overlay)
(setq tree-sitter-move--debug-overlay nil)))
(defun tree-sitter-move--teardown ()
(setq tree-sitter-move--cursor nil)
(tree-sitter-move--debug-overlay-teardown)
(tree-sitter-mode nil))
;; Get the syntax node the cursor is on.
(defun tsc-get-named-node-at-point ()
(let ((p (point)))
(tsc-get-named-descendant-for-position-range
(tsc-root-node tree-sitter-tree) p p)))
;; TODO: is this function necessary?
;; Maybe tree-sitter always guarantees that parents are named?
(defun tsc-get-named-parent (node)
(when-let ((parent (tsc-get-parent node)))
(while (and parent (not (tsc-node-named-p parent)))
(setq parent (tsc-get-parent parent)))
parent))
(defun tsc-get-first-named-node-with-siblings-up (node)
"Returns the first 'upwards' node that has siblings. That includes the current
node, so if the given node has siblings, it is returned. Returns nil if there
is no such node up to the root"
(when-let ((has-siblings-p
(lambda (parent-node)
(> (tsc-count-named-children parent-node)
1)))
(cur node)
(parent (tsc-get-named-parent node)))
(while (and parent (not (funcall has-siblings-p parent)))
(setq cur parent)
(setq parent (tsc-get-named-parent cur)))
cur))
(defun tree-sitter-move--set-cursor-to-node (node)
(setq tree-sitter-move--cursor node))
(defun tree-sitter-move--set-cursor-to-node-at-point ()
(tree-sitter-move--set-cursor-to-node (tsc-get-named-node-at-point)))
(defun tree-sitter-move--move-point-to-node (node)
(set-window-point
(selected-window)
(tsc-node-start-position node)))
;; interactive commands (“do what I expect” section)
(defun tree-sitter-move-reset ()
(interactive)
(tree-sitter-move--set-cursor-to-node-at-point))
(defun tree-sitter-move-right ()
(interactive)
(tree-sitter-move--move-skip-non-sibling-nodes 'tsc-get-next-named-sibling))
(defun tree-sitter-move-left ()
(interactive)
(tree-sitter-move--move-skip-non-sibling-nodes 'tsc-get-prev-named-sibling))
(defun tree-sitter-move-up ()
(interactive)
(tree-sitter-move--move-skip-non-sibling-nodes 'tsc-get-parent))
;; TODO: does not skip siblings yet, because the skip function only goes up (not down)
(defun tree-sitter-move-down ()
(interactive)
(tree-sitter-move--move-if-possible (lambda (n) (tsc-get-nth-named-child n 0))))
(defun tree-sitter-move--move-skip-non-sibling-nodes (move-fn)
"Moves to the sidewards next sibling. If the current node does not have siblings, go
upwards until something has siblings and then move to the side (right or left)."
(tree-sitter-move--move-if-possible
(lambda (cur)
(when-let ((with-siblings
(tsc-get-first-named-node-with-siblings-up cur)))
(funcall move-fn with-siblings)))))
(defun tree-sitter-move--move-if-possible (dir-fn)
(let ((next (funcall dir-fn tree-sitter-move--cursor)))
(when next
(tree-sitter-move--set-cursor-to-node next)
(tree-sitter-move--move-point-to-node next))))
; mostly stolen from tree-sitter-mode
;;;###autoload
(define-minor-mode tree-sitter-move-mode
"Minor mode to do cursor movements via tree-sitter"
:init-value nil
:lighter " tree-sitter-move"
(if tree-sitter-move-mode
(tree-sitter--error-protect
(progn
(tree-sitter-move--setup))
(setq tree-sitter-move-mode nil)
(tree-sitter-move--teardown))
(tree-sitter-move--teardown)))

View file

@ -1,80 +0,0 @@
import tseslint from 'typescript-eslint';
import tsplugin from '@typescript-eslint/eslint-plugin';
import parser from '@typescript-eslint/parser';
let recommended = { ...tseslint.configs.eslintRecommended };
let set = tseslint.config(recommended, {
languageOptions: {
parser: parser,
parserOptions: {
projectService: true,
},
},
plugins: { '@typescript-eslint': tsplugin },
rules: {
'prettier/prettier': 'off',
'prefer-const': 'warn',
'@typescript-eslint/ban-ts-comment': 'warn',
'no-array-constructor': 'off',
'@typescript-eslint/no-array-constructor': 'warn',
'@typescript-eslint/no-duplicate-enum-values': 'warn',
'@typescript-eslint/no-empty-object-type': 'warn',
'@typescript-eslint/no-explicit-any': 'warn',
'@typescript-eslint/no-extra-non-null-assertion': 'warn',
'@typescript-eslint/no-misused-new': 'warn',
'@typescript-eslint/no-namespace': 'warn',
'@typescript-eslint/no-non-null-asserted-optional-chain': 'warn',
'@typescript-eslint/no-require-imports': 'warn',
'@typescript-eslint/no-this-alias': 'warn',
'@typescript-eslint/no-unnecessary-type-constraint': 'warn',
'@typescript-eslint/no-unsafe-declaration-merging': 'warn',
'@typescript-eslint/no-unsafe-function-type': 'warn',
'@typescript-eslint/strict-boolean-expressions': ['warn'],
'no-unused-expressions': 'off',
'@typescript-eslint/no-unused-expressions': 'warn',
'no-unused-vars': 'off',
'@typescript-eslint/no-unused-vars': ['warn', { argsIgnorePattern: '^_' }],
'@typescript-eslint/no-wrapper-object-types': 'warn',
'@typescript-eslint/prefer-as-const': 'warn',
'@typescript-eslint/prefer-namespace-keyword': 'warn',
'@typescript-eslint/triple-slash-reference': 'warn',
'@typescript-eslint/await-thenable': 'warn',
'@typescript-eslint/no-array-delete': 'warn',
'@typescript-eslint/no-base-to-string': 'warn',
'@typescript-eslint/no-duplicate-type-constituents': 'warn',
'@typescript-eslint/no-floating-promises': 'warn',
'@typescript-eslint/no-for-in-array': 'warn',
'no-implied-eval': 'off',
'@typescript-eslint/no-implied-eval': 'warn',
'@typescript-eslint/no-misused-promises': 'warn',
'@typescript-eslint/no-redundant-type-constituents': 'warn',
'@typescript-eslint/no-unnecessary-type-assertion': 'warn',
'@typescript-eslint/no-unsafe-argument': 'warn',
'@typescript-eslint/no-unsafe-assignment': 'warn',
'@typescript-eslint/no-unsafe-call': 'warn',
'@typescript-eslint/no-unsafe-enum-comparison': 'warn',
'@typescript-eslint/no-unsafe-member-access': 'warn',
'@typescript-eslint/no-unsafe-return': 'warn',
'@typescript-eslint/no-unsafe-unary-minus': 'warn',
'no-throw-literal': 'off',
'@typescript-eslint/only-throw-error': 'warn',
'prefer-promise-reject-errors': 'off',
'@typescript-eslint/prefer-promise-reject-errors': 'warn',
'require-await': 'off',
'@typescript-eslint/require-await': 'warn',
'@typescript-eslint/restrict-plus-operands': 'warn',
'@typescript-eslint/restrict-template-expressions': 'warn',
'@typescript-eslint/unbound-method': 'warn',
},
});
// override files for each set
const files = ['src/**/*.ts', 'src/**/*.tsx'];
for (let s of set) {
s.files = files;
}
export default set;

View file

@ -1,90 +0,0 @@
{ ... }:
# SPDX-License-Identifier: MIT
# Created by Graham Christensen
# version from https://github.com/grahamc/mayday/blob/c48f7583e622fe2e695a2a929de34679e5818816/exact-source.nix
let
# Require that every path specified does exist.
#
# By default, Nix won't complain if you refer to a missing file
# if you don't actually use it:
#
# nix-repl> ./bogus
# /home/grahamc/playground/bogus
#
# nix-repl> toString ./bogus
# "/home/grahamc/playground/bogus"
#
# so in order for this interface to be *exact*, we must
# specifically require every provided path exists:
#
# nix-repl> "${./bogus}"
# error: getting attributes of path
# '/home/grahamc/playground/bogus': No such file or
# directory
requireAllPathsExist = paths:
let
validation = builtins.map (path: "${path}") paths;
in
builtins.deepSeq validation paths;
# Break down a given path in to a list of all of the path and
# its parent directories.
#
# `builtins.path` / `builtins.filterSource` will ask about
# a containing directory, and we must say YES otherwise it will
# not include anything below it.
#
# Concretely, convert: "/foo/baz/tux" in to:
# [ "/foo/baz/tux" "/foo/baz" "/foo" ]
recursivelyPopDir = path:
if path == "/" then [ ]
else [ path ] ++ (recursivelyPopDir (builtins.dirOf path));
# Given a list of strings, dedup the list and return a
# list of all unique strings.
#
# Note: only works on strings ;):
#
# First convert [ "foo" "foo" "bar" ] in to:
# [
# { name = "foo"; value = ""; }
# { name = "foo"; value = ""; }
# { name = "bar"; value = ""; }
# ]
# then convert that to { "foo" = ""; "bar" = ""; }
# then get the attribute names, "foo" and "bar".
dedup = strings:
let
name_value_pairs = builtins.map
(string: { name = string; value = ""; })
strings;
attrset_of_strings = builtins.listToAttrs name_value_pairs;
in
builtins.attrNames attrset_of_strings;
exactSource = source_root: paths:
let
all_possible_paths =
let
# Convert all the paths in to relative paths on disk.
# ie: stringPaths will contain [ "/home/grahamc/playground/..." ];
# instead of /nix/store paths.
string_paths = builtins.map toString
(requireAllPathsExist paths);
all_paths_with_duplicates = builtins.concatMap
recursivelyPopDir
string_paths;
in
dedup all_paths_with_duplicates;
pathIsSpecified = path:
builtins.elem path all_possible_paths;
in
builtins.path {
path = source_root;
filter = (path: _type: pathIsSpecified path);
};
in
exactSource

View file

@ -1,48 +0,0 @@
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE TypeApplications #-}
module ExecHelpers where
import Data.String (IsString)
import MyPrelude
import qualified System.Exit as Sys
newtype CurrentProgramName = CurrentProgramName { unCurrentProgramName :: Text }
deriving newtype (Show, Eq, Ord, IsString)
-- | Exit 1 to signify a generic expected error
-- (e.g. something that sometimes just goes wrong, like a nix build).
dieExpectedError :: CurrentProgramName -> Text -> IO a
dieExpectedError = dieWith 1
-- | Exit 100 to signify a user error (“the user is holding it wrong”).
-- This is a permanent error, if the program is executed the same way
-- it should crash with 100 again.
dieUserError :: CurrentProgramName -> Text -> IO a
dieUserError = dieWith 100
-- | Exit 101 to signify an unexpected crash (failing assertion or panic).
diePanic :: CurrentProgramName -> Text -> IO a
diePanic = dieWith 101
-- | Exit 111 to signify a temporary error (such as resource exhaustion)
dieTemporary :: CurrentProgramName -> Text -> IO a
dieTemporary = dieWith 111
-- | Exit 126 to signify an environment problem
-- (the user has set up stuff incorrectly so the program cannot work)
dieEnvironmentProblem :: CurrentProgramName -> Text -> IO a
dieEnvironmentProblem = dieWith 126
-- | Exit 127 to signify a missing executable.
dieMissingExecutable :: CurrentProgramName -> Text -> IO a
dieMissingExecutable = dieWith 127
dieWith :: Natural -> CurrentProgramName -> Text -> IO a
dieWith status currentProgramName msg = do
putStderrLn [fmt|{currentProgramName & unCurrentProgramName}: {msg}|]
Sys.exitWith
(Sys.ExitFailure (status & fromIntegral @Natural @Int))

View file

@ -1,70 +0,0 @@
{ depot, pkgs, lib, ... }:
let
exec-helpers-hs = pkgs.haskellPackages.mkDerivation {
pname = "exec-helpers";
version = "0.1.0";
src = depot.users.Profpatsch.exactSource ./. [
./exec-helpers.cabal
./ExecHelpers.hs
];
libraryHaskellDepends = [
depot.users.Profpatsch.my-prelude
];
isLibrary = true;
license = lib.licenses.mit;
};
print-one-env = depot.nix.writers.rustSimple
{
name = "print-one-env";
dependencies = [
depot.users.Profpatsch.execline.exec-helpers
];
} ''
extern crate exec_helpers;
use std::os::unix::ffi::OsStrExt;
use std::io::Write;
fn main() {
let args = exec_helpers::args("print-one-env", 1);
let valname = std::ffi::OsStr::from_bytes(&args[0]);
match std::env::var_os(&valname) {
None => exec_helpers::die_user_error("print-one-env", format!("Env variable `{:?}` is not set", valname)),
Some(val) => std::io::stdout().write_all(&val.as_bytes()).unwrap()
}
}
'';
setsid = depot.nix.writers.rustSimple
{
name = "setsid";
dependencies = [
depot.users.Profpatsch.execline.exec-helpers
depot.third_party.rust-crates.libc
];
} ''
use std::os::unix::ffi::OsStrExt;
use std::ffi::OsStr;
fn main() {
let (args, prog) = exec_helpers::args_for_exec("setsid", 1);
let envvar = OsStr::from_bytes(&args.get(0).expect("first argument must be envvar name to set"));
let sid: i32 = unsafe { libc::setsid() };
std::env::set_var(envvar, format!("{}", sid));
let env: Vec<(&[u8], &[u8])> = vec![];
exec_helpers::exec_into_args("getid", prog, env);
}
'';
in
depot.nix.readTree.drvTargets {
inherit
exec-helpers-hs
print-one-env
setsid
;
}

View file

@ -1,14 +0,0 @@
cabal-version: 3.0
name: exec-helpers
version: 0.1.0.0
author: Profpatsch
maintainer: mail@profpatsch.de
library
exposed-modules: ExecHelpers
build-depends:
base >=4.15 && <5,
my-prelude
default-language: Haskell2010

View file

@ -1,7 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "exec_helpers"
version = "0.1.0"


@ -1,8 +0,0 @@
[package]
name = "exec_helpers"
version = "0.1.0"
edition = "2021"
[lib]
name = "exec_helpers"
path = "exec_helpers.rs"


@ -1,6 +0,0 @@
{ depot, ... }:
depot.nix.writers.rustSimpleLib
{
name = "exec-helpers";
}
(builtins.readFile ./exec_helpers.rs)


@ -1,149 +0,0 @@
use std::ffi::OsStr;
use std::os::unix::ffi::{OsStrExt, OsStringExt};
use std::os::unix::process::CommandExt;
pub fn no_args(current_prog_name: &str) -> () {
let mut args = std::env::args_os();
// remove argv[0]
let _ = args.nth(0);
if args.len() > 0 {
die_user_error(
current_prog_name,
format!("Expected no arguments, got {:?}", args.collect::<Vec<_>>()),
)
}
}
pub fn args(current_prog_name: &str, no_of_positional_args: usize) -> Vec<Vec<u8>> {
let mut args = std::env::args_os();
// remove argv[0]
let _ = args.nth(0);
if args.len() != no_of_positional_args {
die_user_error(
current_prog_name,
format!(
"Expected {} arguments, got {}, namely {:?}",
no_of_positional_args,
args.len(),
args.collect::<Vec<_>>()
),
)
}
args.map(|arg| arg.into_vec()).collect()
}
pub fn args_for_exec(
current_prog_name: &str,
no_of_positional_args: usize,
) -> (Vec<Vec<u8>>, Vec<Vec<u8>>) {
let mut args = std::env::args_os();
// remove argv[0]
let _ = args.nth(0);
let mut args = args.map(|arg| arg.into_vec());
let mut pos_args = vec![];
// get positional args
for i in 1..no_of_positional_args + 1 {
pos_args.push(args.nth(0).expect(&format!(
"{}: expects {} positional args, only got {}",
current_prog_name, no_of_positional_args, i - 1
)));
}
// prog... is the rest of the iterator
let prog: Vec<Vec<u8>> = args.collect();
(pos_args, prog)
}
pub fn exec_into_args<'a, 'b, Args, Arg, Env, Key, Val>(
current_prog_name: &str,
args: Args,
env_additions: Env,
) -> !
where
Args: IntoIterator<Item = Arg>,
Arg: AsRef<[u8]>,
Env: IntoIterator<Item = (Key, Val)>,
Key: AsRef<[u8]>,
Val: AsRef<[u8]>,
{
// TODO: is this possible without collecting into a Vec first, just leaving it an IntoIterator?
let args = args.into_iter().collect::<Vec<Arg>>();
let mut args = args.iter().map(|v| OsStr::from_bytes(v.as_ref()));
let prog = args.nth(0).expect(&format!(
"{}: first argument must be an executable",
current_prog_name
));
// TODO: same here
let env = env_additions.into_iter().collect::<Vec<(Key, Val)>>();
let env = env
.iter()
.map(|(k, v)| (OsStr::from_bytes(k.as_ref()), OsStr::from_bytes(v.as_ref())));
let err = std::process::Command::new(prog).args(args).envs(env).exec();
die_missing_executable(
current_prog_name,
format!(
"exec failed: {}, while trying to execing into {:?}",
err, prog
),
);
}
/// Exit 1 to signify a generic expected error
/// (e.g. something that sometimes just goes wrong, like a nix build).
pub fn die_expected_error<S>(current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
die_with(1, current_prog_name, msg)
}
/// Exit 100 to signify a user error (“the user is holding it wrong”).
/// This is a permanent error: if the program is executed the same way,
/// it should crash with 100 again.
pub fn die_user_error<S>(current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
die_with(100, current_prog_name, msg)
}
/// Exit 101 to signify an unexpected crash (failing assertion or panic).
/// This is the same exit code that `panic!()` emits.
pub fn die_panic<S>(current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
die_with(101, current_prog_name, msg)
}
/// Exit 111 to signify a temporary error (such as resource exhaustion)
pub fn die_temporary<S>(current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
die_with(111, current_prog_name, msg)
}
/// Exit 126 to signify an environment problem
/// (the user has set up stuff incorrectly so the program cannot work)
pub fn die_environment_problem<S>(current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
die_with(126, current_prog_name, msg)
}
/// Exit 127 to signify a missing executable.
pub fn die_missing_executable<S>(current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
die_with(127, current_prog_name, msg)
}
fn die_with<S>(status: i32, current_prog_name: &str, msg: S) -> !
where
S: AsRef<str>,
{
eprintln!("{}: {}", current_prog_name, msg.as_ref());
std::process::exit(status)
}
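// Usage sketch (hypothetical binary; not part of the original file):
//
// fn main() {
//     // expect exactly one positional argument, then exec into the rest of argv:
//     let (args, prog) = exec_helpers::args_for_exec("my-tool", 1);
//     let env: Vec<(&[u8], &[u8])> = vec![(&b"MY_VAR"[..], &args[0][..])];
//     exec_helpers::exec_into_args("my-tool", prog, env);
// }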

Binary file not shown (image; 26 KiB)


@ -1,5 +0,0 @@
# haskell-module-deps
An executable that, when run in a project directory containing `.hs` files under `./src`, outputs a PNG graph of how those modules import each other, transitively.
Useful for getting an overview, finding weird import edges, and figuring out how to get more compilation parallelism into your Haskell project.
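Example (assumed invocation; the tool takes the project root as its argument):

    haskell-module-deps ./my-haskell-project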


@ -1,55 +0,0 @@
{ depot, pkgs, lib, ... }:
let
bins = depot.nix.getBins pkgs.zathura [ "zathura" ]
// depot.nix.getBins pkgs.haskellPackages.graphmod [ "graphmod" ]
// depot.nix.getBins pkgs.graphviz [ "dot" ]
;
# Display a graph of all modules in a project and how they depend on each other.
# Takes the project directory as argument.
# Open in zathura.
haskell-module-deps = depot.nix.writeExecline "haskell-module-deps" { } [
"pipeline"
[ haskell-module-deps-with-filetype "pdf" "$@" ]
bins.zathura
"-"
];
# Display a graph of all modules in a project and how they depend on each other.
# Takes the project directory as argument.
# Print a png to stdout.
haskell-module-deps-png = depot.nix.writeExecline "haskell-module-deps-png" { } [
haskell-module-deps-with-filetype
"png"
"$@"
];
# Display a graph of all modules in a project and how they depend on each other.
# Takes the file type to generate as first argument
# and the project directory as second argument.
haskell-module-deps-with-filetype = pkgs.writers.writeBash "haskell-module-deps-with-filetype" ''
set -euo pipefail
shopt -s globstar
filetype="$1"
rootDir="$2"
${bins.graphmod} \
${/*silence warnings for missing external dependencies*/""} \
--quiet \
${/*applies some kind of import simplification*/""} \
--prune-edges \
"$rootDir"/src/**/*.hs \
| ${bins.dot} \
${/*otherwise it's a bit cramped*/""} \
-Gsize="20,20!" \
-T"$filetype"
'';
in
depot.nix.readTree.drvTargets {
inherit
haskell-module-deps
haskell-module-deps-png
haskell-module-deps-with-filetype
;
}

Binary file not shown (image; 406 KiB)


@ -1,36 +0,0 @@
cradle:
cabal:
- path: "./my-prelude"
component: "lib:my-prelude"
- path: "./my-webstuff"
component: "lib:my-webstuff"
- path: "./netencode"
component: "lib:netencode"
- path: "./arglib"
component: "lib:arglib-netencode"
- path: "./execline"
component: "lib:exec-helpers"
- path: "./htmx-experiment/src"
component: "lib:htmx-experiment"
- path: "./htmx-experiment/Main.hs"
component: "htmx-experiment:exe:htmx-experiment"
- path: "./cas-serve/CasServe.hs"
component: "cas-serve:exe:cas-serve"
- path: "./jbovlaste-sqlite/JbovlasteSqlite.hs"
component: "jbovlaste-sqlite:exe:jbovlaste-sqlite"
- path: "./whatcd-resolver/src"
component: "lib:whatcd-resolver"
- path: "./whatcd-resolver/Main.hs"
component: "whatcd-resolver:exe:whatcd-resolver"
- path: "./openlab-tools/src"
component: "lib:openlab-tools"
- path: "./openlab-tools/Main.hs"
component: "openlab-tools:exe:openlab-tools"
- path: "./httzip/Httzip.hs"
component: "httzip:exe:httzip"
- path: "./my-xmonad/Xmonad.hs"
component: "my-xmonad:exe:xmonad"
- path: "./my-tools/src"
component: "lib:my-tools"
- path: "./my-tools/exe/Copy.hs"
component: "my-tools:exe:copy"


@ -1,4 +0,0 @@
import HtmxExperiment qualified
main :: IO ()
main = HtmxExperiment.main


@ -1,46 +0,0 @@
{ depot, pkgs, lib, ... }:
let
htmx-experiment = pkgs.haskellPackages.mkDerivation {
pname = "htmx-experiment";
version = "0.1.0";
src = depot.users.Profpatsch.exactSource ./. [
./htmx-experiment.cabal
./Main.hs
./src/HtmxExperiment.hs
./src/ServerErrors.hs
./src/ValidationParseT.hs
];
libraryHaskellDepends = [
depot.users.Profpatsch.my-webstuff
pkgs.haskellPackages.pa-label
pkgs.haskellPackages.pa-error-tree
pkgs.haskellPackages.blaze-html
pkgs.haskellPackages.blaze-markup
pkgs.haskellPackages.bytestring
pkgs.haskellPackages.dlist
pkgs.haskellPackages.http-types
pkgs.haskellPackages.ihp-hsx
pkgs.haskellPackages.monad-logger
pkgs.haskellPackages.pa-error-tree
pkgs.haskellPackages.pa-field-parser
pkgs.haskellPackages.pa-label
pkgs.haskellPackages.pa-prelude
pkgs.haskellPackages.selective
pkgs.haskellPackages.text
pkgs.haskellPackages.unliftio
pkgs.haskellPackages.wai
pkgs.haskellPackages.warp
];
isLibrary = false;
isExecutable = true;
license = lib.licenses.mit;
};
in
htmx-experiment


@ -1,89 +0,0 @@
cabal-version: 3.0
name: htmx-experiment
version: 0.1.0.0
author: Profpatsch
maintainer: mail@profpatsch.de
common common-options
ghc-options:
-Wall
-Wno-type-defaults
-Wunused-packages
-Wredundant-constraints
-fwarn-missing-deriving-strategies
-- See https://downloads.haskell.org/ghc/latest/docs/users_guide/exts.html
-- for a description of all these extensions
default-extensions:
-- Infer Applicative instead of Monad where possible
ApplicativeDo
-- Allow literal strings to be Text
OverloadedStrings
-- Syntactic sugar improvements
LambdaCase
MultiWayIf
-- Makes the (deprecated) usage of * instead of Data.Kind.Type an error
NoStarIsType
-- Convenient and crucial to deal with ambiguous field names, commonly
-- known as RecordDotSyntax
OverloadedRecordDot
-- does not export record fields as functions, use OverloadedRecordDot to access instead
NoFieldSelectors
-- Record punning
RecordWildCards
-- Improved Deriving
DerivingStrategies
DerivingVia
-- Type-level strings
DataKinds
-- to enable the `type` keyword in import lists (ormolu uses this automatically)
ExplicitNamespaces
default-language: GHC2021
library
import: common-options
exposed-modules:
HtmxExperiment,
ServerErrors,
ValidationParseT
hs-source-dirs: ./src
build-depends:
base >=4.15 && <5,
-- http-api-data
blaze-html,
blaze-markup,
bytestring,
dlist,
http-types,
ihp-hsx,
monad-logger,
pa-error-tree,
pa-field-parser,
pa-label,
pa-prelude,
my-webstuff,
selective,
text,
unliftio,
wai,
warp
executable htmx-experiment
import: common-options
main-is: Main.hs
build-depends:
htmx-experiment,
base >=4.15 && <5,


@ -1,369 +0,0 @@
{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE QuasiQuotes #-}
module HtmxExperiment where
import Control.Category qualified as Cat
import Control.Exception qualified as Exc
import Control.Monad.Logger
import Control.Selective (Selective (select))
import Control.Selective qualified as Selective
import Data.ByteString qualified as Bytes
import Data.DList (DList)
import Data.Functor.Compose
import Data.List qualified as List
import Data.Maybe (maybeToList)
import Data.Maybe qualified as Maybe
import Data.Monoid qualified as Monoid
import Data.Text qualified as Text
import FieldParser hiding (nonEmpty)
import IHP.HSX.QQ (hsx)
import Label
import Multipart2 (FormValidation (FormValidation), FormValidationResult, MultipartParseT, failFormValidation)
import Multipart2 qualified as Multipart
import Network.HTTP.Types qualified as Http
import Network.Wai qualified as Wai
import Network.Wai.Handler.Warp qualified as Warp
import PossehlAnalyticsPrelude
import ServerErrors (ServerError (..), throwUserErrorTree)
import Text.Blaze.Html5 (Html, docTypeHtml)
import Text.Blaze.Renderer.Utf8 (renderMarkup)
import UnliftIO (MonadUnliftIO (withRunInIO))
import Prelude hiding (compare)
-- data Routes
-- = Root
-- | Register
-- | RegisterSubmit
-- data Router url = Router
-- { parse :: Routes.URLParser url,
-- print :: url -> [Text]
-- }
-- routerPathInfo :: Routes.PathInfo a => Router a
-- routerPathInfo =
-- Router
-- { parse = Routes.fromPathSegments,
-- print = Routes.toPathSegments
-- }
-- subroute :: Text -> Router subUrl -> Router subUrl
-- subroute path inner =
-- Router
-- { parse = Routes.segment path *> inner.parse,
-- print = \url -> path : inner.print url
-- }
-- routerLeaf :: a -> Router a
-- routerLeaf a =
-- Router
-- { parse = pure a,
-- print = \_ -> []
-- }
-- routerToSite ::
-- ((url -> [(Text, Maybe Text)] -> Text) -> url -> a) ->
-- Router url ->
-- Routes.Site url a
-- routerToSite handler router =
-- Routes.Site
-- { handleSite = handler,
-- formatPathSegments = (\x -> (x, [])) . router.print,
-- parsePathSegments = Routes.parseSegments router.parse
-- }
-- handlers queryParams = \case
-- Root -> "root"
-- Register -> "register"
-- RegisterSubmit -> "registersubmit"
newtype Router handler from to = Router {unRouter :: from -> [Text] -> (Maybe handler, to)}
deriving
(Functor, Applicative)
via ( Compose
((->) from)
( Compose
((->) [Text])
((,) (Monoid.First handler))
)
)
data Routes r handler = Routes
{ users :: r (Label "register" handler)
}
data Endpoint handler subroutes = Endpoint
{ root :: handler,
subroutes :: subroutes
}
deriving stock (Show, Eq)
data Handler = Handler {url :: Text}
-- myRoute :: Router () from (Endpoint (Routes (Endpoint ()) Handler) b)
-- myRoute =
-- root $ do
-- users <- fixed "users" () $ fixedFinal @"register" ()
-- pure $ Routes {..}
-- -- | the root and its children
-- root :: routes from a -> routes from (Endpoint a b)
-- root = todo
-- | A fixed sub-route with children
fixed :: Text -> handler -> Router handler from a -> Router handler from (Endpoint handler a)
fixed route handler inner = Router $ \from -> \case
[final]
| route == final ->
( Just handler,
Endpoint
{ root = handler,
subroutes = (inner.unRouter from []) & snd
}
)
(this : more)
| route == this ->
( (inner.unRouter from more) & fst,
Endpoint
{ root = handler,
subroutes = (inner.unRouter from more) & snd
}
)
_ -> (Nothing, Endpoint {root = handler, subroutes = (inner.unRouter from []) & snd})
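-- Usage sketch for 'fixed' (hypothetical, mirroring the commented-out
-- @myRoute@ above):
--
-- @@
-- router :: Router Handler from (Endpoint Handler (Endpoint Handler ()))
-- router =
--   fixed "users" (Handler "/users") $
--     fixed "register" (Handler "/users/register") $
--       pure ()
-- @@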
-- integer ::
-- forall routeName routes from a.
-- Router (T2 routeName Integer "more" from) a ->
-- Router from (Endpoint () a)
-- integer inner = Router $ \case
-- (path, []) ->
-- runFieldParser Field.signedDecimal path
-- (path, more) ->
-- inner.unRouter more (runFieldParser Field.signedDecimal path)
-- -- | A leaf route
-- fixedFinal :: forall route handler from. (KnownSymbol route) => handler -> Router handler from (Label route Handler)
-- fixedFinal handler = do
-- let route = symbolText @route
-- Rounter $ \from -> \case
-- [final] | route == final -> (Just handler, label @route (Handler from))
-- _ -> (Nothing, label @route handler)
main :: IO ()
main = runStderrLoggingT @IO $ do
withRunInIO @(LoggingT IO) $ \runInIO -> do
Warp.run 8080 $ \req respond -> catchServerError respond $ do
let respondOk res = Wai.responseLBS Http.ok200 [] (renderMarkup res)
let htmlRoot inner =
docTypeHtml
[hsx|
<head>
<script src="https://unpkg.com/htmx.org@1.9.2" integrity="sha384-L6OqL9pRWyyFU3+/bjdSri+iIphTN/bvYyM37tICVyOJkWZLpP2vGn6VUEXgzg6h" crossorigin="anonymous"></script>
</head>
<body>
{inner}
</body>
|]
res <-
case req & Wai.pathInfo of
[] ->
pure $
respondOk $
htmlRoot
[hsx|
<div id="register_buttons">
<button hx-get="/register" hx-target="body" hx-push-url="/register">Register an account</button>
<button hx-get="/login" hx-target="body">Login</button>
</div>
|]
["register"] ->
pure $ respondOk $ fullEndpoint req $ \case
FullPage -> htmlRoot $ registerForm mempty
Snippet -> registerForm mempty
["register", "submit"] -> do
FormValidation body <-
req
& parsePostBody
registerFormValidate
& runInIO
case body of
-- if the parse succeeds, ignore any accumulated validation errors
(_, Just a) -> pure $ respondOk $ htmlRoot [hsx|{a}|]
(errs, Nothing) -> pure $ respondOk $ htmlRoot $ registerForm errs
other ->
pure $ respondOk [hsx|no route here at {other}|]
respond $ res
where
catchServerError respond io =
Exc.catch io (\(ex :: ServerError) -> respond $ Wai.responseLBS ex.status [] ex.errBody)
parsePostBody ::
(MonadIO m, MonadThrow m, MonadLogger m) =>
MultipartParseT m b ->
Wai.Request ->
m b
parsePostBody parser req =
Multipart.parseMultipartOrThrow throwUserErrorTree parser req
-- migrate :: IO (Label "numberOfRowsAffected" Natural)
-- migrate =
-- Init.runAppTest $ do
-- runTransaction $
-- execute
-- [sql|
-- CREATE TABLE IF NOT EXISTS experiments.users (
-- id SERIAL PRIMARY KEY,
-- email TEXT NOT NULL,
-- registration_pending_token TEXT NULL
-- )
-- |]
-- ()
data HsxRequest
= Snippet
| FullPage
fullEndpoint :: Wai.Request -> (HsxRequest -> t) -> t
fullEndpoint req act = do
let isHxRequest = req & Wai.requestHeaders & List.find (\h -> (h & fst) == "HX-Request") & Maybe.isJust
if isHxRequest
then act Snippet
else act FullPage
data FormField = FormField
{ label_ :: Html,
required :: Bool,
id_ :: Text,
name :: ByteString,
type_ :: Text,
placeholder :: Maybe Text
}
inputHtml ::
FormField ->
DList FormValidationResult ->
Html
inputHtml (FormField {..}) validationResults = do
let validation =
validationResults
& toList
& mapMaybe
( \v ->
if v.formFieldName == name
then
Just
( T2
(label @"errors" (maybeToList v.hasError))
(label @"originalValue" (Monoid.First (Just v.originalValue)))
)
else Nothing
)
& mconcat
let isFirstError =
validationResults
& List.find (\res -> Maybe.isJust res.hasError && res.formFieldName == name)
& Maybe.isJust
[hsx|
<label for={id_}>{label_}
<input
autofocus={isFirstError}
onfocus="this.select()"
required={required}
id={id_}
name={name}
type={type_}
placeholder={placeholder}
value={validation.originalValue.getFirst}
/>
<p id="{id_}.validation">{validation.errors & nonEmpty <&> toList <&> map prettyError <&> Text.intercalate "; "}</p>
</label>
|]
registerForm :: DList FormValidationResult -> Html
registerForm validationErrors =
let fields =
mconcat
[ inputHtml $
FormField
{ label_ = "Your Email:",
required = True,
id_ = "register_email",
name = "email",
type_ = "email",
placeholder = Just "your@email.com"
},
inputHtml $
FormField
{ label_ = "New password:",
required = True,
id_ = "register_password",
name = "password",
type_ = "password",
placeholder = Just "hunter2"
},
inputHtml $
FormField
{ label_ = "Repeated password:",
required = True,
id_ = "register_password_repeated",
name = "password_repeated",
type_ = "password",
placeholder = Just "hunter2"
}
]
in [hsx|
<form hx-post="/register/submit">
<fieldset>
<legend>Register user</legend>
{fields validationErrors}
<button id="register_submit_button" name="register">
Register
</button>
</fieldset>
</form>
|]
registerFormValidate ::
(Applicative m) =>
MultipartParseT
m
(FormValidation (T2 "email" ByteString "password" ByteString))
registerFormValidate = do
let emailFP = FieldParser $ \b ->
if
| Bytes.elem (charToWordUnsafe '@') b -> Right b
| otherwise -> Left [fmt|This is not an email address: "{b & bytesToTextUtf8Unsafe}"|]
getCompose @(MultipartParseT _) @FormValidation $ do
email <- Compose $ Multipart.fieldLabel' @"email" "email" emailFP
password <-
aEqB
"password_repeated"
"The two password fields must be the same"
(Compose $ Multipart.field' "password" Cat.id)
(\field -> Compose $ Multipart.field' field Cat.id)
pure $ T2 email (label @"password" password)
where
aEqB field validateErr fCompare fValidate =
Selective.fromMaybeS
-- TODO: this check is only reached if the field itself is valid. Could we combine those errors?
(Compose $ pure $ failFormValidation (T2 (label @"formFieldName" field) (label @"originalValue" "")) validateErr)
$ do
compare <- fCompare
validate <- fValidate field
pure $ if compare == validate then Just validate else Nothing
-- | A lifted version of 'Data.Maybe.fromMaybe'.
fromMaybeS :: (Selective f) => f a -> f (Maybe a) -> f a
fromMaybeS ifNothing fma =
select
( fma <&> \case
Nothing -> Left ()
Just a -> Right a
)
( do
a <- ifNothing
pure (\() -> a)
)


@ -1,244 +0,0 @@
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TemplateHaskell #-}
module ServerErrors where
import Control.Exception (Exception)
import Control.Monad.Logger (MonadLogger, logError, logWarn)
import Data.ByteString.Lazy qualified as Bytes.Lazy
import Data.Error.Tree
import Network.HTTP.Types qualified as Http
import PossehlAnalyticsPrelude
data ServerError = ServerError
{ status :: Http.Status,
errBody :: Bytes.Lazy.ByteString
}
deriving stock (Show)
deriving anyclass (Exception)
emptyServerError :: Http.Status -> ServerError
emptyServerError status = ServerError {status, errBody = ""}
-- | Throw a user error.
--
-- “User” here is a client using our API, not a human user.
-- So we throw a `HTTP 400` error, which means the API was used incorrectly.
--
-- We also log the error as a warning, because it probably signifies a programming bug in our client.
--
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
throwUserError ::
(MonadLogger m, MonadThrow m) =>
-- | The error to log & throw to the user
Error ->
m b
throwUserError err = do
-- TODO: should we make this into a macro to keep the line numbers?
$logWarn (err & errorContext "There was a “user holding it wrong” error, check the client code" & prettyError)
throwM
ServerError
{ status = Http.badRequest400,
errBody = err & prettyError & textToBytesUtf8 & toLazyBytes
}
-- | Throw a user error.
--
-- “User” here is a client using our API, not a human user.
-- So we throw a `HTTP 400` error, which means the API was used incorrectly.
--
-- We also log the error as a warning, because it probably signifies a programming bug in our client.
--
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
throwUserErrorTree ::
(MonadLogger m, MonadThrow m) =>
-- | The error to log & throw to the user
ErrorTree ->
m b
throwUserErrorTree err = do
-- TODO: should we make this into a macro to keep the line numbers?
$logWarn (err & nestedError "There was a “user holding it wrong” error, check the client code" & prettyErrorTree)
throwM
ServerError
{ status = Http.badRequest400,
errBody = err & prettyErrorTree & textToBytesUtf8 & toLazyBytes
}
-- | Unwrap the `Either` and if `Left` throw a user error.
--
-- Intended to use in a pipeline, e.g.:
--
-- @@
-- doSomething
-- >>= orUserError "Oh no something did not work"
-- >>= doSomethingElse
-- @@
--
-- “User” here is a client using our API, not a human user.
-- So we throw a `HTTP 400` error, which means the API was used incorrectly.
--
-- We also log the error as a warning, because it probably signifies a programming bug in our client.
--
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
orUserError ::
(MonadThrow m, MonadLogger m) =>
-- | The message to add as a context to the error being thrown
Text ->
-- | Result to unwrap and potentially throw
Either Error a ->
m a
orUserError outerMsg eErrA =
orUserErrorTree outerMsg (first singleError eErrA)
-- | Unwrap the `Either` and if `Left` throw a user error. Will pretty-print the 'ErrorTree'
--
-- Intended to use in a pipeline, e.g.:
--
-- @@
-- doSomething
-- >>= orUserErrorTree "Oh no something did not work"
-- >>= doSomethingElse
-- @@
--
-- “User” here is a client using our API, not a human user.
-- So we throw a `HTTP 400` error, which means the API was used incorrectly.
--
-- We also log the error as a warning, because it probably signifies a programming bug in our client.
--
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
orUserErrorTree ::
(MonadThrow m, MonadLogger m) =>
-- | The message to add as a context to the 'ErrorTree' being thrown
Text ->
-- | Result to unwrap and potentially throw
Either ErrorTree a ->
m a
orUserErrorTree outerMsg = \case
Right a -> pure a
Left err -> do
-- TODO: this outer message should probably be added as a separate root instead of adding to the root error?
let tree = errorTreeContext outerMsg err
-- TODO: should we make this into a macro to keep the line numbers?
$logWarn (errorTreeContext "There was a “user holding it wrong” error, check the client code" tree & prettyErrorTree)
throwM
ServerError
{ status = Http.badRequest400,
errBody = tree & prettyErrorTree & textToBytesUtf8 & toLazyBytes
}
-- | Throw an internal error.
--
-- “Internal” here means some assertion that we depend on failed,
-- e.g. some database request returned a wrong result/number of results
-- or some invariant that we expect to hold failed.
--
-- This prints the full error to the log,
-- and returns a “HTTP 500” error without the message.
--
-- If you want to signify a mishandling of the API (e.g. a wrong request), throw a `userError`.
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
throwInternalError ::
(MonadLogger m, MonadThrow m) =>
-- | The error to log internally
Error ->
m b
throwInternalError err = do
-- TODO: should we make this into a macro to keep the line numbers?
$logError
(err & prettyError)
throwM $ emptyServerError Http.internalServerError500
-- | Throw an internal error.
--
-- “Internal” here means some assertion that we depend on failed,
-- e.g. some database request returned a wrong result/number of results
-- or some invariant that we expect to hold failed.
--
-- This prints the full error to the log,
-- and returns a “HTTP 500” error without the message.
--
-- If you want to signify a mishandling of the API (e.g. a wrong request), throw a `userError`.
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
throwInternalErrorTree ::
(MonadLogger m, MonadThrow m) =>
-- | The error to log internally
ErrorTree ->
m b
throwInternalErrorTree err = do
-- TODO: should we make this into a macro to keep the line numbers?
$logError
(err & prettyErrorTree)
throwM $ emptyServerError Http.internalServerError500
-- | Unwrap the `Either` and if `Left` throw an internal error.
--
-- Intended to use in a pipeline, e.g.:
--
-- @@
-- doSomething
-- >>= orInternalError "Oh no something did not work"
-- >>= doSomethingElse
-- @@
--
-- “Internal” here means some assertion that we depend on failed,
-- e.g. some database request returned a wrong result/number of results
-- or some invariant that we expect to hold failed.
--
-- This prints the full error to the log,
-- and returns a “HTTP 500” error without the message.
--
-- If you want to signify a mishandling of the API (e.g. a wrong request), throw a `userError`.
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
orInternalError ::
(MonadThrow m, MonadLogger m) =>
-- | The message to add as a context to the error being thrown
Text ->
-- | Result to unwrap and potentially throw
Either Error a ->
m a
orInternalError outerMsg eErrA = orInternalErrorTree outerMsg (first singleError eErrA)
-- | Unwrap the `Either` and if `Left` throw an internal error. Will pretty-print the 'ErrorTree'.
--
-- Intended to use in a pipeline, e.g.:
--
-- @@
-- doSomething
-- >>= orInternalErrorTree "Oh no something did not work"
-- >>= doSomethingElse
-- @@
--
-- “Internal” here means some assertion that we depend on failed,
-- e.g. some database request returned a wrong result/number of results
-- or some invariant that we expect to hold failed.
--
-- This prints the full error to the log,
-- and returns a “HTTP 500” error without the message.
--
-- If you want to signify a mishandling of the API (e.g. a wrong request), throw a `userError`.
-- If you need to display a message to a human user, return a `FrontendResponse`
-- or a structured type with translation keys (so we can localize the errors).
orInternalErrorTree ::
(MonadThrow m, MonadLogger m) =>
-- | The message to add as a context to the 'ErrorTree' being thrown
Text ->
-- | Result to unwrap and potentially throw
Either ErrorTree a ->
m a
orInternalErrorTree outerMsg = \case
Right a -> pure a
Left err -> do
-- TODO: this outer message should probably be added as a separate root instead of adding to the root error?
let tree = errorTreeContext outerMsg err
-- TODO: should we make this into a macro to keep the line numbers?
$logError (tree & prettyErrorTree)
throwM $ emptyServerError Http.internalServerError500


@ -1,40 +0,0 @@
module ValidationParseT where
import Control.Monad.Logger (MonadLogger)
import Control.Selective (Selective)
import Data.Error.Tree
import Data.Functor.Compose (Compose (..))
import PossehlAnalyticsPrelude
import ServerErrors
-- | A simple way to create an Applicative parser that parses from some environment.
--
-- Use with DerivingVia. Grep codebase for examples.
newtype ValidationParseT env m a = ValidationParseT {unValidationParseT :: env -> m (Validation (NonEmpty Error) a)}
deriving
(Functor, Applicative, Selective)
via ( Compose
((->) env)
(Compose m (Validation (NonEmpty Error)))
)
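-- Example of the DerivingVia pattern mentioned above (hypothetical parser
-- over a @Form@ environment):
--
-- @@
-- newtype FormParseT m a = FormParseT (ValidationParseT Form m a)
--   deriving (Functor, Applicative, Selective) via (ValidationParseT Form m)
-- @@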
-- | Helper that runs the given parser and throws a user error if the parsing failed.
runValidationParseTOrUserError ::
forall validationParseT env m a.
( Coercible validationParseT (ValidationParseT env m a),
MonadLogger m,
MonadThrow m
) =>
-- | toplevel error message to throw if the parsing fails
Error ->
-- | The parser which should be run
validationParseT ->
-- | input to the parser
env ->
m a
{-# INLINE runValidationParseTOrUserError #-}
runValidationParseTOrUserError contextError parser env =
(coerce @_ @(ValidationParseT _ _ _) parser).unValidationParseT env
>>= \case
Failure errs -> throwUserErrorTree (errorTree contextError errs)
Success a -> pure a


@ -1,66 +0,0 @@
{-# LANGUAGE QuasiQuotes #-}
module Main where
import Conduit ((.|))
import Data.Binary.Builder qualified as Builder
import Data.Conduit qualified as Cond
import Data.Conduit.Combinators qualified as Cond
import Data.Conduit.Process.Typed qualified as Cond
import Data.Conduit.Process.Typed qualified as Proc
import Data.List qualified as List
import Data.Text qualified as Text
import Network.HTTP.Types qualified as Http
import Network.Wai qualified as Wai
import Network.Wai.Conduit qualified as Wai.Conduit
import Network.Wai.Handler.Warp qualified as Warp
import PossehlAnalyticsPrelude
import System.Directory qualified as Dir
import System.FilePath ((</>))
import System.FilePath qualified as File
import System.Posix qualified as Unix
-- Webserver that returns folders under CWD as .zip archives (recursively)
main :: IO ()
main = do
currentDirectory <- Dir.getCurrentDirectory >>= Dir.canonicalizePath
run currentDirectory
run :: FilePath -> IO ()
run dir = do
currentDirectory <- Dir.canonicalizePath dir
putStderrLn $ [fmt|current {show currentDirectory}|]
Warp.run 7070 $ \req respond -> do
let respondHtml status content = respond $ Wai.responseLBS status [("Content-Type", "text/html")] content
case req & Wai.pathInfo of
[] -> respond $ Wai.responseLBS Http.status200 [("Content-Type", "text/html")] "any directory will be returned as .zip!"
filePath -> do
absoluteWantedFilepath <- Dir.canonicalizePath (currentDirectory </> (File.joinPath (filePath <&> textToString)))
-- I hope this prevents any shenanigans lol
let noCurrentDirPrefix = List.stripPrefix (File.addTrailingPathSeparator currentDirectory) absoluteWantedFilepath
if
| (any (Text.elem '/') filePath) -> putStderrLn "tried %2F encoding" >> respondHtml Http.status400 "no"
| Nothing <- noCurrentDirPrefix -> putStderrLn "tried parent dir with .." >> respondHtml Http.status400 "no^2"
| Just wantedFilePath <- noCurrentDirPrefix -> do
putStderrLn $ [fmt|wanted {show wantedFilePath}|]
ex <- Unix.fileExist wantedFilePath
if ex
then do
status <- Unix.getFileStatus wantedFilePath
if status & Unix.isDirectory
then do
zipDir <- zipDirectory wantedFilePath
Proc.withProcessWait zipDir $ \process -> do
let stream =
Proc.getStdout process
.| Cond.map (\bytes -> Cond.Chunk $ Builder.fromByteString bytes)
-- TODO: how to handle broken zip? Is it just gonna return a 500? But the stream is already starting, so hard!
respond $ Wai.Conduit.responseSource Http.ok200 [("Content-Type", "application/zip")] stream
else respondHtml Http.status404 "not found"
else respondHtml Http.status404 "not found"
where
zipDirectory toZipDir = do
putStderrLn [fmt|running $ zip {show ["--recurse-paths", "-", toZipDir]}|]
pure $
Proc.proc "zip" ["--recurse-paths", "-", toZipDir]
& Proc.setStdout Cond.createSource


@ -1,40 +0,0 @@
{ depot, pkgs, lib, ... }:
let
httzip = pkgs.haskellPackages.mkDerivation {
pname = "httzip";
version = "0.1.0";
src = depot.users.Profpatsch.exactSource ./. [
./httzip.cabal
./Httzip.hs
];
libraryHaskellDepends = [
pkgs.haskellPackages.pa-prelude
pkgs.haskellPackages.warp
pkgs.haskellPackages.wai
pkgs.haskellPackages.wai-conduit
pkgs.haskellPackages.conduit-extra
pkgs.haskellPackages.conduit
];
isExecutable = true;
isLibrary = false;
license = lib.licenses.mit;
};
bins = depot.nix.getBins httzip [ "httzip" ];
in
depot.nix.writeExecline "httzip-wrapped" { } [
"importas"
"-i"
"PATH"
"PATH"
"export"
"PATH"
"${pkgs.zip}/bin:$${PATH}"
bins.httzip
]


@ -1,73 +0,0 @@
cabal-version: 3.0
name: httzip
version: 0.1.0.0
author: Profpatsch
maintainer: mail@profpatsch.de
common common-options
ghc-options:
-Wall
-Wno-type-defaults
-Wunused-packages
-Wredundant-constraints
-fwarn-missing-deriving-strategies
-- See https://downloads.haskell.org/ghc/latest/docs/users_guide/exts.html
-- for a description of all these extensions
default-extensions:
-- Infer Applicative instead of Monad where possible
ApplicativeDo
-- Allow literal strings to be Text
OverloadedStrings
-- Syntactic sugar improvements
LambdaCase
MultiWayIf
-- Makes the (deprecated) usage of * instead of Data.Kind.Type an error
NoStarIsType
-- Convenient and crucial to deal with ambiguous field names, commonly
-- known as RecordDotSyntax
OverloadedRecordDot
-- does not export record fields as functions, use OverloadedRecordDot to access instead
NoFieldSelectors
-- Record punning
RecordWildCards
-- Improved Deriving
DerivingStrategies
DerivingVia
-- Type-level strings
DataKinds
-- to enable the `type` keyword in import lists (ormolu uses this automatically)
ExplicitNamespaces
default-language: GHC2021
executable httzip
import: common-options
main-is: Httzip.hs
build-depends:
base >=4.15 && <5,
pa-prelude,
bytestring,
text,
warp,
wai,
http-types,
directory,
filepath,
unix,
wai-conduit,
conduit,
conduit-extra,
binary


@ -1,17 +0,0 @@
{ depot, pkgs, lib, ... }:
let
imap-idle = depot.nix.writers.rustSimple
{
name = "imap-idle";
dependencies = [
depot.users.Profpatsch.arglib.netencode.rust
depot.third_party.rust-crates.imap
depot.third_party.rust-crates.epoll
depot.users.Profpatsch.execline.exec-helpers
];
}
(builtins.readFile ./imap-idle.rs);
in
imap-idle


@ -1,140 +0,0 @@
extern crate exec_helpers;
// extern crate arglib_netencode;
// extern crate netencode;
extern crate epoll;
extern crate imap;
// use netencode::dec;
use imap::extensions::idle::SetReadTimeout;
use std::convert::TryFrom;
use std::fs::File;
use std::io::{Read, Write};
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::time::Duration;
/// Implements a UCSPI client that wraps fd 6 & 7
/// and implements Write and Read with a timeout.
/// See https://cr.yp.to/proto/ucspi.txt
#[derive(Debug)]
struct UcspiClient {
read: File,
read_epoll_fd: RawFd,
read_timeout: Option<Duration>,
write: File,
}
impl UcspiClient {
/// Use fd 6 and 7 to connect to the net, as is specified.
/// Unsafe because fd 6 and 7 are global resources and we don't mutex them.
pub unsafe fn new_from_6_and_7() -> std::io::Result<Self> {
unsafe {
let read_epoll_fd = epoll::create(false)?;
Ok(UcspiClient {
read: File::from_raw_fd(6),
read_epoll_fd,
read_timeout: None,
write: File::from_raw_fd(7),
})
}
}
}
/// Emulates set_read_timeout() like on a TCP socket with an epoll on read.
/// The BSD socket API is rather bad, so fd != fd,
/// and if we cast the `UcspiClient` fds to `TcpStream` instead of `File`,
/// we'd break any UCSPI client programs that *don't* connect to TCP.
/// Instead we use the (linux) `epoll` API in read to wait on the timeout.
impl SetReadTimeout for UcspiClient {
fn set_read_timeout(&mut self, timeout: Option<Duration>) -> imap::Result<()> {
self.read_timeout = timeout;
Ok(())
}
}
impl Read for UcspiClient {
// TODO: test the epoll code with a short timeout
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
const NO_DATA: u64 = 0;
// in order to implement the read_timeout,
// we use epoll to wait for either data or time out
epoll::ctl(
self.read_epoll_fd,
epoll::ControlOptions::EPOLL_CTL_ADD,
self.read.as_raw_fd(),
epoll::Event::new(epoll::Events::EPOLLIN, NO_DATA),
)?;
let UNUSED = epoll::Event::new(epoll::Events::EPOLLIN, NO_DATA);
let wait = epoll::wait(
self.read_epoll_fd,
match self.read_timeout {
Some(duration) => {
i32::try_from(duration.as_millis()).expect("duration too big for epoll")
}
None => -1, // infinite
},
// event that was generated; but we don't care
&mut vec![UNUSED; 1][..],
);
// Delete the listen fd from the epoll fd before reacting
// (otherwise it fails on the next read with `EPOLL_CTL_ADD`)
epoll::ctl(
self.read_epoll_fd,
epoll::ControlOptions::EPOLL_CTL_DEL,
self.read.as_raw_fd(),
UNUSED,
)?;
match wait {
// timeout happened (0 events)
Ok(0) => Err(std::io::Error::new(
std::io::ErrorKind::TimedOut,
"ucspi read timeout",
)),
// it's ready for reading, so we can read
Ok(_) => self.read.read(buf),
// error
err => err,
}
}
}
/// Just proxy through the `Write` of the write fd.
impl Write for UcspiClient {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
self.write.write(buf)
}
fn flush(&mut self) -> std::io::Result<()> {
self.write.flush()
}
}
/// Connect to IMAP account and listen for new mails on the INBOX.
fn main() {
exec_helpers::no_args("imap-idle");
// TODO: use arglib_netencode
let username = std::env::var("IMAP_USERNAME").expect("username");
let password = std::env::var("IMAP_PASSWORD").expect("password");
let net = unsafe { UcspiClient::new_from_6_and_7().expect("no ucspi client for you") };
let client = imap::Client::new(net);
let mut session = client
.login(username, password)
.map_err(|(err, _)| err)
.expect("unable to login");
eprintln!("{:#?}", session);
let list = session.list(None, Some("*"));
eprintln!("{:#?}", list);
let mailbox = session.examine("INBOX");
eprintln!("{:#?}", mailbox);
fn now() -> String {
String::from_utf8_lossy(&std::process::Command::new("date").output().unwrap().stdout)
.trim_end()
.to_string()
}
loop {
eprintln!("{}: idling on INBOX", now());
let mut handle = session.idle().expect("cannot idle on INBOX");
let () = handle.wait_keepalive().expect("waiting on idle failed");
eprintln!("{}: The mailbox has changed!", now());
}
}
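// Usage sketch (assumed): run under a UCSPI TCP client so that fds 6 and 7
// carry the network connection, e.g. with s6-networking:
//   env IMAP_USERNAME=me IMAP_PASSWORD=secret \
//     s6-tcpclient imap.example.org 143 ./imap-idle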


@ -1,93 +0,0 @@
{ pkgs, depot, lib, ... }:
let
# import the dhall file as nix expression via dhall-nix.
# Converts the normalized dhall expression to a nix file,
# puts it in the store and imports it.
# Types are erased, functions are converted to nix functions,
# unions values are nix functions that take a record of match
# functions for their alternatives.
# TODO: document better
importDhall =
{
# Root path of the dhall file tree to import (will be filtered by files)
root
, # A list of files which should be taken from `root` (relative paths).
# This is for minimizing the amount of things that have to be copied to the store.
# TODO: can you have directory prefixes?
files
, # The path of the dhall file which should be evaluated, relative to `root`, has to be in `files`
main
, # List of dependencies (TODO: what is a dependency?)
deps
, # dhall type of `main`, or `null` if anything should be possible.
type ? null
}:
let
absRoot = path: toString root + "/" + path;
src =
depot.users.Profpatsch.exactSource
root
# exactSource wants nix paths, but I think relative paths
# as strings are more intuitive.
([ (absRoot main) ] ++ (map absRoot files));
cache = ".cache";
cacheDhall = "${cache}/dhall";
hadTypeAnnot = type != null;
typeAnnot = lib.optionalString hadTypeAnnot ": ${type}";
convert = pkgs.runCommandLocal "dhall-to-nix" { inherit deps; } ''
mkdir -p ${cacheDhall}
for dep in $deps; do
${pkgs.xorg.lndir}/bin/lndir -silent $dep/${cacheDhall} ${cacheDhall}
done
export XDG_CACHE_HOME=$(pwd)/${cache}
# go into the source directory, so that the type can import files.
# TODO: This is a bit of a hack hrm.
cd "${src}"
printf 'Generating dhall nix code. Run
%s --file %s
to reproduce
' \
${pkgs.dhall}/bin/dhall \
${absRoot main}
${if hadTypeAnnot then ''
printf '%s' ${lib.escapeShellArg "${src}/${main} ${typeAnnot}"} \
| ${pkgs.dhall-nix}/bin/dhall-to-nix \
> $out
''
else ''
printf 'No type annotation given, the dhall expression type was:\n'
${pkgs.dhall}/bin/dhall type --file "${src}/${main}"
printf '%s' ${lib.escapeShellArg "${src}/${main}"} \
| ${pkgs.dhall-nix}/bin/dhall-to-nix \
> $out
''}
'';
in
import convert;
# read dhall file in as JSON, then import as nix expression.
# The dhall file must not try to import from non-local URLs!
readDhallFileAsJson = dhallType: file:
let
convert = pkgs.runCommandLocal "dhall-to-json" { } ''
printf '%s' ${lib.escapeShellArg "${file} : ${dhallType}"} \
| ${pkgs.dhall-json}/bin/dhall-to-json \
> $out
'';
in
builtins.fromJSON (builtins.readFile convert);
in
{
inherit
importDhall
readDhallFileAsJson
;
}
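# Usage sketch (hypothetical call site; the paths and type are made up):
#   importDhall {
#     root = ./config;
#     files = [ "lib.dhall" ];
#     main = "main.dhall";
#     deps = [ ];
#     type = "{ port : Natural }";
#   }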


@ -1,48 +0,0 @@
{ depot, pkgs, ... }:
let
drv =
pkgs.stdenv.mkDerivation {
pname = "jaeger";
version = "1.49.0";
src = pkgs.fetchurl {
url = "https://github.com/jaegertracing/jaeger/releases/download/v1.49.0/jaeger-1.49.0-linux-amd64.tar.gz";
hash = "sha256-QhxISDlk/t431EesgVkHWTe7yiw2B+yyfq//GLP0As4=";
};
phases = [ "unpackPhase" "installPhase" "fixupPhase" ];
installPhase = ''
mkdir -p $out/bin
install ./jaeger-all-in-one $out/bin
'';
};
image =
pkgs.dockerTools.buildImage {
name = "jaeger";
tag = "1.49.0";
copyToRoot = drv;
config = {
Cmd = [ "/bin/jaeger-all-in-one" ];
};
};
runner =
depot.nix.writeExecline "jaeger-docker-run" { } [
"if"
[ "docker" "load" "-i" image ]
"docker"
"run"
"--rm"
"--name"
"jaeger"
# Web UI
"-p"
"16686:16686"
# Opentelemetry
"-p"
"4318:4318"
"jaeger:1.49.0"
"jaeger-all-in-one"
"$@"
];
in
runner
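# Usage sketch (assumed): building this target yields a script that loads the
# image into docker and runs it; the web UI is then at http://localhost:16686
# and the OTLP HTTP endpoint at port 4318:
#   $(nix-build -A <this-target>)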


@ -1,389 +0,0 @@
{-# LANGUAGE QuasiQuotes #-}
{-# OPTIONS_GHC -Wno-orphans #-}
module Main where
import Conduit ((.|))
import Conduit qualified as Cond
import Control.Category qualified as Cat
import Control.Foldl qualified as Fold
import Data.ByteString.Internal qualified as Bytes
import Data.Error.Tree
import Data.Int (Int64)
import Data.List qualified as List
import Data.Map.Strict qualified as Map
import Data.Maybe (catMaybes)
import Data.Text qualified as Text
import Data.Text.IO qualified as Text
import Database.SQLite.Simple qualified as Sqlite
import Database.SQLite.Simple.FromField qualified as Sqlite
import Database.SQLite.Simple.QQ qualified as Sqlite
import FieldParser qualified as Field
import Label
import Parse
import PossehlAnalyticsPrelude
import Text.XML (def)
import Text.XML qualified as Xml
import Prelude hiding (init, maybe)
main :: IO ()
main = do
f <- file
f.documentRoot
& filterDown
& toTree
& prettyErrorTree
& Text.putStrLn
test :: IO ()
test = do
withEnv $ \env -> do
migrate env
f <- file
parseJbovlasteXml f
& \case
Left errs -> Text.putStrLn $ prettyErrorTree errs
Right valsi -> insertValsi env valsi
filterDown :: Xml.Element -> Xml.Element
filterDown el =
el
& filterElementsRec noUsers
& downTo (T2 (label @"maxdepth" 5) (label @"maxlistitems" 30))
data Valsi = Valsi
{ word :: Text,
definition :: Text,
definitionId :: Natural,
typ :: Text,
selmaho :: Maybe Text,
notes :: Maybe Text,
glosswords :: [T2 "word" Text "sense" (Maybe Text)],
keywords :: [T3 "word" Text "place" Natural "sense" (Maybe Text)]
}
deriving stock (Show)
insertValsi :: Env -> [Valsi] -> IO ()
insertValsi env vs = do
Sqlite.withTransaction env.envData $
do
valsiIds <-
Cond.yieldMany vs
.| Cond.mapMC
( \v ->
Sqlite.queryNamed
@(Sqlite.Only Int64)
env.envData
[Sqlite.sql|
INSERT INTO valsi
(word , definition , type , selmaho , notes )
VALUES
(:word, :definition, :type, :selmaho, :notes)
RETURNING (id)
|]
[ ":word" Sqlite.:= v.word,
":definition" Sqlite.:= v.definition,
":type" Sqlite.:= v.typ,
":selmaho" Sqlite.:= v.selmaho,
":notes" Sqlite.:= v.notes
]
>>= \case
[one] -> pure one
_ -> error "more or less than one result"
)
.| Cond.sinkList
& Cond.runConduit
for_ (zip valsiIds vs) $ \(Sqlite.Only vId, v) -> do
for_ v.glosswords $ \g -> do
Sqlite.executeNamed
env.envData
[Sqlite.sql|
INSERT INTO glosswords
(valsi_id , word , sense )
VALUES
(:valsi_id, :word, :sense)
|]
[ ":valsi_id" Sqlite.:= vId,
":word" Sqlite.:= g.word,
":sense" Sqlite.:= g.sense
]
for_ (zip valsiIds vs) $ \(Sqlite.Only vId, v) -> do
for_ v.keywords $ \g -> do
Sqlite.executeNamed
env.envData
[Sqlite.sql|
INSERT INTO keywords
(valsi_id , word , place , sense )
VALUES
(:valsi_id, :word, :place, :sense)
|]
[ ":valsi_id" Sqlite.:= vId,
":word" Sqlite.:= g.word,
":place" Sqlite.:= (g.place & fromIntegral @Natural @Int),
":sense" Sqlite.:= g.sense
]
migrate :: (HasField "envData" p Sqlite.Connection) => p -> IO ()
migrate env = do
let x q = Sqlite.execute env.envData q ()
x
[Sqlite.sql|
CREATE TABLE IF NOT EXISTS valsi (
id integer PRIMARY KEY,
word text NOT NULL,
definition text NOT NULL,
type text NOT NULL,
selmaho text NULL,
notes text NULL
)
|]
x
[Sqlite.sql|
CREATE TABLE IF NOT EXISTS glosswords (
id integer PRIMARY KEY,
valsi_id integer NOT NULL,
word text NOT NULL,
sense text NULL,
FOREIGN KEY(valsi_id) REFERENCES valsi(id)
)
|]
x
[Sqlite.sql|
CREATE TABLE IF NOT EXISTS keywords (
id integer PRIMARY KEY,
valsi_id integer NOT NULL,
word text NOT NULL,
place integer NOT NULL,
sense text NULL,
FOREIGN KEY(valsi_id) REFERENCES valsi(id)
)
|]
data Env = Env
{ envData :: Sqlite.Connection
}
withEnv :: (Env -> IO a) -> IO a
withEnv inner = do
withSqlite "./jbovlaste.sqlite" $ \envData -> inner Env {..}
withSqlite :: String -> (Sqlite.Connection -> IO a) -> IO a
withSqlite fileName inner = Sqlite.withConnection fileName $ \conn -> do
-- Sqlite.setTrace conn (Just (\msg -> Text.hPutStrLn IO.stderr [fmt|{fileName}: {msg}|]))
Sqlite.execute conn [Sqlite.sql|PRAGMA foreign_keys = ON|] ()
inner conn
parseJbovlasteXml :: (HasField "documentRoot" r Xml.Element) => r -> Either ErrorTree [Valsi]
parseJbovlasteXml xml =
xml.documentRoot
& runParse
"cannot parse jbovlaste.xml"
parser
where
parser =
(element "dictionary" <&> (.elementNodes) <&> mapMaybe nodeElementMay)
>>> ( find
( element "direction"
>>> do
(attribute "from" >>> exactly showToText "lojban")
*> (attribute "to" >>> exactly showToText "English")
*> Cat.id
)
<&> (\x -> x.elementNodes <&> nodeElementMay)
)
>>> (multiple (maybe valsi) <&> catMaybes)
valsi =
element "valsi"
>>> do
let subNodes =
( Cat.id
<&> (.elementNodes)
<&> mapMaybe nodeElementMay
)
let subElementContent elName =
subNodes
>>> ( (find (element elName))
<&> (.elementNodes)
)
>>> exactlyOne
>>> content
let optionalSubElementContent elName =
subNodes
>>> ((findAll (element elName) >>> zeroOrOne))
>>> (maybe (lmap (.elementNodes) exactlyOne >>> content))
word <- attribute "word"
typ <- attribute "type"
selmaho <- optionalSubElementContent "selmaho"
definition <- subElementContent "definition"
definitionId <- subElementContent "definitionid" >>> fieldParser Field.decimalNatural
notes <- optionalSubElementContent "notes"
glosswords <-
(subNodes >>> findAll (element "glossword"))
>>> ( multiple $ do
word' <- label @"word" <$> (attribute "word")
sense <- label @"sense" <$> (attributeMay "sense")
pure $ T2 word' sense
)
keywords <-
(subNodes >>> findAll (element "keyword"))
>>> ( multiple $ do
word' <- label @"word" <$> (attribute "word")
place <- label @"place" <$> (attribute "place" >>> fieldParser Field.decimalNatural)
sense <- label @"sense" <$> (attributeMay "sense")
pure $ T3 word' place sense
)
pure $ Valsi {..}
file :: IO Xml.Document
file = Xml.readFile def "./jbovlaste-en.xml"
-- | Filter XML elements recursively based on the given predicate
filterElementsRec :: (Xml.Element -> Bool) -> Xml.Element -> Xml.Element
filterElementsRec f el =
el
{ Xml.elementNodes =
mapMaybe
( \case
Xml.NodeElement el' ->
if f el'
then Just $ Xml.NodeElement $ filterElementsRec f el'
else Nothing
other -> Just other
)
el.elementNodes
}
-- | no <user> allowed
noUsers :: Xml.Element -> Bool
noUsers el = el.elementName.nameLocalName /= "user"
downTo :: (T2 "maxdepth" Int "maxlistitems" Int) -> Xml.Element -> Xml.Element
downTo n el =
if n.maxdepth > 0
then
el
{ Xml.elementNodes =
( do
let eleven = take (n.maxlistitems + 1) $ map down el.elementNodes
if List.length eleven == (n.maxlistitems + 1)
then eleven <> [Xml.NodeComment "snip!"]
else eleven
)
}
else el {Xml.elementNodes = [Xml.NodeComment "snip!"]}
where
down =
\case
Xml.NodeElement el' ->
Xml.NodeElement $
downTo
( T2
(label @"maxdepth" $ n.maxdepth - 1)
(label @"maxlistitems" n.maxlistitems)
)
el'
more -> more
toTree :: Xml.Element -> ErrorTree
toTree el = do
case el.elementNodes & filter (not . isEmptyContent) & nonEmpty of
Nothing -> singleError (newError (prettyXmlElement el))
Just (n :| []) | not $ isElementNode n -> singleError $ errorContext (prettyXmlElement el) (nodeErrorNoElement n)
Just nodes -> nestedMultiError (newError (prettyXmlElement el)) (nodes <&> node)
where
isEmptyContent = \case
Xml.NodeContent c -> c & Text.all Bytes.isSpaceChar8
_ -> False
isElementNode = \case
Xml.NodeElement _ -> True
_ -> False
node :: Xml.Node -> ErrorTree
node = \case
Xml.NodeElement el' -> toTree el'
other -> singleError $ nodeErrorNoElement other
nodeErrorNoElement :: Xml.Node -> Error
nodeErrorNoElement = \case
Xml.NodeInstruction i -> [fmt|Instruction: {i & show}|]
Xml.NodeContent c -> [fmt|"{c & Text.replace "\"" "\\\""}"|]
Xml.NodeComment c -> [fmt|<!-- {c} -->|]
Xml.NodeElement _ -> error "NodeElement not allowed here"
prettyXmlName :: Xml.Name -> Text
prettyXmlName n = [fmt|{n.namePrefix & fromMaybe ""}{n.nameLocalName}|]
prettyXmlElement :: Xml.Element -> Text
prettyXmlElement el =
if not $ null el.elementAttributes
then [fmt|<{prettyXmlName el.elementName}: {attrs el.elementAttributes}>|]
else [fmt|<{prettyXmlName el.elementName}>|]
where
attrs :: Map Xml.Name Text -> Text
attrs a = a & Map.toList <&> (\(k, v) -> [fmt|{prettyXmlName k}={v}|]) & Text.intercalate ", " & \s -> [fmt|({s})|]
nodeElementMay :: Xml.Node -> Maybe Xml.Element
nodeElementMay = \case
Xml.NodeElement el -> Just el
_ -> Nothing
element :: Text -> Parse Xml.Element Xml.Element
element name = Parse $ \(ctx, el) ->
if el.elementName.nameLocalName == name
then Success (ctx & addContext (prettyXmlName el.elementName), el)
else Failure $ singleton [fmt|Expected element named <{name}> but got {el & prettyXmlElement} at {showContext ctx}|]
content :: Parse Xml.Node Text
content = Parse $ \(ctx, node) -> case node of
Xml.NodeContent t -> Success (ctx, t)
-- TODO: give an example of the node content?
n -> Failure $ singleton [fmt|Expected a content node, but got a {n & nodeType} node, at {showContext ctx}|]
where
nodeType = \case
Xml.NodeContent _ -> "content" :: Text
Xml.NodeComment _ -> "comment"
Xml.NodeInstruction _ -> "instruction"
Xml.NodeElement _ -> "element"
attribute :: Text -> Parse Xml.Element Text
attribute name = Parse $ \(ctx, el) ->
case el.elementAttributes & Map.mapKeys (.nameLocalName) & Map.lookup name of
Just a -> Success (ctx & addContext [fmt|{{attr:{name}}}|], a)
Nothing -> Failure $ singleton [fmt|Attribute "{name}" missing at {showContext ctx}|]
attributeMay :: Text -> Parse Xml.Element (Maybe Text)
attributeMay name = Parse $ \(ctx, el) ->
case el.elementAttributes & Map.mapKeys (.nameLocalName) & Map.lookup name of
Just a -> Success (ctx & addContext [fmt|{{attr:{name}}}|], Just a)
Nothing -> Success (ctx, Nothing)
instance
( Sqlite.FromField t1,
Sqlite.FromField t2,
Sqlite.FromField t3
) =>
Sqlite.FromRow (T3 l1 t1 l2 t2 l3 t3)
where
fromRow = do
T3
<$> (label @l1 <$> Sqlite.field)
<*> (label @l2 <$> Sqlite.field)
<*> (label @l3 <$> Sqlite.field)
foldRows ::
forall row params b.
(Sqlite.FromRow row, Sqlite.ToRow params) =>
Sqlite.Connection ->
Sqlite.Query ->
params ->
Fold.Fold row b ->
IO b
foldRows conn qry params = Fold.purely f
where
f :: forall x. (x -> row -> x) -> x -> (x -> b) -> IO b
f acc init extract = do
x <- Sqlite.fold conn qry params init (\a r -> pure $ acc a r)
pure $ extract x
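-- Usage sketch for 'foldRows' (hypothetical query against the schema above):
--
-- @@
-- countWords :: Sqlite.Connection -> IO Int
-- countWords conn =
--   foldRows @(Sqlite.Only Text) conn [Sqlite.sql|SELECT word FROM valsi|] () Fold.length
-- @@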


@ -1,33 +0,0 @@
{ depot, pkgs, lib, ... }:
let
# bins = depot.nix.getBins pkgs.sqlite ["sqlite3"];
jbovlaste-sqlite = pkgs.haskellPackages.mkDerivation {
pname = "jbovlaste-sqlite";
version = "0.1.0";
src = depot.users.Profpatsch.exactSource ./. [
./jbovlaste-sqlite.cabal
./JbovlasteSqlite.hs
];
libraryHaskellDepends = [
pkgs.haskellPackages.pa-prelude
pkgs.haskellPackages.pa-label
pkgs.haskellPackages.pa-error-tree
pkgs.haskellPackages.pa-field-parser
depot.users.Profpatsch.my-prelude
pkgs.haskellPackages.foldl
pkgs.haskellPackages.sqlite-simple
pkgs.haskellPackages.xml-conduit
];
isExecutable = true;
isLibrary = false;
license = lib.licenses.mit;
};
in
jbovlaste-sqlite


@ -1,76 +0,0 @@
cabal-version: 3.0
name: jbovlaste-sqlite
version: 0.1.0.0
author: Profpatsch
maintainer: mail@profpatsch.de
common common-options
ghc-options:
-Wall
-Wno-type-defaults
-Wunused-packages
-Wredundant-constraints
-fwarn-missing-deriving-strategies
-- See https://downloads.haskell.org/ghc/latest/docs/users_guide/exts.html
-- for a description of all these extensions
default-extensions:
-- Infer Applicative instead of Monad where possible
ApplicativeDo
-- Allow literal strings to be Text
OverloadedStrings
-- Syntactic sugar improvements
LambdaCase
MultiWayIf
-- Makes the (deprecated) usage of * instead of Data.Kind.Type an error
NoStarIsType
-- Convenient and crucial to deal with ambiguous field names, commonly
-- known as RecordDotSyntax
OverloadedRecordDot
-- does not export record fields as functions, use OverloadedRecordDot to access instead
NoFieldSelectors
-- Record punning
RecordWildCards
-- Improved Deriving
DerivingStrategies
DerivingVia
-- Type-level strings
DataKinds
-- to enable the `type` keyword in import lists (ormolu uses this automatically)
ExplicitNamespaces
default-language: GHC2021
executable jbovlaste-sqlite
import: common-options
main-is: JbovlasteSqlite.hs
build-depends:
base >=4.15 && <5,
pa-prelude,
pa-label,
pa-error-tree,
pa-field-parser,
my-prelude,
containers,
selective,
semigroupoids,
validation-selective,
sqlite-simple,
foldl,
conduit,
bytestring,
text,
sqlite-simple,
xml-conduit,


@ -1,137 +0,0 @@
{ ... }:
let
id = x: x;
const = x: y: x;
comp = f: g: x: f (g x);
_ = v: f: f v;
# Profunctor (p :: Type -> Type -> Type)
Profunctor = rec {
# dimap :: (a -> b) -> (c -> d) -> p b c -> p a d
dimap = f: g: x: lmap f (rmap g x);
# lmap :: (a -> b) -> p b c -> p a c
lmap = f: dimap f id;
# rmap :: (c -> d) -> p b c -> p b d
rmap = g: dimap id g;
};
# Profunctor (->)
profunctorFun = Profunctor // {
# dimap :: (a -> b) -> (c -> d) -> (b -> c) -> a -> d
dimap = ab: cd: bc: a: cd (bc (ab a));
# lmap :: (a -> b) -> (b -> c) -> (a -> c)
lmap = ab: bc: a: bc (ab a);
# rmap :: (c -> d) -> (b -> c) -> (b -> d)
rmap = cd: bc: b: cd (bc b);
};
tuple = fst: snd: {
inherit fst snd;
};
swap = { fst, snd }: {
fst = snd;
snd = fst;
};
# Profunctor p => Strong (p :: Type -> Type -> Type)
Strong = pro: pro // rec {
# firstP :: p a b -> p (a, c) (b, c)
firstP = pab: pro.dimap swap swap (pro.secondP pab);
# secondP :: p a b -> p (c, a) (c, b)
secondP = pab: pro.dimap swap swap (pro.firstP pab);
};
# Strong (->)
strongFun = Strong profunctorFun // {
# firstP :: (a -> b) -> (a, c) -> (b, c)
firstP = f: { fst, snd }: { fst = f fst; inherit snd; };
# secondP :: (a -> b) -> (c, a) -> (c, b)
secondP = f: { snd, fst }: { snd = f snd; inherit fst; };
};
# Iso s t a b :: forall p. Profunctor p -> p a b -> p s t
# iso :: (s -> a) -> (b -> t) -> Iso s t a b
iso = pro: pro.dimap;
# Lens s t a b :: forall p. Strong p -> p a b -> p s t
# lens :: (s -> a) -> (s -> b -> t) -> Lens s t a b
lens = strong: get: set: pab:
lensP
strong
(s: tuple (get s) (b: set s b))
pab;
# lensP :: (s -> (a, b -> t)) -> Lens s t a b
lensP = strong: to: pab:
strong.dimap
to
({ fst, snd }: snd fst)
(strong.firstP pab);
# first element of a tuple
# _1 :: Lens (a, c) (b, c) a b
_1 = strong: strong.firstP;
# second element of a tuple
# _2 :: Lens (c, a) (c, b) a b
_2 = strong: strong.secondP;
# a lens for the given field in the record
# field :: (f :: String) -> Lens { f :: a; ... } { f :: b; ... } a b
field = name: strong:
lens
strong
(attrs: attrs.${name})
(attrs: a: attrs // { ${name} = a; });
# Setter :: (->) a b -> (->) s t
# Setter :: (a -> b) -> (s -> t)
# Subclasses of profunctor for (->).
# We only have Strong for now, but when we implement Choice we need to add it here.
profunctorSubclassesFun = strongFun;
# over :: Setter s t a b -> (a -> b) -> s -> t
over = setter:
# A setter is instantiated at the profunctor-subclass instances of (->).
(setter profunctorSubclassesFun);
# set :: Setter s t a b -> b -> s -> t
set = setter: b: over setter (const b);
# combine a bunch of optics, for the subclass instance of profunctor you give it.
optic = accessors: profunctorSubclass:
builtins.foldl' comp id
(map (accessor: accessor profunctorSubclass) accessors);
in
{
inherit
id
_
const
comp
Profunctor
profunctorFun
Strong
strongFun
iso
lens
optic
_1
_2
field
tuple
swap
over
set
;
}
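# A minimal usage sketch (hypothetical, assuming this file is imported as `optics`):
#
#   let o = optics.optic [ (optics.field "a") optics._1 ];
#   in optics.over o (x: x + 1) { a = { fst = 1; snd = "keep"; }; }
#   # => { a = { fst = 2; snd = "keep"; }; }
#
# `optics.set o 42` would replace the focused value instead of mapping over it.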

View file

@ -1,108 +0,0 @@
{ depot, pkgs, ... }:
let
bins = depot.nix.getBins pkgs.coreutils [ "printf" "echo" "cat" "printenv" "tee" ]
// depot.nix.getBins pkgs.bash [ "bash" ]
// depot.nix.getBins pkgs.fdtools [ "multitee" ]
;
# Print `msg` and argv to stderr, then execute into argv
debugExec = msg: depot.nix.writeExecline "debug-exec" { } [
"if"
[
"fdmove"
"-c"
"1"
"2"
"if"
[ bins.printf "%s: " msg ]
"if"
[ bins.echo "$@" ]
]
"$@"
];
# Print stdin to stderr and stdout
eprint-stdin = depot.nix.writeExecline "eprint-stdin" { } [
"pipeline"
[ bins.multitee "0-1,2" ]
"$@"
];
# Assume the input on stdin is netencode, pretty print it to stderr and forward it to stdout
eprint-stdin-netencode = depot.nix.writeExecline "eprint-stdin-netencode" { } [
"pipeline"
[
# move stdout to 3
"fdmove"
"3"
"1"
# the multitee copies stdin to 1 (the other pipeline end) and 3 (the stdout of the outer pipeline block)
"pipeline"
[ bins.multitee "0-1,3" ]
# make stderr the stdout of pretty, merging with the stderr of pretty
"fdmove"
"-c"
"1"
"2"
depot.users.Profpatsch.netencode.pretty
]
"$@"
];
# print the environment variable named by $1 to stderr, then execute into the rest of argv
eprintenv = depot.nix.writeExecline "eprintenv" { readNArgs = 1; } [
"ifelse"
[ "fdmove" "-c" "1" "2" bins.printenv "$1" ]
[ "$@" ]
"if"
[ depot.tools.eprintf "eprintenv: could not find \"\${1}\" in the environment\n" ]
"$@"
];
# Split stdin into two commands, given by a block and the rest of argv
#
# Example (execline):
#
# pipeline [ echo foo ]
# split-stdin [ fdmove 1 2 foreground [ cat ] echo "bar" ] cat
#
# stdout: foo\n
# stderr: foo\nbar\n
split-stdin = depot.nix.writeExecline "split-stdin" { argMode = "env"; } [
"pipeline"
[
# this is horrible yes but the quickest way I knew how to implement it
"runblock"
"1"
bins.bash
"-c"
''${bins.tee} >("$@")''
"bash-split-stdin"
]
"runblock"
"-r"
"1"
];
# remove everything but a few selected environment variables
runInEmptyEnv = keepVars:
let
importas = pkgs.lib.concatMap (var: [ "importas" "-i" var var ]) keepVars;
# we have to explicitly call export here, because PATH is probably empty
export = pkgs.lib.concatMap (var: [ "${pkgs.execline}/bin/export" var ''''${${var}}'' ]) keepVars;
in
depot.nix.writeExecline "empty-env" { }
(importas ++ [ "emptyenv" ] ++ export ++ [ "${pkgs.execline}/bin/exec" "$@" ]);
in
{
inherit
debugExec
eprint-stdin
eprint-stdin-netencode
eprintenv
split-stdin
runInEmptyEnv
;
}
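# Usage sketch (illustrative): the script built by `runInEmptyEnv [ "PATH" "HOME" ]`
# execs into its argv with only PATH and HOME surviving, e.g.
#
#   empty-env printenv
#
# prints just those two variables. Similarly, `debugExec "msg"` can be dropped
# in front of any argv to trace what is about to be executed.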

View file

@ -1,173 +0,0 @@
{-# LANGUAGE DerivingStrategies #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE NumericUnderscores #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# OPTIONS_GHC -Wall #-}
module Main where
import Conduit
import Conduit qualified as Cond
import Control.Concurrent
import Control.Concurrent.Async qualified as Async
import Control.Monad
import Data.Aeson.BetterErrors qualified as Json
import Data.Bifunctor
import Data.Conduit.Binary qualified as Conduit.Binary
import Data.Conduit.Combinators qualified as Cond
import Data.Conduit.Process
import Data.Error
import Data.Function
import Data.Functor
import Data.Text.IO (hPutStrLn)
import MyPrelude
import System.Directory qualified as Dir
import System.Environment qualified as Env
import System.Exit qualified as Exit
import System.FilePath (takeDirectory)
import System.FilePath.Posix qualified as FilePath
import System.IO (stderr)
import System.Posix qualified as Posix
import Prelude hiding (log)
data LorriEvent = LorriEvent
{ nixFile :: Text,
eventType :: LorriEventType
}
deriving stock (Show)
data LorriEventType
= Completed
| Started
| EvalFailure
deriving stock (Show)
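-- Example lines on lorri's stdout, as implied by the parser below
-- (exact field set assumed, paths illustrative):
--   {"Started":{"nix_file":"/home/user/project/shell.nix"}}
--   {"Completed":{"nix_file":"/home/user/project/shell.nix"}}
--   {"Failure":{"nix_file":"/home/user/project/shell.nix"}}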
main :: IO ()
main = do
argv <- Env.getArgs <&> nonEmpty
dir <- Dir.getCurrentDirectory
shellNix <-
findShellNix dir >>= \case
Nothing -> Exit.die [fmt|could not find any shell.nix in or above the directory {dir}|]
Just s -> pure s
getEventChan :: MVar (Chan LorriEvent) <- newEmptyMVar
Async.race_
( do
sendEventChan :: Chan LorriEvent <- newChan
(exitCode, ()) <-
sourceProcessWithConsumer
(proc "lorri" ["internal", "stream-events"])
$
-- first, we want to send a message over the chan that the process is running (for timeout)
liftIO (putMVar getEventChan sendEventChan)
*> Conduit.Binary.lines
.| Cond.mapC
( \jsonBytes ->
(jsonBytes :: ByteString)
& Json.parseStrict
( Json.key
"Completed"
( do
nixFile <- Json.key "nix_file" Json.asText
pure LorriEvent {nixFile, eventType = Completed}
)
Json.<|> Json.key
"Started"
( do
nixFile <- Json.key "nix_file" Json.asText
pure LorriEvent {nixFile, eventType = Started}
)
Json.<|> Json.key
"Failure"
( do
nixFile <- Json.key "nix_file" Json.asText
pure LorriEvent {nixFile, eventType = EvalFailure}
)
)
& first Json.displayError'
& first (map newError)
& first (smushErrors [fmt|Cannot parse line returned by lorri: {jsonBytes & bytesToTextUtf8Lenient}|])
& unwrapError
)
.| (Cond.mapM_ (\ev -> writeChan sendEventChan ev))
log [fmt|lorri internal stream-events exited {show exitCode}|]
)
( do
let waitMs ms = threadDelay (ms * 1000)
-- log [fmt|Waiting for lorri event for {shellNix}|]
eventChan <- takeMVar getEventChan
let isOurEvent ev = FilePath.normalise (ev & nixFile & textToString) == FilePath.normalise shellNix
let handleEvent ev =
case ev & eventType of
Started ->
log [fmt|waiting for lorri build to finish|]
Completed -> do
log [fmt|build completed|]
exec (inDirenvDir (takeDirectory shellNix) <$> argv)
EvalFailure -> do
log [fmt|evaluation failed! for path {ev & nixFile}|]
Exit.exitWith (Exit.ExitFailure 111)
-- wait for 100ms for the first message from lorri,
-- or else assume lorri is not building the project yet
Async.race
(waitMs 100)
( do
-- find the first event that we can use
let go = do
ev <- readChan eventChan
if isOurEvent ev then pure ev else go
go
)
>>= \case
Left () -> do
log [fmt|No event received from lorri, assuming this is the first evaluation|]
exec argv
Right ch -> handleEvent ch
runConduit $
repeatMC (readChan eventChan)
.| filterC isOurEvent
.| mapM_C handleEvent
)
where
inDirenvDir dir' argv' = ("direnv" :| ["exec", dir']) <> argv'
exec = \case
Just (exe :| args') -> Posix.executeFile exe True args' Nothing
Nothing -> Exit.exitSuccess
log :: Text -> IO ()
log msg = hPutStrLn stderr [fmt|lorri-wait-for-eval: {msg}|]
-- | Searches from the current directory upwards, until it finds the `shell.nix`.
findShellNix :: FilePath -> IO (Maybe FilePath)
findShellNix curDir = do
let go :: (FilePath -> IO (Maybe FilePath))
go dir = do
let file = dir FilePath.</> "shell.nix"
Dir.doesFileExist file >>= \case
True -> pure (Just file)
False -> do
let parent = FilePath.takeDirectory dir
if parent == dir
then pure Nothing
else go parent
go (FilePath.normalise curDir)
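-- e.g. for curDir = "/home/user/project/src" this checks
--   /home/user/project/src/shell.nix, /home/user/project/shell.nix, ..., /shell.nix
-- (paths illustrative) and returns the first existing file, or Nothing at the root.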
smushErrors :: Foldable t => Text -> t Error -> Error
smushErrors msg errs =
errs
-- hrm, pretty printing and then creating a new error is kinda shady
& foldMap (\err -> "\n- " <> prettyError err)
& newError
& errorContext msg

View file

@ -1,7 +0,0 @@
# lorri-wait-for-eval
A helper script for [lorri](https://github.com/nix-community/lorri). It wraps a command and executes it once lorri has finished evaluating the current `shell.nix`, so the command runs with the new environment.
This is useful when the command needs the new shell environment in scope, but you dont want to waste time waiting for the evaluation to finish by hand.
This should really be a feature of lorri, but I couldnt be assed to touch rust :P

View file

@ -1,20 +0,0 @@
{ depot, pkgs, lib, ... }:
let
lorri-wait-for-eval = pkgs.writers.writeHaskell "lorri-wait-for-eval"
{
libraries = [
depot.users.Profpatsch.my-prelude
pkgs.haskellPackages.async
pkgs.haskellPackages.aeson-better-errors
pkgs.haskellPackages.conduit-extra
pkgs.haskellPackages.error
pkgs.haskellPackages.PyF
pkgs.haskellPackages.unliftio
];
ghcArgs = [ "-threaded" ];
} ./LorriWaitForEval.hs;
in
lorri-wait-for-eval

View file

@ -1,6 +0,0 @@
/dist/
/.ninja/
/node_modules/
# ignore for now
/package-lock.json

View file

@ -1 +0,0 @@
../.prettierrc

View file

@ -1,17 +0,0 @@
builddir = .ninja
outdir = ./dist
jsdir = $outdir/js
rule tsc
command = node_modules/.bin/tsc
build $outdir/index.js: tsc | src/index.ts tsconfig.json
rule run
command = node $in
build run: run $outdir/index.js
build run-tap-bpm: run $outdir/index.js tap-bpm
pool = console
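# typical invocations (assuming node_modules was populated beforehand):
#   ninja run          # compiles src/index.ts, then runs dist/index.js
#   ninja run-tap-bpm  # same, but passes the tap-bpm argument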

View file

@ -1,149 +0,0 @@
{ pkgs, depot, lib, ... }:
let
bins = depot.nix.getBins pkgs.sqlite [ "sqlite3" ]
// depot.nix.getBins pkgs.util-linux [ "unshare" ]
// depot.nix.getBins pkgs.coreutils [ "echo" ]
// depot.nix.getBins pkgs.gnused [ "sed" ]
// depot.nix.getBins pkgs.squashfuse [ "squashfuse" ]
// depot.nix.getBins pkgs.jq [ "jq" ];
mpv-script = pkgs.writeTextFile {
name = "lyric.lua";
text =
lib.replaceStrings
[ "@get_subtitles_command@" ]
[ (toString lyric-to-temp-file) ]
(builtins.readFile ./lyric-mpv-script.lua);
derivationArgs.passthru.scriptName = "lyric.lua";
};
lyric-to-temp-file = depot.nix.writeExecline "lyric-to-temp-file" { readNArgs = 1; } [
"backtick"
"-E"
"cache"
[ depot.users.Profpatsch.xdg-cache-home ]
"if"
[ "mkdir" "-p" "\${cache}/lyric/as-files" ]
"if"
[
"redirfd"
"-w"
"1"
"\${cache}/lyric/as-files/\${1}.lrc"
lyric
"$1"
]
"printf"
"\${cache}/lyric/as-files/\${1}.lrc"
];
# escape single quotes for a SQLite string literal by doubling them (looool)
escapeSqliteString = depot.nix.writeExecline "escape-sqlite-string" { readNArgs = 1; } [
"pipeline"
[
"printf"
"%s"
"$1"
]
bins.sed
"s/''/''''/g"
];
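# e.g. `escape-sqlite-string "it's"` prints `it''s`, which is safe to splice
# between the single quotes of the tracks_fts() literal below.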
# Display lyrics for the given search string;
# search string can contain a substring of band name, album name, song title
#
# Use the database dump from https://lrclib.net/db-dumps, packed into a squashfs image at ~/.cache/lyric/lyric-db.squash that contains lrclib-db-dump.sqlite3 (mounted below)
#
# TODO: put in the nodejs argh
lyric =
(depot.nix.writeExecline "lyric" { readNArgs = 1; } [
"backtick"
"-E"
"cache"
[ depot.users.Profpatsch.xdg-cache-home ]
# make sure the squashfuse is only mounted while the command is running
bins.unshare
"--user"
"--mount"
"--pid"
"--map-root-user"
"--kill-child"
"if"
[ "mkdir" "-p" "\${cache}/lyric/dump" ]
# TODO: provide a command that takes an url of a lyric.gz and converts it to this here squash image
"if"
[ bins.squashfuse "-ononempty" "\${cache}/lyric/lyric-db.squash" "\${cache}/lyric/dump" ]
# please help me god
"backtick"
"-E"
"searchstring"
[ escapeSqliteString "$1" ]
"pipeline"
[
"pipeline"
[
"echo"
(''
.mode json
select * from (
-- first we try to find if we can find the track verbatim
select * from (select
synced_lyrics,
has_synced_lyrics,
plain_lyrics
from
tracks_fts('' + "'\${searchstring}'" + '') tf
join tracks t on t.rowid = tf.rowid
join lyrics l on t.rowid = l.track_id
order by
has_synced_lyrics desc, t.id
)
UNION
select * from (select
synced_lyrics,
has_synced_lyrics,
plain_lyrics
from
tracks_fts('' + "'\${searchstring}'" + '') tf
join tracks t on t.rowid = tf.rowid
join lyrics l on t.rowid = l.track_id
order by
has_synced_lyrics desc, t.id
)
)
where synced_lyrics is not null and synced_lyrics != ''''
and plain_lyrics is not null and plain_lyrics != ''''
limit
1;
''
)
]
bins.sqlite3
"file:\${cache}/lyric/dump/lrclib-db-dump.sqlite3?immutable=1"
]
bins.jq
"-r"
''
if .[0] == null
then ""
else
.[0]
| if .has_synced_lyrics == 1
then .synced_lyrics
else .plain_lyrics
end
end
''
]);
js = depot.users.Profpatsch.napalm.buildPackage ./. { };
in
{
inherit
lyric
js
mpv-script;
}

View file

@ -1,3 +0,0 @@
import config from '../eslint.config.mjs';
export default config;

View file

@ -1,5 +0,0 @@
/node_modules/
/out/
# ignore for now
/package-lock.json

View file

@ -1 +0,0 @@
../../.prettierrc

View file

@ -1 +0,0 @@
same as toplevel

View file

@ -1,3 +0,0 @@
import config from '../../eslint.config.mjs';
export default config;

View file

@ -1,87 +0,0 @@
{
"name": "profpatsch-jump-to-lrc-position",
"displayName": "Jump to .lrc Position in mpv",
"description": "Reads a timestamp from the current file and pipes it to a mpv socket",
"version": "0.0.1",
"engines": {
"vscode": "^1.75.0"
},
"categories": [
"Other"
],
"main": "./out/extension.js",
"activationEvents": [
"onLanguage:lrc"
],
"contributes": {
"commands": [
{
"command": "extension.jumpToLrcPosition",
"title": "Jump to .lrc Position",
"category": "LRC"
},
{
"command": "extension.shiftLyricsDown",
"title": "Shift Lyrics Down from Current Line",
"category": "LRC"
},
{
"command": "extension.shiftLyricsUp",
"title": "Shift Lyrics Up from Current Line",
"category": "LRC"
},
{
"command": "extension.tapBpm",
"title": "Add bpm header by tapping to the song",
"category": "LRC"
},
{
"command": "extension.quantizeToEigthNote",
"title": "Quantize timestamps to nearest eighth note",
"category": "LRC"
},
{
"command": "extension.fineTuneTimestampDown100MsAndPlay",
"title": "Remove 100 ms from current timestamp and play from shortly before the change",
"category": "LRC"
},
{
"command": "extension.fineTuneTimestampUp100MsAndPlay",
"title": "Add 100 ms to current timestamp and play from shortly before the change",
"category": "LRC"
},
{
"command": "extension.uploadLyricsToLrclibDotNet",
"title": "Upload Lyrics to lrclib.net",
"category": "LRC"
}
],
"languages": [
{
"id": "lrc",
"extensions": [
".lrc"
],
"aliases": [
"Lyric file"
]
}
]
},
"scripts": {
"vscode:prepublish": "npm run compile",
"compile": "tsc",
"install-extension": "vsce package --allow-missing-repository --out ./jump-to-lrc-position.vsix && code --install-extension ./jump-to-lrc-position.vsix"
},
"devDependencies": {
"vscode": "^1.1.37",
"@eslint/js": "^9.10.0",
"@types/eslint__js": "^8.42.3",
"@types/node": "^22.5.5",
"@typescript-eslint/parser": "^8.7.0",
"eslint": "^9.10.0",
"globals": "^15.9.0",
"typescript": "^5.6.2",
"typescript-eslint": "^8.6.0"
}
}

View file

@ -1,864 +0,0 @@
import * as vscode from 'vscode';
import * as net from 'net';
import { adjustTimestampToEighthNote, bpmToEighthNoteDuration } from './quantize-lrc';
import { publishLyrics, PublishRequest } from './upload-lrc';
const channel_global = vscode.window.createOutputChannel('LRC');
export function activate(context: vscode.ExtensionContext) {
context.subscriptions.push(...registerCheckLineTimestamp(context));
context.subscriptions.push(
vscode.commands.registerCommand('extension.jumpToLrcPosition', jumpToLrcPosition),
vscode.commands.registerCommand('extension.shiftLyricsDown', shiftLyricsDown),
vscode.commands.registerCommand('extension.shiftLyricsUp', shiftLyricsUp),
vscode.commands.registerCommand('extension.tapBpm', tapBpm),
vscode.commands.registerCommand('extension.quantizeToEigthNote', quantizeToEigthNote),
vscode.commands.registerCommand(
'extension.fineTuneTimestampDown100MsAndPlay',
fineTuneTimestampAndPlay(-100),
),
vscode.commands.registerCommand(
'extension.fineTuneTimestampUp100MsAndPlay',
fineTuneTimestampAndPlay(100),
),
vscode.commands.registerCommand(
'extension.uploadLyricsToLrclibDotNet',
uploadToLrclibDotNet,
),
);
}
/**
* Jumps to the position in the lyric file corresponding to the current cursor position in the active text editor.
* Sends a command to a socket to seek to the specified position in mpv at the socket path `~/tmp/mpv-socket`.
* @remarks
* This function requires the following dependencies:
* - `vscode` module for accessing the active text editor and displaying messages.
* - `net` module for creating a socket connection.
* @throws {Error} If there is an error sending the command to the socket.
*/
function jumpToLrcPosition() {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const position = editor.selection.active;
const res = ext.getTimestampFromLine(position.line);
if (!res) {
return;
}
const { milliseconds, seconds } = res;
// Prepare JSON command to send to the socket
const seekCommand = {
command: ['seek', seconds, 'absolute'],
};
const reloadSubtitlesCommand = {
command: ['sub-reload'],
};
const socket = new net.Socket();
const socketPath = process.env.HOME + '/tmp/mpv-socket';
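// mpv must expose this socket, e.g. started with
// --input-ipc-server=$HOME/tmp/mpv-socket (path assumed by this extension).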
socket.connect(socketPath, () => {
socket.write(JSON.stringify(seekCommand));
socket.write('\n');
socket.write(JSON.stringify(reloadSubtitlesCommand));
socket.write('\n');
vscode.window.showInformationMessage(
`Sent command to jump to [${formatTimestamp(milliseconds)}] and sync subtitles.`,
);
socket.end();
});
socket.on('error', err => {
vscode.window.showErrorMessage(`Failed to send command: ${err.message}`);
});
}
/**
* Shifts the lyrics down by one line starting from the current cursor position in the active text editor.
* @remarks
* This function requires the following dependencies:
* - `vscode` module for accessing the active text editor and displaying messages.
*/
async function shiftLyricsDown() {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const getLine = (line: number) => ({
number: line,
range: editor.document.lineAt(line),
});
// get the document range from the beginning of the current line to the end of the file
const documentRange = new vscode.Range(
getLine(editor.selection.active.line).range.range.start,
editor.document.lineAt(editor.document.lineCount - 1).range.end,
);
let newLines: string = '';
// iterate through all lines under the current line, save the lyric text from the current line, and replace it with the lyric text from the previous line
let previousLineText = '';
for (
// get the current line range
let line = getLine(editor.selection.active.line);
line.number < editor.document.lineCount - 1;
// next line as position from line number
line = getLine(line.number + 1)
) {
const timestamp = ext.getTimestampFromLine(line.number);
if (timestamp === undefined) {
newLines += line.range.text + '\n';
continue;
}
newLines += `[${formatTimestamp(timestamp.milliseconds)}]` + previousLineText + '\n';
previousLineText = timestamp.text;
}
// replace documentRange with newLines
await editor.edit(editBuilder => {
editBuilder.replace(documentRange, newLines);
});
}
/**
* Shifts the lyrics up by one line starting from the current cursor position in the active text editor.
* @remarks
* This function requires the following dependencies:
* - `vscode` module for accessing the active text editor and displaying messages.
*/
async function shiftLyricsUp() {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const getLine = (line: number) => ({
number: line,
range: editor.document.lineAt(line),
});
// get the document range from the beginning of the current line to the end of the file
const documentRange = new vscode.Range(
getLine(editor.selection.active.line).range.range.start,
editor.document.lineAt(editor.document.lineCount - 1).range.end,
);
let newLines: string = '';
// iterate through all lines under the current line, save the lyric text from the current line, and replace it with the lyric text from the next line
for (
// get the current line range
let line = getLine(editor.selection.active.line);
line.number < editor.document.lineCount - 2;
// next line as position from line number
line = getLine(line.number + 1)
) {
const nextLineText =
ext.getTimestampFromLine(line.number + 1)?.text ??
ext.document.lineAt(line.number + 1).text;
const timestamp = ext.getTimestampFromLine(line.number);
if (timestamp === undefined) {
continue;
}
newLines += `[${formatTimestamp(timestamp.milliseconds)}]` + nextLineText + '\n';
}
// replace documentRange with newLines
await editor.edit(editBuilder => {
editBuilder.replace(documentRange, newLines);
});
}
/**
* Tap the BPM of the track and write it to the header of the active text editor.
* @remarks
* This function requires the following dependencies:
* - `vscode` module for accessing the active text editor and displaying messages.
*/
async function tapBpm() {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const startBpm = ext.findBpmHeader();
const bpm = await timeInputBpm(startBpm);
if (bpm === undefined) {
return;
}
await ext.writeHeader('bpm', bpm.toString());
}
/** first ask the user for the BPM of the track, then quantize the timestamps in the active text editor to the closest eighth note based on the given BPM */
async function quantizeToEigthNote() {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const startBpm = ext.findBpmHeader();
const bpm = await timeInputBpm(startBpm);
if (bpm === undefined) {
return;
}
await ext.writeHeader('bpm', bpm.toString());
const getLine = (line: number) => ({
number: line,
range: editor.document.lineAt(line),
});
const documentRange = new vscode.Range(
getLine(0).range.range.start,
editor.document.lineAt(editor.document.lineCount - 1).range.end,
);
const eighthNoteDuration = bpmToEighthNoteDuration(bpm);
let newLines: string = '';
for (
let line = getLine(0);
line.number < editor.document.lineCount - 1;
line = getLine(line.number + 1)
) {
const timestamp = ext.getTimestampFromLine(line.number);
if (timestamp === undefined) {
newLines += line.range.text + '\n';
continue;
}
const adjustedMs = adjustTimestampToEighthNote(
timestamp.milliseconds,
eighthNoteDuration,
);
newLines += `[${formatTimestamp(adjustedMs)}]${timestamp.text}\n`;
}
await editor.edit(editBuilder => {
editBuilder.replace(documentRange, newLines);
});
}
/** fine tune the timestamp of the current line by the given amount (in milliseconds) and play the track at slightly before the new timestamp */
function fineTuneTimestampAndPlay(amountMs: number) {
return async () => {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const position = editor.selection.active;
const res = ext.getTimestampFromLine(position.line);
if (!res) {
return;
}
const { milliseconds } = res;
const newMs = milliseconds + amountMs;
// adjust the timestamp
const documentRange = editor.document.lineAt(position.line).range;
await editor.edit(editBuilder => {
editBuilder.replace(documentRange, `[${formatTimestamp(newMs)}]${res.text}`);
});
const PLAY_BEFORE_TIMESTAMP_MS = 2000;
const seekCommand = {
command: ['seek', (newMs - PLAY_BEFORE_TIMESTAMP_MS) / 1000, 'absolute'],
};
const reloadSubtitlesCommand = {
command: ['sub-reload'],
};
const socket = new net.Socket();
const socketPath = process.env.HOME + '/tmp/mpv-socket';
socket.connect(socketPath, () => {
socket.write(JSON.stringify(seekCommand));
socket.write('\n');
socket.write(JSON.stringify(reloadSubtitlesCommand));
socket.write('\n');
vscode.window.showInformationMessage(
`Sent command to jump to [${formatTimestamp(newMs)}] and sync subtitles.`,
);
socket.end();
});
socket.on('error', err => {
vscode.window.showErrorMessage(`Failed to send command: ${err.message}`);
});
};
}
// convert the given bpm to the duration of one beat in milliseconds (e.g. 120 BPM -> 500 ms)
function bpmToMs(bpm: number) {
return Math.floor((60 / bpm) * 1000);
}
// Show an input box in a loop, recording the time between inputs over a sliding window of the last 5 taps, and calculate the BPM from their weighted average
async function timeInputBpm(startBpm?: number) {
const startBpmMs = bpmToMs(startBpm ?? 120);
const timeDifferences: number[] = [
startBpmMs,
startBpmMs,
startBpmMs,
startBpmMs,
startBpmMs,
];
// assign a weight to the time differences, so that the most recent time differences have more weight
const weights = [0.1, 0.1, 0.2, 0.3, 0.3];
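// e.g. five equal 500 ms gaps give a weighted average of 500 ms, i.e. 120 BPM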
const calculateBPM = () => {
// use a weighted average here
let avg = 0;
for (let i = 0; i < timeDifferences.length; i++) {
avg += timeDifferences[i] * weights[i];
}
return Math.floor(60000 / avg);
};
let lastPressTime = Date.now();
let firstLoop = true;
while (true) {
const res = await vscode.window.showInputBox({
prompt: `Press enter to record BPM (current BPM: ${calculateBPM()}), enter the final BPM once you know, or press esc to finish`,
placeHolder: 'BPM',
value: startBpm !== undefined && firstLoop ? startBpm.toString() : undefined,
});
firstLoop = false;
if (res === undefined) {
return undefined;
}
if (res !== '') {
const resBpm = parseInt(res, 10);
if (isNaN(resBpm)) {
vscode.window.showErrorMessage('Invalid BPM');
continue;
}
return resBpm;
}
const now = Date.now();
const timeDiff = now - lastPressTime;
// Add the time difference to the array (limit to last 5 key presses)
timeDifferences.shift(); // Remove the oldest time difference
timeDifferences.push(timeDiff);
lastPressTime = now;
}
}
/**
* Uploads the lyrics in the active text editor to the LrclibDotNet API.
* @remarks
* This function requires the following dependencies:
* - `vscode` module for accessing the active text editor and displaying messages.
* - `fetch` module for making HTTP requests.
* @throws {Error} If there is an HTTP error.
*/
async function uploadToLrclibDotNet() {
const editor = vscode.window.activeTextEditor;
if (!editor) {
vscode.window.showErrorMessage('No active editor found.');
return;
}
const ext = new Ext(editor.document);
const title = ext.findHeader('ti')?.value;
const artist = ext.findHeader('ar')?.value;
const album = ext.findHeader('al')?.value;
const lengthString = ext.findHeader('length')?.value;
if (
title === undefined ||
artist === undefined ||
album === undefined ||
lengthString === undefined
) {
vscode.window.showErrorMessage(
'Missing required headers: title, artist, album, length',
);
return;
}
// parse length as mm:ss
const [minutes, seconds] = lengthString?.split(':') ?? [];
if (
!minutes ||
!seconds ||
isNaN(parseInt(minutes, 10)) ||
isNaN(parseInt(seconds, 10))
) {
vscode.window.showErrorMessage('Invalid length header, expected format: mm:ss');
return;
}
const length = parseInt(minutes, 10) * 60 + parseInt(seconds, 10);
const syncedLyrics = ext.getLyricsPart();
const plainLyrics = plainLyricsFromLyrics(syncedLyrics);
// open a html preview with the lyrics saying
//
// Uploading these lyrics to lrclib.net:
// <metadata as table>
// Lyrics:
// ```<lyrics>```
// Plain lyrics:
// ```<plainLyrics>```
//
// Is this ok?
// <button to upload>
const previewTitle = 'Lyric Preview';
const metadataTable = `
<table>
<tr>
<th>Title</th>
<td>${title}</td>
</tr>
<tr>
<th>Artist</th>
<td>${artist}</td>
</tr>
<tr>
<th>Album</th>
<td>${album}</td>
</tr>
<tr>
<th>Length</th>
<td>${lengthString}</td>
</tr>
</table>
`;
const previewContent = `
<p>Uploading these lyrics to lrclib.net:</p>
${metadataTable}
<p>Lyrics:</p>
<pre>${syncedLyrics}</pre>
<p>Plain lyrics:</p>
<pre>${plainLyrics}</pre>
<p>Is this ok?</p>
<button>Upload</button>
`;
const panel = vscode.window.createWebviewPanel(
'lyricPreview',
previewTitle,
vscode.ViewColumn.One,
{ enableScripts: true },
);
panel.webview.html = `
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Markdown Preview</title>
</head>
<body>
${previewContent}
</body>
<script>
const vscode = acquireVsCodeApi();
document.querySelector('button').addEventListener('click', () => {
vscode.postMessage({ command: 'upload' });
});
</script>
</html>
`;
let isDisposed = false;
panel.onDidDispose(() => {
isDisposed = true;
});
await new Promise((resolve, _reject) => {
panel.webview.onDidReceiveMessage((message: { command: string }) => {
if (isDisposed) {
return;
}
if (message.command === 'upload') {
panel.dispose();
resolve(true);
}
});
});
const toUpload: PublishRequest = {
trackName: title,
artistName: artist,
albumName: album,
duration: length,
plainLyrics: plainLyrics,
syncedLyrics: syncedLyrics,
};
// log the data to our extension output buffer
channel_global.appendLine('Uploading lyrics to LrclibDotNet');
const json = JSON.stringify(toUpload, null, 2);
channel_global.appendLine(json);
const res = await publishLyrics(toUpload);
if (res) {
vscode.window.showInformationMessage('Lyrics successfully uploaded.');
channel_global.appendLine('Lyrics successfully uploaded.');
} else {
vscode.window.showErrorMessage('Failed to upload lyrics.');
channel_global.appendLine('Failed to upload lyrics.');
}
}
// If the difference to the timestamp on the next line is larger than 10 seconds (at 120 BPM), underline the current line and show a warning message on hover
export function registerCheckLineTimestamp(_context: vscode.ExtensionContext) {
const changesToCheck: Set<vscode.TextDocument> = new Set();
const everSeen = new Set<vscode.TextDocument>();
return [
vscode.workspace.onDidChangeTextDocument(e => {
changesToCheck.add(e.document);
if (vscode.window.activeTextEditor?.document === e.document) {
doEditorChecks(vscode.window.activeTextEditor.document, everSeen, changesToCheck);
}
}),
vscode.workspace.onDidOpenTextDocument(e => {
changesToCheck.add(e);
everSeen.add(e);
if (vscode.window.activeTextEditor?.document === e) {
doEditorChecks(vscode.window.activeTextEditor.document, everSeen, changesToCheck);
}
}),
vscode.window.onDidChangeActiveTextEditor(editor => {
if (editor) {
doEditorChecks(editor.document, everSeen, changesToCheck);
}
}),
vscode.window.onDidChangeVisibleTextEditors(editors => {
for (const editor of editors) {
doEditorChecks(editor.document, everSeen, changesToCheck);
}
}),
];
}
function doEditorChecks(
document: vscode.TextDocument,
everSeen: Set<vscode.TextDocument>,
changesToCheck: Set<vscode.TextDocument>,
) {
const ext = new Ext(document);
if (!everSeen.has(document)) {
changesToCheck.add(document);
everSeen.add(document);
}
if (!changesToCheck.has(document)) {
return;
}
changesToCheck.delete(document);
const from = 0;
const to = document.lineCount - 1;
for (let line = from; line <= to; line++) {
const warnings: string[] = [];
const timeDiff = timeDifferenceTooLarge(ext, line);
if (timeDiff !== undefined) {
warnings.push(timeDiff);
}
const nextTimestampSmaller = nextLineTimestampSmallerThanCurrent(ext, line);
if (nextTimestampSmaller !== undefined) {
warnings.push(nextTimestampSmaller);
}
for (const warning of warnings) {
global_manageWarnings.setWarning(document, line, warning);
}
// unset any warnings if this doesnt apply anymore
if (warnings.length === 0) {
global_manageWarnings.setWarning(document, line);
}
}
}
/** Warn if the difference to the timestamp on the next line is larger than
* * 10 seconds at 120 BPM
* * 5 seconds at 240 BPM
* * 20 seconds at 60 BPM
* * etc
*/
function timeDifferenceTooLarge(ext: Ext, line: number): string | undefined {
const bpm = ext.findBpmHeader() ?? 120;
const maxTimeDifference = 10000 * (120 / bpm);
const timeDifference = ext.getTimeDifferenceToNextLineTimestamp(
new vscode.Position(line, 0),
);
if (
!timeDifference ||
timeDifference.thisLineIsEmpty ||
timeDifference.difference <= maxTimeDifference
) {
return;
}
return `Time difference to next line is ${formatTimestamp(
timeDifference.difference,
)}, should there be silence here? At ${bpm} BPM, we assume anything more than ${(
maxTimeDifference / 1000
).toFixed(2)} seconds is a mistake.`;
}
/** Warn if the timestamp on the next line is smaller or equal to the current timestamp */
function nextLineTimestampSmallerThanCurrent(ext: Ext, line: number): string | undefined {
const timeDifference = ext.getTimeDifferenceToNextLineTimestamp(
new vscode.Position(line, 0),
);
if (!timeDifference) {
return;
}
if (timeDifference.difference == 0) {
return `The timestamp to the next line is not increasing`;
}
if (timeDifference.difference < 0) {
return `The timestamp to the next line is decreasing`;
}
}
class Ext {
constructor(public document: vscode.TextDocument) {}
getTimeDifferenceToNextLineTimestamp(position: vscode.Position) {
const thisLineTimestamp = this.getTimestampFromLine(position.line);
const nextLineTimestamp = this.getTimestampFromLine(position.line + 1);
if (!thisLineTimestamp || !nextLineTimestamp) {
return;
}
return {
difference: nextLineTimestamp.milliseconds - thisLineTimestamp.milliseconds,
thisLineIsEmpty: thisLineTimestamp.text.trim() === '',
};
}
/**
* Retrieves the timestamp and text from the line at the given position in the active text editor.
*
* @param line - The line number in the document.
* @returns An object containing the milliseconds, seconds, and text extracted from the line, or undefined if the line has no timestamp.
*/
getTimestampFromLine(line: number) {
const lineText = this.document.lineAt(line).text;
return this.getTimestampFromLineText(lineText);
}
getTimestampFromLineText(lineText: string) {
// Extract timestamp [mm:ss.ms] from the current line
const match = lineText.match(/\[(\d+:\d+\.\d+)\](.*)/);
if (!match) {
return;
}
const [, timestamp, text] = match;
const milliseconds = parseTimestamp(timestamp);
const seconds = milliseconds / 1000;
return { milliseconds, seconds, text };
}
// Find a header line of the format
// [header:value]
// at the beginning of the lrc file (before the first empty line)
findHeader(headerName: string) {
for (let line = 0; line < this.document.lineCount; line++) {
const text = this.document.lineAt(line).text;
if (text.trim() === '') {
return;
}
const match = text.match(/^\[(\w+):(.*)\]$/);
if (match && match[1] === headerName) {
return { key: match[1], value: match[2], line: line };
}
}
}
/** Find the bpm header and return the bpm as number, if any */
findBpmHeader() {
const startBpmStr = this.findHeader('bpm')?.value;
let bpm;
if (startBpmStr !== undefined) {
bpm = parseInt(startBpmStr, 10);
if (isNaN(bpm)) {
bpm = undefined;
}
}
return bpm;
}
// check if the given line is a header line
isHeaderLine(line: string) {
return (
line.trim() !== '' &&
line.match(/^\[(\w+):(.*)\]$/) !== null &&
line.match(/^\[\d\d:\d\d.\d+\]/) === null
);
}
// write the given header to the lrc file, if the header already exists, update the value
async writeHeader(headerName: string, value: string) {
const header = this.findHeader(headerName);
const editor = findActiveEditor(this.document);
if (!editor) {
return;
}
if (header) {
const lineRange = this.document.lineAt(header.line).range;
await editor.edit(editBuilder => {
editBuilder.replace(lineRange, `[${headerName}: ${value}]`);
});
} else {
// insert before the first timestamp line if no header is found, or after the last header if there are multiple headers
let insertLine = 0;
let extraNewline = '';
for (let line = 0; line < this.document.lineCount; line++) {
const text = this.document.lineAt(line).text;
// check if header
if (this.isHeaderLine(text)) {
insertLine = line + 1;
} else if (text.trim() === '') {
insertLine = line;
break;
} else {
insertLine = line;
if (line == 0) {
extraNewline = '\n';
}
break;
}
}
await editor.edit(editBuilder => {
editBuilder.insert(
new vscode.Position(insertLine, 0),
`[${headerName}: ${value}]\n${extraNewline}`,
);
});
}
}
// get the lyrics part of the lrc file (after the headers)
getLyricsPart() {
const first = this.document.lineAt(0).text;
let line = 0;
if (this.isHeaderLine(first)) {
// skip all headers (until the first empty line)
for (; line < this.document.lineCount; line++) {
const text = this.document.lineAt(line).text;
if (text.trim() === '') {
line++;
break;
}
}
}
// get the range from the current line to the end of the file
const documentRange = new vscode.Range(
new vscode.Position(line, 0),
this.document.lineAt(this.document.lineCount - 1).range.end,
);
return this.document.getText(documentRange);
}
}
// find an active editor that has the given document opened
function findActiveEditor(document: vscode.TextDocument) {
return vscode.window.visibleTextEditors.find(editor => editor.document === document);
}
function plainLyricsFromLyrics(lyrics: string) {
// remove the timestamp from the beginning of every line
return (
lyrics
.replace(/\[\d\d:\d\d\.\d\d\]\s?/gm, '')
// remove empty lines
.replace(/\n{2,}/g, '\n')
);
}
function parseTimestamp(timestamp: string): number {
// Parse [mm:ss.ms] format into milliseconds
const [min, sec] = timestamp.split(':');
const minutes = parseInt(min, 10);
const seconds = parseFloat(sec);
return minutes * 60 * 1000 + seconds * 1000;
}
function formatTimestamp(ms: number): string {
// Format milliseconds back into mm:ss.ms (callers add the surrounding brackets)
const minutes = Math.floor(ms / 60000);
ms %= 60000;
const seconds = (ms / 1000).toFixed(2);
return `${String(minutes).padStart(2, '0')}:${seconds.padStart(5, '0')}`;
}
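// Round-trip example: parseTimestamp('01:23.45') === 83450,
// and formatTimestamp(83450) === '01:23.45'.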
class ManageWarnings {
private warnings: Map<number, string> = new Map();
private diagnostics: vscode.DiagnosticCollection;
constructor() {
this.diagnostics = vscode.languages.createDiagnosticCollection();
}
/** Set a warning message on a line in a document; unset it when message is undefined */
setWarning(document: vscode.TextDocument, line: number, message?: string) {
if (message !== undefined) {
this.warnings.set(line, message);
} else {
this.warnings.delete(line);
}
this.updateDiagnostics(document);
}
private updateDiagnostics(document: vscode.TextDocument) {
const mkWarning = (line: number, message: string) => {
const lineRange = document.lineAt(line).range;
return new vscode.Diagnostic(lineRange, message, vscode.DiagnosticSeverity.Warning);
};
const diagnostics: vscode.Diagnostic[] = [];
for (const [line, message] of this.warnings) {
diagnostics.push(mkWarning(line, message));
}
this.diagnostics.delete(document.uri);
this.diagnostics.set(document.uri, diagnostics);
}
}
const global_manageWarnings = new ManageWarnings();

Some files were not shown because too many files have changed in this diff