aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/build-blog.yml24
-rw-r--r--.github/workflows/build-workstation.yml22
-rw-r--r--.gitmodules15
-rw-r--r--FUENTES.md95
-rw-r--r--IDEAS.org161
-rw-r--r--INTERESTING_ARTICLES.org15
-rw-r--r--INTERESTING_PROJECTS.org192
-rw-r--r--PERSONAL_INFRA.md233
-rw-r--r--blog_experiment/.gitignore1
-rw-r--r--blog_experiment/NOTES.md1
-rw-r--r--blog_experiment/blog/__init__.py54
-rw-r--r--blog_experiment/blog/__main__.py36
-rw-r--r--blog_experiment/blog/blog_pages.py139
-rw-r--r--blog_experiment/blog/gemtext.py223
-rw-r--r--blog_experiment/blog/html.py101
-rw-r--r--blog_experiment/blog/meta.py13
-rw-r--r--blog_experiment/blog/page.py37
-rw-r--r--blog_experiment/blog/pretty.py5
-rwxr-xr-xblog_experiment/build.rs26
m---------blog_experiment/gemini_blog8
-rw-r--r--blog_experiment/k8s.yaml91
-rw-r--r--blog_experiment/poetry.lock1324
-rw-r--r--blog_experiment/pyproject.toml27
-rw-r--r--blog_experiment/test_html_rendering.py10
-rw-r--r--emacs/README.md34
-rw-r--r--emacs/emacs.bash5
-rw-r--r--emacs/emacs.el162
-rw-r--r--emacs/plan.org4
-rw-r--r--fiction_writing/es/conmutatividad.md73
-rw-r--r--fiction_writing/es/cuentos_del_triangulo_verde.md133
-rw-r--r--fiction_writing/es/el_principe.md65
-rw-r--r--fiction_writing/es/en_los_mejores_cines.md55
-rw-r--r--fiction_writing/es/invoco_al_diablo.md119
-rw-r--r--fiction_writing/es/lucifer_martinez.md77
-rw-r--r--fiction_writing/es/maldito_clip.md73
-rw-r--r--fiction_writing/es/mariano_el_programador.md51
-rw-r--r--fiction_writing/es/un_paseo_por_el_rio.md35
-rw-r--r--linux/dnie_rhel9.md13
-rw-r--r--linux/running_commands_in_linux.adoc287
-rw-r--r--personal_infra/README.md56
-rw-r--r--personal_infra/ansible.cfg7
-rw-r--r--personal_infra/k8s/base/kustomization.yml32
-rw-r--r--personal_infra/playbooks/patch_rpc_svcgssd_service.yaml21
-rw-r--r--personal_infra/playbooks/roles/apply_puppet/tasks/main.yml127
-rw-r--r--personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml17
-rwxr-xr-xpersonal_infra/playbooks/roles/deploy_ragent/files/get.py16
-rw-r--r--personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml45
-rw-r--r--personal_infra/playbooks/roles/deploy_ragent/vars/main.yml24
-rw-r--r--personal_infra/playbooks/roles/join_ipa/handlers/main.yml4
-rw-r--r--personal_infra/playbooks/roles/join_ipa/tasks/main.yml32
-rw-r--r--personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml92
-rw-r--r--personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml8
-rw-r--r--personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml11
-rw-r--r--personal_infra/playbooks/roles/talos/README.md65
-rw-r--r--personal_infra/playbooks/roles/talos/files/get-ip5
-rw-r--r--personal_infra/playbooks/roles/talos/tasks/main.yaml111
-rw-r--r--personal_infra/playbooks/roles/talos/tasks/proxmox.yml19
-rw-r--r--personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml4
-rw-r--r--personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml13
-rw-r--r--personal_infra/playbooks/setup_blog_keys.yaml23
-rw-r--r--personal_infra/playbooks/setup_tinc_keys.yaml27
-rw-r--r--personal_infra/playbooks/site.yaml63
-rw-r--r--personal_infra/podman.md26
-rwxr-xr-xpersonal_infra/pseudo_resource_exporter.py97
-rw-r--r--personal_infra/puppet/modules/automatic_updates/manifests/init.pp33
-rw-r--r--personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp81
-rw-r--r--personal_infra/puppet/modules/backups/manifests/init.pp11
-rw-r--r--personal_infra/puppet/modules/basic_software/manifests/init.pp7
-rw-r--r--personal_infra/puppet/modules/copr/manifests/init.pp20
-rw-r--r--personal_infra/puppet/modules/debian/manifests/backports.pp9
-rw-r--r--personal_infra/puppet/modules/debian/manifests/init.pp5
-rw-r--r--personal_infra/puppet/modules/dns_dhcp/manifests/init.pp36
-rw-r--r--personal_infra/puppet/modules/dns_dhcp/templates/internal.epp30
-rw-r--r--personal_infra/puppet/modules/freeipa/manifests/server.pp13
-rw-r--r--personal_infra/puppet/modules/ipsilon/manifests/init.pp8
m---------personal_infra/puppet/modules/mailalias_core0
-rw-r--r--personal_infra/puppet/modules/miniflux/manifests/init.pp27
l---------personal_infra/puppet/modules/nagios/files/check_talos_version1
-rw-r--r--personal_infra/puppet/modules/nagios/manifests/init.pp100
-rw-r--r--personal_infra/puppet/modules/nagios/manifests/k8s.pp41
-rw-r--r--personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp1373
m---------personal_infra/puppet/modules/nagios_core0
-rw-r--r--personal_infra/puppet/modules/nextcloud/manifests/init.pp79
-rw-r--r--personal_infra/puppet/modules/nextcloud/templates/www.conf.epp439
-rw-r--r--personal_infra/puppet/modules/ocserv/manifests/init.pp97
-rw-r--r--personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp57
-rw-r--r--personal_infra/puppet/modules/ocserv/templates/port.conf.epp8
-rw-r--r--personal_infra/puppet/modules/podman/manifests/init.pp17
-rw-r--r--personal_infra/puppet/modules/postgres/manifests/init.pp26
-rw-r--r--personal_infra/puppet/modules/proxmox/README.md36
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/freeipa.pp17
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/init.pp38
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/proxy.pp52
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp53
-rw-r--r--personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp2
-rw-r--r--personal_infra/puppet/modules/proxmox/templates/interfaces.epp18
-rw-r--r--personal_infra/puppet/modules/root_mail/manifests/init.pp41
-rw-r--r--personal_infra/puppet/modules/tinc/manifests/init.pp100
-rw-r--r--personal_infra/puppet/modules/tinc/templates/tinc-up.epp11
-rw-r--r--personal_infra/puppet/modules/tinc/templates/tinc.conf.epp8
-rw-r--r--personal_infra/puppet/site/00-common.pp23
-rw-r--r--personal_infra/puppet/site/01-dns.pp10
-rw-r--r--personal_infra/puppet/site/01-ipa.pp30
-rw-r--r--personal_infra/puppet/site/01-tinc.pp39
-rw-r--r--personal_infra/puppet/site/02-tinc-dns.pp5
-rw-r--r--personal_infra/puppet/site/case.ces.int.pdp7.net.pp10
-rw-r--r--personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp15
-rw-r--r--personal_infra/puppet/site/h1.pdp7.net.pp123
-rw-r--r--personal_infra/puppet/site/h2.pdp7.net.pp9
-rw-r--r--personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp2
-rw-r--r--personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp3
-rw-r--r--personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp3
-rw-r--r--personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp10
-rw-r--r--personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp8
-rw-r--r--personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp16
-rw-r--r--personal_infra/puppet/site/nc1.pdp7.net.pp3
-rw-r--r--personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp22
-rw-r--r--personal_infra/puppet/site/pg.h1.int.pdp7.net.pp16
-rw-r--r--personal_infra/puppet/site/ws.h1.int.pdp7.net.pp6
-rw-r--r--personal_infra/requirements.loose4
-rw-r--r--personal_infra/requirements.txt31
-rw-r--r--personal_infra/setup_ipa_replicas.md24
-rwxr-xr-xpersonal_infra/setup_venv6
m---------personal_infra/talos-check0
-rwxr-xr-xpersonal_infra/up.py179
-rw-r--r--programming/python/creating_nice_python_cli_tools.md40
-rw-r--r--programming/python/dependency_handling.md116
-rw-r--r--programming/python/project_setup.md117
-rw-r--r--programming/python/python_modules_primer.md269
-rw-r--r--programming/python/scraping_with_selenium_on_docker.md7
-rwxr-xr-xscripts/ruscreen3
-rw-r--r--workstation/README.md3
m---------workstation/arch-container-builder0
-rwxr-xr-xworkstation/build_workstation35
-rwxr-xr-xworkstation/setup.py67
135 files changed, 9132 insertions, 0 deletions
diff --git a/.github/workflows/build-blog.yml b/.github/workflows/build-blog.yml
new file mode 100644
index 00000000..e63d621f
--- /dev/null
+++ b/.github/workflows/build-blog.yml
@@ -0,0 +1,24 @@
+on:
+ push:
+ paths:
+ - 'blog_experiment/**'
+ branches:
+ - master
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ submodules: true
+ - run: |
+ cd blog_experiment
+ rustup toolchain install nightly --profile minimal
+ podman login quay.io -u $USER -p $TOKEN --authfile ~/docker-config
+ chmod ugo+r ~/docker-config
+ ./build.rs --docker-config ~/docker-config . quay.io/alexpdp7/blog:latest
+ env:
+ USER: ${{ vars.BLOG_ROBOT_USERNAME }}
+ TOKEN: ${{ secrets.BLOG_ROBOT_TOKEN }}
diff --git a/.github/workflows/build-workstation.yml b/.github/workflows/build-workstation.yml
new file mode 100644
index 00000000..7e1d1a82
--- /dev/null
+++ b/.github/workflows/build-workstation.yml
@@ -0,0 +1,22 @@
+on:
+ push:
+ paths:
+ - 'workstation/**'
+ branches:
+ - main
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ submodules: true
+ - run: |
+ cd workstation
+ ./build_workstation
+ podman login -u="alexpdp7+github_workstation_builder" -p="$ROBOT_TOKEN" quay.io
+ podman push localhost/workstation:latest quay.io/alexpdp7/workstation:latest
+ env:
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 00000000..6e6c22cb
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,15 @@
+[submodule "personal_infra/puppet/modules/mailalias_core"]
+ path = personal_infra/puppet/modules/mailalias_core
+ url = https://github.com/puppetlabs/puppetlabs-mailalias_core.git
+[submodule "personal_infra/puppet/modules/nagios_core"]
+ path = personal_infra/puppet/modules/nagios_core
+ url = https://github.com/puppetlabs/puppetlabs-nagios_core.git
+[submodule "workstation/arch-container-builder"]
+ path = workstation/arch-container-builder
+ url = https://github.com/alexpdp7/arch-container-builder.git
+[submodule "personal_infra/talos-check"]
+ path = personal_infra/talos-check
+ url = https://github.com/alexpdp7/talos-check.git
+[submodule "blog_experiment/gemini_blog"]
+ path = blog_experiment/gemini_blog
+ url = https://github.com/alexpdp7/gemini_blog.git
diff --git a/FUENTES.md b/FUENTES.md
new file mode 100644
index 00000000..6d3be369
--- /dev/null
+++ b/FUENTES.md
@@ -0,0 +1,95 @@
+(Véase [el artículo correspondiente en mi blog](https://t.co/Vyzn78riPZ).)
+
+# Gente
+
+* [jwz](https://www.jwz.org/blog/) el blog de JWZ, uno de los inventores de Internet.
+* [Maartje Eyskens](https://blahaj.social/@maartje.rss) brevemente excompañera de trabajo, curioso cruce entre retrotecnología (faxes, Windows 2000, etc.), trenes y tiburones de IKEA.
+* [Danger Mouse](https://www.dangermouse.net/), astrofísico, es mediofamoso por sus webcómics de Lego, decanos de Internet.
+ El blog es su vida cotidiana, como profesor particular de ética y ciencia.
+ De vez en cuando cuelga artículos divulgativos excelentes.
+* [chechar](https://mastodon.xyz/@chechar) ([blog](https://obm.corcoles.net/)) @chechar es mi nepotismo en forma de hermano.
+ Tecnología y esas cosas.
+* Mark Dominus [shitposts](https://shitpost.plover.com) [blog](https://blog.plover.com/) es un friki de las mates, la informática y la lingüística.
+* [Andrew Braybrook](http://uridiumauthor.blogspot.com/) @UridiumAuthor es eso, el autor del videojuego clásico Uridium (y el Paradroid, etc.).
+ De vez en cuando descarga artículos brutales sobre programación de videojuegos en todas las eras.
+* [Fabien Sanglard](https://fabiensanglard.net/) es un historiador del videojuego.
+* [apenwarr](https://apenwarr.ca/log/) escribe muy ocasionalmente sobre Internet en general.
+
+# Webcómics y humor
+
+* [XKCD](https://xkcd.com/)
+* [El Mundo Today](https://www.elmundotoday.com) aunque ya no es el fenómeno que era, los titulares me hacen reír (aunque nunca entro al artículo).
+* [Penny Arcade](https://www.penny-arcade.com/comic/) una tira clásica sobre subcultura, principalmente videojuegos.
+ A veces descarrilan, a veces se pierden en sus mundos de fantasía, pero ocasionalmente aún me hacen sonreír.
+
+# Potenciales Mastodon
+
+* [David Woodbridge](https://mastodon.au/@Woody) @dwoodbridge es una personalidad del mundo de la accesibilidad.
+* [Jeff Minter](https://toot.wales/@llamasoft_ox) @llamasoft_ox es un programador de videojuegos clásico que retransmite la vida de su granja de ovejas en Gales.
+* [William Gibson](https://mastodon.social/@GreatDismal) @GreatDismal es el escritor cyberpunk más icónico.
+* [Neil Gaiman](https://mastodon.social/@neilhimself) @neilhimself escritor de fantasía bastante referente.
+
+# Cine
+
+* [The Film Experience](http://thefilmexperience.net/blog/) es un blog de cine al que me aficioné en la prehistoria.
+ Lleva activo desde 2009.
+ Ha incorporado nuevos columnistas, aunque a mí la verdad sólo me interesan los artículos de su fundador, Nathaniel Rogers.
+* [LoQueYoTeDiga](https://www.elcinedeloqueyotediga.net/) herederos del programa de cine mítico de Cadena Ser.
+ Tienen un podcast, pero yo no tengo paciencia para los podcasts.
+ Sacan reseñas de todo lo que se estrena en España, el día del estreno.
+
+# Ciencia
+
+* [Quanta Magazine](https://www.quantamagazine.org)
+
+# Tecnología
+
+* [The Verge](https://www.theverge.com/) de los pocos sitios de noticias tecnológicas con calidad editorial.
+* [Arstechnica](https://arstechnica.com) sería el otro, aunque la calidad está bajando últimamente.
+* [Wirecutter](https://thewirecutter.com) es otra cosa.
+ Está organizado por categorías (tablets, smartwatches, sillas de oficina, etc.), y proporciona reseñas de sus dos o tres productos favoritos en sus categorías.
+ Pese a sus tics, es un excelente punto de partida antes de realizar una compra tecnológica.
+* [Lifehacker](https://lifehacker.com) aunque también ha decaído un poco, de cuando en cuando siguen sacando artículos interesantes.
+* [Liliputing](https://liliputing.com) se especializa en cacharritos, pero un poco menos mainstream.
+* [The Internet Archive](https://blog.archive.org/) es literalmente lo que dicen, hablan de cosas interesantes de conservación digital.
+
+# Videojuegos
+
+* [Polygon](https://www.polygon.com/)
+* [Rock Paper Shotgun](https://www.rockpapershotgun.com)
+
+# Noticias generales
+
+El truco general es usar [Google News](https://news.google.com/).
+Google News agrega muchos medios y aunque su RSS no funciona perfectamente (en mi lector, Miniflux, las entradas se repiten), es mejor que la mayoría de RSS de periódicos y demás.
+Si queréis integrarlo en vuestro RSS, funciona bien para titulares, pero también es adecuado usar su web y aplicación móvil directamente.
+
+A diferencia de la mayoría de webs, encontrar los RSS de Google News no es trivial, así que os pongo los que uso yo:
+
+* [Google News - España](https://news.google.com/rss?hl=es&gl=ES&ceid=ES%3Aes&oc=11)
+* [Google News - Estados Unidos](https://news.google.com/rss?hl=en-US&gl=US&ceid=US%3Aen&oc=11)
+* [Google News - Reino Unido](https://news.google.com/rss?hl=en-GB&gl=GB&ceid=GB%3Aen&oc=11)
+
+Otros sitios serían:
+
+* [Fivethirtyeight](https://fivethirtyeight.com) un blog de noticias de todo tipo (pero sobre todo política americana y deportes) basado en datos.
+ Famosos por sus predicciones estadísticas, también ponen cosas como puzzles matemáticos.
+* [The long read](https://www.theguardian.com/news/series/the-long-read) son los artículos "largos" de The Guardian.
+ También tienen podcast.
+
+# Compras
+
+* [El blog de las ofertas](https://www.ofertitas.es) tiene avisos de ofertas bastante buenos.
+ El volumen es excesivo y tiene bastante paja, pero por ejemplo para campañas de ofertas hacen buenos resúmenes.
+
+# Otros
+
+* [La Página Definitiva](https://www.lapaginadefinitiva.com/) es bastante inclasificable.
+ Escriben de todo (principalmente política española y series), pero divertidamente.
+
+# Tuiteros acérrimos
+
+Más que nada pongo esta lista como recordatorio personal.
+Esperemos que algún día se cambien a algo con RSS o abierto.
+
+* @faustianovich un crítico de Fotogramas. Especial porque cada día bien temprano pone todas las películas que echarán en TDT.
diff --git a/IDEAS.org b/IDEAS.org
new file mode 100644
index 00000000..1e953972
--- /dev/null
+++ b/IDEAS.org
@@ -0,0 +1,161 @@
+* Nice mailing lists
+
+- GH-style user/organization creation
+- Every user/organization can create mailing lists
+- Mailing lists have a web UI
+- The mailing list UI can be completely customized to look like microblogging, blogs, comment systems, etc.
+- max n messages over m days feature. n=0, don't send me emails. n=1, send me a digest every m days. n>=2, stop at n-1, then wait until the m period is over to send me a digest.
+
+* Userland WASM Linux distro
+
+- Bootstrap a Linux distro to run on WASM
+- Should be able to run as a non-privileged user, installed in the user's home directory
+- Focused on userland programs, not a full OS
+- Should be able to run on non-POSIX filesystems
+- Could replace Docker/containers for developers
+- Similar to https://github.com/WAVM/Wavix
+
+* Turing Parser
+
+- A toolkit to write parsers for languages which are not CFGs and that need Turing-complete parsing code.
+- Provides tools to implement stateful lexers, keeping line-column information and generating good error messages.
+- Provides tools to transform the token stream, using arbitrary code, to generate annotated ASTs (i.e. with line/column number information)
+- See https://github.com/alexpdp7/lmparser
+
+* Extensible lightweight markup language
+
+- A language similar to AsciiDoc...
+- But with a well-defined AST
+- Complex nested lists
+- Styling of code blocks (highlighting, user input, replaceable...)
+- See https://github.com/alexpdp7/lmparser
+
+* Streaming DB
+
+That's probably https://github.com/MaterializeInc/materialize , but it's not OSS.
+
+- Simple relational database
+- Can act as replication target of other databases
+- Supports a limited SQL subset that can be easily reasoned about functional dependencies
+- Can stream efficiently the results of an SQL query (e.g. keep a query running and receive new/modified rows)
+- Supports efficient replication of a subset of a database (initial checkpoint + streaming or batched updates)
+- Functional dependencies could be used to create materialized views *and* indexes
+
+* CRUD framework
+
+- https://github.com/alexpdp7/v2f
+- https://github.com/alexpdp7/zqxjkcrud
+
+A completely declarative SQL CRUD framework
+
+- CRUD definition is done through SQL objects (schemas, views, etc.)
+- Permissions
+- Row auditing
+- Multiple components
+ - Server-side HTML frontend
+ - REST API
+- Commercial frontend for design
+
+* Key escrow for everyone
+
+- Easy-to-use key escrow/secret sharing
+- Provide emergency recovery of passwords, keys, 2FA, etc.
+
+* SQL2
+
+See https://github.com/EvgSkv/logica
+
+- A new language that compiles to SQL
+- Handles RDBMS differences
+- Makes queries composable (e.g. declare a query object, then add paging/sorting, for framework usage)
+- Declarative join via foreign key constraint names
+- Better ordering for code completion (e.g. FROMs first)
+
+* User-friendly GMail synchronization tool
+
+- Speaks XOAUTH2
+- Syncs to something Mutt/Notmuch/etc. can use easily
+- See https://github.com/alexpdp7/notsomuchcomplex
+
+* Parallel execution framework
+
+- A tool to run stuff such as CI builds which need sophisticated parallelization
+- Create differently sized resource pools (for CPU-bound tasks, "API"-bound, etc.)
+- Local execution of workflows on a workstation, remote execution on a CI server
+- Handle task dependencies and artifact passing from parent to child tasks
+- Capture stdout/stderr of tasks, allow viewing in realtime
+- Timestamp stdout/stderr for crude profiling
+- Implement a CI system on top?
+
+* ActivityPub reverse proxy
+
+Run the reverse proxy on your domain so you can "duplicate" your identity on your own domain
+
+* Statically-typed AST transform-friendly language
+
+- A Haskell/Rusty language, but with GC and nice compiler errors.
+- First-class support for AST transforms- both one-off (apply this transform to the source code and commit the change) and ongoing (define AST transforms to run as part of the compilation process).
+- Maybe such language would never need reflection, so refactoring would be very safe.
+
+* SQL Query tool
+
+- ipython notebook style interface
+- Backend + multiple frontends
+ - HTML/JS Frontend
+ - Curses frontend
+ - Native GUIs frontends
+- Smart join completion
+
+* HTML/JS only browser engine
+
+- Use an existing JS engine to implement a browser that can parse HTML and execute JS, but which has no concept of graphics
+- Could be used to implement a text HTML browser that can access the modern web
+- Extensible and scriptable, to generate "article mode" scripts, scrapers, and "API proxies"
+
+* Presentation tool
+
+- A tool to edit reveal.js-style slides
+- But also record and do basic audio editing
+- So it can help with timing/pacing, and even generate a nice video
+
+* Touch controller/meeting controller/desktop shortcuts
+
+- API server to control Google Meet (mute, etc.)
+- Use a USB gamepad to mute in videoconferences, etc
+- Small webapp for touch interface in phone
+- See https://github.com/alexpdp7/meet-controller
+
+* Scalable personal cloud
+
+1..n node VM/container hosting platform
+
+- ZFS storage (use https://github.com/marcan/takeover.sh for easy set up of a ZFS mirror root system)
+- Automation ready (Ansible, Salt, Foreman, etc.)
+- Monitoring friendly
+- Apt for single public IP
+ - Integrated DNS/DHCP
+ - VPN interfaces
+ - Reverse proxies
+
+* C64 Roguelike
+
+* Software development book ( https://github.com/alexpdp7/wakat )
+
+* Pure HTML5 decentralized IM system
+
+https://letsconvene.im/
+https://github.com/alexpdp7/imnei
+
+- Uses web push
+- Conversations are URLs to be shared
+- "User identity" is a private URL to be shared to handle multi-device
+- End-to-end encryption
+- Can have WebRTC audio/video
+- Anyone could run their own instance; as it is clientless, there is no friction to have multiple providers
+- Broadcast channels to be a pseudo-social network
+
+* Federated real-time communications using open protocols https://github.com/alexpdp7/frtcuop
+
+* Deterministic embeddable simple language for calculator and notebooks
+
+https://github.com/alexpdp7/plankalkul/ , that deprecated https://github.com/alexpdp7/pdp7_calc/ .
diff --git a/INTERESTING_ARTICLES.org b/INTERESTING_ARTICLES.org
new file mode 100644
index 00000000..4629e254
--- /dev/null
+++ b/INTERESTING_ARTICLES.org
@@ -0,0 +1,15 @@
+* General
+
+- [[https://lukeplant.me.uk/blog/posts/no-one-actually-wants-simplicity/][No one actually wants simplicity]] Simplicity is sacrifice
+
+* Programming
+
+- [[https://mikehadlow.blogspot.com/2012/05/configuration-complexity-clock.html][The Configuration Complexity Clock]] Programming languages, configuration files, DSLs for configuration
+
+** Python
+
+- [[https://lukeplant.me.uk/blog/posts/pythons-disappointing-superpowers/][Python’s "Disappointing" Superpowers]] A convincing defense of dynamic typing
+
+* Organizations
+
+- [[https://charity.wtf/2017/05/11/the-engineer-manager-pendulum/][The Engineer/Manager Pendulum]] Why people should multiclass engineering and management
diff --git a/INTERESTING_PROJECTS.org b/INTERESTING_PROJECTS.org
new file mode 100644
index 00000000..fdb964e5
--- /dev/null
+++ b/INTERESTING_PROJECTS.org
@@ -0,0 +1,192 @@
+I have decided to not use GitHub stars any more.
+Mainly because it excludes projects not on GitHub.
+Also, keeping things in an Org mode file means I can classify and add notes.
+
+* Technical writing
+** Document formats
+ - https://github.com/nota-lang/nota - Proper parser, Markdown, LaTeX...
+ - https://github.com/markdoc/markdoc
+ - https://github.com/christianvoigt/argdown - Argumentation
+ - https://github.com/jgm/djot - JGM designs a simpler Markdown, with an AST
+ - https://github.com/typst/ / https://www.user.tu-berlin.de/laurmaedje/programmable-markup-language-for-typesetting.pdf - technically sound modern LaTeX replacement, but not open source (yet?)
+ - https://github.com/nvim-neorg - org-mode alternative, TreeSitter-grammar-first
+*** Markdown
+ - https://github.com/lezer-parser/markdown
+ - https://github.com/kivikakk/comrak
+ - https://github.com/raphlinus/pulldown-cmark
+*** Org
+ - https://github.com/karlicoss/orgparse
+ - https://github.com/200ok-ch/org-parser
+** Spelling, etc.
+ - https://github.com/bminixhofer/nlprule - LanguageTool in Rust
+ - https://github.com/valentjn/ltex-ls - LanguageTool LSP with LaTeX support
+ - https://github.com/jmartorell/LTlab - Spanish tools for LanguageTool
+ - https://github.com/languagetool-org/languagetool
+** ProseMirror
+ - https://github.com/ProseMirror/prosemirror-markdown
+* Parsers
+ - https://github.com/langium/langium - DSLs and LSP
+ - https://github.com/hydro-project/rust-sitter - TreeSitter in Rust
+ - https://github.com/jzimmerman/langcc
+ - https://github.com/petitparser/dart-petitparser
+ - https://github.com/metaborg/sdf
+ - https://github.com/antlr/grammars-v4
+ - https://github.com/tree-sitter/tree-sitter
+ - https://github.com/stephan-tolksdorf/fparsec
+ - https://github.com/Engelberg/instaparse
+ - https://janet-lang.org/docs/peg.html - Janet is a programming language with first-class support for PEGs
+** DCGs/Prolog
+ - https://en.wikipedia.org/wiki/Definite_clause_grammar
+ - https://github.com/mthom/scryer-prolog/
+ - https://github.com/rla/prolog-markdown
+ - https://github.com/aarroyoc/djota/tree/main - A Djot parser in Prolog
+* CI/CD
+ - https://github.com/earthly/earthly - Standalone build tool with containers
+* Systems
+** Identity
+*** Keycloak
+ - https://github.com/adorsys/keycloak-config-cli - Config as code for KeyCloak
+** Email
+ - https://github.com/simonrob/email-oauth2-proxy
+*** Notmuch
+ - https://github.com/gauteh/lieer - GMail
+** Configuration management
+ - https://github.com/habitat-sh/habitat
+** File sync
+ - https://github.com/mutagen-io/mutagen
+ - https://github.com/tkellogg/dura
+ - https://github.com/perkeep/perkeep
+ - https://github.com/upspin/upspin
+ - https://github.com/mickael-kerjean/filestash Web file manager with org-mode support
+ - https://github.com/kd2org/karadav NextCloud-compatible WebDav server
+** Home automation
+ - https://github.com/Yakifo/amqtt
+** PaaS
+ - https://github.com/piku/piku
+** Virtualization
+ - https://github.com/karmab/kcli
+*** WASM
+ - https://github.com/WAVM/Wavix
+** BI
+ - https://github.com/getredash/redash
+ - https://github.com/metabase/metabase
+** Networking
+ - https://github.com/gekmihesg/ansible-openwrt
+ - https://github.com/glennbrown/home-infra/tree/main/ansible/roles/openwrt-dhcp-dns ansible-openwrt samples by glenn on Ars IRC.
+*** VPN
+ - https://github.com/StreisandEffect/streisand
+** Misc
+ - https://github.com/ventoy/Ventoy
+ - https://github.com/marcan/takeover.sh
+ - https://github.com/dflemstr/rq - Record tool
+ - https://github.com/asapach/peerflix-server - Torrent streaming server
+ - https://github.com/hauxir/rapidbay - Torrent streaming server
+ - https://github.com/ltratt/pizauth - OAuth tool
+** Emacs
+ - https://github.com/manateelazycat/lsp-bridge
+* Coding
+** Observability
+ - https://github.com/equinix-labs/otel-cli
+ - https://github.com/timescale/promscale
+ - https://github.com/SigNoz/signoz
+** Web development
+ - https://github.com/yudai/gotty - Terminals on the web
+ - https://github.com/BafS/Gutenberg - Printing
+ - https://github.com/barbajs/barba - Smooth transitions
+ - https://github.com/kristopolous/BOOTSTRA.386
+*** Frameworks
+ - https://github.com/hotwire-django/turbo-django
+ - https://github.com/mit-pdos/noria
+ - https://github.com/frappe/frappe
+ - https://github.com/aquametalabs/aquameta
+ - https://github.com/urweb/urweb
+ - https://github.com/workflowproducts/envelope
+ - https://github.com/PostgREST/postgrest
+** Notebooks
+ - https://github.com/jupytercalpoly/reactivepy
+ - https://github.com/executablebooks/jupyter-book
+** Testing
+ - https://github.com/Mikuu/Micoo - visual diff testing
+ - https://github.com/minimaxir/big-list-of-naughty-strings
+** Literate
+ - https://github.com/kosmikus/lhs2tex
+ - https://github.com/simplistix/sybil - testing in documentation
+** Distributed
+ - https://github.com/unisonweb/unison
+*** RPC
+**** gRPC
+ - https://github.com/grpc/grpc-web
+** Databases
+*** Relational
+ - https://github.com/EvgSkv/logica
+ - https://github.com/edgedb/edgedb - Graphs
+ - https://github.com/lorint/AdventureWorks-for-Postgres
+ - https://duckdb.org/ - SQLite alternative
+ - https://github.com/cozodb/cozo - Datalog in Rust, different backends
+**** Tools
+ - https://github.com/dbcli/pgcli
+ - https://github.com/okbob/pspg
+ - https://github.com/djrobstep/migra
+ - https://github.com/julianhyde/sqlline
+ - https://github.com/monkeysintown/jdbc-ssh
+ - https://github.com/xataio/pgroll PostgreSQL migrations based on switching schemas
+**** Streaming
+ - https://github.com/MaterializeInc/materialize
+ - https://github.com/confluentinc/ksql
+** Low code
+ - https://github.com/microsoft/Power-Fx
+ - https://github.com/carltongibson/neapolitan - one of Django's authors does a variation on the Django admin?
+** Python
+ - https://github.com/inducer/pudb Full-screen console debugger for Python
+ - https://pyoxidizer.readthedocs.io/en/latest/index.html Self-contained Python distributions, packaging, etc.
+** Rust
+ - https://github.com/flutter-rs/flutter-rs
+ - https://github.com/xTibor/rust-on-c64
+ - https://crates.io/crates/crossterm
+ - https://github.com/matklad/xshell - Rust as a shell replacement, with nice interpolation
+** Java
+ - https://github.com/jbangdev/jbang
+ - https://github.com/mabe02/lanterna - TUI
+ - https://github.com/testcontainers/testcontainers-java
+ - https://github.com/google/error-prone
+ - https://github.com/google/auto
+** Email
+ - https://github.com/moggers87/salmon - Mail applications
+** Misc
+ - https://github.com/DavHau/nix-portable - portable Nix, no root required, works in an LXC container
+ - https://github.com/rulex-rs/pomsky - regex alternative
+ - https://github.com/sourcegraph/doctree - code browser
+ - https://github.com/singer-io/getting-started
+ - https://github.com/kellyjonbrazil/jc - JSON converter for common CLI tools
+ - https://github.com/open-meteo/open-meteo/
+* Anti social
+** Platforms
+ - https://github.com/taviso/nntpit - Reddit over NNTP
+ - https://github.com/CyberShadow/DFeed - Web NNTP
+ - https://github.com/epilys/tade - Forum, mailing list, NNTP
+*** Fediverse
+ - https://github.com/bashrc2/epicyon
+ - https://microblog.pub/
+ - https://jointakahe.org/
+** Pubnix
+ - https://github.com/cwmccabe/pubnixhist
+** Other
+ - https://github.com/SimonBrazell/privacy-redirect
+ - https://github.com/benbusby/farside
+ - https://github.com/timhutton/twitter-archive-parser
+* Tabular data
+ - https://github.com/gristlabs/grist-core
+ - https://github.com/aardappel/treesheets
+ - https://www.visidata.org/
+* Hardware
+** Phones
+ - https://github.com/Dakkaron/Fairberry - add Blackberry keyboard to other phones
+ - https://github.com/Genymobile/scrcpy - remote control Android
+ - https://github.com/wolpi/prim-ftpd
+* Other
+ - https://ublue.it/
+ - https://github.com/fsquillace/junest - Arch chroots
+ - https://github.com/jhuangtw/xg2xg
+ - https://github.com/mzucker/noteshrink - handwriting to PDF
+ - https://github.com/kmonad/kmonad - advanced cross-platform keyboard remapping
+ - https://github.com/KDE/kitinerary/tree/master/src/lib/scripts - a lot of travel email scrapers (plane and train tickets, etc.)
diff --git a/PERSONAL_INFRA.md b/PERSONAL_INFRA.md
new file mode 100644
index 00000000..ced7c280
--- /dev/null
+++ b/PERSONAL_INFRA.md
@@ -0,0 +1,233 @@
+# My personal infrastructure
+
+* Hetzner auction server: 48Gb RAM, 2x2Tb HDD. Runs Proxmox, tinc/ocserv, Apache as reverse proxy
+ * LXC container running NextCloud
+ * LXC container running Miniflux
+ * LXC container running bitwarden_rs
+ * LXC container running an acquaintance's Twitter bot
+ * LXC container running Dokku, hosting a few personal apps
+ * LXC container running FreeIPA replica
+ * LXC container running Nagios
+ * LXC container running Grafana
+ * LXC container running Ipsilon
+ * LXC container running PostgreSQL
+ * LXC container running a workstation
+ * LXC container running Gitolite
+ * LXC container running FreeSWITCH
+* Flat 1
+ * HP Proliant Microserver: 4Gb RAM, 2x4Tb HDD
+ * DHCP/DNS
+ * Runs SMB/NFS
+ * ZFS backups on external USB drives
+ * tinc/ocserv
+ * Raspberry Pi (1Gb RAM) running LibreElec + TVHeadend, records to NFS share on HP server
+* Flat 2
+ * Raspberry Pi (1Gb RAM) running Rocky Linux, runs DHCP/DNS, tinc/ocserv
+* OVH 2Gb RAM VPS running FreeIPA (also tinc/ocserv)
+
+## Networking
+
+I like having working DNS, so I run dnsmasq on both flats and for the Proxmox network on the Hetzner server.
+It also does integrated DHCP (mostly everything gets a DHCP IP and thus, a hostname).
+Every environment has a /24 network with DNS/DHCP and their own domain (hetzner.int.mydomain, flat1.int.mydomain, etc.).
+I use Route 53 for DNS records (except those of my own networks). DNS records are created with Ansible playbooks.
+
+I have the following snippets on dnsmasq's configuration:
+
+```
+server=/flat1.mydomain/ip.of.flat1.dns
+rev-server=net.mask.of/flat1,ip.of.flat1.dns
+```
+
+So one dnsmasq instance can look up records (even reverse DNS) on the other dnsmasq instances, so I can address systems on other networks by their name.
+This could also be achieved by NS records, if I'm not mistaken, but this way everything is private on my own dnsmasq servers and not on public DNS.
+
+I join all networks using tinc in a mesh. Tinc keys are generated and distributed using an Ansible playbook.
+
+On every network I've also set up ocserv to provide remote access if I'm outside these networks; I can pick the closest access point and reach my entire network.
+
+## Authentication
+
+I run a two-node FreeIPA cluster.
+It provides a user directory and centralized auth, with passwordless single-sign on.
+It also has sudo integration, so I can sudo on all systems with a single password.
+
+Many systems and services are integrated in FreeIPA.
+My laptop is joined to the domain so I can even log in to some web applications without typing a password.
+
+Ipsilon adds SAML for some applications which do not support Kerberos.
+
+Ipsilon is backed by Red Hat, although they seem to have shifted their focus to Keycloak. Keycloak is much more featureful, but I prefer Ipsilon because:
+
+* It's deployed via RPM
+* Integration with FreeIPA is a one-liner
+* It's still used by the Fedora Project infrastructure
+
+FreeIPA and Ipsilon are running on CentOS 7 — I will probably reconsider this stack around 2024 when CentOS gets close to EOL.
+
+## Mail
+
+All systems are running Postfix configured to send emails through an account on my free G Suite account.
+This way I get notifications on failed cronjobs or automated updates.
+
+## TLS
+
+I set up certificates using certbot-route53 on Ansible playbooks.
+DNS verification allows me to run TLS on non-reachable hosts.
+
+I run the playbooks from my workstation periodically to renew certificates.
+
+## Monitoring
+
+I run Nagios monitoring all hosts and services.
+I get alerts for hosts and services being down.
+I use https://github.com/alexpdp7/ragent as the monitor, which also means I get notifications when a host is updated and requires a reboot.
+
+To monitor certain things, such as FreeIPA, I set up cronjobs which run health checks and drop the output somewhere in `/var/www/html/*`, which then I check using check_http.
+
+I also run Netdata on many hosts, which I can access via a reverse proxy at https://netdata.mydomain/$HOSTNAME with single sign on.
+
+## Configuration management
+
+I use Ansible playbooks to provision VMs and LXC containers on Proxmox.
+The playbooks add the new hosts automatically to FreeIPA, set up SSH, etc. See:
+
+https://github.com/alexpdp7/ansible-create-proxmox-host
+https://github.com/alexpdp7/ansible-create-proxmox-centos7-ipa
+
+I also use Ansible for some orchestration tasks (such as deploying FreeIPA replicas, handling Letsencrypt certificates, etc.).
+
+I use an Ansible playbook using https://github.com/alexpdp7/ansible-puppet/ to run Puppet to configure individual systems.
+
+### Operating systems
+
+I use:
+
+* Proxmox, as it provides LXC containers (and VMs if needed) and ZFS storage. I like ZFS for its protection against bitrot, and because send/recv and snapshots are great for backups
+* EL7/EL8, due to the long life cycle and stability. Due to the CentOS 8 life cycle changes, I'm switching CentOS 8 hosts to Rocky Linux, while CentOS 7 remains.
+* Rocky Linux for my server Raspberry.
+* LibreElec for my mediacenter Raspberry. Common distros are not an option, as they don't support hardware video acceleration. LibreElec sets up everything I need with minimal fuss, so while it's the system that doesn't use configuration management, it works fine.
+
+### Software updates
+
+I use `yum-cron` on EL7, `dnf-automatic` on EL8 and `unattended-upgrades` on Debian/Ubuntu so updates are automatically installed.
+
+`ragent` monitors when systems need a reboot and warns me through Nagios.
+
+### Packaging
+
+* https://github.com/alexpdp7/bitwarden_rs/tree/rpm / https://copr.fedorainfracloud.org/coprs/koalillo/bitwarden_rs/
+* https://github.com/alexpdp7/nextcloud-rpm / https://copr.fedorainfracloud.org/coprs/koalillo/nextcloud/
+
+## Storage
+
+I run Nextcloud on an LXC container, files are stored in a ZFS filesystem.
+
+Media and other non-critical files are stored in the Proliant and shared via Samba and NFS.
+
+### Media
+
+I run a Jellyfin server on the Proliant to serve media to phones, a MiBox and a Raspberry running LibreElec.
+
+The Raspberry has a DVB-T tuner and TVHeadend, recordings are stored on the Proliant in an NFS share.
+
+### Backup
+
+Valuable data is on dedicated datasets. Each Proxmox host (the Proliant and the Hetzner server) run scripts daily that create snapshots.
+
+The Hetzner server sends/receives datasets to the Proliant daily.
+
+I send/receive datasets from the Proliant to USB drives using ZFS.
+
+## Dokku
+
+I use Dokku to host a few personal applications, so I can update them with `git push`. I also have Ansible playbooks to set up the applications and handle some of them which have more complex deployments.
+
+## Why not Docker/Kubernetes?
+
+Delivering applications as Docker images is massively popular right now, so it's worth explaining why I'm running VMs and LXC containers and I'm not more container-driven.
+
+#### Some things are not containerized
+
+FreeIPA has some support for running in containers, but it doesn't seem to be the most recommended way of running it.
+While it's not a "core" service, I don't think I have an alternative way to get some of its benefits (single users/passwords database across other services and systems, single-sign on, sudo management, etc.).
+It looks like integrating Ipsilon with FreeIPA if Ipsilon is running in a container would not be easy/supported either.
+
+WordPress has Docker images, but like the EPEL packages I use, they don't seem to be officially supported by Wordpress.
+However, both seem to be well maintained, but EPEL packages are automatically updated using the same process as the rest of my systems (`yum-cron`).
+
+I need to have non-container infrastructure in place, so I have the option of running additional things there or adding the overhead of setting up container infrastructure on top.
+Containerization has its advantages, so it's just an equation about how much you benefit from containers compared to the overhead of having container infrastructure.
+
+#### Some containerized things are special
+
+Dokku is its own special system. It could be replaced completely with Kubernetes, but with additional complexity.
+
+#### Containerization infrastructure has its cost
+
+There are lightweight ways to run containers; docker-compose, plain Docker, etc.
+These require significant additional work to automate them to the level of my existing non-containerized infrastructure (automation, backups, etc.), but they consume little additional resources.
+The cost analysis for those is that my existing automation works and the cost of re-implementing them makes them not worthwhile at this point.
+
+Heavyweight solutions like Kubernetes tend to consume more resources, but have better automation features built-in.
+The cost analysis for those is that with the money I'm spending now (single 48gb RAM Hetzner dedicated server) I wouldn't be able to run the same amount of stuff.
+If I was running a significantly greater amount of stuff or I had high-availability requirements, then this would change.
+
+#### Conclusion
+
+If I was starting from scratch, perhaps a light-weight container solution would have been worthwhile, as some services might be easier to provide using a container approach. Also perhaps setting up the automation/etc. would be easier and would give me some advantages.
+
+If I was running more services or had greater availability requirements, a cluster-ready solution like Kubernetes would probably be required.
+
+In my current situation, with the work already performed, I don't think investing more in containers is the most effective use of my limited resources.
+
+## My blog
+
+I was never a fan of WordPress (I prefer other platforms to PHP and MySQL), although Remi maintains very up-to-date EPEL 7 packages.
+
+However, after reading about Geminispace, I decided to port my blog to Geminispace and skip migrating from EL 7 to EL 8. Right now I run some custom scripts that generate a static blog and serve it using Agate in my workstation container. I run a Kineto proxy on Dokku that makes the content available through conventional HTML/HTTP. See details at:
+
+https://github.com/alexpdp7/gemini_blog
+
+## Phones
+
+I wanted to eliminate my landlines, because I get a ton of spam there.
+However, I need to provide calls between my home and another home using physical phones (people like wireless headsets — smartphones are not really well designed for extended phone calls).
+
+The key to this is the SIP protocol.
+You can get classical phones that work using the SIP protocol, or ATA devices that turn a regular phone into a SIP phone.
+
+I installed FreeSWITCH from the [OKay repo](https://okay.network/blog-news/rpm-repositories-for-centos-6-and-7.html).
+FreeSWITCH comes with a fairly complete default configuration.
+By default it will set up extensions in the 1000...1020 range, with a configurable single password for all extensions, plus some extensions for test calls, etc.
+
+The major difficulty in setting a SIP server is networking.
+I run FreeSWITCH in an LXC container on Proxmox.
+I expose the SIP server's SSL TCP port to the Internet, plus a range of UDP ports, using iptables.
+(I consulted some SIP forums, and apparently there are no major hardening requirements in exposing a SIP server to the Internet, although I think maybe it's better to use a SIP proxy.)
+You can also use STUN/TURN servers, but I had lots of trouble getting that set up.
+Also by default, FreeSWITCH figures out a public IP- if you want to get FreeSWITCH working behind a VPN, you need to disable that.
+
+For the phones, I bought and set up two Grandstream HT801 ATA devices.
+Those are quite cheap (around 40€), but they are quite fancy professional network devices, with a rough but featureful UI (they can do OpenVPN, SNMP, etc.).
+They connect directly to FreeSWITCH over the Internet, autoconfiguring via DHCP, so in theory they could work anywhere in the world with a network connection.
+After configuration and assigning an extension, you only need to connect cheap wireless phones to them, and start making calls with the 1000...1020 extensions.
+
+For testing and occasional calls I use [Baresip](https://github.com/baresip/baresip) from F-Droid in my smartphone, and from Debian in my laptop.
+For smartphones, SIP has the drawback that it requires a persistent connection to the SIP server to receive calls — thus draining the battery a bit.
+Some SIP setups use push notifications to get around that, but that seemed to be complex.
+So the only devices that are connected 24/7 are the ATAs, I use my smartphone and my laptop occasionally.
+
+SIP enables a lot of other interesting functionality, such as:
+
+* Instant messaging
+* Videoconferencing
+* Advanced phone features (conferences, barging in, voicemail, automation)
+
+So you can do really fancy stuff with it, but I haven't looked into it, because I just need calls between two households on physical classical wireless handsets.
+
+## Possible improvements
+
+* Better syncing of user files. NextCloud out of the box only works on systems with a graphical interface. There are solutions to mount NextCloud using WebDAV, but I prefer to do a sync (so I can still access my files if the server is down) and to run the client headless, while staying within supported solutions. Probably syncthing would be a good solution for headless systems to sync dotfiles, etc.
+* Add a lab so I can experiment with things in isolated environments.
+* Set up SSO on my smartphone, perhaps do some MDM
diff --git a/blog_experiment/.gitignore b/blog_experiment/.gitignore
new file mode 100644
index 00000000..bee8a64b
--- /dev/null
+++ b/blog_experiment/.gitignore
@@ -0,0 +1 @@
+__pycache__
diff --git a/blog_experiment/NOTES.md b/blog_experiment/NOTES.md
new file mode 100644
index 00000000..247d2096
--- /dev/null
+++ b/blog_experiment/NOTES.md
@@ -0,0 +1 @@
+$ podman run -it --rm -v $PWD:/workspace -v ...auth.json:/home/cnb/.docker/config.json --security-opt label=disable docker.io/paketobuildpacks/builder-jammy-full /cnb/lifecycle/creator -app /workspace quay.io/alexpdp7/blog
diff --git a/blog_experiment/blog/__init__.py b/blog_experiment/blog/__init__.py
new file mode 100644
index 00000000..81332ddd
--- /dev/null
+++ b/blog_experiment/blog/__init__.py
@@ -0,0 +1,54 @@
+import pathlib
+import re
+
+import bicephalus
+
+import htmlgenerator as h
+
+from blog import blog_pages, page, html, pretty, gemtext
+
+
class SimplePage(page.BasePage):
    """A page whose gemtext source lives at static/<url>index.gmi.

    The HTML representation is derived on the fly from the gemtext
    source, wrapped in a minimal HTML document titled *title*.
    """

    def __init__(self, request, url, title):
        super().__init__(request)
        self.url = url
        self.title = title

    def get_gemini_content(self):
        source = pathlib.Path(f"static{self.url}index.gmi")
        return bicephalus.Status.OK, "text/gemini", source.read_text()

    def get_http_content(self):
        # Reuse the gemtext variant as the single source of truth.
        _, _, gemtext_source = self.get_gemini_content()
        body_elements = html.gemini_to_html(gemtext.parse(gemtext_source))
        document = h.HTML(
            h.HEAD(
                h.TITLE(self.title),
            ),
            h.BODY(*body_elements),
        )
        return bicephalus.Status.OK, "text/html", pretty.pretty_html(h.render(document, {}))
+
+
def handler(request: bicephalus.Request) -> bicephalus.Response:
    """Route a Gemini or HTTP request to the page that serves it.

    Unmatched paths fall through to the NotFound page.
    """
    path = request.path

    if path == "/":
        return blog_pages.Root(request).response()

    # Blog entries live at /YYYY/MM/slug/ and map to content/YYYY/MM/slug.gmi.
    if re.match(r"/\d{4}/\d{2}/.*/", path):
        entry_source = pathlib.Path("content") / (path[1:-1] + ".gmi")
        if entry_source.exists():
            return blog_pages.EntryPage(request, entry_source).response()

    # The RSS feed only makes sense over HTTP.
    if path == "/feed/" and request.proto == bicephalus.Proto.HTTP:
        return blog_pages.Root(request).feed()

    if path == "/about/":
        return SimplePage(request, path, "About Álex Córcoles").response()

    if path == "/laspelis/":
        return SimplePage(request, path, "laspelis").response()

    if re.match(r"/laspelis/\d+/?", path):
        normalized = path.removesuffix("/") + "/"
        return SimplePage(request, normalized, path).response()

    return page.NotFound(request).response()
diff --git a/blog_experiment/blog/__main__.py b/blog_experiment/blog/__main__.py
new file mode 100644
index 00000000..21033902
--- /dev/null
+++ b/blog_experiment/blog/__main__.py
@@ -0,0 +1,36 @@
+import argparse
+import logging
+import sys
+
+from bicephalus import main as bicephalus_main
+from bicephalus import otel
+from bicephalus import ssl
+
+import blog
+
+from blog import meta
+
+
def main():
    """Entry point: parse CLI arguments and serve the blog on port 8000.

    With --key-cert the supplied TLS material is used; otherwise a
    temporary certificate for localhost is generated.
    """
    otel.configure_logging(logging.INFO)

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--key-cert",
        nargs=2,
        metavar=("KEY", "CERT"),
        help="Path to a key and a file",
    )
    parser.add_argument("schema")
    parser.add_argument("host")
    args = parser.parse_args()

    # Publish schema/host globally; URL generation reads them from meta.
    meta.SCHEMA = args.schema
    meta.HOST = args.host

    if args.key_cert:
        key, cert = args.key_cert
        context_manager = ssl.ssl_context_from_files(cert, key)
    else:
        context_manager = ssl.temporary_ssl_context("localhost")

    with context_manager as ssl_context:
        bicephalus_main.main(blog.handler, ssl_context, 8000)
    sys.exit(0)


if __name__ == "__main__":
    main()
diff --git a/blog_experiment/blog/blog_pages.py b/blog_experiment/blog/blog_pages.py
new file mode 100644
index 00000000..7802c00b
--- /dev/null
+++ b/blog_experiment/blog/blog_pages.py
@@ -0,0 +1,139 @@
+import datetime
+import itertools
+import pathlib
+import textwrap
+
+import bicephalus
+
+import htmlgenerator as h
+
+from feedgen import feed
+
+from blog import html, page, gemtext, meta, pretty
+
+
def gemini_links():
    """Render meta.LINKS as gemtext link lines ("=> url text"), newline-joined."""
    rendered = []
    for text, url in meta.LINKS:
        rendered.append(f"=> {url} {text}")
    return "\n".join(rendered)
+
+
class Entry:
    """A blog entry backed by a content/YYYY/MM/slug.gmi gemtext file.

    The file is expected to start with a level-1 header holding the
    title, followed by an ISO date line and a blank line.
    """

    def __init__(self, path: pathlib.Path):
        assert path.is_relative_to(pathlib.Path("content")), f"bad path {path}"
        self.path = path
        self.content = path.read_text()

    @property
    def title(self):
        # Drop the leading "# " of the level-1 header.
        return self.content.splitlines()[0][2:]

    @property
    def posted(self):
        return datetime.date.fromisoformat(self.content.splitlines()[1])

    @property
    def uri(self):
        year, month = self.path.parts[1], self.path.parts[2]
        return f"/{year}/{month}/{self.path.stem}/"

    @property
    def edit_url(self):
        return f"https://github.com/alexpdp7/gemini_blog/edit/master/content{self.uri[:-1]}.gmi"

    def html(self):
        parsed = gemtext.parse(self.content)

        # The title/date/blank preamble is rendered elsewhere; verify it
        # is present and skip it.
        assert isinstance(parsed[0], gemtext.Header)
        assert parsed[0].level == 1
        assert isinstance(parsed[1], gemtext.Line)
        assert parsed[2] == gemtext.Line("")

        rendered = html.gemini_to_html(parsed[3:])
        rendered.append(h.P(h.A("Editar", href=self.edit_url)))
        return rendered
+
+
class Root(page.BasePage):
    """The blog index: lists every entry, newest first, plus the RSS feed."""

    def entries(self):
        """Return all entries under content/, sorted by date, newest first."""
        found = [Entry(p) for p in pathlib.Path("content").glob("*/*/*.gmi")]
        found.sort(key=lambda entry: entry.posted, reverse=True)
        return found

    def get_gemini_content(self):
        index = "\n".join(
            f"=> {entry.uri} {entry.posted} {entry.title}" for entry in self.entries()
        )
        header = textwrap.dedent(
            f"""\
            # {meta.TITLE}

            ## {meta.SUBTITLE}

            """
        )
        content = header + gemini_links() + f"\n{meta.EMAIL_TEXT}\n" + "\n" + index
        return bicephalus.Status.OK, "text/gemini", content

    def get_http_content(self):
        headings = [
            h.H3(h.A(f"{entry.title} ({entry.posted})", href=entry.uri))
            for entry in self.entries()
        ]
        return bicephalus.Status.OK, "text/html", html.html_template(*headings)

    def feed(self):
        """Build the RSS feed from the ten most recent entries."""
        generator = feed.FeedGenerator()
        generator.title(meta.TITLE)
        generator.subtitle(meta.SUBTITLE)
        generator.link(href=f"{meta.SCHEMA}://{meta.HOST}", rel="self")

        for entry in self.entries()[:10]:
            item = generator.add_entry()
            item.link(href=f"{meta.SCHEMA}://{meta.HOST}/{entry.uri}")
            # Entries only carry a date; publish at midnight UTC.
            item.published(
                datetime.datetime.combine(
                    entry.posted,
                    datetime.datetime.min.time(),
                    tzinfo=datetime.timezone.utc,
                )
            )
            item.title(entry.title)
            rendered = pretty.pretty_html(h.render(h.BaseElement(*entry.html()), {}))
            item.content(rendered, type="html")

        return bicephalus.Response(
            status=bicephalus.Status.OK,
            content_type="application/rss+xml",
            content=generator.rss_str(pretty=True),
        )
+
+
class EntryPage(page.BasePage):
    """A single blog entry served as gemtext or HTML."""

    def __init__(self, request, path):
        super().__init__(request)
        self.path = path
        self.entry = Entry(path)

    def get_gemini_content(self):
        # Surround the raw entry with a home link header and an edit footer.
        header = textwrap.dedent(f"""\
            => gemini://{meta.HOST} alex.corcoles.net
            {meta.EMAIL_TEXT}

            """)
        footer = textwrap.dedent(f"""\
            => {self.entry.edit_url} Editar
            """)
        return bicephalus.Status.OK, "text/gemini", header + self.entry.content + footer

    def get_http_content(self):
        page_title = f"{self.entry.title} - {self.entry.posted}"
        body = html.html_template(*self.entry.html(), page_title=page_title)
        return bicephalus.Status.OK, "text/html", body
diff --git a/blog_experiment/blog/gemtext.py b/blog_experiment/blog/gemtext.py
new file mode 100644
index 00000000..66298e3f
--- /dev/null
+++ b/blog_experiment/blog/gemtext.py
@@ -0,0 +1,223 @@
+import dataclasses
+import re
+import typing
+
+
def parse(s):
    """Parse a gemtext document into a flat list of typed elements.

    Runs of list items, runs of quote lines and fenced blocks are
    grouped into List, BlockQuote and Pre elements; every other line
    maps to a single element (Header, Link or plain Line).

    >>> parse('''# Header 1
    ...
    ... ## Header 2
    ...
    ... ### Header 3
    ...
    ... * List 1
    ... * List 2
    ...
    ... > First line quote.
    ... > Second line of quote.
    ...
    ... ```
    ... Fenced
    ... Lines
    ... ```
    ...
    ... Paragraph.
    ...
    ... Another paragraph.
    ... ''')
    [Header(level=1, text='Header 1'),
     Line(text=''),
     Header(level=2, text='Header 2'),
     Line(text=''),
     Header(level=3, text='Header 3'),
     Line(text=''),
     List(items=[ListItem(text='List 1'),
                 ListItem(text='List 2')]),
     Line(text=''),
     BlockQuote(lines=[BlockQuoteLine(text='First line quote.'),
                       BlockQuoteLine(text='Second line of quote.')]),
     Line(text=''),
     Pre(content='Fenced\\nLines\\n'),
     Line(text=''),
     Line(text='Paragraph.'),
     Line(text=''),
     Line(text='Another paragraph.')]
    """
    lines = s.splitlines()
    total = len(lines)
    elements = []
    cursor = 0

    while cursor < total:
        current = parse_line(lines[cursor])

        if isinstance(current, ListItem):
            group = []
            while cursor < total and isinstance((item := parse_line(lines[cursor])), ListItem):
                group.append(item)
                cursor += 1
            elements.append(List(group))
        elif isinstance(current, BlockQuoteLine):
            group = []
            while cursor < total and isinstance((quote := parse_line(lines[cursor])), BlockQuoteLine):
                group.append(quote)
                cursor += 1
            elements.append(BlockQuote(group))
        elif isinstance(current, PreFence):
            # Capture raw lines verbatim until the closing fence (or EOF).
            cursor += 1
            captured = []
            while cursor < total and not isinstance(parse_line(lines[cursor]), PreFence):
                captured.append(lines[cursor])
                cursor += 1
            elements.append(Pre("".join(part + "\n" for part in captured)))
            cursor += 1  # skip the closing fence
        else:
            # Header, Link and plain Line pass through unchanged.
            elements.append(current)
            cursor += 1

    return elements


def parse_line(raw):
    """Classify a single raw gemtext line into its element type."""
    for predicate, factory in (
        (Link.is_link, Link),
        (Header.is_header, Header),
        (ListItem.is_list_item, ListItem),
        (BlockQuoteLine.is_block_quote_line, BlockQuoteLine),
    ):
        if predicate(raw):
            return factory(raw)
    if PreFence.is_pre_fence(raw):
        return PreFence()
    return Line(raw)


@dataclasses.dataclass
class Link:
    """A "=> url [text]" gemtext link line.

    >>> Link("=> http://example.com")
    Link(url='http://example.com', text=None)

    >>> Link("=> http://example.com Example text")
    Link(url='http://example.com', text='Example text')
    """

    url: str
    text: typing.Optional[str]

    def __init__(self, line: str):
        assert Link.is_link(line)
        # Split into at most three tokens: marker, url, optional text.
        tokens = line.split(None, 2)
        self.url = tokens[1]
        self.text = tokens[2] if len(tokens) > 2 else None

    @staticmethod
    def is_link(line: str):
        return line.startswith("=>")


@dataclasses.dataclass
class Header:
    """A gemtext header line; level is the number of leading hashes (1-3).

    >>> Header("# Level one")
    Header(level=1, text='Level one')

    >>> Header("## Level two")
    Header(level=2, text='Level two')

    >>> Header("### Level three")
    Header(level=3, text='Level three')
    """

    level: int
    text: str

    def __init__(self, line: str):
        assert Header.is_header(line)
        marker, self.text = line.split(None, 1)
        self.level = len(marker)

    @staticmethod
    def is_header(line: str):
        return re.match("#{1,3} .*", line)


@dataclasses.dataclass
class ListItem:
    """A "* item" gemtext list line.

    >>> ListItem("* foo")
    ListItem(text='foo')
    """

    text: str

    def __init__(self, line: str):
        assert ListItem.is_list_item(line)
        self.text = line[2:]

    @staticmethod
    def is_list_item(line: str):
        return line.startswith("* ")


@dataclasses.dataclass
class BlockQuoteLine:
    """A ">"-prefixed quote line; a space after ">" is optional.

    >>> BlockQuoteLine("> foo")
    BlockQuoteLine(text='foo')

    >>> BlockQuoteLine(">foo")
    BlockQuoteLine(text='foo')
    """

    text: str

    def __init__(self, line: str):
        assert BlockQuoteLine.is_block_quote_line(line)
        self.text = line[2:] if line.startswith("> ") else line[1:]

    @staticmethod
    def is_block_quote_line(line: str):
        return line.startswith(">")


class PreFence:
    """The "```" line delimiting a preformatted block."""

    @staticmethod
    def is_pre_fence(line: str):
        return line == "```"


@dataclasses.dataclass
class Line:
    """Any line that matched no other element type (including blank lines)."""

    text: str


@dataclasses.dataclass
class List:
    """A run of consecutive list items."""

    items: typing.List[ListItem]


@dataclasses.dataclass
class BlockQuote:
    """A run of consecutive quote lines."""

    lines: typing.List[BlockQuoteLine]


@dataclasses.dataclass
class Pre:
    """The verbatim content between two pre fences, newline-terminated."""

    content: str
diff --git a/blog_experiment/blog/html.py b/blog_experiment/blog/html.py
new file mode 100644
index 00000000..8aa92f4a
--- /dev/null
+++ b/blog_experiment/blog/html.py
@@ -0,0 +1,101 @@
+import itertools
+
+import htmlgenerator as h
+
+from blog import meta, pretty, gemtext
+
+
def html_template(*content, page_title=None):
    """Wrap *content* in the site-wide HTML scaffolding.

    Builds the <head> (title plus RSS alternate link) and the page
    header (site title linking home, subtitle, contact/link line),
    then appends the caller's content inside <body>.
    """
    title = [h.A(meta.TITLE, href=f"{meta.SCHEMA}://{meta.HOST}")]
    if page_title:
        # Fix: the original did `title += f"..."`, which extends the list
        # one character at a time (list += str iterates characters).
        # Appending the whole string produces identical rendered text.
        title.append(f" - {page_title}")

    title = h.BaseElement(*title)

    # Interleave each link with ", " separators.
    links = list(itertools.chain(*[(h.A(text, href=href), ", ") for text, href in meta.LINKS]))

    # Fix: the original did `links += h.BaseElement(...)`, relying on
    # BaseElement being iterable; append the text directly instead
    # (h.render flattens plain strings the same way).
    links.append(f" {meta.EMAIL_TEXT}")

    return pretty.pretty_html(h.render(
        h.HTML(
            h.HEAD(
                h.TITLE(meta.TITLE + (f" - {page_title}" if page_title else "")),
                h.LINK(rel="alternate", type="application/rss+xml", title=meta.TITLE, href=f"{meta.SCHEMA}://{meta.HOST}/feed/"),
            ),
            h.BODY(
                h.H1(title),
                h.H2(meta.SUBTITLE),
                h.P(*links),
                *content,
            ),
            doctype="html",
        ),
        {},
    ))
+
+
def gemini_to_html(parsed):
    """Convert parsed gemtext elements into htmlgenerator elements.

    Blank lines are dropped; runs of consecutive non-blank plain lines
    merge into one <p> separated by <br>; gemini:// links are rewritten
    to HTTP equivalents.
    """
    converted = []
    idx = 0
    count = len(parsed)

    while idx < count:
        element = parsed[idx]

        if isinstance(element, gemtext.Header):
            heading = (h.H1, h.H2, h.H3, h.H4, h.H5, h.H6)[element.level - 1]
            converted.append(heading(element.text))
            idx += 1
        elif isinstance(element, gemtext.List):
            converted.append(h.UL(*(h.LI(item.text) for item in element.items)))
            idx += 1
        elif isinstance(element, gemtext.Link):
            href = element.url
            if href.startswith("gemini://alex.corcoles.net/"):
                # Own content: point at this site's HTTP mirror.
                href = href.replace("gemini://alex.corcoles.net/", f"{meta.SCHEMA}://{meta.HOST}/")
            elif href.startswith("gemini://"):
                # Foreign gemini content: route through a web portal.
                href = href.replace("gemini://", "https://portal.mozz.us/gemini/")
            converted.append(h.P(h.A(element.text or element.url, href=href)))
            idx += 1
        elif element == gemtext.Line(""):
            # Blank lines only separate blocks; render nothing.
            idx += 1
        elif isinstance(element, gemtext.BlockQuote):
            quoted = []
            for line in element.lines:
                if line.text:
                    quoted.append(line.text)
                quoted.append(h.BR())
            converted.append(h.BLOCKQUOTE(*quoted))
            idx += 1
        elif isinstance(element, gemtext.Line):
            # Merge following non-blank plain lines into this paragraph.
            paragraph = [element.text]
            idx += 1
            while idx < count:
                following = parsed[idx]
                if isinstance(following, gemtext.Line) and following.text != "":
                    paragraph.append(h.BR())
                    paragraph.append(following.text)
                    idx += 1
                else:
                    break
            converted.append(h.P(*paragraph))
        elif isinstance(element, gemtext.Pre):
            converted.append(h.PRE(element.content))
            idx += 1
        else:
            assert False, f"unknown element {element}"

    return converted
diff --git a/blog_experiment/blog/meta.py b/blog_experiment/blog/meta.py
new file mode 100644
index 00000000..e92b2acb
--- /dev/null
+++ b/blog_experiment/blog/meta.py
@@ -0,0 +1,13 @@
# Site-wide constants shared by the page and feed generators.

TITLE = "El blog es mío"
SUBTITLE = "Hay otros como él, pero este es el mío"
# Filled in at startup from the command line (see __main__.main).
HOST = None
SCHEMA = None

# (text, url) pairs rendered in the HTML header and the gemtext index.
LINKS = (
    ("GitHub", "https://github.com/alexpdp7/"),
    ("LinkedIn", "https://es.linkedin.com/in/alexcorcoles"),
    ("Project Euler", "https://projecteuler.net/profile/koalillo.png"),
    ("Stack Exchange", "https://stackexchange.com/users/13361/alex"),
)

# Obfuscated contact instructions shown instead of a mailto: link.
EMAIL_TEXT = "escríbeme cogiendo el dominio de esta web y cambiando el primer punto por una arroba"
diff --git a/blog_experiment/blog/page.py b/blog_experiment/blog/page.py
new file mode 100644
index 00000000..fcc4841a
--- /dev/null
+++ b/blog_experiment/blog/page.py
@@ -0,0 +1,37 @@
+import bicephalus
+
+
class BasePage:
    """Base class for pages served over both Gemini and HTTP.

    Subclasses implement get_gemini_content() and get_http_content(),
    each returning a (status, content_type, content) tuple.
    """

    def __init__(self, request):
        self.request = request

    def response(self):
        """Dispatch on the request protocol and build the response.

        Bug fix: the status returned by the content method is now
        honored; previously bicephalus.Status.OK was hard-coded, so
        error pages (e.g. NotFound) were served with a success status.
        """
        if self.request.proto == bicephalus.Proto.GEMINI:
            status, content_type, content = self.get_gemini_content()
        elif self.request.proto == bicephalus.Proto.HTTP:
            status, content_type, content = self.get_http_content()
        else:
            assert False, f"unknown protocol {self.request.proto}"

        return bicephalus.Response(
            content=content.encode("utf8"),
            content_type=content_type,
            status=status,
        )
+
+
class NotFound(BasePage):
    """404 page for both protocols."""

    def get_gemini_content(self):
        # NOTE(review): BasePage.response hard-codes Status.OK, so this
        # status does not actually reach the client ("does not work!").
        message = f"{self.request.path} not found"
        return bicephalus.Status.NOT_FOUND, "text/gemini", message

    def get_http_content(self):
        message = f"{self.request.path} not found"
        return bicephalus.Status.NOT_FOUND, "text/html", message
diff --git a/blog_experiment/blog/pretty.py b/blog_experiment/blog/pretty.py
new file mode 100644
index 00000000..2ae916a7
--- /dev/null
+++ b/blog_experiment/blog/pretty.py
@@ -0,0 +1,5 @@
+from lxml import etree, html
+
+
def pretty_html(s):
    """Parse *s* as HTML and re-serialize it with pretty-print indentation."""
    tree = html.fromstring(s)
    return etree.tostring(tree, pretty_print=True).decode("utf8")
diff --git a/blog_experiment/build.rs b/blog_experiment/build.rs
new file mode 100755
index 00000000..0e366350
--- /dev/null
+++ b/blog_experiment/build.rs
@@ -0,0 +1,26 @@
#!/usr/bin/env -S cargo +nightly -Zscript
```cargo
[dependencies]
clap = { version = "4.4.6", features = ["derive", "env"] }
paars = { git = "https://github.com/alexpdp7/paars.git" }
```
// Single-file cargo script (nightly -Zscript) that builds a container
// image from an application directory via the paars helper.
use std::path::PathBuf;
use clap::Parser;

/// Command-line arguments for the image build.
#[derive(Parser, Debug)]
#[command()]
struct Args {
    /// Docker config (auth) file; also settable via the
    /// DOCKER_CONFIG environment variable (clap `env` attribute).
    #[clap(long, env)]
    docker_config: PathBuf,

    /// Directory containing the application to package.
    #[clap()]
    dir: PathBuf,

    /// Target image reference (e.g. quay.io/user/name).
    #[clap()]
    image: String,
}

fn main() {
    let args = Args::parse();
    // paars drives the build of `dir` into `image` using the given
    // Docker credentials — presumably a buildpacks flow; see
    // https://github.com/alexpdp7/paars for details.
    paars::build_image(args.dir, args.docker_config, args.image);
}
diff --git a/blog_experiment/gemini_blog b/blog_experiment/gemini_blog
new file mode 160000
+Subproject 4f86ec0553d659fe3253a92657d85a96734d004
diff --git a/blog_experiment/k8s.yaml b/blog_experiment/k8s.yaml
new file mode 100644
index 00000000..0e96851e
--- /dev/null
+++ b/blog_experiment/k8s.yaml
@@ -0,0 +1,91 @@
+apiVersion: v1
+kind: List
+items:
+- apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: blog
+- apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ labels:
+ app: blog
+ name: blog
+ namespace: blog
+ spec:
+ selector:
+ matchLabels:
+ app: blog
+ template:
+ metadata:
+ labels:
+ app: blog
+ spec:
+ containers:
+ - command:
+ - /cnb/lifecycle/launcher
+ - python
+ - -m
+ - blog
+ - --key-cert
+ - /tls-gemini/tls.key
+ - /tls-gemini/tls.crt
+ - https
+ - blog.pdp7.net
+ image: quay.io/alexpdp7/blog:latest
+ imagePullPolicy: Always
+ name: blog
+ volumeMounts:
+ - mountPath: /tls-gemini
+ name: tls-gemini
+ volumes:
+ - name: tls-gemini
+ secret:
+ secretName: tls-gemini
+- apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ app: blog
+ name: http
+ namespace: blog
+ spec:
+ ports:
+ - port: 8000
+ protocol: TCP
+ targetPort: 8000
+ selector:
+ app: blog
+- apiVersion: v1
+ kind: Service
+ metadata:
+ labels:
+ app: blog
+ name: gemini
+ namespace: blog
+ spec:
+ ports:
+ - nodePort: 31965
+ port: 1965
+ protocol: TCP
+ targetPort: 1965
+ selector:
+ app: blog
+ type: NodePort
+- apiVersion: networking.k8s.io/v1
+ kind: Ingress
+ metadata:
+ name: http
+ namespace: blog
+ spec:
+ rules:
+ - host: blog.pdp7.net
+ http:
+ paths:
+ - backend:
+ service:
+ name: http
+ port:
+ number: 8000
+ path: /
+ pathType: Prefix
diff --git a/blog_experiment/poetry.lock b/blog_experiment/poetry.lock
new file mode 100644
index 00000000..57ce657d
--- /dev/null
+++ b/blog_experiment/poetry.lock
@@ -0,0 +1,1324 @@
+# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+
+[[package]]
+name = "aiohttp"
+version = "3.8.6"
+description = "Async http client/server framework (asyncio)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:41d55fc043954cddbbd82503d9cc3f4814a40bcef30b3569bc7b5e34130718c1"},
+ {file = "aiohttp-3.8.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1d84166673694841d8953f0a8d0c90e1087739d24632fe86b1a08819168b4566"},
+ {file = "aiohttp-3.8.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:253bf92b744b3170eb4c4ca2fa58f9c4b87aeb1df42f71d4e78815e6e8b73c9e"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3fd194939b1f764d6bb05490987bfe104287bbf51b8d862261ccf66f48fb4096"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c5f938d199a6fdbdc10bbb9447496561c3a9a565b43be564648d81e1102ac22"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2817b2f66ca82ee699acd90e05c95e79bbf1dc986abb62b61ec8aaf851e81c93"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0fa375b3d34e71ccccf172cab401cd94a72de7a8cc01847a7b3386204093bb47"},
+ {file = "aiohttp-3.8.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9de50a199b7710fa2904be5a4a9b51af587ab24c8e540a7243ab737b45844543"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e1d8cb0b56b3587c5c01de3bf2f600f186da7e7b5f7353d1bf26a8ddca57f965"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8e31e9db1bee8b4f407b77fd2507337a0a80665ad7b6c749d08df595d88f1cf5"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:7bc88fc494b1f0311d67f29fee6fd636606f4697e8cc793a2d912ac5b19aa38d"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ec00c3305788e04bf6d29d42e504560e159ccaf0be30c09203b468a6c1ccd3b2"},
+ {file = "aiohttp-3.8.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad1407db8f2f49329729564f71685557157bfa42b48f4b93e53721a16eb813ed"},
+ {file = "aiohttp-3.8.6-cp310-cp310-win32.whl", hash = "sha256:ccc360e87341ad47c777f5723f68adbb52b37ab450c8bc3ca9ca1f3e849e5fe2"},
+ {file = "aiohttp-3.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:93c15c8e48e5e7b89d5cb4613479d144fda8344e2d886cf694fd36db4cc86865"},
+ {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e2f9cc8e5328f829f6e1fb74a0a3a939b14e67e80832975e01929e320386b34"},
+ {file = "aiohttp-3.8.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e6a00ffcc173e765e200ceefb06399ba09c06db97f401f920513a10c803604ca"},
+ {file = "aiohttp-3.8.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:41bdc2ba359032e36c0e9de5a3bd00d6fb7ea558a6ce6b70acedf0da86458321"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14cd52ccf40006c7a6cd34a0f8663734e5363fd981807173faf3a017e202fec9"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2d5b785c792802e7b275c420d84f3397668e9d49ab1cb52bd916b3b3ffcf09ad"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1bed815f3dc3d915c5c1e556c397c8667826fbc1b935d95b0ad680787896a358"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96603a562b546632441926cd1293cfcb5b69f0b4159e6077f7c7dbdfb686af4d"},
+ {file = "aiohttp-3.8.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d76e8b13161a202d14c9584590c4df4d068c9567c99506497bdd67eaedf36403"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e3f1e3f1a1751bb62b4a1b7f4e435afcdade6c17a4fd9b9d43607cebd242924a"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:76b36b3124f0223903609944a3c8bf28a599b2cc0ce0be60b45211c8e9be97f8"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a2ece4af1f3c967a4390c284797ab595a9f1bc1130ef8b01828915a05a6ae684"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:16d330b3b9db87c3883e565340d292638a878236418b23cc8b9b11a054aaa887"},
+ {file = "aiohttp-3.8.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42c89579f82e49db436b69c938ab3e1559e5a4409eb8639eb4143989bc390f2f"},
+ {file = "aiohttp-3.8.6-cp311-cp311-win32.whl", hash = "sha256:efd2fcf7e7b9d7ab16e6b7d54205beded0a9c8566cb30f09c1abe42b4e22bdcb"},
+ {file = "aiohttp-3.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:3b2ab182fc28e7a81f6c70bfbd829045d9480063f5ab06f6e601a3eddbbd49a0"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:fdee8405931b0615220e5ddf8cd7edd8592c606a8e4ca2a00704883c396e4479"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d25036d161c4fe2225d1abff2bd52c34ed0b1099f02c208cd34d8c05729882f0"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5d791245a894be071d5ab04bbb4850534261a7d4fd363b094a7b9963e8cdbd31"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0cccd1de239afa866e4ce5c789b3032442f19c261c7d8a01183fd956b1935349"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f13f60d78224f0dace220d8ab4ef1dbc37115eeeab8c06804fec11bec2bbd07"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a9b5a0606faca4f6cc0d338359d6fa137104c337f489cd135bb7fbdbccb1e39"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:13da35c9ceb847732bf5c6c5781dcf4780e14392e5d3b3c689f6d22f8e15ae31"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:4d4cbe4ffa9d05f46a28252efc5941e0462792930caa370a6efaf491f412bc66"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:229852e147f44da0241954fc6cb910ba074e597f06789c867cb7fb0621e0ba7a"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:713103a8bdde61d13490adf47171a1039fd880113981e55401a0f7b42c37d071"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:45ad816b2c8e3b60b510f30dbd37fe74fd4a772248a52bb021f6fd65dff809b6"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-win32.whl", hash = "sha256:2b8d4e166e600dcfbff51919c7a3789ff6ca8b3ecce16e1d9c96d95dd569eb4c"},
+ {file = "aiohttp-3.8.6-cp36-cp36m-win_amd64.whl", hash = "sha256:0912ed87fee967940aacc5306d3aa8ba3a459fcd12add0b407081fbefc931e53"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e2a988a0c673c2e12084f5e6ba3392d76c75ddb8ebc6c7e9ead68248101cd446"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebf3fd9f141700b510d4b190094db0ce37ac6361a6806c153c161dc6c041ccda"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3161ce82ab85acd267c8f4b14aa226047a6bee1e4e6adb74b798bd42c6ae1f80"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d95fc1bf33a9a81469aa760617b5971331cdd74370d1214f0b3109272c0e1e3c"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c43ecfef7deaf0617cee936836518e7424ee12cb709883f2c9a1adda63cc460"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca80e1b90a05a4f476547f904992ae81eda5c2c85c66ee4195bb8f9c5fb47f28"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:90c72ebb7cb3a08a7f40061079817133f502a160561d0675b0a6adf231382c92"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:bb54c54510e47a8c7c8e63454a6acc817519337b2b78606c4e840871a3e15349"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:de6a1c9f6803b90e20869e6b99c2c18cef5cc691363954c93cb9adeb26d9f3ae"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:a3628b6c7b880b181a3ae0a0683698513874df63783fd89de99b7b7539e3e8a8"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fc37e9aef10a696a5a4474802930079ccfc14d9f9c10b4662169671ff034b7df"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-win32.whl", hash = "sha256:f8ef51e459eb2ad8e7a66c1d6440c808485840ad55ecc3cafefadea47d1b1ba2"},
+ {file = "aiohttp-3.8.6-cp37-cp37m-win_amd64.whl", hash = "sha256:b2fe42e523be344124c6c8ef32a011444e869dc5f883c591ed87f84339de5976"},
+ {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e2ee0ac5a1f5c7dd3197de309adfb99ac4617ff02b0603fd1e65b07dc772e4b"},
+ {file = "aiohttp-3.8.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:01770d8c04bd8db568abb636c1fdd4f7140b284b8b3e0b4584f070180c1e5c62"},
+ {file = "aiohttp-3.8.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c68330a59506254b556b99a91857428cab98b2f84061260a67865f7f52899f5"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89341b2c19fb5eac30c341133ae2cc3544d40d9b1892749cdd25892bbc6ac951"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71783b0b6455ac8f34b5ec99d83e686892c50498d5d00b8e56d47f41b38fbe04"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f628dbf3c91e12f4d6c8b3f092069567d8eb17814aebba3d7d60c149391aee3a"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04691bc6601ef47c88f0255043df6f570ada1a9ebef99c34bd0b72866c217ae"},
+ {file = "aiohttp-3.8.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee912f7e78287516df155f69da575a0ba33b02dd7c1d6614dbc9463f43066e3"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9c19b26acdd08dd239e0d3669a3dddafd600902e37881f13fbd8a53943079dbc"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:99c5ac4ad492b4a19fc132306cd57075c28446ec2ed970973bbf036bcda1bcc6"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f0f03211fd14a6a0aed2997d4b1c013d49fb7b50eeb9ffdf5e51f23cfe2c77fa"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8d399dade330c53b4106160f75f55407e9ae7505263ea86f2ccca6bfcbdb4921"},
+ {file = "aiohttp-3.8.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ec4fd86658c6a8964d75426517dc01cbf840bbf32d055ce64a9e63a40fd7b771"},
+ {file = "aiohttp-3.8.6-cp38-cp38-win32.whl", hash = "sha256:33164093be11fcef3ce2571a0dccd9041c9a93fa3bde86569d7b03120d276c6f"},
+ {file = "aiohttp-3.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:bdf70bfe5a1414ba9afb9d49f0c912dc524cf60141102f3a11143ba3d291870f"},
+ {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d52d5dc7c6682b720280f9d9db41d36ebe4791622c842e258c9206232251ab2b"},
+ {file = "aiohttp-3.8.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4ac39027011414dbd3d87f7edb31680e1f430834c8cef029f11c66dad0670aa5"},
+ {file = "aiohttp-3.8.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3f5c7ce535a1d2429a634310e308fb7d718905487257060e5d4598e29dc17f0b"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b30e963f9e0d52c28f284d554a9469af073030030cef8693106d918b2ca92f54"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:918810ef188f84152af6b938254911055a72e0f935b5fbc4c1a4ed0b0584aed1"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:002f23e6ea8d3dd8d149e569fd580c999232b5fbc601c48d55398fbc2e582e8c"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4fcf3eabd3fd1a5e6092d1242295fa37d0354b2eb2077e6eb670accad78e40e1"},
+ {file = "aiohttp-3.8.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:255ba9d6d5ff1a382bb9a578cd563605aa69bec845680e21c44afc2670607a95"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d67f8baed00870aa390ea2590798766256f31dc5ed3ecc737debb6e97e2ede78"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:86f20cee0f0a317c76573b627b954c412ea766d6ada1a9fcf1b805763ae7feeb"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:39a312d0e991690ccc1a61f1e9e42daa519dcc34ad03eb6f826d94c1190190dd"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:e827d48cf802de06d9c935088c2924e3c7e7533377d66b6f31ed175c1620e05e"},
+ {file = "aiohttp-3.8.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd111d7fc5591ddf377a408ed9067045259ff2770f37e2d94e6478d0f3fc0c17"},
+ {file = "aiohttp-3.8.6-cp39-cp39-win32.whl", hash = "sha256:caf486ac1e689dda3502567eb89ffe02876546599bbf915ec94b1fa424eeffd4"},
+ {file = "aiohttp-3.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3f0e27e5b733803333bb2371249f41cf42bae8884863e8e8965ec69bebe53132"},
+ {file = "aiohttp-3.8.6.tar.gz", hash = "sha256:b0cf2a4501bff9330a8a5248b4ce951851e415bdcce9dc158e76cfd55e15085c"},
+]
+
+[package.dependencies]
+aiosignal = ">=1.1.2"
+async-timeout = ">=4.0.0a3,<5.0"
+attrs = ">=17.3.0"
+charset-normalizer = ">=2.0,<4.0"
+frozenlist = ">=1.1.1"
+multidict = ">=4.5,<7.0"
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["Brotli", "aiodns", "cchardet"]
+
+[[package]]
+name = "aiosignal"
+version = "1.3.1"
+description = "aiosignal: a list of registered asynchronous callbacks"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
+ {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
+]
+
+[package.dependencies]
+frozenlist = ">=1.1.0"
+
+[[package]]
+name = "appnope"
+version = "0.1.3"
+description = "Disable App Nap on macOS >= 10.9"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"},
+ {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"},
+]
+
+[[package]]
+name = "asttokens"
+version = "2.4.0"
+description = "Annotate AST trees with source code positions"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"},
+ {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"},
+]
+
+[package.dependencies]
+six = ">=1.12.0"
+
+[package.extras]
+test = ["astroid", "pytest"]
+
+[[package]]
+name = "async-timeout"
+version = "4.0.3"
+description = "Timeout context manager for asyncio programs"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
+ {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "backcall"
+version = "0.2.0"
+description = "Specifications for callback functions passed in to an API"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"},
+ {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
+]
+
+[[package]]
+name = "beautifulsoup4"
+version = "4.12.2"
+description = "Screen-scraping library"
+category = "main"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"},
+ {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
+[[package]]
+name = "bicephalus"
+version = "0.1.0"
+description = ""
+category = "main"
+optional = false
+python-versions = "^3.9"
+files = []
+develop = false
+
+[package.dependencies]
+aiohttp = "^3.8.4"
+opentelemetry-api = "^1.20.0"
+opentelemetry-sdk = "^1.20.0"
+
+[package.source]
+type = "git"
+url = "https://github.com/alexpdp7/bicephalus.git"
+reference = "HEAD"
+resolved_reference = "ca0c7e85d9f6926a08040fc492a31e5b129defa0"
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+category = "main"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.3.0.tar.gz", hash = "sha256:63563193aec44bce707e0c5ca64ff69fa72ed7cf34ce6e11d5127555756fd2f6"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:effe5406c9bd748a871dbcaf3ac69167c38d72db8c9baf3ff954c344f31c4cbe"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4162918ef3098851fcd8a628bf9b6a98d10c380725df9e04caf5ca6dd48c847a"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0570d21da019941634a531444364f2482e8db0b3425fcd5ac0c36565a64142c8"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5707a746c6083a3a74b46b3a631d78d129edab06195a92a8ece755aac25a3f3d"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:278c296c6f96fa686d74eb449ea1697f3c03dc28b75f873b65b5201806346a69"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a4b71f4d1765639372a3b32d2638197f5cd5221b19531f9245fcc9ee62d38f56"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f5969baeaea61c97efa706b9b107dcba02784b1601c74ac84f2a532ea079403e"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3f93dab657839dfa61025056606600a11d0b696d79386f974e459a3fbc568ec"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:db756e48f9c5c607b5e33dd36b1d5872d0422e960145b08ab0ec7fd420e9d649"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:232ac332403e37e4a03d209a3f92ed9071f7d3dbda70e2a5e9cff1c4ba9f0678"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e5c1502d4ace69a179305abb3f0bb6141cbe4714bc9b31d427329a95acfc8bdd"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:2502dd2a736c879c0f0d3e2161e74d9907231e25d35794584b1ca5284e43f596"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23e8565ab7ff33218530bc817922fae827420f143479b753104ab801145b1d5b"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-win32.whl", hash = "sha256:1872d01ac8c618a8da634e232f24793883d6e456a66593135aeafe3784b0848d"},
+ {file = "charset_normalizer-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:557b21a44ceac6c6b9773bc65aa1b4cc3e248a5ad2f5b914b91579a32e22204d"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d7eff0f27edc5afa9e405f7165f85a6d782d308f3b6b9d96016c010597958e63"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6a685067d05e46641d5d1623d7c7fdf15a357546cbb2f71b0ebde91b175ffc3e"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0d3d5b7db9ed8a2b11a774db2bbea7ba1884430a205dbd54a32d61d7c2a190fa"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2935ffc78db9645cb2086c2f8f4cfd23d9b73cc0dc80334bc30aac6f03f68f8c"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fe359b2e3a7729010060fbca442ca225280c16e923b37db0e955ac2a2b72a05"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:380c4bde80bce25c6e4f77b19386f5ec9db230df9f2f2ac1e5ad7af2caa70459"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0d1e3732768fecb052d90d62b220af62ead5748ac51ef61e7b32c266cac9293"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b2919306936ac6efb3aed1fbf81039f7087ddadb3160882a57ee2ff74fd2382"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f8888e31e3a85943743f8fc15e71536bda1c81d5aa36d014a3c0c44481d7db6e"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:82eb849f085624f6a607538ee7b83a6d8126df6d2f7d3b319cb837b289123078"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7b8b8bf1189b3ba9b8de5c8db4d541b406611a71a955bbbd7385bbc45fcb786c"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5adf257bd58c1b8632046bbe43ee38c04e1038e9d37de9c57a94d6bd6ce5da34"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c350354efb159b8767a6244c166f66e67506e06c8924ed74669b2c70bc8735b1"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-win32.whl", hash = "sha256:02af06682e3590ab952599fbadac535ede5d60d78848e555aa58d0c0abbde786"},
+ {file = "charset_normalizer-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:86d1f65ac145e2c9ed71d8ffb1905e9bba3a91ae29ba55b4c46ae6fc31d7c0d4"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3b447982ad46348c02cb90d230b75ac34e9886273df3a93eec0539308a6296d7"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:abf0d9f45ea5fb95051c8bfe43cb40cda383772f7e5023a83cc481ca2604d74e"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b09719a17a2301178fac4470d54b1680b18a5048b481cb8890e1ef820cb80455"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3d9b48ee6e3967b7901c052b670c7dda6deb812c309439adaffdec55c6d7b78"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:edfe077ab09442d4ef3c52cb1f9dab89bff02f4524afc0acf2d46be17dc479f5"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3debd1150027933210c2fc321527c2299118aa929c2f5a0a80ab6953e3bd1908"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86f63face3a527284f7bb8a9d4f78988e3c06823f7bea2bd6f0e0e9298ca0403"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:24817cb02cbef7cd499f7c9a2735286b4782bd47a5b3516a0e84c50eab44b98e"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c71f16da1ed8949774ef79f4a0260d28b83b3a50c6576f8f4f0288d109777989"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9cf3126b85822c4e53aa28c7ec9869b924d6fcfb76e77a45c44b83d91afd74f9"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b3b2316b25644b23b54a6f6401074cebcecd1244c0b8e80111c9a3f1c8e83d65"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:03680bb39035fbcffe828eae9c3f8afc0428c91d38e7d61aa992ef7a59fb120e"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cc152c5dd831641e995764f9f0b6589519f6f5123258ccaca8c6d34572fefa8"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-win32.whl", hash = "sha256:b8f3307af845803fb0b060ab76cf6dd3a13adc15b6b451f54281d25911eb92df"},
+ {file = "charset_normalizer-3.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:8eaf82f0eccd1505cf39a45a6bd0a8cf1c70dcfc30dba338207a969d91b965c0"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dc45229747b67ffc441b3de2f3ae5e62877a282ea828a5bdb67883c4ee4a8810"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f4a0033ce9a76e391542c182f0d48d084855b5fcba5010f707c8e8c34663d77"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada214c6fa40f8d800e575de6b91a40d0548139e5dc457d2ebb61470abf50186"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1121de0e9d6e6ca08289583d7491e7fcb18a439305b34a30b20d8215922d43c"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1063da2c85b95f2d1a430f1c33b55c9c17ffaf5e612e10aeaad641c55a9e2b9d"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70f1d09c0d7748b73290b29219e854b3207aea922f839437870d8cc2168e31cc"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:250c9eb0f4600361dd80d46112213dff2286231d92d3e52af1e5a6083d10cad9"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:750b446b2ffce1739e8578576092179160f6d26bd5e23eb1789c4d64d5af7dc7"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:fc52b79d83a3fe3a360902d3f5d79073a993597d48114c29485e9431092905d8"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:588245972aca710b5b68802c8cad9edaa98589b1b42ad2b53accd6910dad3545"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e39c7eb31e3f5b1f88caff88bcff1b7f8334975b46f6ac6e9fc725d829bc35d4"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-win32.whl", hash = "sha256:abecce40dfebbfa6abf8e324e1860092eeca6f7375c8c4e655a8afb61af58f2c"},
+ {file = "charset_normalizer-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:24a91a981f185721542a0b7c92e9054b7ab4fea0508a795846bc5b0abf8118d4"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:67b8cc9574bb518ec76dc8e705d4c39ae78bb96237cb533edac149352c1f39fe"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac71b2977fb90c35d41c9453116e283fac47bb9096ad917b8819ca8b943abecd"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3ae38d325b512f63f8da31f826e6cb6c367336f95e418137286ba362925c877e"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:542da1178c1c6af8873e143910e2269add130a299c9106eef2594e15dae5e482"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30a85aed0b864ac88309b7d94be09f6046c834ef60762a8833b660139cfbad13"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aae32c93e0f64469f74ccc730a7cb21c7610af3a775157e50bbd38f816536b38"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15b26ddf78d57f1d143bdf32e820fd8935d36abe8a25eb9ec0b5a71c82eb3895"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f5d10bae5d78e4551b7be7a9b29643a95aded9d0f602aa2ba584f0388e7a557"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:249c6470a2b60935bafd1d1d13cd613f8cd8388d53461c67397ee6a0f5dce741"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c5a74c359b2d47d26cdbbc7845e9662d6b08a1e915eb015d044729e92e7050b7"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b5bcf60a228acae568e9911f410f9d9e0d43197d030ae5799e20dca8df588287"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:187d18082694a29005ba2944c882344b6748d5be69e3a89bf3cc9d878e548d5a"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:81bf654678e575403736b85ba3a7867e31c2c30a69bc57fe88e3ace52fb17b89"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-win32.whl", hash = "sha256:85a32721ddde63c9df9ebb0d2045b9691d9750cb139c161c80e500d210f5e26e"},
+ {file = "charset_normalizer-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:468d2a840567b13a590e67dd276c570f8de00ed767ecc611994c301d0f8c014f"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e0fc42822278451bc13a2e8626cf2218ba570f27856b536e00cfa53099724828"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:09c77f964f351a7369cc343911e0df63e762e42bac24cd7d18525961c81754f4"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12ebea541c44fdc88ccb794a13fe861cc5e35d64ed689513a5c03d05b53b7c82"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:805dfea4ca10411a5296bcc75638017215a93ffb584c9e344731eef0dcfb026a"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:96c2b49eb6a72c0e4991d62406e365d87067ca14c1a729a870d22354e6f68115"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:aaf7b34c5bc56b38c931a54f7952f1ff0ae77a2e82496583b247f7c969eb1479"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:619d1c96099be5823db34fe89e2582b336b5b074a7f47f819d6b3a57ff7bdb86"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0ac5e7015a5920cfce654c06618ec40c33e12801711da6b4258af59a8eff00a"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:93aa7eef6ee71c629b51ef873991d6911b906d7312c6e8e99790c0f33c576f89"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7966951325782121e67c81299a031f4c115615e68046f79b85856b86ebffc4cd"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:02673e456dc5ab13659f85196c534dc596d4ef260e4d86e856c3b2773ce09843"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:c2af80fb58f0f24b3f3adcb9148e6203fa67dd3f61c4af146ecad033024dde43"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:153e7b6e724761741e0974fc4dcd406d35ba70b92bfe3fedcb497226c93b9da7"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-win32.whl", hash = "sha256:d47ecf253780c90ee181d4d871cd655a789da937454045b17b5798da9393901a"},
+ {file = "charset_normalizer-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:d97d85fa63f315a8bdaba2af9a6a686e0eceab77b3089af45133252618e70884"},
+ {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "decorator"
+version = "5.1.1"
+description = "Decorators for Humans"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"},
+ {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"},
+]
+
+[[package]]
+name = "deprecated"
+version = "1.2.14"
+description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
+ {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
+]
+
+[package.dependencies]
+wrapt = ">=1.10,<2"
+
+[package.extras]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.1.3"
+description = "Backport of PEP 654 (exception groups)"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
+ {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "executing"
+version = "2.0.0"
+description = "Get the currently executing AST node of a frame, and other information"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "executing-2.0.0-py2.py3-none-any.whl", hash = "sha256:06df6183df67389625f4e763921c6cf978944721abf3e714000200aab95b0657"},
+ {file = "executing-2.0.0.tar.gz", hash = "sha256:0ff053696fdeef426cda5bd18eacd94f82c91f49823a2e9090124212ceea9b08"},
+]
+
+[package.extras]
+tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"]
+
+[[package]]
+name = "feedgen"
+version = "0.9.0"
+description = "Feed Generator (ATOM, RSS, Podcasts)"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "feedgen-0.9.0.tar.gz", hash = "sha256:8e811bdbbed6570034950db23a4388453628a70e689a6e8303ccec430f5a804a"},
+]
+
+[package.dependencies]
+lxml = "*"
+python-dateutil = "*"
+
+[[package]]
+name = "frozenlist"
+version = "1.4.0"
+description = "A list-like structure which implements collections.abc.MutableSequence"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"},
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"},
+ {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"},
+ {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"},
+ {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"},
+ {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"},
+ {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"},
+ {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"},
+ {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"},
+ {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"},
+]
+
+[[package]]
+name = "htmlgenerator"
+version = "1.2.28"
+description = "Declarative HTML templating system with lazy rendering"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "htmlgenerator-1.2.28-py3-none-any.whl", hash = "sha256:3299b67da0d4bb7b4c640dd5dd7a41743d78cf3264c85564010444a691483d0b"},
+ {file = "htmlgenerator-1.2.28.tar.gz", hash = "sha256:e127d46311b9d28a6be6ce43417dc2a338b6c1a6f0aa5d2ba14319c7f5beb295"},
+]
+
+[package.extras]
+all = ["beautifulsoup4", "black", "lxml"]
+
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "importlib-metadata"
+version = "6.8.0"
+description = "Read metadata from Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"},
+ {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"},
+]
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "ipython"
+version = "8.16.1"
+description = "IPython: Productive Interactive Computing"
+category = "dev"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "ipython-8.16.1-py3-none-any.whl", hash = "sha256:0852469d4d579d9cd613c220af7bf0c9cc251813e12be647cb9d463939db9b1e"},
+ {file = "ipython-8.16.1.tar.gz", hash = "sha256:ad52f58fca8f9f848e256c629eff888efc0528c12fe0f8ec14f33205f23ef938"},
+]
+
+[package.dependencies]
+appnope = {version = "*", markers = "sys_platform == \"darwin\""}
+backcall = "*"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+decorator = "*"
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
+jedi = ">=0.16"
+matplotlib-inline = "*"
+pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
+pickleshare = "*"
+prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0"
+pygments = ">=2.4.0"
+stack-data = "*"
+traitlets = ">=5"
+typing-extensions = {version = "*", markers = "python_version < \"3.10\""}
+
+[package.extras]
+all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
+black = ["black"]
+doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
+kernel = ["ipykernel"]
+nbconvert = ["nbconvert"]
+nbformat = ["nbformat"]
+notebook = ["ipywidgets", "notebook"]
+parallel = ["ipyparallel"]
+qtconsole = ["qtconsole"]
+test = ["pytest (<7.1)", "pytest-asyncio", "testpath"]
+test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]
+
+[[package]]
+name = "jedi"
+version = "0.19.1"
+description = "An autocompletion tool for Python that can be used for text editors."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
+ {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
+]
+
+[package.dependencies]
+parso = ">=0.8.3,<0.9.0"
+
+[package.extras]
+docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
+qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
+testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+
+[[package]]
+name = "lxml"
+version = "4.9.3"
+description = "Powerful and Pythonic XML processing library combining libxml2/libxslt with the ElementTree API."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*"
+files = [
+ {file = "lxml-4.9.3-cp27-cp27m-macosx_11_0_x86_64.whl", hash = "sha256:b0a545b46b526d418eb91754565ba5b63b1c0b12f9bd2f808c852d9b4b2f9b5c"},
+ {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:075b731ddd9e7f68ad24c635374211376aa05a281673ede86cbe1d1b3455279d"},
+ {file = "lxml-4.9.3-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1e224d5755dba2f4a9498e150c43792392ac9b5380aa1b845f98a1618c94eeef"},
+ {file = "lxml-4.9.3-cp27-cp27m-win32.whl", hash = "sha256:2c74524e179f2ad6d2a4f7caf70e2d96639c0954c943ad601a9e146c76408ed7"},
+ {file = "lxml-4.9.3-cp27-cp27m-win_amd64.whl", hash = "sha256:4f1026bc732b6a7f96369f7bfe1a4f2290fb34dce00d8644bc3036fb351a4ca1"},
+ {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c0781a98ff5e6586926293e59480b64ddd46282953203c76ae15dbbbf302e8bb"},
+ {file = "lxml-4.9.3-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:cef2502e7e8a96fe5ad686d60b49e1ab03e438bd9123987994528febd569868e"},
+ {file = "lxml-4.9.3-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b86164d2cff4d3aaa1f04a14685cbc072efd0b4f99ca5708b2ad1b9b5988a991"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:42871176e7896d5d45138f6d28751053c711ed4d48d8e30b498da155af39aebd"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:ae8b9c6deb1e634ba4f1930eb67ef6e6bf6a44b6eb5ad605642b2d6d5ed9ce3c"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:411007c0d88188d9f621b11d252cce90c4a2d1a49db6c068e3c16422f306eab8"},
+ {file = "lxml-4.9.3-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cd47b4a0d41d2afa3e58e5bf1f62069255aa2fd6ff5ee41604418ca925911d76"},
+ {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e2cb47860da1f7e9a5256254b74ae331687b9672dfa780eed355c4c9c3dbd23"},
+ {file = "lxml-4.9.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1247694b26342a7bf47c02e513d32225ededd18045264d40758abeb3c838a51f"},
+ {file = "lxml-4.9.3-cp310-cp310-win32.whl", hash = "sha256:cdb650fc86227eba20de1a29d4b2c1bfe139dc75a0669270033cb2ea3d391b85"},
+ {file = "lxml-4.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:97047f0d25cd4bcae81f9ec9dc290ca3e15927c192df17331b53bebe0e3ff96d"},
+ {file = "lxml-4.9.3-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:1f447ea5429b54f9582d4b955f5f1985f278ce5cf169f72eea8afd9502973dd5"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:57d6ba0ca2b0c462f339640d22882acc711de224d769edf29962b09f77129cbf"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:9767e79108424fb6c3edf8f81e6730666a50feb01a328f4a016464a5893f835a"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:71c52db65e4b56b8ddc5bb89fb2e66c558ed9d1a74a45ceb7dcb20c191c3df2f"},
+ {file = "lxml-4.9.3-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d73d8ecf8ecf10a3bd007f2192725a34bd62898e8da27eb9d32a58084f93962b"},
+ {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0a3d3487f07c1d7f150894c238299934a2a074ef590b583103a45002035be120"},
+ {file = "lxml-4.9.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e28c51fa0ce5674be9f560c6761c1b441631901993f76700b1b30ca6c8378d6"},
+ {file = "lxml-4.9.3-cp311-cp311-win32.whl", hash = "sha256:0bfd0767c5c1de2551a120673b72e5d4b628737cb05414f03c3277bf9bed3305"},
+ {file = "lxml-4.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:25f32acefac14ef7bd53e4218fe93b804ef6f6b92ffdb4322bb6d49d94cad2bc"},
+ {file = "lxml-4.9.3-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:d3ff32724f98fbbbfa9f49d82852b159e9784d6094983d9a8b7f2ddaebb063d4"},
+ {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48d6ed886b343d11493129e019da91d4039826794a3e3027321c56d9e71505be"},
+ {file = "lxml-4.9.3-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9a92d3faef50658dd2c5470af249985782bf754c4e18e15afb67d3ab06233f13"},
+ {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b4e4bc18382088514ebde9328da057775055940a1f2e18f6ad2d78aa0f3ec5b9"},
+ {file = "lxml-4.9.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fc9b106a1bf918db68619fdcd6d5ad4f972fdd19c01d19bdb6bf63f3589a9ec5"},
+ {file = "lxml-4.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d37017287a7adb6ab77e1c5bee9bcf9660f90ff445042b790402a654d2ad81d8"},
+ {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56dc1f1ebccc656d1b3ed288f11e27172a01503fc016bcabdcbc0978b19352b7"},
+ {file = "lxml-4.9.3-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:578695735c5a3f51569810dfebd05dd6f888147a34f0f98d4bb27e92b76e05c2"},
+ {file = "lxml-4.9.3-cp35-cp35m-win32.whl", hash = "sha256:704f61ba8c1283c71b16135caf697557f5ecf3e74d9e453233e4771d68a1f42d"},
+ {file = "lxml-4.9.3-cp35-cp35m-win_amd64.whl", hash = "sha256:c41bfca0bd3532d53d16fd34d20806d5c2b1ace22a2f2e4c0008570bf2c58833"},
+ {file = "lxml-4.9.3-cp36-cp36m-macosx_11_0_x86_64.whl", hash = "sha256:64f479d719dc9f4c813ad9bb6b28f8390360660b73b2e4beb4cb0ae7104f1c12"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:dd708cf4ee4408cf46a48b108fb9427bfa00b9b85812a9262b5c668af2533ea5"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c31c7462abdf8f2ac0577d9f05279727e698f97ecbb02f17939ea99ae8daa98"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e3cd95e10c2610c360154afdc2f1480aea394f4a4f1ea0a5eacce49640c9b190"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_28_x86_64.whl", hash = "sha256:4930be26af26ac545c3dffb662521d4e6268352866956672231887d18f0eaab2"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4aec80cde9197340bc353d2768e2a75f5f60bacda2bab72ab1dc499589b3878c"},
+ {file = "lxml-4.9.3-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:14e019fd83b831b2e61baed40cab76222139926b1fb5ed0e79225bc0cae14584"},
+ {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:0c0850c8b02c298d3c7006b23e98249515ac57430e16a166873fc47a5d549287"},
+ {file = "lxml-4.9.3-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:aca086dc5f9ef98c512bac8efea4483eb84abbf926eaeedf7b91479feb092458"},
+ {file = "lxml-4.9.3-cp36-cp36m-win32.whl", hash = "sha256:50baa9c1c47efcaef189f31e3d00d697c6d4afda5c3cde0302d063492ff9b477"},
+ {file = "lxml-4.9.3-cp36-cp36m-win_amd64.whl", hash = "sha256:bef4e656f7d98aaa3486d2627e7d2df1157d7e88e7efd43a65aa5dd4714916cf"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:46f409a2d60f634fe550f7133ed30ad5321ae2e6630f13657fb9479506b00601"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:4c28a9144688aef80d6ea666c809b4b0e50010a2aca784c97f5e6bf143d9f129"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:141f1d1a9b663c679dc524af3ea1773e618907e96075262726c7612c02b149a4"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:53ace1c1fd5a74ef662f844a0413446c0629d151055340e9893da958a374f70d"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:17a753023436a18e27dd7769e798ce302963c236bc4114ceee5b25c18c52c693"},
+ {file = "lxml-4.9.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d298a1bd60c067ea75d9f684f5f3992c9d6766fadbc0bcedd39750bf344c2f4"},
+ {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:081d32421db5df44c41b7f08a334a090a545c54ba977e47fd7cc2deece78809a"},
+ {file = "lxml-4.9.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:23eed6d7b1a3336ad92d8e39d4bfe09073c31bfe502f20ca5116b2a334f8ec02"},
+ {file = "lxml-4.9.3-cp37-cp37m-win32.whl", hash = "sha256:1509dd12b773c02acd154582088820893109f6ca27ef7291b003d0e81666109f"},
+ {file = "lxml-4.9.3-cp37-cp37m-win_amd64.whl", hash = "sha256:120fa9349a24c7043854c53cae8cec227e1f79195a7493e09e0c12e29f918e52"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4d2d1edbca80b510443f51afd8496be95529db04a509bc8faee49c7b0fb6d2cc"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8d7e43bd40f65f7d97ad8ef5c9b1778943d02f04febef12def25f7583d19baac"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:71d66ee82e7417828af6ecd7db817913cb0cf9d4e61aa0ac1fde0583d84358db"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:6fc3c450eaa0b56f815c7b62f2b7fba7266c4779adcf1cece9e6deb1de7305ce"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:65299ea57d82fb91c7f019300d24050c4ddeb7c5a190e076b5f48a2b43d19c42"},
+ {file = "lxml-4.9.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:eadfbbbfb41b44034a4c757fd5d70baccd43296fb894dba0295606a7cf3124aa"},
+ {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3e9bdd30efde2b9ccfa9cb5768ba04fe71b018a25ea093379c857c9dad262c40"},
+ {file = "lxml-4.9.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fcdd00edfd0a3001e0181eab3e63bd5c74ad3e67152c84f93f13769a40e073a7"},
+ {file = "lxml-4.9.3-cp38-cp38-win32.whl", hash = "sha256:57aba1bbdf450b726d58b2aea5fe47c7875f5afb2c4a23784ed78f19a0462574"},
+ {file = "lxml-4.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:92af161ecbdb2883c4593d5ed4815ea71b31fafd7fd05789b23100d081ecac96"},
+ {file = "lxml-4.9.3-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9bb6ad405121241e99a86efff22d3ef469024ce22875a7ae045896ad23ba2340"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8ed74706b26ad100433da4b9d807eae371efaa266ffc3e9191ea436087a9d6a7"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:fbf521479bcac1e25a663df882c46a641a9bff6b56dc8b0fafaebd2f66fb231b"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:303bf1edce6ced16bf67a18a1cf8339d0db79577eec5d9a6d4a80f0fb10aa2da"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:5515edd2a6d1a5a70bfcdee23b42ec33425e405c5b351478ab7dc9347228f96e"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:690dafd0b187ed38583a648076865d8c229661ed20e48f2335d68e2cf7dc829d"},
+ {file = "lxml-4.9.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b6420a005548ad52154c8ceab4a1290ff78d757f9e5cbc68f8c77089acd3c432"},
+ {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bb3bb49c7a6ad9d981d734ef7c7193bc349ac338776a0360cc671eaee89bcf69"},
+ {file = "lxml-4.9.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d27be7405547d1f958b60837dc4c1007da90b8b23f54ba1f8b728c78fdb19d50"},
+ {file = "lxml-4.9.3-cp39-cp39-win32.whl", hash = "sha256:8df133a2ea5e74eef5e8fc6f19b9e085f758768a16e9877a60aec455ed2609b2"},
+ {file = "lxml-4.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:4dd9a263e845a72eacb60d12401e37c616438ea2e5442885f65082c276dfb2b2"},
+ {file = "lxml-4.9.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6689a3d7fd13dc687e9102a27e98ef33730ac4fe37795d5036d18b4d527abd35"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:f6bdac493b949141b733c5345b6ba8f87a226029cbabc7e9e121a413e49441e0"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:05186a0f1346ae12553d66df1cfce6f251589fea3ad3da4f3ef4e34b2d58c6a3"},
+ {file = "lxml-4.9.3-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c2006f5c8d28dee289f7020f721354362fa304acbaaf9745751ac4006650254b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-macosx_11_0_x86_64.whl", hash = "sha256:5c245b783db29c4e4fbbbfc9c5a78be496c9fea25517f90606aa1f6b2b3d5f7b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:4fb960a632a49f2f089d522f70496640fdf1218f1243889da3822e0a9f5f3ba7"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:50670615eaf97227d5dc60de2dc99fb134a7130d310d783314e7724bf163f75d"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9719fe17307a9e814580af1f5c6e05ca593b12fb7e44fe62450a5384dbf61b4b"},
+ {file = "lxml-4.9.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:3331bece23c9ee066e0fb3f96c61322b9e0f54d775fccefff4c38ca488de283a"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-macosx_11_0_x86_64.whl", hash = "sha256:ed667f49b11360951e201453fc3967344d0d0263aa415e1619e85ae7fd17b4e0"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:8b77946fd508cbf0fccd8e400a7f71d4ac0e1595812e66025bac475a8e811694"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4da8ca0c0c0aea88fd46be8e44bd49716772358d648cce45fe387f7b92374a7"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fe4bda6bd4340caa6e5cf95e73f8fea5c4bfc55763dd42f1b50a94c1b4a2fbd4"},
+ {file = "lxml-4.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f3df3db1d336b9356dd3112eae5f5c2b8b377f3bc826848567f10bfddfee77e9"},
+ {file = "lxml-4.9.3.tar.gz", hash = "sha256:48628bd53a426c9eb9bc066a923acaa0878d1e86129fd5359aee99285f4eed9c"},
+]
+
+[package.extras]
+cssselect = ["cssselect (>=0.7)"]
+html5 = ["html5lib"]
+htmlsoup = ["BeautifulSoup4"]
+source = ["Cython (>=0.29.35)"]
+
+[[package]]
+name = "matplotlib-inline"
+version = "0.1.6"
+description = "Inline Matplotlib backend for Jupyter"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"},
+ {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"},
+]
+
+[package.dependencies]
+traitlets = "*"
+
+[[package]]
+name = "multidict"
+version = "6.0.4"
+description = "multidict implementation"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
+ {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
+ {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
+ {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
+ {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
+ {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"},
+ {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"},
+ {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
+ {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
+ {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
+ {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
+ {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
+ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
+]
+
+[[package]]
+name = "opentelemetry-api"
+version = "1.20.0"
+description = "OpenTelemetry Python API"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "opentelemetry_api-1.20.0-py3-none-any.whl", hash = "sha256:982b76036fec0fdaf490ae3dfd9f28c81442a33414f737abc687a32758cdcba5"},
+ {file = "opentelemetry_api-1.20.0.tar.gz", hash = "sha256:06abe351db7572f8afdd0fb889ce53f3c992dbf6f6262507b385cc1963e06983"},
+]
+
+[package.dependencies]
+deprecated = ">=1.2.6"
+importlib-metadata = ">=6.0,<7.0"
+
+[[package]]
+name = "opentelemetry-sdk"
+version = "1.20.0"
+description = "OpenTelemetry Python SDK"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "opentelemetry_sdk-1.20.0-py3-none-any.whl", hash = "sha256:f2230c276ff4c63ea09b3cb2e2ac6b1265f90af64e8d16bbf275c81a9ce8e804"},
+ {file = "opentelemetry_sdk-1.20.0.tar.gz", hash = "sha256:702e432a457fa717fd2ddfd30640180e69938f85bb7fec3e479f85f61c1843f8"},
+]
+
+[package.dependencies]
+opentelemetry-api = "1.20.0"
+opentelemetry-semantic-conventions = "0.41b0"
+typing-extensions = ">=3.7.4"
+
+[[package]]
+name = "opentelemetry-semantic-conventions"
+version = "0.41b0"
+description = "OpenTelemetry Semantic Conventions"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "opentelemetry_semantic_conventions-0.41b0-py3-none-any.whl", hash = "sha256:45404391ed9e50998183a4925ad1b497c01c143f06500c3b9c3d0013492bb0f2"},
+ {file = "opentelemetry_semantic_conventions-0.41b0.tar.gz", hash = "sha256:0ce5b040b8a3fc816ea5879a743b3d6fe5db61f6485e4def94c6ee4d402e1eb7"},
+]
+
+[[package]]
+name = "packaging"
+version = "23.2"
+description = "Core utilities for Python packages"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
+]
+
+[[package]]
+name = "parso"
+version = "0.8.3"
+description = "A Python Parser"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"},
+ {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"},
+]
+
+[package.extras]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["docopt", "pytest (<6.0.0)"]
+
+[[package]]
+name = "pexpect"
+version = "4.8.0"
+description = "Pexpect allows easy control of interactive console applications."
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
+ {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
+]
+
+[package.dependencies]
+ptyprocess = ">=0.5"
+
+[[package]]
+name = "pickleshare"
+version = "0.7.5"
+description = "Tiny 'shelve'-like database with concurrency support"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"},
+ {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"},
+]
+
+[[package]]
+name = "pluggy"
+version = "1.3.0"
+description = "plugin and hook calling mechanisms for python"
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
+ {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "prompt-toolkit"
+version = "3.0.39"
+description = "Library for building powerful interactive command lines in Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"},
+ {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"},
+]
+
+[package.dependencies]
+wcwidth = "*"
+
+[[package]]
+name = "ptyprocess"
+version = "0.7.0"
+description = "Run a subprocess in a pseudo terminal"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
+ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
+]
+
+[[package]]
+name = "pure-eval"
+version = "0.2.2"
+description = "Safely evaluate AST nodes without side effects"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"},
+ {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
+[[package]]
+name = "pygments"
+version = "2.16.1"
+description = "Pygments is a syntax highlighting package written in Python."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"},
+ {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"},
+]
+
+[package.extras]
+plugins = ["importlib-metadata"]
+
+[[package]]
+name = "pytest"
+version = "7.4.2"
+description = "pytest: simple powerful testing with Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.2-py3-none-any.whl", hash = "sha256:1d881c6124e08ff0a1bb75ba3ec0bfd8b5354a01c194ddd5a0a870a48d99b002"},
+ {file = "pytest-7.4.2.tar.gz", hash = "sha256:a766259cfab564a2ad52cb1aae1b881a75c3eb7e34ca3779697c23ed47c47069"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "soupsieve"
+version = "2.5"
+description = "A modern CSS selector implementation for Beautiful Soup."
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"},
+ {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"},
+]
+
+[[package]]
+name = "stack-data"
+version = "0.6.3"
+description = "Extract data from python stack frames and tracebacks for informative displays"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
+ {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
+]
+
+[package.dependencies]
+asttokens = ">=2.1.0"
+executing = ">=1.2.0"
+pure-eval = "*"
+
+[package.extras]
+tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "traitlets"
+version = "5.11.2"
+description = "Traitlets Python configuration system"
+category = "dev"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "traitlets-5.11.2-py3-none-any.whl", hash = "sha256:98277f247f18b2c5cabaf4af369187754f4fb0e85911d473f72329db8a7f4fae"},
+ {file = "traitlets-5.11.2.tar.gz", hash = "sha256:7564b5bf8d38c40fa45498072bf4dc5e8346eb087bbf1e2ae2d8774f6a0f078e"},
+]
+
+[package.extras]
+docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
+test = ["argcomplete (>=3.0.3)", "mypy (>=1.5.1)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.8.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
+ {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
+]
+
+[[package]]
+name = "wcwidth"
+version = "0.2.8"
+description = "Measures the displayed width of unicode strings in a terminal"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "wcwidth-0.2.8-py2.py3-none-any.whl", hash = "sha256:77f719e01648ed600dfa5402c347481c0992263b81a027344f3e1ba25493a704"},
+ {file = "wcwidth-0.2.8.tar.gz", hash = "sha256:8705c569999ffbb4f6a87c6d1b80f324bd6db952f5eb0b95bc07517f4c1813d4"},
+]
+
+[[package]]
+name = "wrapt"
+version = "1.15.0"
+description = "Module for decorators, wrappers and monkey patching."
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
+ {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
+ {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
+ {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
+ {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
+ {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
+ {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
+ {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
+ {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
+ {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
+ {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
+ {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
+ {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
+]
+
+[[package]]
+name = "yarl"
+version = "1.9.2"
+description = "Yet another URL library"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"},
+ {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"},
+ {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"},
+ {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"},
+ {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"},
+ {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"},
+ {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"},
+ {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
+ {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
+ {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
+ {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
+ {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
+ {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
+]
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+
+[[package]]
+name = "zipp"
+version = "3.17.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+category = "main"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"},
+ {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.9"
+content-hash = "97fc5ab9e66a2dd243b910c6408cbf77194f61b070c35ef9d2aca9ea1c329c56"
diff --git a/blog_experiment/pyproject.toml b/blog_experiment/pyproject.toml
new file mode 100644
index 00000000..b236e8ea
--- /dev/null
+++ b/blog_experiment/pyproject.toml
@@ -0,0 +1,27 @@
+[tool.poetry]
+name = "blog"
+version = "0.1.0"
+description = ""
+authors = ["alex <alex@pdp7.net>"]
+packages = [{include = "blog"}]
+
+[tool.poetry.dependencies]
+python = "^3.9"
+# bicephalus = { path = "../../bicephalus", develop = true }
+bicephalus = { git = "https://github.com/alexpdp7/bicephalus.git" }
+htmlgenerator = "^1.2.28"
+beautifulsoup4 = "^4.12.2"
+feedgen = "^0.9.0"
+lxml = "^4.9.3"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4.2"
+ipython = "^8.15.0"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.pytest.ini_options]
+addopts = "--doctest-modules"
+doctest_optionflags = "NORMALIZE_WHITESPACE"
diff --git a/blog_experiment/test_html_rendering.py b/blog_experiment/test_html_rendering.py
new file mode 100644
index 00000000..71bdd399
--- /dev/null
+++ b/blog_experiment/test_html_rendering.py
@@ -0,0 +1,10 @@
+import pathlib
+
+import pytest
+
+from blog import blog_pages
+
+
+@pytest.mark.parametrize("entry", list(pathlib.Path("content").glob("*/*/*.gmi")))
+def test_html_rendering(entry):
+ blog_pages.Entry(entry).html()
diff --git a/emacs/README.md b/emacs/README.md
new file mode 100644
index 00000000..d26b89d8
--- /dev/null
+++ b/emacs/README.md
@@ -0,0 +1,34 @@
+# Don't fear the Emacs
+
+If you are here, you are probably thinking about adopting Emacs as your editor.
+Perhaps you are facing analysis paralysis, wondering what's the best way to do it.
+
+Just do it!
+It's a bit alien at first, but I didn't need much time to do all my editing in Emacs.
+I haven't learnt Emacs Lisp and I haven't adopted any large configuration package.
+
+[My `emacs.el` right now is 80 lines](https://github.com/alexpdp7/alexpdp7/blob/811f60a331da44c9621d771ccc34ee0c0555080e/emacs/emacs.el).
+Perhaps when you read this, my current config will be much bigger.
+But I'm definitely happy today with my 80-line config.
+
+You can start without a configuration.
+Whenever you can't do something, search the Internet.
+You will quickly learn the hotkeys you need the most.
+Once you can search, undo, cut, copy, and paste, you can take your time with the rest.
+Don't avoid the menus.
+Sometimes it's just easier to hit F10 and find something in the menus.
+You can also M-x to execute commands, like `indent-region`.
+
+When you get to the point where you really need to add packages, I do recommend you use [straight.el](https://github.com/radian-software/straight.el), it makes installing packages easy.
+Maybe it has some drawbacks, but with straight.el, I've been able to create a configuration that I feel is productive.
+(Although it seems to have some issues with corporate firewalls. But I added comments about solving that.)
+
+Some of the stuff in my `emacs.el` is maybe not critical, like Helm and Projectile.
+I really like Projectile, but often I just run `emacs $file` in a new terminal tab.
+It's easy, and you don't need to learn a ton of window/buffer/etc. management.
+(Be sure to check [emacs.bash](https://github.com/alexpdp7/alexpdp7/blob/master/emacs/emacs.bash) for something you can source in your bash to prevent frequent Emacs startups.)
+
+Much of the other stuff is support for things I do: AsciiDoc, Vale, Rust, Python, Java, YAML, Ansible, Puppet.
+You probably need other plugins, and maybe you don't need them right now.
+
+Maybe try out some of the large configurations, to learn what fancy stuff is available and add it as you become comfortable with the previous thing you configured.
diff --git a/emacs/emacs.bash b/emacs/emacs.bash
new file mode 100644
index 00000000..2312f786
--- /dev/null
+++ b/emacs/emacs.bash
@@ -0,0 +1,5 @@
+# source this file from your bash startup script
+
+alias emacs="emacsclient --create-frame -t"
+export ALTERNATE_EDITOR=""
+export EDITOR="emacsclient -t"
diff --git a/emacs/emacs.el b/emacs/emacs.el
new file mode 100644
index 00000000..cd4ba72d
--- /dev/null
+++ b/emacs/emacs.el
@@ -0,0 +1,162 @@
+;; symlink this file to ~/.emacs
+
+;; if you get "End of file during parsing", refer to:
+;;
+;; https://github.com/radian-software/straight.el#debugging
+;;
+;; , particularly the note "Sometimes, in a corporate environment"... you
+;; might need to clone straight.el into ~/.emacs.d manually
+
+(defvar bootstrap-version)
+(let ((bootstrap-file
+ (expand-file-name "straight/repos/straight.el/bootstrap.el" user-emacs-directory))
+ (bootstrap-version 6))
+ (unless (file-exists-p bootstrap-file)
+ (with-current-buffer
+ (url-retrieve-synchronously
+ "https://raw.githubusercontent.com/radian-software/straight.el/develop/install.el"
+ 'silent 'inhibit-cookies)
+ (goto-char (point-max))
+ (eval-print-last-sexp)))
+ (load bootstrap-file nil 'nomessage))
+
+;; Nicer defaults
+
+(setq compilation-scroll-output t)
+(setq column-number-mode t)
+(setq-default show-trailing-whitespace t)
+
+;; From https://www.emacswiki.org/emacs/SmoothScrolling#h5o-8
+(setq scroll-step 1)
+(setq scroll-conservatively 10000)
+(setq auto-window-vscroll nil)
+
+;; Colorblind friendly theme.
+;; Emacs 28 has modus themes, but EL9 only has emacs 27
+
+(straight-use-package 'modus-themes)
+(require 'modus-themes)
+(load-theme 'modus-operandi :no-confirm)
+
+;; Install xclip so cutting/copying in Emacs on a terminal affects the graphical clipboard
+
+(straight-use-package 'xclip)
+(xclip-mode 1)
+
+;; Fancy undo
+
+(straight-use-package 'undo-tree)
+(global-undo-tree-mode)
+(setq undo-tree-visualizer-diff t)
+(setq undo-tree-visualizer-timestamp t)
+(setq undo-tree-auto-save-history t)
+
+;; Do not spill temporary files everywhere
+
+;; https://stackoverflow.com/a/18330742
+(defvar --backup-directory (concat user-emacs-directory "backups"))
+(if (not (file-exists-p --backup-directory))
+ (make-directory --backup-directory t))
+(setq backup-directory-alist `(("." . ,--backup-directory)))
+
+;; https://www.reddit.com/r/emacs/comments/tejte0/undotree_bug_undotree_files_scattering_everywhere/?rdt=39892
+(setq undo-tree-history-directory-alist '(("." . "~/.emacs.d/undo")))
+
+;; nicer completion UI
+
+(straight-use-package 'helm)
+
+(global-set-key (kbd "M-x") #'helm-M-x)
+(global-set-key (kbd "C-x C-f") #'helm-find-files)
+(global-set-key (kbd "C-x C-b") #'helm-mini)
+
+(setq helm-ff-skip-boring-files t)
+
+;; nicer project support
+
+(straight-use-package 'projectile)
+(straight-use-package 'helm-projectile)
+
+(projectile-mode +1)
+(define-key projectile-mode-map (kbd "C-c p") 'projectile-command-map)
+(helm-projectile-on)
+
+;; LSP base for Rust and Java
+
+(straight-use-package 'lsp-mode)
+(straight-use-package 'company-mode)
+(straight-use-package 'lsp-ui)
+(straight-use-package 'yasnippet)
+
+(add-hook 'java-mode-hook (lambda ()
+ (setq c-basic-offset 2
+ indent-tabs-mode nil))) ; was `f': unbound var, signals void-variable; elisp false is nil
+
+(yas-global-mode 1)
+
+;; Rust support
+
+(straight-use-package 'rust-mode)
+(add-hook 'rust-mode-hook #'lsp)
+(add-hook 'rust-mode-hook
+ (lambda () (setq indent-tabs-mode nil)))
+(setq rust-format-on-save t)
+
+;; Python support
+
+(straight-use-package 'elpy)
+(elpy-enable)
+
+;;; Java Support
+
+(straight-use-package 'lsp-java)
+(add-hook 'java-mode-hook 'lsp)
+
+;; YAML support
+
+(straight-use-package 'yaml-mode)
+
+;; lsp-mode seems unusably slow, so don't install the Ansible language server
+;; if you want to get it working, try https://www.reddit.com/r/emacs/comments/ybbkks/how_to_properly_set_up_lsp_ansible_language/itfxoaa/
+
+(straight-use-package 'ansible)
+(add-hook 'yaml-mode-hook 'ansible)
+
+;; Puppet support; mostly for syntax highlighting
+
+(straight-use-package 'puppet-mode)
+
+;; ==== WORK ====
+
+;; Abbrevs for work, declared in emacs.el for version control
+
+(clear-abbrev-table global-abbrev-table)
+
+(progn
+ (when (boundp 'adoc-mode-abbrev-table) ; was 'daoc-...: typo made the guard never match
+ (clear-abbrev-table adoc-mode-abbrev-table))
+ (define-abbrev-table 'adoc-mode-abbrev-table
+ '(
+ ("oomit" "_...output omitted..._")
+)))
+
+(set-default 'abbrev-mode t)
+
+(setq save-abbrevs nil)
+
+;; AsciiDoc + Vale + Aspell support for work
+
+(straight-use-package 'adoc-mode)
+
+(straight-use-package
+ '(flymake-vale :type git :host github :repo "tpeacock19/flymake-vale"))
+
+(add-hook 'adoc-mode-hook #'flymake-vale-load)
+(add-hook 'find-file-hook 'flymake-vale-maybe-load)
+(add-hook 'adoc-mode-hook 'flymake-mode)
+
+(straight-use-package 'flymake-aspell)
+(add-hook 'adoc-mode-hook #'flymake-aspell-setup)
+(setq ispell-dictionary "en_US-RH")
+
+(add-hook 'adoc-mode-hook (lambda () (setq flymake-aspell-aspell-mode "asciidoc")))
diff --git a/emacs/plan.org b/emacs/plan.org
new file mode 100644
index 00000000..2edcc74b
--- /dev/null
+++ b/emacs/plan.org
@@ -0,0 +1,4 @@
+* https://www.reddit.com/r/emacs/comments/10nmcus/dont_fear_the_emacs/
+* Magit
+* mu4e
+* Elfeed + elfeed-protocol/fever
diff --git a/fiction_writing/es/conmutatividad.md b/fiction_writing/es/conmutatividad.md
new file mode 100644
index 00000000..c1cd6671
--- /dev/null
+++ b/fiction_writing/es/conmutatividad.md
@@ -0,0 +1,73 @@
+# Conmutatividad
+
+## 1.
+
+Era tarde y el bar de la planta estaba prácticamente vacío. El barman limpiaba vasos tranquilamente mientras un par de grupillos hablaba distendidamente.
+
+Un tipo corpulento entró de repente con demasiados ánimos para un ambiente tan apaciguado. Buscaba a alguien con quien hablar y le tocó a un hombre mayor- según el identificador de su mono, un tal J. SMITH. El otro, R. WAGNER tampoco reparó mucho en detalles; se sentó casi sin mirarle y empezó a hablarle.
+
+Si le hubiese mirado, habría visto una cara cansada; toda personalidad había sido erosionada por el trabajo en la mina. Los ojos daban una inquietante sensación de vacío. Quizá no se hubiera sentado si le hubiese observado detenidamente.
+
+-¿Menuda cosa esta, eh? Picas piedra en la mina un tiempo y te dejan hacer lo que quieras. ¿Quieres meterte lo que sea? Un par de añitos y andando. La leche.
+
+El tratado Koslov. La radioactividad de la maldita luna había subido tanto y estaban tan alejados de cualquier base que no había otra. A trabajar la mina a destajo y tirar el material radioactivo al espacio o todos con tumores como balones de fútbol en pocos años. Pero claro, nadie quería dejarse la salud y la vida en los túneles.
+
+Primero enrolaron a los presos, pero no había suficientes. Entonces el gobernador Koslov tuvo la brillante idea de que si los condenados podían conmutar sus penas por trabajo en la mina tras el delito... ¿por qué no conmutar antes de delinquir? Quizás habría gente que trabajaría por poder cometer un delito impunemente. Así pues, se retoca un poco el código penal, se endurecen las penas un poco y ya tienes mineros. Un éxito total.
+
+-Tío, cuando cumpla mi tiempo aquí me voy a meter de todo y voy a hacer lo que me dé la gana. ¡Y me darán una palmadita en la espalda!
+
+El viejo miró el cartel detrás de la barra.
+
+ROBO CON VIOLENCIA …................. 3 años
+
+-¿Tú qué vas a hacer, viejo? ¿Follarte a una jovencita?
+
+El viejo le clavó la mirada unos instantes y el otro apartó los ojos un poco de inmediato.
+
+-¿Cuántos años llevas aquí, viejo?- dijo más cuidadosamente
+
+-45 años
+
+Leyó otra línea, se la sabía casi de memoria ya
+
+AGRESIÓN CON DAÑOS CORPORALES GRAVES ….......... 15 años
+
+-Joder- musitó
+
+El hombre grande se giró y leyó lentamente la tabla
+
+HOMICIDIO EN PRIMER GRADO ….............. 45 años
+
+-Joder, ¿qué vas a hacer?
+
+-No lo he pensado
+
+## 2.
+
+-¿Te has enterado de lo del tío que se encontraron muerto anoche en su habitación?
+
+-¿Qué tío?
+
+-¿John Smith, no te suena?
+
+Los dos guardias miraban las cámaras de seguridad mientras charlaban
+
+-Llevaba 45 años picando piedra. Coge, hace el papeleo para conmutar y la palma de viejo esa misma noche. Ataque al corazón, dicen.
+
+-Menudo palo
+
+-Sí, tío. Pero no sabes lo mejor. El tío no quería conmutar para él. Quería sacar a otro de la cárcel.
+
+-¿Y eso?
+
+-Nadie lo sabe. Hasta hizo el papeleo. El otro saldrá, pero ni podrá darle las gracias.
+
+-Joder. Te pasas 45 años ahí abajo, te dejas tu vida ahí... y no puedes ver lo que te has trabajado. Vaya mierda. Al menos el otro saldrá.
+
+## 3.
+
+-El jurado condena a Wolfgang Kauffman a cadena perpetua por el asesinato del pequeño Sebastian Smith, con los agravantes de indefensión y ensañamiento. Cumplirá su condena en el penal de máxima seguridad de Weinholtz.
+
+Jacob Smith no cambió el gesto en ningún momento del juicio. El pequeño Sebastian ya no estaba y para él, su vida estaba rota. Ni siquiera podía tocar a Wolfgang, ni mucho menos tomarse la venganza que tanto deseaba y que le hacía arder las entrañas.
+
+Wolfgang era intocable, al menos de momento, y eso le estaba matando lentamente.
diff --git a/fiction_writing/es/cuentos_del_triangulo_verde.md b/fiction_writing/es/cuentos_del_triangulo_verde.md
new file mode 100644
index 00000000..3d37bfaa
--- /dev/null
+++ b/fiction_writing/es/cuentos_del_triangulo_verde.md
@@ -0,0 +1,133 @@
+# Cuentos del Triángulo Verde
+
+## 1.
+
+Nadie recordaba ya nada de la antigua civilización, pero Grub estaba bastante seguro de que el templo de suministros había sido uno de sus pilares. Entre polvo y escombros, el templo se alzaba majestuoso desafiando al sol y a las tormentas de arena.
+
+Su tribu anhelaba los extraños artefactos que atesoraba, pero muchos de sus guerreros habían sido diezmados intentando asaltarlo o interceptando los convoyes fuertemente armados que lo abastecían periódicamente.
+
+Recordaba el salvoconducto que había encontrado su abuelo cuando Grub era apenas un cachorro. El esqueleto se aferraba a su posesión más valiosa, pero apenas un tirón bastó para arrancarle el pequeño rectángulo plastificado. Casi no pudo contener la emoción cuando su abuelo se lo enseñó al guardia y éste les invitó a entrar.
+
+No sabían qué magia iluminaba el lugar, ni qué extraña fuerza impulsaba las vías a las plantas superiores, ni de dónde salía ese frescor que al principio te hacía olvidar el infierno exterior, pero que luego te helaba hasta los huesos.
+
+Pasaron varias horas recogiendo tesoros, que los propios guardias les colocaban en alforjas de plástico simplemente presentando el salvoconducto.
+
+El día acabó cuando un guardia se quedó con el salvoconducto a cambio del último artefacto y les escoltó hacia la puerta, pero fue una jornada gloriosa de la cual seguían hablando cada noche al caer el sol, aunque ni siquiera eran capaces de comprender su botín.
+
+Esos borrosos recuerdos emergían en la mente de Grub mientras se dirigía al templo. Como explorador, le correspondía introducirse e interpretar los auspicios regularmente. Esto era más complicado de lo que parecía e infinitamente más peligroso. Habían descubierto que la actitud exacta, con un preciso equilibrio de interés y desinterés, podía mantener a los guardias a raya. Si no mirabas lo suficiente, te conducían a empujones hasta la entrada. Pero si demostrabas mucho interés por un artefacto, tu destino era mucho peor. El guardia se acercaba y entablaba conversación. Nadie conocía el lenguaje ni las encantaciones apropiadas, sólo arrodillarse y ofrecerles un salvoconducto los apaciguaba. Nunca descubrieron qué hacían con los cadáveres.
+
+Grub deambulaba entre las estanterías. De vez en cuando añadía su toque personal, cogía algo y hacía ver que interpretaba sus escrituras, mientras controlaba al guardia por el rabillo del ojo. Esto parecía satisfacerles.
+
+Las miradas de los guardias eran cada vez más insistentes y cuando Grub estaba a punto de dar por concluida su incursión, lo vio. Cayó de rodillas donde se encontraba. Su padre le había enseñado a interpretar los cuatro auspicios, igual que su padre antes que él, y aquel era el peor de todos.
+
+El árbol de frutos redondos y brillantes, el hombre de rojo y los abrigos de animales.
+
+En dos o tres lunas nuevas, llegaría el frío y la desolación. Su abuelo le había explicado cómo el anterior invierno prácticamente acabó con ellos. Grub escuchó una voz, pero no supo interpretarla. Sus sollozos acabaron de ahogar la locución mística y la tétrica melodía.
+
+«Ya es Navidad en El Corte Inglés.»
+
+## 2.
+
+—No lo queremos.
+
+—¿Cómo?
+
+—Que no lo queremos.
+
+El robot se encogió de hombros. No era algo habitual, pero esto había excedido su programación.
+
+—Tendrán que hablar con mi supervisor.
+
+Eva y Lucas se acomodaron en sus asientos.
+
+—De acuerdo.
+
+El robot se levantó, dio la vuelta, se dirigió al hombre del despacho y le explicó todo. Antes de salir a hablar con ellos, el supervisor convirtió su cara de sorpresa en su mejor sonrisa.
+
+—Buenos días. RA21 me ha comentado su caso y creo que no le he entendido del todo bien.
+
+—Que queremos devolverlo.
+
+—Ya —enterró la vista en los papeles que había en la mesa—. Pero veo que no tiene ningún problema, ningún defecto de fábrica.
+
+—Eso es técnicamente cierto, desde luego —dijo ella.
+
+—Pero queremos devolverlo —dijo él.
+
+—No lo comprendo.
+
+—No forma parte de su… ¿política?
+
+—Sé lo que quieren decir, pero esto es del todo irregular. Cubrimos los defectos de fabricación, pero…
+
+—Pero es que no nos gusta. Es un… pesado.
+
+—Y un cabroncete.
+
+—¿Cómo? —A pesar de su amplia experiencia, no pudo evitar la sorpresa. Se recompuso rápido.
+
+—Mire, al principio éramos comprensivos. No nos dejaba dormir, pero… pensábamos que era normal. Pero iba pasando el tiempo y no mejoraba.
+
+—Lo que mi mujer quiere decir… pues sí, crecía con normalidad, dentro de lo esperado. Pero…
+
+—Es un gamberro. Y no para de tocarnos las narices.
+
+El supervisor, bajo una mueca de atención absoluta, no daba crédito. En un mundo saturado de contaminación, la tecnología que los chicos del supermercado habían inventado no sólo producía unos filetes excelentes, sino que resultó ser la mejor manera de tener hijos sanos y perfectos, completamente libres de la plaga de las mutaciones. Buscando atender todas las necesidades de sus clientes, abrieron el departamento de reproducción asistida hacía apenas seis años.
+
+—Pero todo esto son criterios subjetivos. El niño no presenta ninguna mutación. Su ADN es —Alzó los papeles y les enseñó los marcadores— completamente armonioso. El fenotipo... admitirán que el parecido es notable.
+
+—Sí, pero el crío es un maldito caprichoso e insoportable.
+
+—Debe disculparla, ayer volvieron a expulsarle del centro educativo. Es que no hay manera.
+
+—Mire, nos da igual el dinero. Sólo queremos que… que se lo queden. Ustedes siempre insisten en eso, ¿no?
+
+El supervisor suspiró sonoramente. «La política que ha sobrevivido una guerra nuclear», pensó. Entonó la letanía con resignación.
+
+«Si no quedan satisfechos, les devolvemos su dinero.»
+
+## 3.
+
+Se sentó delante del ordenador. Era un tipo bajito, algo barrigón y bastante calvo. Sudaba.
+
+Abrió el navegador e introdujo la dirección.
+
+«Instalando el plugin Mindterest 3.5 para una perfecta experiencia de compra», decía la pantalla, mientras el hipnótico círculo giraba en su danza infinita.
+
+Golpeteó nerviosamente el ratón.
+
+La página cargó, con el habitual listado de categorías. Vio la muchacha tridimensional en la parte derecha de la pantalla. «Haga clic aquí si desea saber más sobre su nueva experiencia de compra.» Era bastante atractiva, pensó, y le echaba una mirada inquisitiva. Sonrió levemente.
+
+«Me temo que no ofrecemos ese tipo de servicios.» Había desaprobación en ese cuadro de texto.
+
+Enrojeció levemente. Clic, clic, cancelar.
+
+«En nuestra sección de psicología encontrará títulos como ‘Superar el ridículo’ y ‘Conteniendo sus deseos’, haga clic en los títulos para más información.»
+
+Una gota de sudor recorrió su frente. Clic, clic, cancelar.
+
+Se echó una ojeada rápida, avergonzado.
+
+«El departamento de alimentación tiene una amplia selección de comida dietética preparada por nuestros especialistas en nutrición.»
+
+CLIC, CLIC…
+
+«... programas de ejercicios...», los popups emergían más rápido de lo que podía cerrarlos.
+
+¡CANCELAR!
+
+Joder con el plugin, era peor que el Flash de los antepasados. Su vida era una mierda, pero no necesitaba que un avatar impoluto se lo recordase.
+
+«Quizás podrían interesarle nuestros nuevos servicios de psicología, haga clic aquí para obtener detalles.»
+
+Alzó una ceja. En aquel momento tampoco le pareció tan mala idea. Quizás le haría bien.
+
+Clic, clic.
+
+La página cargó lentamente. Sin pensarlo, se desplazó hasta la parte inferior para ver los precios. «Puf, prohibitivos», pensó, y vio su desilusión reflejada en la pantalla.
+
+«En el supermercado encontrará una promoción 3x2 en cuchillas de afeitar. Bueno, en realidad sólo necesitará una…»
+
+Reparó con resignación en el eslogan en el pie de la página y se quedó mirando al infinito.
+
+«Especialistas en ti.»
diff --git a/fiction_writing/es/el_principe.md b/fiction_writing/es/el_principe.md
new file mode 100644
index 00000000..f1db64a9
--- /dev/null
+++ b/fiction_writing/es/el_principe.md
@@ -0,0 +1,65 @@
+# El príncipe, el rey y el verdadero mal
+
+No paraba de darle vueltas en su cabeza. Faltaban tres días y él, del que se decía que tenía recursos para todo, aún no lo había conseguido. Su gato le observaba aburrido desde lo alto del armario, sus ojos brillantes como carbones al rojo. No le quedaba otra, iría mañana mismo y sellaría el trato.
+
+Se despertó por la mañana tras sueños angustiosos. Su gato seguía dormido a sus pies. Tras sus rituales matutinos y enfundarse en sus mejores galas, salió al mundo exterior.
+
+Después de un corto trayecto de metro, salió a la calle. En la distancia, el gigante de cristal y hormigón se alzaba sobre otros edificios de menor importancia. Un poco más de curvatura en esos enormes ventanales y la convección convertiría el interior en una enorme caldera. Deformación profesional, pensó, y una pícara sonrisa se le dibujó en los labios.
+
+Al entrar, la fastuosa decoración marmórea le recordó a sus propias oficinas. En el centro, Laocoonte y sus hijos, una de sus esculturas favoritas. Siempre pensaba en las almas atrapadas para toda la eternidad en la piedra, forcejeando inmóviles sin poder separarse de sus enemigos. De repente, se dio cuenta de que la cola había crecido y se apresuró a ponerse al final.
+
+En la cola, los problemas de los mortales eran los de siempre. Desde los romanos y la sal, muchos de ellos se habían vuelto simples guarismos y le aburrían. Otros se ocupaban de torturar a las almas por cifras.
+
+Estos pensamientos entretuvieron su mente hasta que alcanzó la caja. Recorrió con su mirada a la cajera y un escalofrío la recorrió a ella.
+
+—Deseo hablar con Emilio.
+
+—Me temo que no atiende visitas, pero créame, podré atenderle en lo que usted necesite.
+
+Se lo explicó. Se lo explicó de nuevo.
+
+—Sí, espere un momento. Le avisaré y le atenderá enseguida.
+
+Esperó un poco más. Siempre esperar. Ellos, con sus vidas cortas, no parecían entender el drama de las pérdidas de tiempo. Tenía un círculo especial para los impuntuales y demás escoria.
+
+Finalmente, le condujeron a lo largo de infinidad de pasillos hasta las entrañas de la criatura. Entró y allí estaba, diminuto y perdido en su gran silla. Cabe decir que no era un despacho particularmente grande ni ostentoso, pero había pequeñas muestras de poder por todas partes. También había un desorden que le hería en lo más profundo. Él llevaba sus asuntos como un reloj, el caos no tenía lugar en los negocios.
+
+Finalmente el hombrecillo alzó la vista de los papeles.
+
+—Pues mire, cuando mi padre me legó el banco y por extensión esta oficina, se sentó conmigo y me enseñó esos contratos.
+
+Se dirigió a uno de los armarios de su oficina, se subió a un taburete y cogió una carpeta que había encima. La sacudió un poco para quitarle el polvo y la llevó a la mesa.
+
+—Yo pensaba que era una especie de broma, una tradición gótica de banqueros. Cuando mi padre murió los cogí y los volví a leer, pasé una tarde muy amena.
+
+Un cordel deshilachado aprisionaba el papel amarillento. Tuvo ciertas dificultades con el nudo, pero al final los documentos quedaron libres.
+
+—¡Ah! Éste es.
+
+Lo colocó ante él.
+
+—Verá que es un contrato bastante estándar, a pesar de la temática. Las cláusulas son un tanto arcaicas, pero sospecho que no serán problema para usted.
+
+Una pequeña funda rígida cilíndrica dentro de su abrigo contenía sus gafas, que pronto reposaron sobre su nariz aguileña. Las bifocales le hacían parecer mucho más viejo.
+
+—Creo que está todo en orden.
+
+Armado con una pluma ornamentada con sutiles incrustaciones de rubí, se enfrentó al blanco. Bajo el epígrafe “objeto del trueque” rasgó delicadamente “Entradas para el partido Foot-Ball Club Barcelona, Madrid Football Club que se disputará el domingo 14 de abril de 2013 en el estadio Nuevo Chamartín en la Avenida de la Concha Espina 1, Madrid, España”.
+
+—Pues ahora nos quedaría la firma.
+
+Se diría que el cielo ennegreció justo cuando se puso firme en el centro del despacho.
+
+—Yo, Lucifer, Padre de la Mentira, Dios de Este Siglo y Príncipe de las Tinieblas, declaro que, en plena posesión de mis facultades mentales, canjeo mi alma inmortal con Banco Martínez y Asociados, por una entrada a un partido de fútbol, y para que conste, firmo con mi sangre este contrato.
+
+Un abrecartas del escritorio le sirvió para realizar una pequeña punción en su dedo índice. Un par de ágiles movimientos y el líquido rojo oscuro hirvió sobre el papel, dejando escrito su nombre con impecable caligrafía.
+
+El director notó el temblor de su mano al tomar el contrato. Lo examinó cuidadosamente e imprimió su sello. No sin cierta ceremonia, lo archivó como si fuese un bebé en una brillante funda roja carmesí.
+
+—Todo en orden, si viene usted mañana y se identifica con el DNI, cualquiera de nuestros cajeros podrá entregarle las localidades.
+
+--------------
+
+Un jugador del Barça hizo una rápida internada por la banda derecha driblando a un defensa. El líbero acudió a cubrir la marca con velocidad, pero le alcanzó en falta justo en el borde del área. Cien mil voces desgarradas alzaban su voz ante la peor de las injusticias.
+
+Un rostro inexpresivo contemplaba el espectáculo. Sus ojos recorrían ausentes la arena de los gladiadores, ahora incapaces de encontrar lo que buscaban. Finalmente, desistió. Extrajo el estilizadísimo móvil de las profundidades de su abrigo y se dejó hipnotizar por las luces de un jueguecito endiablado.
diff --git a/fiction_writing/es/en_los_mejores_cines.md b/fiction_writing/es/en_los_mejores_cines.md
new file mode 100644
index 00000000..7b663b02
--- /dev/null
+++ b/fiction_writing/es/en_los_mejores_cines.md
@@ -0,0 +1,55 @@
+Fred se sentó en la cómoda butaca con sus palomitas. Una ligera brisa le cosquilleaba la nuca mientras se acomodaba y colocaba todo en su sitio. Al poco, se apagaron las luces y una sonrisa apareció en su cara.
+
+POR FAVOR, FIJE LA VISTA EN EL PUNTO BLANCO EN EL CENTRO DE LA PANTALLA
+
+PROCEDEREMOS A BORRAR SUS RECUERDOS DE LA PELÍCULA EN 10 SEGUNDOS PARA QUE LA PUEDA DISFRUTAR DE NUEVO
+
+SI NOTA MAREO O SOMNOLENCIA, POR FAVOR, PULSE EL BOTÓN ROJO EN EL REPOSABRAZOS
+
+Era 2127, y el borrado selectivo de recuerdos había resucitado la industria del cine. Ante la crisis de ideas y la sensación de que todo había sido rodado ya, a un productor se le ocurrió utilizar un curioso avance de neurociencia- inútil para el público en general y para el que sólo se habían encontrado usos oscuros- para permitir que a la gente no le importase la falta de originalidad del cine. Podían ver la misma película repetidamente y cada vez la disfrutaban como la primera vez.
+
+Pese a las reticencias iniciales, el público comprendió rápidamente que era mejor esto que ver la enésima secuela, aun a costa de no conservar su cerebelo intacto.
+
+La estrategia tuvo tanto éxito que en breve, cesó la producción de cine nuevo. Años más tarde, incluso, se comprobó que los cines del mundo apenas daban un centenar de películas diferentes- y curiosamente aquellas cuyos derechos de exhibición habían caducado hacía tiempo.
+
+Fred se relajó. Los Caballeros de la Mesa Cuadrada. Si no le fallaban las cuentas, ya la había visto 137 veces. Y siempre acababa con dolor en la barriga de tanto reír. Se echó atrás y se dispuso para las carcajadas.
+
+# 2.
+
+Fred estaba perplejo. Tenía una mueca inexpresiva en la cara, mientras la gente alrededor aún reía o, como mínimo, tenía una expresión risueña y feliz.
+
+Pero él no se había reído.
+
+Nada.
+
+¿Cómo podía ser? Recordaba haberse reído las 136 veces anteriores. Era una de sus favoritas. Era infalible en quitarle sus preocupaciones.
+
+Fred se incorporó. La verdad es que estaba un poco cabreado. Se dirigió a la taquilla mientras la gente comentaba la jugada a su alrededor.
+
+-No me he reído
+
+-Bueno, -respondió la muchacha distraída- no hace reír a todo el mundo, ya...
+
+-¡Pero la he visto muchas veces!- le interrumpió
+
+-¿Cómo?
+
+-Siempre me hace gracia. Es la mejor comedia que hay. La he visto mil veces
+
+La muchacha se le quedó mirando. Parece que esto no entraba en su guión.
+
+-Lo lamento señor, pero no puedo hacer nada
+
+-¡Devuélvame mi dinero!
+
+-No puede ser. Nadie se ha quejado de la proyección, ni del borrado, ni... nada, en realidad
+
+-¡Pero no me ha gustado!
+
+-Me temo que nunca se ha devuelto el precio de una entrada porque la película no ha gustado. Además, a todo el mundo le gustan los Monty Python...
+
+Fred se indignó. Quería decir algo- y abrió la boca para decirlo, pero no supo qué decir.
+
+-Si quiere puede presentar una reclamación
+
+-Da igual. Muchas gracias, señorita.
diff --git a/fiction_writing/es/invoco_al_diablo.md b/fiction_writing/es/invoco_al_diablo.md
new file mode 100644
index 00000000..691a8194
--- /dev/null
+++ b/fiction_writing/es/invoco_al_diablo.md
@@ -0,0 +1,119 @@
+# Invocó al diablo y lo que sucedió a continuación te sorprenderá
+
+--Tía, que no lo veo nada claro
+
+--Que sí, que para qué quieres el alma, eso es algo de boomers
+
+--No me convence, que lo vi en una serie y es algo chungo
+
+--¿Pero tú quieres ir a ver el concierto o no?
+
+--Bueno, ¿pues cómo lo hacemos?
+
+--Busquemos en Youtube
+
+--¿Cómo ves este? “Los curas lo odian: invoca a Satanás con este extraño truco”
+
+--No sé. A mí me sale “Como vender el alma al Diablo con productos del Mercadona”
+
+--¡Ah! Pues ese, que lo tenemos aquí al lado y aún tardarán en cerrar
+
+------------------------------------------
+
+Miró el móvil. Un formulario AAD-35. No lo había visto nunca. Tuvo que consultar el manual, llevaba pocos siglos allí y no había tenido que tratar con ese nunca. Guardó el móvil, se levantó, se subió los pantalones y tiró de la cadena. Siempre le invocaban cuando estaba en el retrete.
+
+------------------------------------------
+
+--¿Has oído eso? ¿Quién hay en el baño?
+
+--Joer, qué mal rollo. Ve a mirar, anda
+
+--¿Yo? Es tu casa
+
+(Lejano) --Ya voy, paciencia, narices
+
+------------------------------------------
+
+--Bueno, pues ya me tenéis aquí
+
+--¿Usted es el diablo?
+
+--Hombre, pues no. Joer, ¿pensabais que os iba a atender el jefe? Cuando vais a pedir una tarjeta, ¿esperáis que os atienda el Botín?
+
+--Ya, bueno, pero entonces, ¿puedes ocuparte tú?
+
+--Sí, qué pasa, que porque soy joven no puedo asumir responsabilidades
+
+--Bueno, no te pongas así. A ver, ¿cómo va esto?
+
+--Pues nada, ¿qué me ofrecéis?
+
+--¿Que qué te ofrecemos? ¿Será que qué te pedimos, no?
+
+--¿Perdón?
+
+--Queremos unas entradas para el concierto
+
+--¿Qué?
+
+--Unas entradas. Son un trozo de papel, lo das al segurata y te deja entrar
+
+--Creo que os estáis liando. A ver, me ha llegado un AAD-35. Queréis comprarle el alma al diablo. Atípico, pero puede hacerse
+
+--No, no, queríamos *vender* nuestra alma al diablo.
+
+Suspiró
+
+--Déjame adivinar: la loca esa fanática del Deliplus, ¿no? Joer, me lo habían contado los del turno de mañana y no me lo creía. Crédulos humanos, os lo ponen en Youtube y no sabéis salir de ahí
+
+--Oiga, sin faltar, ¿eh?
+
+--Mira bonita, ¿queréis el alma del diablo o qué? ¿Qué dais a cambio?
+
+--Que no queremos eso, ¿pa qué sirve? Eso luego se va a un desván y luego no lo usas para nada
+
+--Malditas millenials… Mira, yo me las piro que tengo cosas que hacer. Agur
+
+------------------------------------------
+
+Tía, no avises al jefe, porfa. Que llevo poco tiempo aquí y no me va mal. Venga, si no le dices nada, te consigo un par de entradas para el concierto de Rosalía, ¿vale?
+
+------------------------------------------
+
+Pulsa el icono de la ruedecita 6 veces. Debería salirte la cabrita. ¿Te sale? Vale, ahora le das 6 vueltas. Muy bien, ahora te salen la parrafada esa, saltas al final y le das a aceptar.
+
+------------------------------------------
+
+Sí, mira, si entras por nuestra página de Facebook te viene un Lucifer más… de padres, ¿vale? Pero por Insta y Snapchat pues nos envían a nosotros.
+
+------------------------------------------
+
+(el diablo becario cogiendo el teléfono) No se moleste, que esto es un móvil de empresa y no puedo cambiarme de operadora. Ah, coño, perdona. Sí, sí, es aquí
+
+------------------------------------------
+
+---A ver, he encontrado unos cuantos yutus del tema.
+
+---Pero pilla uno que vaya con Android, ¿eh?
+
+---¿Qué te parece este? “Los curas lo odian: invoca a Satanás con este truco raro”
+
+---No sé, Rick, parece falso
+
+------------------------------------------
+
+No olvidéis suscribiros a nuestro canal de Youtube y darle al like. ¿Habéis probado nuestros filtros?
+
+------------------------------------------
+
+Oye, ¿nos podemos ir p’adentro? Aquí fuera hace un frío de cojones y yo estoy más acostumbrada a las calderitas hirviendo y todo eso.
+
+------------------------------------------
+
+Bueno, vuélvemelo a explicar. Que me he dormido con la nota de voz de 10 minutos.
+
+------------------------------------------
+
+---¡Que vuelvan las Spice!
+---No me jodas
+---Na, es coña
diff --git a/fiction_writing/es/lucifer_martinez.md b/fiction_writing/es/lucifer_martinez.md
new file mode 100644
index 00000000..d410f065
--- /dev/null
+++ b/fiction_writing/es/lucifer_martinez.md
@@ -0,0 +1,77 @@
+# Lucifer Martínez
+
+Como casi cada tarde, Lucifer Martínez apagó su ordenador a las 18:00, se despidió de sus compañeros de trabajo y caminó el corto trecho hasta la estación de tren de Móstoles.
+
+El tren no estaba exageradamente lleno y pudo coger sitio con ventanilla, su favorito. Apoyó la cabeza en el cristal y dejó que su mente divagase. El negocio iba bien, viento en popa, casi demasiado bien se decía. Había cancelado la campaña de regalar el nuevo iPhone a cambio de tu alma inmortal. Era demasiado fácil, pensaba. En esta época, su trabajo carecía de creatividad, de emoción. Antes los hombres eran de otra manera.
+
+La gente de la oficina le apreciaba. Era un buen jefe. No como Dios. Lucifer no entendía cómo había llegado tan lejos. Con la coña de que había que dejar que el rebaño se pastorease solo, pasaba de todo. Él había entrevistado personalmente a todos sus empleados y lo sabía todo sobre ellos (deformación profesional). Con el Todopoderoso todo eran cuadros intermedios, burocracia infinita… como todas las grandes empresas, al final nadie daba un palo al agua, pero daba igual, la inercia llevaba a la empresa adelante.
+
+[...]
+
+Dejó el abrigo en el colgador.
+
+-¿Cariño, ya estás aquí? Espera que salgo…
+
+Una mujer espectacular salió del baño. Su larga melena empapada no osaba cubrir sus más que generosos atributos. Cualquier hombre hubiese traicionado a su patria por esas piernas gloriosas.
+
+-¡Mamá! ¿No podrías tener un poco de pudor de cuando en cuando?
+
+-Joder, hijo, siempre igual. Parece mentira que siendo quien eres sigas con estas pamplinas puritanas.
+
+-¡Pero es que no puedes ir en pelotas por casa siempre, por Dios! Las madres normales no van por ahí enseñando las domingas a sus hijos.
+
+-Mira, hablando de eso. Mi profe de Zumba va a venir a casa- miró el reloj- en 10 minutos. ¿No podrías irte a dar una vuelta?
+
+Lucifer frunció el ceño. Siempre igual. Cogió el abrigo muy lentamente y se dirigió a la puerta.
+
+-¡No te pongas así, hombre! Por cierto, te he dejado un tupper en la nevera para mañana, no te olvides de cogerlo.
+
+Refunfuñó algo y cerró la puerta. Al salir del ascensor se topó con el que sospechaba era el profe de Zumba. Era enorme y llevaba una enorme sonrisa de oreja a oreja. Lucifer agachó la cabeza e intentó mantener todas esas imágenes de su madre con ese maromo fuera de la cabeza, sin demasiado éxito.
+
+Se metió en el bar de la esquina. Allí se sentía cómodo, y dados los hábitos de su madre, pasaba allí casi tanto tiempo como en su casa. Sabía que tenía que buscarse otro sitio, pero tal como están los pisos, tampoco podía permitirse muchas maravillas.
+
+-Hombre, Luci… ¿una cervecita?
+
+-Claro que sí, Paco, muchas gracias.
+
+El dueño era un tipo majo. Llevaba ahí cuarenta años y sabía perfectamente cómo tratar a su clientela. En este caso, sabía que no tenía que hablarle de su santa madre.
+
+[...]
+
+Se había tomado un par de cervecitas más de las que tocaba y seguramente hubiese tenido que irse para casa, pero no quería arriesgarse a que el profesor de Zumba siguiese allí. Seguramente les hubiese oído nada más salir del ascensor, pero eso ya era demasiado para él.
+
+Se puso a andar. La cerveza le estaba llevando a una fase eufórica y sus pasos tenían cada vez más energía. Sabía que no tenía que ir allí, pero en su alterado estado mental, tenía ganas de juerga.
+
+Era una caminata de al menos veinte minutos, hacia la zona buena. Al llegar, miró al séptimo piso. Había luz y parecía haber una fiesta. Se puso a gritar.
+
+-¡¡¡Ehhhh, Todopoderoooosooooo!!!
+
+-¡¡Baja a hablar con tu colegaaaaaa!!
+
+Estuvo así un rato, sin obtener resultado alguno.
+
+-¡¡¡Capulloooooo!!! ¡Montando una fiesta de Navidad! ¡¡¡Que crucificaron a tu hijooooooo!!!
+
+Se abrió una ventana. Le pareció ver a alguien encogerse de hombros y volver a meterse. Se sentó en un bordillo.
+
+Al cabo de un rato, bajó y se sentó a su lado.
+
+-Feliz navidad
+
+-Sí tíiio, feliz navidad
+
+-¿Qué te trae por aquí? No me lo digas- se apartó un poco para evitar el hálito alcohólico.
+
+-¿Por qué tío? ¿Por qué eres así?
+
+-A mí qué me cuentas. Yo soy como soy.
+
+-Eres un mierdas. Tú lo arreglas todo siempre así. Este es mi garito y me meo dentro si me apetece. Pues lo tienes hecho una mierda.- se le vidriaron un poco los ojos
+
+-Venga hombre, es Navidad. Súbete. Hay cordero asado.
+
+Le pasó la mano por el hombro. Joder, siempre le funcionaba ese truco. Lucifer le miró a los ojos y rompió a llorar.
+
+-Venga, venga. No pasa nada. Tú también eres como eres. Y no hay nada que te guste más que el cordero asado. Te irá bien.
+
+Le ayudó a levantarse y desaparecieron por el umbral de la puerta.
diff --git a/fiction_writing/es/maldito_clip.md b/fiction_writing/es/maldito_clip.md
new file mode 100644
index 00000000..a29efd6b
--- /dev/null
+++ b/fiction_writing/es/maldito_clip.md
@@ -0,0 +1,73 @@
+Maldito clip
+
+En una habitación de joven, a oscuras, EL JOVEN BILL GATES teclea furiosamente. De repente, se detiene y contempla extasiado el monitor.
+
+EL JOVEN BILL GATES
+
+¡Madre! ¡Madre! ¡Venga aquí, lo he inventado!
+
+MADRE DE BILL GATES
+
+¿Qué quieres, hijo? Son las tres de la mañana… ¿Qué has inventado esta vez?
+
+EL JOVEN BILL GATES
+
+¡El corrector perfecto! Madre, usted podrá teclear cualquier frase, con los peores errores de sintaxis y los más graves gazapos y el ordenador le replicará con sus pecados corregidos.
+
+MADRE DE BILL GATES
+
+Pero hijo, ¡eso es una abominación! Las madres estamos para corregir los errores de nuestros hijos. ¿Qué haremos si nos quitas eso?
+
+EL JOVEN BILL GATES
+
+Madre, ¡es el progreso! Todos podrán expresarse sin que el listillo de turno les corrija a cada frase...
+
+MADRE DE BILL GATES
+
+¡Ni progreso ni progresa! A la cama y borra ese programa del demonio.
+
+EL JOVEN BILL GATES
+
+¡Pero madre!
+
+MADRE DE BILL GATES
+
+¡No me repliques! Y a ver si dedicas tu tiempo a cosas de provecho en vez de a esas tonterías.
+
+El joven Bill Gates se dirige refunfuñando a su cama, enojado. Una vez acostado, una explosión de luz y humo inunda su cuarto. Un extraño hombre barbudo con una túnica estampada de ceros y unos se planta en medio de la estancia.
+
+EL JOVEN BILL GATES
+
+¿Quién anda ahí?
+
+EL DIOS DE LA COMPUTACIÓN
+
+¡Bill! ¡Bill! ¡Has osado violar la ley de Turing de lo computable y lo indecidible!
+
+EL JOVEN BILL GATES
+
+¡Pero si yo sólo he escrito un corrector ortográfico! Además, ¿no llega usted un poco tarde?
+
+EL DIOS DE LA COMPUTACIÓN
+
+¡Ni pero ni peras! Además, en esta época los módems van a 300 baudios y ha tardado mucho en llegarme lo tuyo. Te salvas de que te enseñe el futuro desastroso que traería tu creación, que vamos justos de extensión y tu madre te ha dado lo suyo. Además, esto no es Dickens.
+
+EL JOVEN BILL GATES
+
+¡Gracias, oh dios de la computación! Corregiré mi camino y me dedicaré a escribir el buscaminas.
+
+EL DIOS DE LA COMPUTACIÓN
+
+¡No creas que te vas a librar tan fácilmente! ¡Yo te maldigo! Por querer corregir todo error humano y con ello, privarles de la virtud de aprender de sus errores y hacer de las máquinas dioses, yo te castigo. Tus programas serán populares en el mundo entero, pero todos tus usuarios te odiarán por cualquier bug. Cualquier programa tocado por ti frenará el más potente superordenador y Moore no te salvará.
+
+EL JOVEN BILL GATES
+
+¿Pero eso quiere decir que me puedo forrar igual, no?
+
+EL DIOS DE LA COMPUTACIÓN
+
+Por tu insolencia, ¡añado que las mujeres te ignorarán a ti y a los de tu especie!
+
+EL JOVEN BILL GATES
+
+¡NOOOOOOOOOOOOOO!
diff --git a/fiction_writing/es/mariano_el_programador.md b/fiction_writing/es/mariano_el_programador.md
new file mode 100644
index 00000000..aa19c1dd
--- /dev/null
+++ b/fiction_writing/es/mariano_el_programador.md
@@ -0,0 +1,51 @@
+Mariano era un programador. No era el mejor, ni tampoco el peor. Había aprendido poco a poco, por su cuenta, hasta que un día descubrió que se podía ganar la vida con ello. Acabó trabajando en un olvidado engranaje del gobierno, programando poco a poco, día a día, tranquilamente.
+
+Mariano era el responsable de un pequeño programa que tomaba decisiones muy simples sobre gastos de un departamento muy pequeño de una subdelegación de un pequeño departamento que no estaba en boca de todos. Una vez al mes, alguien pulsaba un botón en el programa de Mariano y el presupuesto se repartía. No era un reparto particularmente ingenioso o eficiente, pero funcionaba- bueno, de cuando en cuando había algún problema pero Mariano siempre estaba ahí para resolverlo.
+
+Pero era un programa que ahorraba a un subsecretario hacer una hoja de cálculo tediosa, y si bien Mariano no se podía sentir orgulloso al lado de los gigantes de la informática que consiguieron hacernos felices acariciando cristales, sabía que su programa ayudaba a alguien, y eso le hacía feliz.
+
+El programa funcionaba, y un día el subsecretario se lo explicó a otro subsecretario mientras jugaban al pádel. El primer subsecretario concertó una reunión inmediata entre el segundo subsecretario y Mariano, para ver si éste podría ayudarle a automatizar unas cosas de su departamento, que le quitaban mucho tiempo.
+
+Mariano le escuchó y le propuso un par de ideas, y vio como el segundo subsecretario asentía con la mirada perdida. Mariano le preguntó un par de cosas, y el segundo subsecretario le dijo que no lo veía claro, que qué proponía Mariano. Su propuesta le pareció fantástica, y al poco tiempo Mariano recibió una nota de que ampliase su programa para manejar las tareas del segundo subsecretario.
+
+En un par de días, Mariano tenía un prototipo. Se lo enseñó a la ayudante del segundo subsecretario, que le hizo un par de correcciones pero que parecía contenta. Mariano era feliz, por fin su programa hacía más cosas. Estuvo unos días ocupado y al final el programa fue capaz de asumir muchas funciones del segundo subsecretario.
+
+Mariano tenía ahora un poco más de trabajo, pero las funciones del primer programa ya estaban bastante pulidas y se podía dedicar casi plenamente a los problemas de la nueva funcionalidad.
+
+El segundo subsecretario decidió pasarse al golf, donde podía codearse con adjuntos superiores. Estos, sorprendidos por el hecho de que un subsecretario jugase a golf con ellos, no tardaron en interrogarle y descubrir las bondades del programa de Mariano.
+
+Pronto estos iniciaron un proyecto conjunto, con un presupuesto generoso para ampliar el programa de Mariano. Le ayudaron a contratar unos cuantos programadores más para ayudarle y que pudiese asumir más funcionalidades dentro del gobierno.
+
+Mariano titubeó. Él, un simple programador autodidacta, liderando un proyecto para automatizar los trabajos de varios adjuntos superiores. Sin embargo, vio cómo confiaban en él y asumió el reto.
+
+Fue complicado, pero resultó ser que Mariano era especialmente habilidoso entendiendo los problemas y proponiendo soluciones que los adjuntos aceptaban con la mirada perdida, y que luego sus ayudantes le acababan de corregir. Mariano iba interiorizando cómo funcionaba el gobierno, y cada vez le resultaba más simple extender el programa.
+
+Había problemas, sí; los gobiernos no son organismos simples, pero Mariano y sus ayudantes iban encontrando soluciones y haciéndolo funcionar todo.
+
+Pero un día, Mariano se dio cuenta de algo. Tenía un problema con un pequeño módulo que tenía que lidiar en un problema interdepartamental, y no encontraba manera de hacerlo. Preguntó a los viceadjuntos medios, que eran los únicos que quedaban por las oficinas, removiendo sus cafés con la mirada perdida, pero pronto vio que no sabían cómo funcionaba el programa de Mariano y que mucho menos podrían resolverle el problema.
+
+Mariano preguntó y preguntó, pero todo el mundo parecía haber olvidado qué problemas resolvía el programa de Mariano, y mucho menos cómo lo hacía.
+
+Mariano y sus programadores discurrieron durante un par de días y al final hallaron una solución. No les complacía demasiado, pero era la única que tenían. En un par de semanas la tuvieron lista, pero estaban un poco preocupados porque no sabrían si era correcta o si funcionaría bien.
+
+Pensaron que si fallaba, alguien se quejaría, ¿no? Con cierta preocupación, pulsaron el botón que la puso en marcha.
+
+Pasaron semanas, y no sólo nadie se quejó, sino que seguían lloviendo carpetas con más tareas a automatizar. Con cierto orgullo, Mariano y sus programadores- que cada vez eran más- seguían automatizando y automatizando.
+
+Como pequeñas hormiguitas, un día, un submódulo del programa de Mariano emitió un mensaje que el propio Mariano había programado como pequeña broma para sí mismo. Todos los departamentos del gobierno habían sido automatizados. El programa de Mariano había asumido el control.
+
+Las cosas no iban ni mejor ni peor bajo el programa de Mariano. Siento tener que explicar que este cuento no es ni una utopía ni una distopía. El gobierno iba tirando como antes; si acaso, las pistas de pádel y de golf estaban algo más ocupadas.
+
+Hasta que un día, Mariano cayó en algo. Sólo él realmente sabía cómo encajaba todo. Sus programadores conocían algún módulo, pero eran muchos módulos y muchos programadores, algunos de los cuales ni se conocían, pero Mariano les conocía a todos, y podía ver sus líneas de código en sus sueños. ¡Qué funcional bucle el de Javier! ¡Qué adecuada subrutina la de Antonia!
+
+Mariano así, con la ayuda de sus programadores iba retocando el programa. El mundo seguía igual, pero el programa de Mariano exigía sus horas de mantenimiento. Ya hasta los programadores de Mariano comenzaban a tener la mirada un poco perdida, picando teclas con eficiencia, como Mariano les había enseñado; resolviendo las incidencias de los módulos con cariño, como Mariano les había mostrado.
+
+Los programadores de Mariano eran extensiones de Mariano; con peinados y colores de ojos diferentes, pero eran Mariano. El gobierno, asimismo, se había convertido en Mariano, o quizás en el programa de Mariano, y no había nada que se escapase de sus millones de líneas de código, ni especialmente eficientes ni especialmente elegantes, pero funcionales, como le gustan a Mariano.
+
+Hasta que un día, uno de los programadores de Mariano descubrió un pequeño fallo en el programa de Mariano. Una pequeña subrutina tenía un sutil defecto, que hacía que el conteo de vacaciones no se realizase correctamente. El único afectado, curiosamente, era Mariano. Al corregirlo, el sistema sufrió un pequeño desbordamiento, que pudieron resolver entre un par de programadores. Tras un par de días de impresión, le entregaron a Mariano una pequeña pila de papel continuo con el cálculo exacto de días de vacaciones que tenía pendientes.
+
+Mariano, que había aprendido a confiar en su programa como todo el mundo en el país, miró un segundo al infinito y lo aceptó. Avisó al programa de Mariano que en un par de semanas se cogería vacaciones y así lo hizo.
+
+Llevaba ya un par de años de vacaciones sin sobresaltos, pero con la mirada un poco perdida eso sí, cuando le llegó un telegrama. Miró al cielo, puso el telegrama otra vez en la mesa y pensó para sus adentros:
+
+-Que lo resuelva Mariano, que yo estoy de vacaciones.
diff --git a/fiction_writing/es/un_paseo_por_el_rio.md b/fiction_writing/es/un_paseo_por_el_rio.md
new file mode 100644
index 00000000..834e215e
--- /dev/null
+++ b/fiction_writing/es/un_paseo_por_el_rio.md
@@ -0,0 +1,35 @@
+Se hallaba como otras tantas veces en frente de la temida página en blanco. No sabía qué escribir. Le daba vueltas, buscando una idea. Eso ya lo había escrito. Eso otro ya lo había escrito mejor otro.
+
+Dejó volar su imaginación. Su alma de juntaletras voló por la estratosfera hacia el norte. Vio a lo lejos las luces, la torre Eiffel. “¡Eso es!”, pensó, “Paris, la ciudad del amor y los poetas”. Descendió a toda velocidad y pronto se encontró en su orilla preferida del Sena.
+
+-¡No empuje!
+
+-¡Sin colarse, eh!
+
+Eran escritores, de eso no le cabía la menor duda. Las gafas, los Macs bajo el brazo y, sobre todo, sus egos les delataban.
+
+-La cola acaba ahí, a esperar como todos.
+
+Se giró y la vio. “Inspiración a orillas del Sena” rezaba el letrero más adelante. Se dirigió al plumilla que tenía más cerca.
+
+-Oye, ¿qué es esto? Vengo muchísimo por aquí y nunca había visto esto.
+
+-Ya, lo pusieron el verano pasado. Venimos tantos que al final es normal. Por mucho que la imaginación sea infinita, al final todos recalamos aquí y han decidido hacer algo con las aglomeraciones.
+
+-Pero este es el pasaje que necesito. No puedo esperar a esa cola. Mis personajes tienen que darse un paseo por aquí para que el lector se ambiente.
+
+-¿Pareja de enamorados?
+
+-Sí.
+
+-Los míos llevan 20 años casados y han venido aquí a encontrarse a sí mismos.
+
+-¡Qué tópico!- saltó uno algo más atrás- Los míos son un vampiro y una humana y él la trae aquí por primera vez para…
+
+Alguien le tiró una piedra y se oyeron unas cuantas carcajadas.
+
+-Oye, en serio, que lo mío es original…
+
+Le interrumpieron más risas.
+
+-¡Sí sí, y lo mío!- tenía en su mirada la pesadez de quien escribe por encargo- Mire, calle un rato y haga la cola. Si no le gusta, flote por ahí hasta encontrar algo más bonito y que enganche a sus lectores.
diff --git a/linux/dnie_rhel9.md b/linux/dnie_rhel9.md
new file mode 100644
index 00000000..f320975f
--- /dev/null
+++ b/linux/dnie_rhel9.md
@@ -0,0 +1,13 @@
+# Usando el DNIe en CentOS 9 Stream
+
+El RPM que anuncian para Fedora/SuSE funciona bien en CentOS 9 Stream.
+Pero depende de un paquete deprecado "pinentry-gtk2".
+No es necesario este paquete para el correcto funcionamiento, así que podemos usar `create-fake-rpm` para crear un paquete falso que nos permita instalar el RPM del DNI electrónico.
+
+```
+sudo dnf install create-fake-rpm
+create-fake-rpm --build pinentry-gtk2 pinentry-gtk2
+sudo dnf install noarch/fake-pinentry-gtk2-0-0.noarch.rpm https://www.dnielectronico.es/descargas/distribuciones_linux/libpkcs11-dnie-1.6.8-1.x86_64.rpm
+```
+
+Una vez hecho esto, cargar `/usr/lib64/libpkcs11-dnie.so` como nuevo "security device" en Firefox.
diff --git a/linux/running_commands_in_linux.adoc b/linux/running_commands_in_linux.adoc
new file mode 100644
index 00000000..e236ec58
--- /dev/null
+++ b/linux/running_commands_in_linux.adoc
@@ -0,0 +1,287 @@
+= Notes on Running Commands in Linux
+
+== Motivating Examples
+
+=== CWE-78: Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')
+
+The https://cwe.mitre.org/data/definitions/1337.html[2021 CWE Top 25 Most Dangerous Software Weaknesses] helps focus on the biggest security issues that developers face.
+Number 5 on that list is https://cwe.mitre.org/data/definitions/78.html[Improper Neutralization of Special Elements used in an OS Command ('OS Command Injection')].
+
+Software developers often write code that invokes other programs.
+For example, shell scripts tend to be mostly composed of invocations of programs such as `find`, `grep`, etc.
+Even software developed in languages such as Python, C, or Java often invokes other programs.
+
+Python software developers use the `subprocess` module to perform this task.
+Other languages provide similar facilities, with
+
+Consider the two following Python sessions to execute an equivalent to the `bash` statement `cat /etc/passwd`:
+
+----
+$ python3
+>>> import subprocess
+>>> subprocess.run(["cat", "/etc/passwd"])
+----
+
+----
+$ python3
+>>> import subprocess
+>>> subprocess.run("cat /etc/passwd", shell=True)
+----
+
+Both scripts use the same `run` function, with different values of the `shell` parameter (the `shell` parameter defaults to `False`).
+When executing a command with many arguments, `shell=True` seems to be terser.
+`a b c d e` is shorter and easier to read than `["a", "b", "c", "d", "e"]`.
+Readable code is easier to maintain, so a software developer could prefer the `shell=True` version.
+
+However, using `shell=True` can introduce the "OS Command Injection" weakness easily.
+
+Create a file named "injection.py" with the following contents:
+
+----
+import sys
+import subprocess
+
+subprocess.run(f"cat {sys.argv[1]}", shell=True)
+----
+
+This program uses the `cat` command to display the contents of a file.
+For example, if you run (using Python 3.6 or higher):
+
+----
+$ python3 injection.py /etc/passwd
+----
+
+The terminal shows the contents of the `/etc/passwd` file.
+
+However, if you run:
+
+----
+$ python3 injection.py '/etc/passwd ; touch injected'
+----
+
+The terminal shows the same file, but a file named `injected` also appears in the current directory.
+
+Create a file named "safe.py" with the following contents:
+
+----
+import sys
+import subprocess
+
+subprocess.run(["cat", sys.argv[1]])
+----
+
+Running `python3 safe.py /etc/passwd` has the same behavior as using `injection.py`.
+However, repeating the command that creates a file using `safe.py` results in:
+
+----
+$ python3 safe.py '/etc/passwd ; touch injected'
+cat: '/etc/passwd ; touch injected': No such file or directory
+----
+
+`injection.py` is vulnerable to "OS Command Injection" because it uses `shell=True`, whereas `safe.py` is not.
+
+If a malicious user can get strings such as `/etc/passwd ; touch injected` to code that uses `shell=True`, then the user can execute arbitrary code in the system.
+Code that does not handle user input might not be exposed to such issues, but user input might creep in and introduce unexpected vulnerabilities.
+Avoiding the use of `shell=True` and similar features can be safer than making sure that user input is correctly handled in all cases.
+
+=== Writing Shell Scripts that Handle Files with Spaces in Their Names
+
+Create a file called `backup.sh` with the following contents:
+
+----
+#!/bin/bash
+
+for a in $1/* ; do
+ cp $a $a.bak
+done
+----
+
+Run the following statements in the terminal to create a sample directory with files.
+
+----
+$ mkdir backup_example_1
+$ for a in $(seq 1 9) ; do echo $a >backup_example_1/$a ; done
+----
+
+These statements create the `backup_example_1` directory, and files named `1`, ..., `9`.
+
+The `backup.sh` script creates a copy of each file in a directory.
+If you run:
+
+----
+$ bash backup.sh backup_example_1/
+----
+
+Then the script will copy `1` to `1.bak`, and so on.
+
+However, if you create a new directory with files whose names have spaces:
+
+----
+$ mkdir backup_example_2
+$ for a in $(seq 1 9) ; do echo $a >backup_example_2/"file $a" ; done
+----
+
+Then the `backup.sh` script does not work correctly:
+
+----
+$ bash backup.sh backup_example_2/
+cp: target '1.bak' is not a directory
+----
+
+In order to fix the script, change the contents of `backup.sh` to:
+
+----
+#!/bin/bash
+
+for a in "$1"/* ; do
+ cp "$a" "$a.bak"
+done
+----
+
+
+== Background
+
+=== `int main(int argc, char *argv[])`
+
+Programs written in C for Linux define a function called `main` that is the entry point of the program.
+Documents such as http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2310.pdf[the _N2310_ draft of the C language standard] describe the `main` function.
+Page 11, section 5.1.2.2.1, _Program startup_, provides a common definition of `main`:
+
+----
+int main(int argc, char *argv[]) { /* ... */ }
+----
+
+The `argc` parameter contains the **c**ount of the arguments provided to the program.
+The `argv` parameter contains their **v**alues.
+
+Create a file named `argv.c` with the following contents:
+
+----
+#include <stdio.h>
+
+int main(int argc, char *argv[]) {
+ for(int i=0; i<argc; i++) {
+ printf("Argument %d -%s-\n", i, argv[i]);
+ }
+}
+----
+
+Compile the file running the following command:
+
+----
+$ cc argv.c
+----
+
+This produces an executable file named `a.out`.
+This executable will print the arguments you provide via the command line:
+
+----
+$ ./a.out
+Argument 0 -./a.out-
+----
+
+----
+$ ./a.out arg1 arg2 arg3
+Argument 0 -./a.out-
+Argument 1 -arg1-
+Argument 2 -arg2-
+Argument 3 -arg3-
+----
+
+Note that the first argument is the name of the executable file itself.
+
+Note that when using quoting, the program prints things like:
+
+----
+$ ./a.out "a b" c
+Argument 0 -./a.out-
+Argument 1 -a b-
+Argument 2 -c-
+----
+
+So the first argument is `a b` (without quotes).
+
+=== `exec(3)`
+
+UNIX-like operating systems provide the `exec` family of functions to invoke commands.
+`man 3 exec` describes the `exec` family of functions in Linux.
+Linux provides the `execl`, `execlp`, `execle`, `execv`, `execvp`, and `execvpe` functions.
+These functions allow us to execute a command from within a C program.
+
+Create a file named `execlp.c` with the following contents:
+
+----
+#include <stdlib.h>
+#include <unistd.h>
+
+int main() {
+ exit(execlp("cat", "cat", "/etc/passwd", NULL));
+}
+----
+
+Compile the file running the following command:
+
+----
+$ cc execlp.c
+----
+
+This produces an executable file named `a.out`.
+Execute it:
+
+----
+$ ./a.out
+----
+
+This is equivalent to running in a shell the statement `cat /etc/passwd`.
+
+This article does not describe the intricacies of the `exec` family of functions.
+However, let's analyze the call to `execlp`.
+
+The `exec` functions whose name contains a `p` look up the command to execute by searching for executables named like the first argument in the directories listed in the `PATH` environment variable.
+In the example, `execlp` looks up the `cat` executable in directories such as `/usr/bin`.
+
+The second argument is also the name of the program.
+
+[NOTE]
+====
+Note that in the preceding `argv.c` example, the zeroth argument is the name of the program being executed.
+
+Some executables in Linux systems are present under different names (using symbolic links).
+For example, `xzcat` is a symbolic link to `xz`.
+Running `xzcat` or `xz` runs the same executable file, but the executable uses the zeroth argument to change its behavior.
+
+This technique is a simple way to "share" code between similar programs.
+The https://www.busybox.net/about.html[BusyBox] project provides many common utilities, such as `ls` and `cat`, in a single executable.
+By sharing code among all utilities, the BusyBox executable is smaller.
+====
+
+The rest of the parameters to `execlp` are the arguments for the executable file.
+
+In a way, `exec` functions "call" the `main` function of other programs.
+The parameters to `exec` are "passed" to the `main` function.
+
+=== Shells
+
+Programs such as `bash` provide a way to execute other programs.
+When you type a statement such as `cat /etc/passwd`, `bash` parses the statement into a command to execute and arguments.
+Then, `bash` uses an `exec` function to run the program with arguments.
+
+The simplest `bash` statements are words separated by spaces, of the form `arg0 arg1 arg2 _..._ argn`.
+
+On such a statement, `bash` executes something like:
+
+----
+execlp(arg0, arg0, arg1, _..._, argn, NULL)
+----
+
+And the program will receive the string `arg0` as the zeroth argument, `arg1` as the first argument, and so forth.
+
+However, using `cat` to view the contents of files, the user might want to view a file whose name contains spaces.
+
+The statement `cat a b` has two arguments: `a` and `b`.
+For each argument, `cat` prints the file of that name.
+So the `cat a b` statement prints the contents of the `a` and `b` files, not of a file named `a b`.
+
+== TODO
+
+* SSH particularities: https://news.ycombinator.com/item?id=36722570[]
diff --git a/personal_infra/README.md b/personal_infra/README.md
new file mode 100644
index 00000000..a3249853
--- /dev/null
+++ b/personal_infra/README.md
@@ -0,0 +1,56 @@
+# Personal infra
+
+This is a collection of files I use setting up my personal infrastructure.
+This is a work in progress, as I am redoing a bit how I do configuration management.
+The main source is in a private repo, but I put here as much material as I can make public.
+Inventory, vaults, etc. remain in the private repo.
+
+## Ansible
+
+### Initial setup
+
+Symlink everything in this directory into your root infrastructure directory.
+
+Create an `inventory` file.
+
+Run `./setup_venv` to create a virtual environment.
+
+Create `vault_password` with a vault password.
+
+### Usage
+
+Run `. .venv/bin/activate` to activate the virtual environment.
+
+Run Ansible commands normally.
+
+## Ansible/Puppet integration
+
+I prefer using Ansible for orchestration, and Puppet for configuration management.
+
+* `up.py` compiles Puppet catalogs without a Puppet Server.
+* `pseudo_resource_exporter.py` simulates exported resources on the catalogs generated by `up.py`.
+ You can use this script as a template to implement your own catalog manipulations.
+* `playbooks/roles/apply_puppet/` uses `up.py` to apply Puppet to Ansible hosts.
+ This script collects facts, adds the Ansible inventory to Hiera (so you can use Ansible inventory data to parameterize Puppet), compiles the catalogs, ships them to Ansible nodes, and executes Puppet.
+
+Except for exported resources, which work differently, this setup has most of the benefits of Puppet Server without having to run a Puppet Server and PuppetDB.
+
+Being able to simulate exported resources without a master lets you use the `nagios_core` module without infrastructure.
+With the `nagios_core` module, Puppet code, such as a module which sets up a web server, can define "inline" Puppet monitoring for the managed resources.
+
+## Puppet
+
+For the moment, I'm managing the following distros using this setup.
+
+| Distro | Puppet version |
+| --------------- | ------------------ |
+| Debian 11 (PVE) | Puppet 5.5 |
+| EL8 | Puppet 6.26 (EPEL) |
+| EL9 | Puppet 7.20 (EPEL) |
+
+I perform catalog compilation on my laptop running EL9.
+Although [support across Puppet 5.5-7 is not documented](https://www.puppet.com/docs/puppet/7/platform_lifecycle.html#primary-agent-compatibility), catalogs still seem to be compatible.
+
+## Misc
+
+* [Podman](podman.md)
diff --git a/personal_infra/ansible.cfg b/personal_infra/ansible.cfg
new file mode 100644
index 00000000..c7e2ae61
--- /dev/null
+++ b/personal_infra/ansible.cfg
@@ -0,0 +1,7 @@
+[defaults]
+inventory = inventory
+vault_password_file = vault_password
+callbacks_enabled = ansible.posix.profile_tasks
+
+# TODO: Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host's fingerprint to your known_hosts file to manage this host.
+host_key_checking = False
diff --git a/personal_infra/k8s/base/kustomization.yml b/personal_infra/k8s/base/kustomization.yml
new file mode 100644
index 00000000..05b0b21e
--- /dev/null
+++ b/personal_infra/k8s/base/kustomization.yml
@@ -0,0 +1,32 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/cloud/deploy.yaml
+ - https://github.com/alexpdp7/talos-check/raw/main/manifest.yaml
+
+patches:
+ - patch: |-
+ - op: add
+ path: "/metadata/annotations/ingressclass.kubernetes.io~1is-default-class"
+      value: "true"
+ target:
+ kind: IngressClass
+ name: nginx
+ - patch: |
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: ingress-nginx
+ labels:
+ pod-security.kubernetes.io/enforce: privileged
+ - patch: |
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+ spec:
+ template:
+ spec:
+ hostNetwork: true
diff --git a/personal_infra/playbooks/patch_rpc_svcgssd_service.yaml b/personal_infra/playbooks/patch_rpc_svcgssd_service.yaml
new file mode 100644
index 00000000..683cb045
--- /dev/null
+++ b/personal_infra/playbooks/patch_rpc_svcgssd_service.yaml
@@ -0,0 +1,21 @@
+---
+- hosts: patch_rpc_svcgssd_service
+ collections: freeipa.ansible_freeipa
+ vars:
+ ansible_user: alex
+ ansible_become: True
+ tasks:
+ - name: del nfs service
+ command: ipa service-del nfs/{{ inventory_hostname }}
+ ignore_errors: True
+ - name: create nfs service
+ command: ipa service-add nfs/{{ inventory_hostname }}
+ - name: clean keytab
+ command: ipa-rmkeytab -p nfs/{{ inventory_hostname }} -k /etc/krb5.keytab
+ ignore_errors: True
+ - name: get keytab
+ command: ipa-getkeytab -p nfs/{{ inventory_hostname }} -k /etc/krb5.keytab
+ - name: restart
+ service:
+ name: rpc-svcgssd.service
+ state: restarted
diff --git a/personal_infra/playbooks/roles/apply_puppet/tasks/main.yml b/personal_infra/playbooks/roles/apply_puppet/tasks/main.yml
new file mode 100644
index 00000000..2d6bdb23
--- /dev/null
+++ b/personal_infra/playbooks/roles/apply_puppet/tasks/main.yml
@@ -0,0 +1,127 @@
+---
+- name: clean puppet build directory
+ local_action:
+ module: file
+ path: "{{ inventory_dir }}/build/puppet"
+ state: absent
+ run_once: True
+ tags: puppet_fast
+- name: create puppet build directories
+ local_action:
+ module: file
+ path: "{{ inventory_dir }}/{{ item }}"
+ state: directory
+ loop:
+ - build/puppet/global_vars
+ - build/puppet/host_vars
+ - build/puppet/facts
+ run_once: True
+ tags: puppet_fast
+- name: create puppet build host vars directories
+ local_action:
+ module: file
+ path: "{{ inventory_dir }}/build/puppet/host_vars/{{ inventory_hostname }}"
+ state: directory
+ tags: puppet_fast
+- name: dump hostvars
+ local_action:
+ module: copy
+ dest: "{{ inventory_dir }}/build/puppet/global_vars/hostvars.json"
+ content: "{'hostvars': {{ hostvars }} }"
+ run_once: True
+ tags: puppet_fast
+- name: dump this
+ local_action:
+ module: copy
+ dest: "{{ inventory_dir }}/build/puppet/host_vars/{{ inventory_hostname }}/this.json"
+ content: "{{ hostvars[inventory_hostname] }}"
+ tags: puppet_fast
+- name: install epel
+ package:
+ name: epel-release
+ when: ansible_distribution_file_variety == 'RedHat'
+- name: install packages
+ package:
+ name:
+ - puppet
+ - unzip
+- name: get facts
+ command: facter -y
+ register: facter_output
+ tags: puppet_fast
+- name: dump facts
+ local_action:
+ module: copy
+ dest: "{{ inventory_dir }}/build/puppet/facts/{{ inventory_hostname }}.yaml"
+ content: "{{ facter_output.stdout }}"
+ delegate_to: 127.0.0.1
+ tags: puppet_fast
+- name: compile puppet catalogs
+ local_action:
+ module: command
+ cmd: "{{ inventory_dir }}/up.py {{ inventory_dir }}/build/puppet {{ inventory_dir }}/puppet/modules {{ inventory_dir }}/puppet/site {% for host in ansible_play_batch %}{{ host }} {% endfor %}"
+ tags: puppet_fast
+ run_once: True
+- name: simulate exported resources
+ local_action:
+ module: command
+ cmd: "./pseudo_resource_exporter.py"
+ chdir: "{{ inventory_dir }}"
+ tags: puppet_fast
+ run_once: True
+- name: package catalog
+ archive:
+ path: "{{ inventory_dir }}/build/puppet/build/output/{{ inventory_hostname }}"
+ dest: "{{ inventory_dir }}/build/puppet/puppet_catalog_{{ inventory_hostname }}.zip"
+ format: zip
+ delegate_to: 127.0.0.1
+ tags: puppet_fast
+- name: create remote temporary directory
+ tempfile:
+ state: directory
+ register: remote_temp
+ tags: puppet_fast
+- name: unpackage catalog
+ unarchive:
+ src: "{{ inventory_dir }}/build/puppet/puppet_catalog_{{ inventory_hostname }}.zip"
+ dest: "{{ remote_temp.path }}"
+ tags: puppet_fast
+- name: preview catalog
+ command: puppet apply --catalog {{ remote_temp.path }}/{{ inventory_hostname }}/catalog.json --noop --test --modulepath={{ remote_temp.path }}/{{ inventory_hostname }}/modules/
+ register: catalog_apply
+ tags: puppet_fast
+- name: display catalog preview stdout
+ debug:
+ msg: "{{ catalog_apply.stdout_lines }}"
+ tags: puppet_fast
+- name: display catalog preview stderr
+ debug:
+ msg: "{{ catalog_apply.stderr_lines }}"
+ tags: puppet_fast
+- name: pause to confirm
+ pause:
+ tags: pause
+- name: apply catalog
+ command: puppet apply --catalog {{ remote_temp.path }}/{{ inventory_hostname }}/catalog.json --modulepath={{ remote_temp.path }}/{{ inventory_hostname }}/modules/
+ register: catalog_apply
+ tags: puppet_fast
+- name: display catalog apply stdout
+ debug:
+ msg: "{{ catalog_apply.stdout_lines }}"
+ tags: puppet_fast
+- name: display catalog apply stderr
+ debug:
+ msg: "{{ catalog_apply.stderr_lines }}"
+ tags: puppet_fast
+- name: clean up remote temporary directory
+ file:
+ state: absent
+ path: "{{ remote_temp.path }}"
+ tags: puppet_fast
+- name: clean up local temporary directory
+ file:
+ state: absent
+ path: "{{ inventory_dir }}/build/puppet/"
+ delegate_to: 127.0.0.1
+ tags: puppet_fast
+ run_once: True
diff --git a/personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml b/personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml
new file mode 100644
index 00000000..11080d89
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- name: install ipsilon (if this task fails, run kinit as root)
+ command: ipsilon-server-install --hostname {{ ipsilon.hostname }} --ipa yes --openidc yes --admin-user {{ ipsilon.admin_user }} --info-sssd=yes --form=yes --root-instance
+ args:
+ creates: /etc/ipsilon/idp
+- name: fix permissions
+ command: chown -R ipsilon:ipsilon /var/lib/ipsilon/ /etc/ipsilon/
+- name: create public host
+ shell: ipa host-find {{ ipsilon.hostname }} || ipa host-add {{ ipsilon.hostname }}
+- name: create public service
+ shell: ipa service-find HTTP/{{ ipsilon.hostname }} || ipa service-add HTTP/{{ ipsilon.hostname }}
+- name: add public service to keytab
+ shell: klist -k /etc/httpd/conf/http.keytab | grep HTTP/{{ ipsilon.hostname }} || ipa-getkeytab -p HTTP/{{ ipsilon.hostname }} -k /etc/httpd/conf/http.keytab
+- name: restart httpd
+ service:
+ name: httpd
+ state: restarted
diff --git a/personal_infra/playbooks/roles/deploy_ragent/files/get.py b/personal_infra/playbooks/roles/deploy_ragent/files/get.py
new file mode 100755
index 00000000..d0a78618
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ragent/files/get.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3
+# Resolve a ragent GitHub Actions run and print its artifact download URLs as JSON.
+import json
+import sys
+import urllib.request
+
+run_id = sys.argv[1]  # a GitHub Actions run id, or "last" to pick the most recent master run
+
+if run_id == "last":
+    runs = json.loads(urllib.request.urlopen("https://api.github.com/repos/alexpdp7/ragent/actions/runs?branch=master").read().decode('utf8'))
+    run_id = runs["workflow_runs"][0]["id"]  # assumes the API lists runs newest first (GitHub's default ordering) — TODO confirm
+
+run = json.loads(urllib.request.urlopen("https://api.github.com/repos/alexpdp7/ragent/actions/runs/%s" % run_id).read().decode('utf8'))
+artifacts = json.loads(urllib.request.urlopen(run['artifacts_url']).read().decode('utf8'))['artifacts']
+urls = {a['name']: 'https://api.github.com/repos/alexpdp7/ragent/actions/artifacts/%s/zip' % a["id"] for a in artifacts}  # maps artifact name -> API zip download URL
+print(json.dumps(urls))  # consumed by the deploy_ragent role ("get url" task)
diff --git a/personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml b/personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml
new file mode 100644
index 00000000..72bd5bed
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+- name: get url
+ local_action:
+ module: command
+ cmd: "{{ inventory_dir }}/playbooks/roles/deploy_ragent/files/get.py {{ run_id|default('last') }}"
+ run_once: True
+ register: url
+- name: download package
+ uri:
+ url: "{{ url.stdout|from_json|json_query(ragent['package_key'][ansible_os_family][ansible_distribution_major_version][ansible_architecture]) }}"
+ dest: /tmp/ragent_downloaded_package.zip
+    user: "{{ ragent_download['user'] }}"
+ password: "{{ ragent_download['token'] }}"
+ force_basic_auth: yes
+ follow_redirects: all
+- name: unzip
+ unarchive:
+ src: /tmp/ragent_downloaded_package.zip
+ dest: /tmp
+ list_files: yes
+ remote_src: yes
+ register: unzipped
+- name: remove previous
+ package:
+ name: ragent
+ state: absent
+- name: install package
+ command: "{{ ragent['install_command'][ansible_os_family] }} /tmp/{{ unzipped.files[0] }}"
+- name: configure service
+ service:
+ name: ragent
+ enabled: yes
+ state: restarted
+- name: open firewall
+ command: firewall-cmd --permanent --add-port=21488/tcp
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version in ("7", "8", "9") and ansible_virtualization_type != "lxc" and not network.disable_firewall|default(False)
+- name: reload firewall
+ command: firewall-cmd --reload
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version in ("7", "8", "9") and ansible_virtualization_type != "lxc" and not network.disable_firewall|default(False)
+- name: force check
+ community.general.nagios:
+ action: forced_check
+ host: "{{ inventory_hostname }}"
+ service: check_ragent
+ delegate_to: nagios.h1.int.pdp7.net
diff --git a/personal_infra/playbooks/roles/deploy_ragent/vars/main.yml b/personal_infra/playbooks/roles/deploy_ragent/vars/main.yml
new file mode 100644
index 00000000..0ac33330
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ragent/vars/main.yml
@@ -0,0 +1,24 @@
+---
+ragent:
+ package_key:
+ Debian:
+ '10':
+ x86_64: debian_buster
+ armv7l: debian_buster_rpi3
+ '11': # apparently the buster package works for bullseye
+ x86_64: debian_buster
+ armv7l: debian_buster_rpi3
+ '20': # and it also works for Ubuntu 20.04
+ x86_64: debian_buster
+ RedHat:
+ '7':
+ x86_64: el7
+ '8':
+ x86_64: el8
+ aarch64: el8_rpi4
+ '9':
+ x86_64: el8
+ aarch64: el8_rpi4
+ install_command:
+ Debian: "dpkg -i"
+ RedHat: "rpm -i"
diff --git a/personal_infra/playbooks/roles/join_ipa/handlers/main.yml b/personal_infra/playbooks/roles/join_ipa/handlers/main.yml
new file mode 100644
index 00000000..da74d5ee
--- /dev/null
+++ b/personal_infra/playbooks/roles/join_ipa/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: restart_container
+ delegate_to: "{{ proxmox.host }}"
+ command: pct reboot {{ proxmox.id }}
+
diff --git a/personal_infra/playbooks/roles/join_ipa/tasks/main.yml b/personal_infra/playbooks/roles/join_ipa/tasks/main.yml
new file mode 100644
index 00000000..0fd8f5d0
--- /dev/null
+++ b/personal_infra/playbooks/roles/join_ipa/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+- name: join
+ shell: getent passwd admin || ipa-client-install -U {% if 'lxc' in group_names %} -N {% endif %} --domain={{ freeipa.domain }} -w {{ freeipa.join_password|trim }} --mkhomedir -p {{ freeipa.join_user }}
+# proxmox reorders the configuration file and misbehaves quite a bit :(
+- name: proxmox idmaps
+ when: "'lxc' in group_names"
+ block:
+ - name: set id mappings copy out
+ copy:
+ remote_src: yes
+ src: /etc/pve/lxc/{{ proxmox.id }}.conf
+ dest: /tmp/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: read conf
+ slurp:
+ src: /tmp/{{ proxmox.id }}.conf
+ register: proxmox_conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: set id mappings
+ blockinfile:
+ path: /tmp/{{ proxmox.id }}.conf
+ block: |
+ lxc.idmap = u 0 100000 65536
+ lxc.idmap = g 0 100000 65536
+ lxc.idmap = u {{ freeipa.idrange_start }} {{ freeipa.idrange_start }} {{ freeipa.idrange_size }}
+ lxc.idmap = g {{ freeipa.idrange_start }} {{ freeipa.idrange_start }} {{ freeipa.idrange_size }}
+ when: not proxmox_conf['content']|b64decode is search('lxc.idmap') and not proxmox.privileged|default(False)
+ notify: restart_container
+ delegate_to: "{{ proxmox.host }}"
+ - name: set id mappings copy in
+ command: cp /tmp/{{ proxmox.id }}.conf /etc/pve/lxc/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
diff --git a/personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml b/personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml
new file mode 100644
index 00000000..4a5d37b6
--- /dev/null
+++ b/personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml
@@ -0,0 +1,92 @@
+---
+- name: download template
+ command: "pveam download local {{ flavors[proxmox.flavor].template }}_amd64.tar.xz"
+ args:
+ creates: "/var/lib/vz/template/cache/{{ flavors[proxmox.flavor].template }}_amd64.tar.xz"
+ delegate_to: "{{ proxmox.host }}"
+- name: create host
+ command: >
+ pct create {{ proxmox.id }} "/var/lib/vz/template/cache/{{ flavors[proxmox.flavor].template }}_amd64.tar.xz"
+ --hostname {{ inventory_hostname }}
+ --storage local-zfs
+ -net0 name=eth0,bridge=vmbr0,ip=dhcp
+ -onboot 1
+ {% if not proxmox.privileged|default(false) %} -unprivileged {% endif %}
+ {% if proxmox.features|default(None) %} -features {{ proxmox.features }} {% endif %}
+ {% if proxmox.memory|default(None) %} -memory {{ proxmox.memory }} {% endif %}
+ {% for disk in proxmox.disks|default([]) %}
+ --mp{{ disk.index }} volume={{ disk.storage }}:{{ disk.size_gb }},mp={{ disk.path }}
+ {% endfor %}
+ -rootfs local-zfs:{{ proxmox.disk|default(4) }}
+ --password {{ ansible_password|trim }}
+ --nameserver {{ hostvars[proxmox.host].network.self_internal_ip }}
+ --ostype {{ flavors[proxmox.flavor].pct_ostype }}
+ args:
+ creates: "/etc/pve/lxc/{{ proxmox.id }}.conf"
+ delegate_to: "{{ proxmox.host }}"
+- name: allow backups
+ shell: "zfs allow -u backups mount,send,hold,snapshot,destroy rpool/data/$(pct config {{ proxmox.id }} | grep mp{{ item.index }} | cut -d , -f 1 | cut -d : -f 3)"
+ delegate_to: "{{ proxmox.host }}"
+ loop: "{{ proxmox.disks|default([]) }}"
+- name: proxmox extra
+ when: proxmox.extra|default(None)
+ block:
+ - name: set proxmox extra copy out
+ copy:
+ remote_src: yes
+ src: /etc/pve/lxc/{{ proxmox.id }}.conf
+ dest: /tmp/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: read conf
+ slurp:
+ src: /tmp/{{ proxmox.id }}.conf
+ register: proxmox_conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: set proxmox extra
+ lineinfile:
+ path: /tmp/{{ proxmox.id }}.conf
+ line: "{{ item }}"
+ loop: "{{ proxmox.extra }}"
+ delegate_to: "{{ proxmox.host }}"
+ - name: set proxmox extra copy in
+ command: cp /tmp/{{ proxmox.id }}.conf /etc/pve/lxc/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
+# https://bugzilla.proxmox.com/show_bug.cgi?id=4515
+- name: set hosts
+ copy:
+ content: |
+ 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+ ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+
+ {% if network is defined and network.ip is defined %}
+ {{ network.ip }} {{ inventory_hostname }} {{ inventory_hostname|regex_search('^[^.]*') }}
+ {% endif %}
+ dest: /rpool/data/subvol-{{ proxmox.id }}-disk-0/etc/hosts
+ delegate_to: "{{ proxmox.host }}"
+- name: prevent proxmox from manipulating hosts
+ file:
+ path: /etc/.pve-ignore.hosts
+ state: touch
+ delegate_to: "{{ proxmox.host }}"
+- name: start host
+ shell: "{ pct status {{ proxmox.id }} | grep running ; } || pct start {{ proxmox.id }}"
+ delegate_to: "{{ proxmox.host }}"
+- name: update packages to prevent automatic updates causing issues later. retry until network available
+ command: pct exec {{ proxmox.id }} -- dnf update -y
+ retries: 10
+ delay: 1
+ until: result.rc == 0
+ register: result
+ delegate_to: "{{ proxmox.host }}"
+- name: install ssh
+ command: pct exec {{ proxmox.id }} -- dnf install -y openssh-server
+ delegate_to: "{{ proxmox.host }}"
+- name: permit root password
+ lineinfile:
+ path: /rpool/data/subvol-{{ proxmox.id }}-disk-0/etc/ssh/sshd_config
+ regexp: "^#?PermitRootLogin"
+ line: "PermitRootLogin yes"
+ delegate_to: "{{ proxmox.host }}"
+- name: enable ssh
+ command: pct exec {{ proxmox.id }} -- systemctl enable --now sshd
+ delegate_to: "{{ proxmox.host }}"
diff --git a/personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml b/personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml
new file mode 100644
index 00000000..b9bdcc6a
--- /dev/null
+++ b/personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml
@@ -0,0 +1,8 @@
+---
+flavors:
+ el8:
+ template: rockylinux-8-default_20210929
+ pct_ostype: centos
+ el9:
+ template: rockylinux-9-default_20221109
+ pct_ostype: centos
diff --git a/personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml b/personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml
new file mode 100644
index 00000000..dbedc734
--- /dev/null
+++ b/personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: create DNS entries
+ local_action:
+ module: amazon.aws.route53
+ zone: "{{ network.dns_zone }}"
+ record: "{{ item }}"
+ type: CNAME
+ value: "{{ network.public_hostname }}"
+ wait: true
+ state: present
+ loop: "{{ network.proxmox.proxy_hosts }}"
diff --git a/personal_infra/playbooks/roles/talos/README.md b/personal_infra/playbooks/roles/talos/README.md
new file mode 100644
index 00000000..4ef4e8de
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/README.md
@@ -0,0 +1,65 @@
+# Talos Ansible role
+
+This role helps provision Talos clusters using Ansible.
+Currently, this role only supports VMs on Proxmox and single node clusters.
+
+## Variables
+
+### Host variables
+
+```
+proxmox:
+ id: 123
+ host: inventory_name_of_proxmox_host
+ cores: n
+ memory: in MB
+ disk: in GB
+network:
+ ip: x.y.z.t
+talos_host:
+ talos_cluster: cluster_name
+ install_disk: /dev/vda
+```
+
+### Group variables
+
+```
+talos_clusters:
+ cluster_name: # you can have multiple clusters
+ endpoint: https://host_name:6443
+```
+
+## Talos configuration
+
+Only the Talos secret for the cluster must be stored in version control.
+You must create the secret and vault it:
+
+```
+$ talosctl gen secrets -o talos/${cluster_name}-secrets.yaml
+$ ansible-vault encrypt talos/${cluster_name}-secrets.yaml
+```
+
+## Role
+
+With the above configuration, the role will:
+
+* Create the VM in Proxmox.
+Until Talos includes https://github.com/siderolabs/talos/pull/5897 , [the playbook fishes the IP from the dnsmasq Proxmox instance](tasks/proxmox.yml#L13) by using [this script](files/get-ip).
+* Sets up Talos.
+* Fetches the kubeconfig.
+* Deploys kustomizations in `k8s/base`.
+See [my kustomizations](../../../k8s/base/).
+
+## Updates
+
+To update Talos, update `talosctl`, then:
+
+```
+$ talosctl upgrade --preserve --talosconfig talos/talosconfig-k8s-test.example --nodes k8s-test.example.com --wait
+```
+
+To update K8S:
+
+```
+$ talosctl upgrade-k8s --talosconfig talos/talosconfig-k8s-test.example --nodes k8s-test.example.com
+```
diff --git a/personal_infra/playbooks/roles/talos/files/get-ip b/personal_infra/playbooks/roles/talos/files/get-ip
new file mode 100644
index 00000000..a96dab61
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/files/get-ip
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+set -ueo pipefail
+
+grep $(cat /etc/pve/qemu-server/$1.conf | grep net0: | sed 's/^.*virtio=\([0-9A-F:]*\),.*$/\1/' | tr '[:upper:]' '[:lower:]') /var/lib/misc/dnsmasq.leases | cut -d " " -f 3
diff --git a/personal_infra/playbooks/roles/talos/tasks/main.yaml b/personal_infra/playbooks/roles/talos/tasks/main.yaml
new file mode 100644
index 00000000..44189484
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/tasks/main.yaml
@@ -0,0 +1,111 @@
+---
+- name: generate controlplane patch
+ copy:
+ content: |
+ cluster:
+ allowSchedulingOnControlPlanes: true
+ machine:
+ install:
+ disk: {{ talos_host.install_disk }}
+ network:
+ hostname: {{ inventory_hostname }}
+ nameservers:
+ - {{ hostvars[proxmox.host].network.self_internal_ip }}
+ interfaces:
+ - interface: eth0
+ addresses:
+ - {{ network.ip }}/24
+ routes:
+ - network: 0.0.0.0/0
+ gateway: {{ hostvars[proxmox.host].network.self_internal_ip }}
+ nodeLabels:
+ role: ingress-controller
+ dest: "{{ inventory_dir }}/talos/host-{{ inventory_hostname }}.patch"
+ delegate_to: 127.0.0.1
+
+- name: generate controlplane config
+ shell:
+ cmd: talosctl gen config -t controlplane -o talos/host-{{ inventory_hostname }}.yaml --with-secrets <(ansible-vault view talos/{{ talos_host.talos_cluster }}-secrets.yaml) --config-patch-control-plane @talos/host-{{ inventory_hostname }}.patch {{ talos_host.talos_cluster }} {{ talos_clusters[talos_host.talos_cluster].endpoint }} --force
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+
+- name: generate talosconfig
+ shell:
+ cmd: talosctl gen config -t talosconfig -o talos/talosconfig-{{ talos_host.talos_cluster }} --with-secrets <(ansible-vault view talos/{{ talos_host.talos_cluster }}-secrets.yaml) {{ talos_host.talos_cluster }} {{ talos_clusters[talos_host.talos_cluster].endpoint }} --force
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+
+- name: set talosconfig endpoint
+ shell:
+ cmd: talosctl --talosconfig=talos/talosconfig-{{ talos_host.talos_cluster }} config endpoint {{ inventory_hostname }}
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+
+- name: get kubeconfig
+ command:
+ cmd: talosctl kubeconfig --talosconfig talos/talosconfig-{{ talos_host.talos_cluster }} --nodes {{ inventory_hostname }} -f
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ throttle: 1
+
+- name: check node ready
+ k8s_info:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ kind: Node
+ wait: true
+ wait_condition:
+ status: True
+ type: Ready
+ delegate_to: 127.0.0.1
+ register: nodes
+ ignore_errors: true
+
+- name: setup proxmox hosts
+ import_tasks: proxmox.yml
+ when: "'k8s_proxmox' in group_names and ('resources' not in nodes or nodes.resources|length == 0)"
+
+- name: apply config
+ command:
+ cmd: talosctl apply-config --insecure --nodes {{ ip.stdout }} --file talos/host-{{ inventory_hostname }}.yaml
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ when: "not 'resources' in nodes or nodes.resources|length == 0"
+
+- name: bootstrap cluster
+ command:
+ cmd: talosctl bootstrap --nodes {{ inventory_hostname }} --talosconfig talos/talosconfig-{{ talos_host.talos_cluster }}
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ register: bootstrap
+ until: bootstrap.rc == 0
+ retries: 12
+ delay: 1
+ when: "not 'resources' in nodes or nodes.resources|length == 0"
+
+- name: get kubeconfig
+ command:
+ cmd: talosctl kubeconfig --talosconfig talos/talosconfig-{{ talos_host.talos_cluster }} --nodes {{ inventory_hostname }} -f
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ when: "not 'resources' in nodes or nodes.resources|length == 0"
+ throttle: 1
+
+- name: wait node ready
+ k8s_info:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ kind: Node
+ wait: true
+ wait_condition:
+ status: True
+ type: Ready
+ delegate_to: 127.0.0.1
+ register: nodes
+ until: nodes.resources is defined and nodes.resources|length > 0
+ retries: 35
+ delay: 1
+
+- name: deploy kustomizations
+ k8s:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ definition: "{{ lookup('kubernetes.core.kustomize', dir='k8s/base') }}"
+ delegate_to: 127.0.0.1
diff --git a/personal_infra/playbooks/roles/talos/tasks/proxmox.yml b/personal_infra/playbooks/roles/talos/tasks/proxmox.yml
new file mode 100644
index 00000000..21bc4c83
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/tasks/proxmox.yml
@@ -0,0 +1,19 @@
+---
+- name: download iso
+ get_url:
+ url: https://github.com/siderolabs/talos/releases/download/v1.4.4/talos-amd64.iso
+ dest: /var/lib/vz/template/iso/talos-amd64-v1.4.4.iso
+ delegate_to: "{{ proxmox.host }}"
+ run_once: True
+- name: create vm
+ command: qm create {{ proxmox.id }} --cdrom local:iso/talos-amd64-v1.4.4.iso --cores 12 --memory 8192 --name {{ inventory_hostname }} --onboot 1 --start 1 --virtio0 local-zfs:32 --cpu host --net0 virtio,bridge=vmbr0
+ delegate_to: "{{ proxmox.host }}"
+ args:
+ creates: /etc/pve/qemu-server/{{ proxmox.id }}.conf
+- name: get ip
+ script: get-ip {{ proxmox.id }}
+ delegate_to: "{{ proxmox.host }}"
+ register: ip
+ until: ip.rc == 0
+ retries: 20
+ delay: 1
diff --git a/personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml b/personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml
new file mode 100644
index 00000000..da205cf4
--- /dev/null
+++ b/personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: send root mail
+ command:
+ cmd: mail -s "ansible test {{ inventory_hostname }}" root
+ stdin: test
diff --git a/personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml b/personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml
new file mode 100644
index 00000000..9aa2dc16
--- /dev/null
+++ b/personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: render manifests
+ command: kubectl run --context {{ context }} -q -n default -i --rm builder --image quay.io/alexpdp7/zqxjkcrud:master --restart=Never --image-pull-policy=Always --command -- zqxjkcrud-manifest-builder
+ args:
+ stdin: "{{ definition|to_yaml }}"
+ register: manifestbuild
+ delegate_to: 127.0.0.1
+- k8s:
+ context: "{{ context }}"
+ state: present
+ apply: true
+ definition: "{{ manifestbuild.stdout | from_yaml_all }}"
+ delegate_to: 127.0.0.1
diff --git a/personal_infra/playbooks/setup_blog_keys.yaml b/personal_infra/playbooks/setup_blog_keys.yaml
new file mode 100644
index 00000000..b664bcb8
--- /dev/null
+++ b/personal_infra/playbooks/setup_blog_keys.yaml
@@ -0,0 +1,23 @@
+---
+- hosts: h1.pdp7.net
+ tasks:
+ - name: get public cert
+ ansible.builtin.slurp:
+ src: "/etc/apache2/md/domains/blog.pdp7.net/pubcert.pem"
+ register: public_cert
+ - name: get private key
+ ansible.builtin.slurp:
+ src: "/etc/apache2/md/domains/blog.pdp7.net/privkey.pem"
+ register: private_key
+ - k8s:
+ context: "admin@k8s-test.h1"
+ state: present
+ definition:
+ kind: Secret
+ metadata:
+ namespace: blog
+ name: tls-gemini
+ data:
+ tls.crt: "{{ public_cert.content }}"
+ tls.key: "{{ private_key.content }}"
+ delegate_to: 127.0.0.1
diff --git a/personal_infra/playbooks/setup_tinc_keys.yaml b/personal_infra/playbooks/setup_tinc_keys.yaml
new file mode 100644
index 00000000..bdd67116
--- /dev/null
+++ b/personal_infra/playbooks/setup_tinc_keys.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: tinc
+ tasks:
+ - name: create tinc folder
+ file:
+ path: /etc/ansible/tinc/
+ state: directory
+ recurse: yes
+ - name: generate key
+ command: openssl genrsa -out /etc/ansible/tinc/private.pem 2048
+ args:
+ creates: /etc/ansible/tinc/private.pem
+ - name: generate public
+ command: openssl rsa -in /etc/ansible/tinc/private.pem -outform PEM -pubout -out /etc/ansible/tinc/public_{{ network.public_hostname }}.pem
+ args:
+ creates: /etc/ansible/tinc/public_{{ network.public_hostname }}.pem
+ - name: get public
+ fetch:
+ src: "/etc/ansible/tinc/public_{{ network.public_hostname }}.pem"
+ dest: /tmp/
+ flat: yes
+ - name: distribute public
+ copy:
+ src: "/tmp/public_{{ hostvars[item].network.public_hostname }}.pem"
+ dest: "/etc/ansible/tinc/"
+ with_inventory_hostnames:
+ - tinc
diff --git a/personal_infra/playbooks/site.yaml b/personal_infra/playbooks/site.yaml
new file mode 100644
index 00000000..b9852a44
--- /dev/null
+++ b/personal_infra/playbooks/site.yaml
@@ -0,0 +1,63 @@
+---
+- name: create lxc
+ hosts: lxc
+ gather_facts: false
+ tags: create_lxc
+ roles:
+ - proxmox_create_lxc
+
+- name: complete provision
+ hosts: all,!k8s
+ tags: puppet
+ roles:
+ - apply_puppet
+
+- name: join ipa
+ hosts: join_ipa
+ tags: join_ipa
+ roles:
+ - join_ipa
+
+- name: deploy ragent
+ hosts: all,!k8s
+ tags: deploy_ragent
+ roles:
+ - deploy_ragent
+
+- name: verify root mail
+ hosts: all,!k8s
+ tags: verify_root_mail
+ roles:
+ - verify_root_mail
+
+- name: create k8s
+ hosts: k8s
+ tags: k8s
+ gather_facts: false
+ roles:
+ - talos
+
+- name: deploy ipsilon
+ hosts: ipsilon
+ tags: ipsilon
+ roles:
+ - deploy_ipsilon
+
+- name: proxmox route 53
+ hosts: proxmox
+ tags: proxmox_route_53
+ gather_facts: false
+ roles:
+ - proxmox_route_53
+
+- name: deploy weight
+ hosts: k8s-prod.h1.int.pdp7.net
+ tags:
+ - k8s
+ - weight
+ gather_facts: false
+ roles:
+ - role: zqxjkcrud
+ vars:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ definition: "{{ weight }}"
diff --git a/personal_infra/podman.md b/personal_infra/podman.md
new file mode 100644
index 00000000..703b6edf
--- /dev/null
+++ b/personal_infra/podman.md
@@ -0,0 +1,26 @@
+# Podman
+
+You can create LXC containers in Proxmox (using ZFS) that can run rootless Podman.
+
+The [`proxmox_create_lxc`](playbooks/roles/proxmox_create_lxc/) role can create the LXC container with the necessary options with the following configuration:
+
+```
+proxmox:
+...
+ privileged: true
+ features: fuse=1,nesting=1
+ extra:
+ - "lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file"
+ - "lxc.cgroup2.devices.allow: c 10:200 rwm"
+```
+
+The [`podman`](puppet/modules/podman/) Puppet module can add the necessary configuration:
+
+```
+class {'podman':
+ user => 'your_username',
+ storage_driver => 'zfs',
+}
+```
+
+This module configures subuids/subgids, but until you reboot, you will get some warnings using Podman.
diff --git a/personal_infra/pseudo_resource_exporter.py b/personal_infra/pseudo_resource_exporter.py
new file mode 100755
index 00000000..9ef498bb
--- /dev/null
+++ b/personal_infra/pseudo_resource_exporter.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+import json
+import pathlib
+import subprocess
+
+
+"""
+This is an ugly hack.
+
+Puppet exported resources are very nice to generate monitoring configuration
+along with your Puppet resources. As you define something like an Apache
+virtual host, you can create a Nagios service check for it.
+
+But this requires a PuppetDB, and does not play nice with having no central
+Puppet infra.
+
+With its sibling script up.py, this script takes the JSON files generated by
+that, and manipulates them. This script moves Nagios resources to a specific
+host and does ugly trickery to fool Puppet into accepting that.
+
+This is like exported resources, but you don't need to declare a resource as
+exported.
+"""
+
+
+def load_json(path):
+ with open(path) as f:
+ return json.load(f)
+
+
+def save_json(r, path):
+ with open(path, "w") as f:
+ json.dump(r, f)
+
+
+nagios_catalog_file = pathlib.Path("build/puppet/build/output/nagios.h1.int.pdp7.net/catalog.json")
+
+if nagios_catalog_file.exists():
+ nagios_catalog = load_json(nagios_catalog_file)
+
+ nagios_contacts = [r for r in nagios_catalog["resources"] if r["type"] == "Nagios_contact"]
+ assert len(nagios_contacts) == 1, f"found multiple nagios contacts {nagios_contacts}"
+ nagios_contact = nagios_contacts[0]
+
+
+ail = subprocess.run(["ansible-inventory", "--list"], check=True, stdout=subprocess.PIPE)
+inventory = json.loads(ail.stdout)
+total_hosts_in_inventory = len(inventory["_meta"]["hostvars"].keys())
+k8s_hosts_in_inventory = len(inventory["k8s"]["hosts"])
+puppet_hosts_in_inventory = total_hosts_in_inventory - k8s_hosts_in_inventory
+
+catalog_files = list(pathlib.Path("build/puppet/build/output/").glob("*/catalog.json"))
+
+if nagios_catalog_file.exists():
+ assert len(catalog_files) == puppet_hosts_in_inventory, f"catalogs {catalog_files} quantity different from total hosts in inventory {puppet_hosts_in_inventory}"
+
+
+nagios_resources = []
+nagios_edge_targets = []
+
+
+def is_nagios_resource(r):
+ return r["type"].startswith("Nagios")
+
+
+def is_nagios_edge(e):
+ return e["target"].startswith("Nagios")
+
+
+for catalog_file in catalog_files:
+ if catalog_file == nagios_catalog_file:
+ continue
+ catalog = load_json(catalog_file)
+ nagios_resources += [r for r in catalog["resources"] if is_nagios_resource(r)]
+ catalog["resources"] = [r for r in catalog["resources"] if not is_nagios_resource(r)]
+ nagios_edge_targets += [e["target"] for e in catalog["edges"] if is_nagios_edge(e)]
+ catalog["edges"] = [e for e in catalog["edges"] if not is_nagios_edge(e)]
+ save_json(catalog, catalog_file)
+
+
+if nagios_catalog_file.exists():
+ nagios_contact_position = nagios_catalog["resources"].index(nagios_contact)
+
+ def copy_parameters(r):
+ for p in ["require", "notify", "owner"]:
+ r["parameters"][p] = nagios_contact["parameters"][p]
+ return r
+
+ nagios_catalog["resources"] = (
+ nagios_catalog["resources"][0:nagios_contact_position] +
+ list(map(copy_parameters, nagios_resources)) +
+ nagios_catalog["resources"][nagios_contact_position:]
+ )
+
+ nagios_catalog["edges"] += [{"source": "Class[Nagios]", "target": t} for t in nagios_edge_targets]
+
+ save_json(nagios_catalog, nagios_catalog_file)
diff --git a/personal_infra/puppet/modules/automatic_updates/manifests/init.pp b/personal_infra/puppet/modules/automatic_updates/manifests/init.pp
new file mode 100644
index 00000000..8585b0ae
--- /dev/null
+++ b/personal_infra/puppet/modules/automatic_updates/manifests/init.pp
@@ -0,0 +1,33 @@
+class automatic_updates {
+ if ($facts['os']['family'] == 'Debian') {
+ package {["unattended-upgrades", "apt-listchanges"]:}
+ }
+ elsif ($facts['os']['family'] == 'RedHat') {
+ if ($facts['os']['release']['major'] == '7') {
+ package {'yum-cron':}
+ ->
+ file {"/etc/yum/yum-cron.conf":
+ content => epp('automatic_updates/yum-cron.conf'),
+ }
+ ~>
+ service {'yum-cron':
+ ensure => running,
+ enable => true,
+ }
+ }
+ elsif ($facts['os']['release']['major'] == '8' or $facts['os']['release']['major'] == '9') {
+ package {'dnf-automatic':}
+ ->
+ service {'dnf-automatic-install.timer':
+ ensure => running,
+ enable => true,
+ }
+ }
+ else {
+ fail($facts['os']['release']['major'])
+ }
+ }
+ else {
+ fail($facts['os'])
+ }
+}
diff --git a/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp b/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp
new file mode 100644
index 00000000..bd1ec685
--- /dev/null
+++ b/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp
@@ -0,0 +1,81 @@
+[commands]
+# What kind of update to use:
+# default = yum upgrade
+# security = yum --security upgrade
+# security-severity:Critical = yum --sec-severity=Critical upgrade
+# minimal = yum --bugfix update-minimal
+# minimal-security = yum --security update-minimal
+# minimal-security-severity:Critical = --sec-severity=Critical update-minimal
+update_cmd = default
+
+# Whether a message should be emitted when updates are available,
+# were downloaded, or applied.
+update_messages = yes
+
+# Whether updates should be downloaded when they are available.
+download_updates = yes
+
+# Whether updates should be applied when they are available. Note
+# that download_updates must also be yes for the update to be applied.
+apply_updates = yes
+
+# Maximum amount of time to randomly sleep, in minutes. The program
+# will sleep for a random amount of time between 0 and random_sleep
+# minutes before running. This is useful for e.g. staggering the
+# times that multiple systems will access update servers. If
+# random_sleep is 0 or negative, the program will run immediately.
+# 6*60 = 360
+random_sleep = 360
+
+
+[emitters]
+# Name to use for this system in messages that are emitted. If
+# system_name is None, the hostname will be used.
+system_name = None
+
+# How to send messages. Valid options are stdio and email. If
+# emit_via includes stdio, messages will be sent to stdout; this is useful
+# to have cron send the messages. If emit_via includes email, this
+# program will send email itself according to the configured options.
+# If emit_via is None or left blank, no messages will be sent.
+emit_via = stdio
+
+# The width, in characters, that messages that are emitted should be
+# formatted to.
+output_width = 80
+
+
+[email]
+# The address to send email messages from.
+# NOTE: 'localhost' will be replaced with the value of system_name.
+email_from = root@localhost
+
+# List of addresses to send messages to.
+email_to = root
+
+# Name of the host to connect to to send email messages.
+email_host = localhost
+
+
+[groups]
+# NOTE: This only works when group_command != objects, which is now the default
+# List of groups to update
+group_list = None
+
+# The types of group packages to install
+group_package_types = mandatory, default
+
+[base]
+# This section overrides yum.conf
+
+# Use this to filter Yum core messages
+# -4: critical
+# -3: critical+errors
+# -2: critical+errors+warnings (default)
+debuglevel = -2
+
+# skip_broken = True
+mdpolicy = group:main
+
+# Uncomment to auto-import new gpg keys (dangerous)
+# assumeyes = True
diff --git a/personal_infra/puppet/modules/backups/manifests/init.pp b/personal_infra/puppet/modules/backups/manifests/init.pp
new file mode 100644
index 00000000..f98d598f
--- /dev/null
+++ b/personal_infra/puppet/modules/backups/manifests/init.pp
@@ -0,0 +1,11 @@
+class backups($sanoid_config) {
+ package {'sanoid':}
+
+ file {'/etc/sanoid':
+ ensure => directory,
+ }
+ ->
+ file {'/etc/sanoid/sanoid.conf':
+ content => $sanoid_config,
+ }
+}
diff --git a/personal_infra/puppet/modules/basic_software/manifests/init.pp b/personal_infra/puppet/modules/basic_software/manifests/init.pp
new file mode 100644
index 00000000..fcceefb2
--- /dev/null
+++ b/personal_infra/puppet/modules/basic_software/manifests/init.pp
@@ -0,0 +1,7 @@
+class basic_software {
+ package {['less', 'mlocate', 'traceroute', 'nmap', 'tree', 'tar']:}
+
+ if($facts['os']['family'] == 'RedHat') {
+ package {'which':}
+ }
+}
diff --git a/personal_infra/puppet/modules/copr/manifests/init.pp b/personal_infra/puppet/modules/copr/manifests/init.pp
new file mode 100644
index 00000000..2d0474a5
--- /dev/null
+++ b/personal_infra/puppet/modules/copr/manifests/init.pp
@@ -0,0 +1,20 @@
+define copr (
+ String[1] $user,
+ String[1] $project = $title,
+ String[1] $dist,
+) {
+ file {"/etc/yum.repos.d/_copr:copr.fedorainfracloud.org:$user:$project.repo":
+ content => @("REPO"/$)
+ [copr:copr.fedorainfracloud.org:$user:$project]
+ name=Copr repo for $project owned by $user
+ baseurl=https://download.copr.fedorainfracloud.org/results/$user/$project/$dist-\$basearch/
+ type=rpm-md
+ skip_if_unavailable=True
+ gpgcheck=1
+ gpgkey=https://download.copr.fedorainfracloud.org/results/$user/$project/pubkey.gpg
+ repo_gpgcheck=0
+ enabled=1
+ enabled_metadata=1
+ | - REPO
+ }
+}
diff --git a/personal_infra/puppet/modules/debian/manifests/backports.pp b/personal_infra/puppet/modules/debian/manifests/backports.pp
new file mode 100644
index 00000000..4f33bf22
--- /dev/null
+++ b/personal_infra/puppet/modules/debian/manifests/backports.pp
@@ -0,0 +1,9 @@
+class debian::backports {
+ $codename = $facts['os']['distro']['codename']
+
+ file {'/etc/apt/sources.list.d/backports.list':
+ content => "deb http://deb.debian.org/debian ${codename}-backports main\n",
+ }
+ ~>
+ Exec["/usr/bin/apt update"]
+}
diff --git a/personal_infra/puppet/modules/debian/manifests/init.pp b/personal_infra/puppet/modules/debian/manifests/init.pp
new file mode 100644
index 00000000..fd85713d
--- /dev/null
+++ b/personal_infra/puppet/modules/debian/manifests/init.pp
@@ -0,0 +1,5 @@
+class debian {
+ exec {'/usr/bin/apt update':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp b/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp
new file mode 100644
index 00000000..f7c79724
--- /dev/null
+++ b/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp
@@ -0,0 +1,36 @@
+class dns_dhcp {
+ $domain = lookup('network.dns_dhcp.domain')
+
+ $hostvars = lookup('hostvars')
+ $fixed_dhcp_host_vars = $hostvars.filter |$host, $vars| { $vars['network'] and $vars['network']['dhcp_server'] == $facts["networking"]["fqdn"] }
+ $fixed_dhcp_hosts = Hash($fixed_dhcp_host_vars.map |$host, $vars| { [$host.match(/^[-a-z0-9]+/)[0], $vars['network']['ip'] ] })
+
+ $fixed_host_vars = $hostvars.filter |$host, $vars| { $vars['network'] and $vars['network']['register_dns_server'] == $facts["networking"]["fqdn"] }
+ $fixed_hosts = Hash($fixed_host_vars.map |$host, $vars| { [$host.match(/^[-a-z0-9]+/)[0], $vars['network']['ip'] ] })
+
+
+ package {'dnsmasq':}
+ ->
+ file {'/etc/dnsmasq.d':
+ ensure => directory,
+ purge => true,
+ recurse => true,
+ }
+ file {'/etc/dnsmasq.d/internal':
+ content => epp('dns_dhcp/internal', {
+ 'dns_dhcp' => lookup("network.dns_dhcp"),
+ 'dns_other_server_defs' => $dns_other_server_defs,
+ 'fixed_dhcp_hosts' => $fixed_dhcp_hosts,
+ 'fixed_hosts' => $fixed_hosts,
+ }),
+ }
+ ~>
+ service {'dnsmasq':
+ enable => true,
+ ensure => running,
+ }
+ ->
+ file {'/etc/resolv.conf':
+ content => "domain ${domain}\nsearch ${domain}\nnameserver 127.0.0.1\n",
+ }
+}
diff --git a/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp b/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp
new file mode 100644
index 00000000..9b751855
--- /dev/null
+++ b/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp
@@ -0,0 +1,30 @@
+domain-needed
+no-resolv
+no-hosts
+
+server=<%= $dns_dhcp['upstream_dns'] %>
+<% if $dns_dhcp['domain'] { %>
+local=/<%= $dns_dhcp['domain'] %>/
+domain=<%= $dns_dhcp['domain'] %>
+<% } %>
+
+<% if $dns_dhcp['dhcp_range'] { %>
+dhcp-range=<%= $dns_dhcp['dhcp_range'] %>
+
+dhcp-option=option:router,<%= $dns_dhcp['router'] %>
+<% } %>
+
+interface=<%= join($dns_dhcp['interfaces'], ',') %>
+
+<% $dns_other_server_defs.each |$server_def| { %>
+server=/<%= $server_def['network_name'] %>/<%= $server_def['dns_ip'] %>
+rev-server=<%= $server_def['reverse_ip_range'] %>,<%= $server_def['dns_ip'] %>
+<% } %>
+
+<% $fixed_dhcp_hosts.each |$host, $ip| { %>
+dhcp-host=<%= $host %>,<%= $ip %>,<%= $host %>
+<% } %>
+
+<% $fixed_hosts.each |$host, $ip| { %>
+host-record=<%= $host %>.<%= $dns_dhcp['domain'] %>,<%= $host %>,<%= $ip %>
+<% } %>
diff --git a/personal_infra/puppet/modules/freeipa/manifests/server.pp b/personal_infra/puppet/modules/freeipa/manifests/server.pp
new file mode 100644
index 00000000..6ca10a43
--- /dev/null
+++ b/personal_infra/puppet/modules/freeipa/manifests/server.pp
@@ -0,0 +1,13 @@
+class freeipa::server {
+ package {['ipa-server', 'ipa-server-dns', 'ipa-healthcheck']:}
+ ~>
+ service {'ipa-healthcheck.timer':
+ ensure => running,
+ enable => true,
+ }
+
+ # weak dependency that does not work on LXC
+ package {'low-memory-monitor':
+ ensure => purged,
+ }
+}
diff --git a/personal_infra/puppet/modules/ipsilon/manifests/init.pp b/personal_infra/puppet/modules/ipsilon/manifests/init.pp
new file mode 100644
index 00000000..aa0908aa
--- /dev/null
+++ b/personal_infra/puppet/modules/ipsilon/manifests/init.pp
@@ -0,0 +1,8 @@
+class ipsilon {
+ package {['ipsilon-tools-ipa', 'ipsilon-openidc']:}
+
+ service {'httpd':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/mailalias_core b/personal_infra/puppet/modules/mailalias_core
new file mode 160000
+Subproject e6230faf076a5ed7b474ed67a4c6c0802d0b7b5
diff --git a/personal_infra/puppet/modules/miniflux/manifests/init.pp b/personal_infra/puppet/modules/miniflux/manifests/init.pp
new file mode 100644
index 00000000..179cfc14
--- /dev/null
+++ b/personal_infra/puppet/modules/miniflux/manifests/init.pp
@@ -0,0 +1,27 @@
+class miniflux($database_url, $polling_frequency, $batch_size, $polling_parser_error_limit) {
+ file {'/etc/yum.repos.d/miniflux.repo':
+ content => "[miniflux]
+name=Miniflux Repository
+baseurl=https://repo.miniflux.app/yum/
+enabled=1
+gpgcheck=0
+",
+ }
+ ->
+ package {'miniflux':}
+ ->
+ file {'/etc/miniflux.conf':
+ content => "LISTEN_ADDR=0.0.0.0:8080
+RUN_MIGRATIONS=1
+DATABASE_URL=$database_url
+POLLING_FREQUENCY=$polling_frequency
+BATCH_SIZE=$batch_size
+POLLING_PARSING_ERROR_LIMIT=$polling_parser_error_limit
+",
+ }
+ ~>
+ service {'miniflux':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/files/check_talos_version b/personal_infra/puppet/modules/nagios/files/check_talos_version
new file mode 120000
index 00000000..16932959
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/files/check_talos_version
@@ -0,0 +1 @@
+../../../../talos-check/check_talos_version \ No newline at end of file
diff --git a/personal_infra/puppet/modules/nagios/manifests/init.pp b/personal_infra/puppet/modules/nagios/manifests/init.pp
new file mode 100644
index 00000000..5568fbf9
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/manifests/init.pp
@@ -0,0 +1,100 @@
+class nagios {
+ package {'nagios':}
+ ->
+ service {'nagios':
+ ensure => running,
+ enable => true,
+ }
+
+ file {
+ default:
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ ;
+ '/etc/nagios':
+ ensure => directory,
+ recurse => true,
+ force => true,
+ purge => true,
+ ;
+ '/etc/nagios/nagios.cfg':
+ content => epp('nagios/nagios.cfg'),
+ ;
+ # leave these unaffected
+ ['/etc/nagios/passwd', '/etc/nagios/cgi.cfg', '/etc/nagios/private/resource.cfg', '/etc/nagios/objects', '/etc/nagios/private', '/etc/nagios/objects/commands.cfg', '/etc/nagios/objects/timeperiods.cfg', '/etc/nagios/objects/templates.cfg']:
+ ensure => present,
+ ;
+ }
+
+ nagios_contact {'nagiosadmin':
+ use => 'generic-contact',
+ email => lookup('mail.root_mail'),
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_contactgroup {'admins':
+ members => 'nagiosadmin',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_command {'check_ragent':
+ command_name => 'check_ragent',
+ command_line => '/usr/bin/check_ragent http://$HOSTADDRESS$:21488/ --warning-units dnf-makecache.service --warning-units dnf-automatic-install.service',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_hostgroup {'linux':
+ hostgroup_name => 'linux',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_servicegroup {'ragent':
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_service {'check_ragent':
+ use => 'generic-service',
+ hostgroup_name => 'linux',
+ service_description => 'check_ragent',
+ servicegroups => 'ragent',
+ check_command => 'check_ragent',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_service {'check_ssh':
+ use => 'generic-service',
+ hostgroup_name => 'linux',
+ service_description => 'ssh',
+ check_command => 'check_ssh',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ package {'httpd':}
+ ->
+ service {'httpd':
+ ensure => running,
+ enable => true,
+ }
+
+ if $facts['virtual'] == 'lxc' {
+ file {'/bin/ping':
+ mode => 'u+s',
+ }
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/manifests/k8s.pp b/personal_infra/puppet/modules/nagios/manifests/k8s.pp
new file mode 100644
index 00000000..8eada3c9
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/manifests/k8s.pp
@@ -0,0 +1,41 @@
+class nagios::k8s {
+ file {'/usr/local/bin/check_talos_version':
+ content => file('nagios/check_talos_version'),
+ mode => '0755',
+ links => follow,
+ }
+
+ nagios_command {'check_talos':
+ command_name => 'check_talos',
+ command_line => '/usr/local/bin/check_talos_version http://$HOSTADDRESS$ monitor',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_hostgroup {'k8s':
+ hostgroup_name => 'k8s',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_servicegroup {'talos_check':
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_service {'talos-check':
+ use => 'generic-service',
+ hostgroup_name => 'k8s',
+ service_description => 'check_talos',
+ servicegroups => 'talos_check',
+ check_command => 'check_talos',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp b/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp
new file mode 100644
index 00000000..8e28ceb8
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp
@@ -0,0 +1,1373 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 4.4.9
+#
+# Read the documentation for more information on this configuration
+# file. I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# puppet generated
+cfg_file=/etc/nagios/nagios_contactgroup.cfg
+cfg_file=/etc/nagios/nagios_contact.cfg
+cfg_file=/etc/nagios/nagios_command.cfg
+cfg_file=/etc/nagios/nagios_hostgroup.cfg
+cfg_file=/etc/nagios/nagios_servicegroup.cfg
+cfg_file=/etc/nagios/nagios_host.cfg
+cfg_file=/etc/nagios/nagios_service.cfg
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/spool/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/spool/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios/private/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/spool/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).
+# By default Nagios will check for external commands.
+# If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/spool/nagios/cmd/nagios.cmd
+
+
+
+# QUERY HANDLER INTERFACE
+# This is the socket that is created for the Query Handler interface
+
+#query_socket=/var/spool/nagios/cmd/nagios.qh
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios/nagios.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/spool/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# by loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place,
+# restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1. If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# CURRENT STATES LOGGING OPTION
+# If you don't want Nagios to log all current host and service states
+# after log has been rotated to the main log file, you can disable this
+# option by setting this value to 0. Default value is 1.
+
+log_current_states=1
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/spool/nagios/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently that the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently that the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic test
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of service when it predicts that future dependency logic test
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether its a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+ochp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts. Since its only
+# a one-time penalty, I think its well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/spool/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file. If you
+# If you want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available. It is recommend that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios. Nagios is critical to you - make sure you keep it in
+# good shape. Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance
+# with our privacy policy - see https://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates. By default, Nagios will send information on the
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not. Nagios Enterprises uses
+# this data to determine the number of users running specific version of
+# Nagios. Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# process_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/var/log/nagios/host-perfdata
+#service_perfdata_file=/var/log/nagios/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the default append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESS EMPTY RESULTS
+# These options determine whether the core will process empty perfdata
+# results or not. This is needed for distributed monitoring, and intentionally
+# turned on by default.
+# If you don't require empty perfdata - saving some cpu cycles
+# on unwanted macro calculation - you can turn that off. Be careful!
+# Values: 1 = enable, 0 = disable
+
+#host_perfdata_process_empty_results=1
+#service_perfdata_process_empty_results=1
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host and service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# SERVICE CHECK TIMEOUT STATE
+# This setting determines the state Nagios will report when a
+# service check times out - that is does not respond within
+# service_check_timeout seconds. This can be useful if a
+# machine is running at too high a load and you do not want
+# to consider a failed service check to be critical (the default).
+# Valid settings are:
+# c - Critical (default)
+# u - Unknown
+# w - Warning
+# o - OK
+
+service_check_timeout_state=c
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $LONGHOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $LONGSERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.
+# Enabling this is a very bad idea for anything but very small setups,
+# as it means plugins, notification scripts and eventhandlers may run
+# out of environment space. It will also cause a significant increase
+# in CPU- and memory usage and drastically reduce the number of checks
+# you can run.
+# Values: 1 - Enable environment variable macros
+# 0 - Disable environment variable macros (default)
+
+enable_environment_macros=0
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processed used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
+# 4096 = Interprocess communication
+# 8192 = Scheduling
+# 16384 = Workers
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+
+
+# Should we allow hostgroups to have no hosts, we default this to off since
+# that was the old behavior
+
+allow_empty_hostgroup_assignment=0
+
+
+
+# Normally worker count is dynamically allocated based on 1.5 * number of cpu's
+# with a minimum of 4 workers. This value will override the defaults
+
+#check_workers=3
+
+
+
+# DISABLE SERVICE CHECKS WHEN HOST DOWN
+# This option will disable all service checks if the host is not in an UP state
+#
+# While desirable in some environments, enabling this value can distort report
+# values as the expected quantity of checks will not have been performed
+
+#host_down_disable_service_checks=0
+
+
+
+# SET SERVICE/HOST STATUS WHEN SERVICE CHECK SKIPPED
+# These options will allow you to set the status of a service when its
+# service check is skipped due to one of three reasons:
+# 1) failed dependency check; 2) parent's status; 3) host not up
+# Number 3 can only happen if 'host_down_disable_service_checks' above
+# is set to 1.
+# Valid values for the service* options are:
+# -1 Do not change the service status (default - same as before 4.4)
+# 0 Set the service status to STATE_OK
+# 1 Set the service status to STATE_WARNING
+# 2 Set the service status to STATE_CRITICAL
+# 3 Set the service status to STATE_UNKNOWN
+# The host_skip_check_dependency_status option will allow you to set the
+# status of a host when its check is skipped due to a failed dependency check.
+# Valid values for the host_skip_check_dependency_status are:
+# -1 Do not change the host status (default - same as before 4.4)
+# 0 Set the host status to STATE_UP
+# 1 Set the host status to STATE_DOWN
+# 2 Set the host status to STATE_UNREACHABLE
+# We may add one or more statuses in the future.
+
+#service_skip_check_dependency_status=-1
+#service_skip_check_parent_status=-1
+#service_skip_check_host_down_status=-1
+#host_skip_check_dependency_status=-1
+
+
+
+# LOAD CONTROL OPTIONS
+# To get current defaults based on your system, issue this command to
+# the query handler:
+# echo -e '@core loadctl\0' | nc -U /usr/local/nagios/var/rw/nagios.qh
+#
+# Please note that used incorrectly these options can induce enormous latency.
+#
+# loadctl_options:
+# jobs_max The maximum amount of jobs to run at one time
+# jobs_min The minimum amount of jobs to run at one time
+# jobs_limit The maximum amount of jobs the current load lets us run
+# backoff_limit The minimum backoff_change
+# backoff_change # of jobs to remove from jobs_limit when backing off
+# rampup_limit Minimum rampup_change
+# rampup_change # of jobs to add to jobs_limit when ramping up
+
+#loadctl_options=jobs_max=100;backoff_limit=10;rampup_change=5
diff --git a/personal_infra/puppet/modules/nagios_core b/personal_infra/puppet/modules/nagios_core
new file mode 160000
+Subproject 8dbf9f12383bd29973963a52968b2850d98292f
diff --git a/personal_infra/puppet/modules/nextcloud/manifests/init.pp b/personal_infra/puppet/modules/nextcloud/manifests/init.pp
new file mode 100644
index 00000000..1c41215e
--- /dev/null
+++ b/personal_infra/puppet/modules/nextcloud/manifests/init.pp
@@ -0,0 +1,79 @@
+class nextcloud(
+ $database_name,
+ $database_user,
+ $database_host,
+) {
+
+ file {'/etc/yum.repos.d/koalillo-nextcloud-epel-9.repo':
+ content => @("EOT"/$)
+ [copr:copr.fedorainfracloud.org:koalillo:nextcloud-test]
+ name=Copr repo for nextcloud owned by koalillo
+ baseurl=https://download.copr.fedorainfracloud.org/results/koalillo/nextcloud-test/epel-9-\$basearch/
+ type=rpm-md
+ skip_if_unavailable=True
+ gpgcheck=1
+ gpgkey=https://download.copr.fedorainfracloud.org/results/koalillo/nextcloud-test/pubkey.gpg
+ repo_gpgcheck=0
+ enabled=1
+ enabled_metadata=1
+ | EOT
+ ,
+ }
+
+ package {'remi-release':
+ source => 'https://rpms.remirepo.net/enterprise/remi-release-9.rpm',
+ }
+ ->
+ exec {'/usr/bin/dnf module enable -y php:remi-8.2':
+ unless => '/usr/bin/dnf module list --enabled php | grep remi-8.2',
+ }
+
+ package {['nextcloud-httpd', 'nextcloud-postgresql', 'php82-php-pecl-apcu', 'php-sodium', 'php-opcache',]:
+ require => [
+ Exec['/usr/bin/dnf module enable -y php:remi-8.2'],
+ File['/etc/yum.repos.d/koalillo-nextcloud-epel-9.repo'],
+ ],
+ }
+
+ service {'httpd':
+ enable => true,
+ ensure => running,
+ subscribe => Package['nextcloud-httpd'],
+ }
+
+ service {'nextcloud-cron.timer':
+ ensure => running,
+ enable => true,
+ require => Package['nextcloud-httpd'],
+ }
+
+ file {'/etc/php-fpm.d/www.conf':
+ content => epp("nextcloud/www.conf", {}),
+ }
+ ~>
+ service {'php-fpm':
+ enable => true,
+ ensure => running,
+ subscribe => Package['nextcloud-httpd'],
+ }
+
+ file {'/etc/httpd/conf.d/z-nextcloud-access.conf':
+ ensure => '/etc/httpd/conf.d/nextcloud-access.conf.avail',
+ require => Package['nextcloud-httpd'],
+ notify => Service['httpd'],
+ }
+
+ package {['php-intl', 'php-bcmath']:}
+
+ file {'/etc/php.d/99-apcu-cli.ini':
+ content => @("EOT")
+ apc.enable_cli=1
+ | EOT
+ ,
+ }
+
+ cron {"nextcloud-previews":
+ command => "sudo -u apache php -d memory_limit=512M /usr/share/nextcloud/occ preview:generate-all",
+ minute => "41",
+ }
+}
diff --git a/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp b/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp
new file mode 100644
index 00000000..70db53e4
--- /dev/null
+++ b/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp
@@ -0,0 +1,439 @@
+; Start a new pool named 'www'.
+; the variable $pool can be used in any directive and will be replaced by the
+; pool name ('www' here)
+[www]
+
+; Per pool prefix
+; It only applies on the following directives:
+; - 'access.log'
+; - 'slowlog'
+; - 'listen' (unixsocket)
+; - 'chroot'
+; - 'chdir'
+; - 'php_values'
+; - 'php_admin_values'
+; When not set, the global prefix (or @php_fpm_prefix@) applies instead.
+; Note: This directive can also be relative to the global prefix.
+; Default Value: none
+;prefix = /path/to/pools/$pool
+
+; Unix user/group of processes
+; Note: The user is mandatory. If the group is not set, the default user's group
+; will be used.
+; RPM: apache user chosen to provide access to the same directories as httpd
+user = apache
+; RPM: Keep a group allowed to write in log dir.
+group = apache
+
+; The address on which to accept FastCGI requests.
+; Valid syntaxes are:
+; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
+; a specific port;
+; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
+; a specific port;
+; 'port' - to listen on a TCP socket to all addresses
+; (IPv6 and IPv4-mapped) on a specific port;
+; '/path/to/unix/socket' - to listen on a unix socket.
+; Note: This value is mandatory.
+listen = /run/php-fpm/www.sock
+
+; Set listen(2) backlog.
+; Default Value: 511
+;listen.backlog = 511
+
+; Set permissions for unix socket, if one is used. In Linux, read/write
+; permissions must be set in order to allow connections from a web server.
+; Default Values: user and group are set as the running user
+; mode is set to 0660
+;listen.owner = nobody
+;listen.group = nobody
+;listen.mode = 0660
+
+; When POSIX Access Control Lists are supported you can set them using
+; these options, value is a comma separated list of user/group names.
+; When set, listen.owner and listen.group are ignored
+listen.acl_users = apache,nginx
+;listen.acl_groups =
+
+; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
+; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
+; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
+; must be separated by a comma. If this value is left blank, connections will be
+; accepted from any ip address.
+; Default Value: any
+listen.allowed_clients = 127.0.0.1
+
+; Specify the nice(2) priority to apply to the pool processes (only if set)
+; The value can vary from -19 (highest priority) to 20 (lowest priority)
+; Note: - It will only work if the FPM master process is launched as root
+; - The pool processes will inherit the master process priority
+; unless it is specified otherwise
+; Default Value: no set
+; process.priority = -19
+
+; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
+; or group is different than the master process user. It allows to create process
+; core dump and ptrace the process for the pool user.
+; Default Value: no
+; process.dumpable = yes
+
+; Choose how the process manager will control the number of child processes.
+; Possible Values:
+; static - a fixed number (pm.max_children) of child processes;
+; dynamic - the number of child processes are set dynamically based on the
+; following directives. With this process management, there will be
+; always at least 1 children.
+; pm.max_children - the maximum number of children that can
+; be alive at the same time.
+; pm.start_servers - the number of children created on startup.
+; pm.min_spare_servers - the minimum number of children in 'idle'
+; state (waiting to process). If the number
+; of 'idle' processes is less than this
+; number then some children will be created.
+; pm.max_spare_servers - the maximum number of children in 'idle'
+; state (waiting to process). If the number
+; of 'idle' processes is greater than this
+; number then some children will be killed.
+; ondemand - no children are created at startup. Children will be forked when
+; new requests will connect. The following parameter are used:
+; pm.max_children - the maximum number of children that
+; can be alive at the same time.
+; pm.process_idle_timeout - The number of seconds after which
+; an idle process will be killed.
+; Note: This value is mandatory.
+pm = dynamic
+
+; The number of child processes to be created when pm is set to 'static' and the
+; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
+; This value sets the limit on the number of simultaneous requests that will be
+; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
+; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
+; CGI. The below defaults are based on a server without much resources. Don't
+; forget to tweak pm.* to fit your needs.
+; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
+; Note: This value is mandatory.
+pm.max_children = 50
+
+; The number of child processes created on startup.
+; Note: Used only when pm is set to 'dynamic'
+; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
+pm.start_servers = 5
+
+; The desired minimum number of idle server processes.
+; Note: Used only when pm is set to 'dynamic'
+; Note: Mandatory when pm is set to 'dynamic'
+pm.min_spare_servers = 5
+
+; The desired maximum number of idle server processes.
+; Note: Used only when pm is set to 'dynamic'
+; Note: Mandatory when pm is set to 'dynamic'
+pm.max_spare_servers = 35
+
+; The number of seconds after which an idle process will be killed.
+; Note: Used only when pm is set to 'ondemand'
+; Default Value: 10s
+;pm.process_idle_timeout = 10s;
+
+; The number of requests each child process should execute before respawning.
+; This can be useful to work around memory leaks in 3rd party libraries. For
+; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
+; Default Value: 0
+;pm.max_requests = 500
+
+; The URI to view the FPM status page. If this value is not set, no URI will be
+; recognized as a status page. It shows the following informations:
+; pool - the name of the pool;
+; process manager - static, dynamic or ondemand;
+; start time - the date and time FPM has started;
+; start since - number of seconds since FPM has started;
+; accepted conn - the number of request accepted by the pool;
+; listen queue - the number of request in the queue of pending
+; connections (see backlog in listen(2));
+; max listen queue - the maximum number of requests in the queue
+; of pending connections since FPM has started;
+; listen queue len - the size of the socket queue of pending connections;
+; idle processes - the number of idle processes;
+; active processes - the number of active processes;
+; total processes - the number of idle + active processes;
+; max active processes - the maximum number of active processes since FPM
+; has started;
+; max children reached - number of times, the process limit has been reached,
+; when pm tries to start more children (works only for
+; pm 'dynamic' and 'ondemand');
+; Value are updated in real time.
+; Example output:
+; pool: www
+; process manager: static
+; start time: 01/Jul/2011:17:53:49 +0200
+; start since: 62636
+; accepted conn: 190460
+; listen queue: 0
+; max listen queue: 1
+; listen queue len: 42
+; idle processes: 4
+; active processes: 11
+; total processes: 15
+; max active processes: 12
+; max children reached: 0
+;
+; By default the status page output is formatted as text/plain. Passing either
+; 'html', 'xml' or 'json' in the query string will return the corresponding
+; output syntax. Example:
+; http://www.foo.bar/status
+; http://www.foo.bar/status?json
+; http://www.foo.bar/status?html
+; http://www.foo.bar/status?xml
+;
+; By default the status page only outputs short status. Passing 'full' in the
+; query string will also return status for each pool process.
+; Example:
+; http://www.foo.bar/status?full
+; http://www.foo.bar/status?json&full
+; http://www.foo.bar/status?html&full
+; http://www.foo.bar/status?xml&full
+; The Full status returns for each process:
+; pid - the PID of the process;
+; state - the state of the process (Idle, Running, ...);
+; start time - the date and time the process has started;
+; start since - the number of seconds since the process has started;
+; requests - the number of requests the process has served;
+; request duration - the duration in µs of the requests;
+; request method - the request method (GET, POST, ...);
+; request URI - the request URI with the query string;
+; content length - the content length of the request (only with POST);
+; user - the user (PHP_AUTH_USER) (or '-' if not set);
+; script - the main script called (or '-' if not set);
+; last request cpu - the %cpu the last request consumed
+; it's always 0 if the process is not in Idle state
+; because CPU calculation is done when the request
+; processing has terminated;
+; last request memory - the max amount of memory the last request consumed
+; it's always 0 if the process is not in Idle state
+; because memory calculation is done when the request
+; processing has terminated;
+; If the process is in Idle state, then the information relates to the
+; last request the process has served. Otherwise the information relates to
+; the current request being served.
+; Example output:
+; ************************
+; pid: 31330
+; state: Running
+; start time: 01/Jul/2011:17:53:49 +0200
+; start since: 63087
+; requests: 12808
+; request duration: 1250261
+; request method: GET
+; request URI: /test_mem.php?N=10000
+; content length: 0
+; user: -
+; script: /home/fat/web/docs/php/test_mem.php
+; last request cpu: 0.00
+; last request memory: 0
+;
+; Note: There is a real-time FPM status monitoring sample web page available
+; It's available in: @EXPANDED_DATADIR@/fpm/status.html
+;
+; Note: The value must start with a leading slash (/). The value can be
+; anything, but it may not be a good idea to use the .php extension or it
+; may conflict with a real PHP file.
+; Default Value: not set
+;pm.status_path = /status
+
+; The ping URI to call the monitoring page of FPM. If this value is not set, no
+; URI will be recognized as a ping page. This could be used to test from outside
+; that FPM is alive and responding, or to
+; - create a graph of FPM availability (rrd or such);
+; - remove a server from a group if it is not responding (load balancing);
+; - trigger alerts for the operating team (24/7).
+; Note: The value must start with a leading slash (/). The value can be
+; anything, but it may not be a good idea to use the .php extension or it
+; may conflict with a real PHP file.
+; Default Value: not set
+;ping.path = /ping
+
+; This directive may be used to customize the response of a ping request. The
+; response is formatted as text/plain with a 200 response code.
+; Default Value: pong
+;ping.response = pong
+
+; The access log file
+; Default: not set
+;access.log = log/$pool.access.log
+
+; The access log format.
+; The following syntax is allowed
+; %%: the '%' character
+; %C: %CPU used by the request
+; it can accept the following format:
+; - %{user}C for user CPU only
+; - %{system}C for system CPU only
+; - %{total}C for user + system CPU (default)
+; %d: time taken to serve the request
+; it can accept the following format:
+; - %{seconds}d (default)
+; - %{miliseconds}d
+; - %{mili}d
+; - %{microseconds}d
+; - %{micro}d
+; %e: an environment variable (same as $_ENV or $_SERVER)
+;                       it must be enclosed in braces to specify the name of the env
+;                       variable. Some examples:
+; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
+; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
+; %f: script filename
+; %l: content-length of the request (for POST request only)
+; %m: request method
+; %M: peak of memory allocated by PHP
+; it can accept the following format:
+; - %{bytes}M (default)
+; - %{kilobytes}M
+; - %{kilo}M
+; - %{megabytes}M
+; - %{mega}M
+; %n: pool name
+; %o: output header
+;                       it must be enclosed in braces to specify the name of the header:
+; - %{Content-Type}o
+; - %{X-Powered-By}o
+;                       - %{Transfer-Encoding}o
+; - ....
+; %p: PID of the child that serviced the request
+; %P: PID of the parent of the child that serviced the request
+; %q: the query string
+; %Q: the '?' character if query string exists
+; %r: the request URI (without the query string, see %q and %Q)
+; %R: remote IP address
+; %s: status (response code)
+; %t: server time the request was received
+; it can accept a strftime(3) format:
+; %d/%b/%Y:%H:%M:%S %z (default)
+;                       The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
+; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
+; %T: time the log has been written (the request has finished)
+; it can accept a strftime(3) format:
+; %d/%b/%Y:%H:%M:%S %z (default)
+;                       The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
+; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
+; %u: remote user
+;
+; Default: "%R - %u %t \"%m %r\" %s"
+;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
+
+; The log file for slow requests
+; Default Value: not set
+; Note: slowlog is mandatory if request_slowlog_timeout is set
+slowlog = /var/log/php-fpm/www-slow.log
+
+; The timeout for serving a single request after which a PHP backtrace will be
+; dumped to the 'slowlog' file. A value of '0s' means 'off'.
+; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
+; Default Value: 0
+;request_slowlog_timeout = 0
+
+; Depth of slow log stack trace.
+; Default Value: 20
+;request_slowlog_trace_depth = 20
+
+; The timeout for serving a single request after which the worker process will
+; be killed. This option should be used when the 'max_execution_time' ini option
+; does not stop script execution for some reason. A value of '0' means 'off'.
+; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
+; Default Value: 0
+;request_terminate_timeout = 0
+
+; Set open file descriptor rlimit.
+; Default Value: system defined value
+;rlimit_files = 1024
+
+; Set max core size rlimit.
+; Possible Values: 'unlimited' or an integer greater or equal to 0
+; Default Value: system defined value
+;rlimit_core = 0
+
+; Chroot to this directory at the start. This value must be defined as an
+; absolute path. When this value is not set, chroot is not used.
+; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
+; of its subdirectories. If the pool prefix is not set, the global prefix
+; will be used instead.
+; Note: chrooting is a great security feature and should be used whenever
+; possible. However, all PHP paths will be relative to the chroot
+; (error_log, sessions.save_path, ...).
+; Default Value: not set
+;chroot =
+
+; Chdir to this directory at the start.
+; Note: relative path can be used.
+; Default Value: current directory or / when chroot
+;chdir = /var/www
+
+; Redirect worker stdout and stderr into main error log. If not set, stdout and
+; stderr will be redirected to /dev/null according to FastCGI specs.
+; Note: on highly loaded environments, this can cause some delay in the page
+; process time (several ms).
+; Default Value: no
+;catch_workers_output = yes
+
+; Clear environment in FPM workers
+; Prevents arbitrary environment variables from reaching FPM worker processes
+; by clearing the environment in workers before env vars specified in this
+; pool configuration are added.
+; Setting to "no" will make all environment variables available to PHP code
+; via getenv(), $_ENV and $_SERVER.
+; Default Value: yes
+;clear_env = no
+
+; Limits the extensions of the main script FPM will allow to parse. This can
+; prevent configuration mistakes on the web server side. You should only limit
+; FPM to .php extensions to prevent malicious users from using other extensions to
+; execute php code.
+; Note: set an empty value to allow all extensions.
+; Default Value: .php
+;security.limit_extensions = .php .php3 .php4 .php5 .php7
+
+; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
+; the current environment.
+; Default Value: clean env
+;env[HOSTNAME] = $HOSTNAME
+;env[PATH] = /usr/local/bin:/usr/bin:/bin
+;env[TMP] = /tmp
+;env[TMPDIR] = /tmp
+;env[TEMP] = /tmp
+
+; Additional php.ini defines, specific to this pool of workers. These settings
+; overwrite the values previously defined in the php.ini. The directives are the
+; same as the PHP SAPI:
+; php_value/php_flag - you can set classic ini defines which can
+; be overwritten from PHP call 'ini_set'.
+; php_admin_value/php_admin_flag - these directives won't be overwritten by
+; PHP call 'ini_set'
+; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
+
+; Defining 'extension' will load the corresponding shared extension from
+; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
+; overwrite previously defined php.ini values, but will append the new value
+; instead.
+
+; Note: path INI options can be relative and will be expanded with the prefix
+; (pool, global or @prefix@)
+
+; Default Value: nothing is defined by default except the values in php.ini and
+; specified at startup with the -d argument
+;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
+;php_flag[display_errors] = off
+php_admin_value[error_log] = /var/log/php-fpm/www-error.log
+php_admin_flag[log_errors] = on
+php_admin_value[memory_limit] = 512M
+php_admin_value[output_buffering] = off
+
+; Set the following data paths to directories owned by the FPM process user.
+;
+; Do not change the ownership of existing system directories, if the process
+; user does not have write permission, create dedicated directories for this
+; purpose.
+;
+; See warning about choosing the location of these directories on your system
+; at http://php.net/session.save-path
+php_value[session.save_handler] = files
+php_value[session.save_path] = /var/lib/php/session
+php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache
+;php_value[opcache.file_cache] = /var/lib/php/opcache
diff --git a/personal_infra/puppet/modules/ocserv/manifests/init.pp b/personal_infra/puppet/modules/ocserv/manifests/init.pp
new file mode 100644
index 00000000..b9ead95b
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/manifests/init.pp
@@ -0,0 +1,97 @@
+class ocserv($ocserv_tcp_port,
+ $ocserv_udp_port,
+ $ocserv_default_domain,
+ $ocserv_ipv4_network,
+ $ocserv_dns,
+ $ocserv_split_dns,
+ $ocserv_routes,
+ $firewall = true)
+{
+ $run_as_user = $facts['os']['family'] ? {
+ 'Debian' => 'nobody',
+ 'RedHat' => 'ocserv',
+ }
+
+ $run_as_group = $facts['os']['family'] ? {
+ 'Debian' => 'daemon',
+ 'RedHat' => 'ocserv',
+ }
+
+ $socket_file = $facts['os']['family'] ? {
+ 'Debian' => '/var/run/ocserv-socket',
+ 'RedHat' => 'ocserv.sock',
+ }
+
+ $chroot_dir = $facts['os']['family'] ? {
+ 'Debian' => undef,
+ 'RedHat' => '/var/lib/ocserv',
+ }
+
+ $server_cert = $facts['os']['family']? {
+ 'Debian' => '/etc/ssl/certs/ssl-cert-snakeoil.pem',
+ 'RedHat' => '/etc/pki/ocserv/public/server.crt',
+ }
+
+ $server_key = $facts['os']['family'] ? {
+ 'Debian' => '/etc/ssl/private/ssl-cert-snakeoil.key',
+ 'RedHat' => '/etc/pki/ocserv/private/server.key',
+ }
+
+ package {'ocserv':}
+ ->
+ file {'/etc/ocserv/ocserv.conf':
+ content => epp('ocserv/ocserv.conf', {'tcp_port' => $ocserv_tcp_port,
+ 'udp_port' => $ocserv_udp_port,
+ 'run_as_user' => $run_as_user,
+ 'run_as_group' => $run_as_group,
+ 'socket_file' => $socket_file,
+ 'chroot_dir' => $chroot_dir,
+ 'server_cert' => $server_cert,
+ 'server_key' => $server_key,
+ 'default_domain' => $ocserv_default_domain,
+ 'ipv4_network' => $ocserv_ipv4_network,
+ 'dns' => $ocserv_dns,
+ 'split_dns' => $ocserv_split_dns,
+ 'routes' => $ocserv_routes,
+ }),
+ }
+ ~>
+ service {'ocserv':
+ enable => true,
+ ensure => running,
+ }
+
+ if ($facts['os']['family'] == 'RedHat' and $firewall) {
+ exec {'add masquerade for ocserv':
+ command => '/usr/bin/firewall-cmd --permanent --add-masquerade',
+ unless => '/usr/bin/firewall-cmd --query-masquerade',
+ notify => Exec['reload firewall for ocserv'],
+ }
+
+ exec {'open firewall for ocserv':
+      command => '/usr/bin/firewall-cmd --permanent --add-port=444/tcp --add-port=444/udp',
+ unless => '/usr/bin/firewall-cmd --query-port=444/udp',
+ }
+ ~>
+ exec {'reload firewall for ocserv':
+ command => '/usr/bin/firewall-cmd --reload',
+ refreshonly => true,
+ }
+ }
+
+ if ($facts['os']['family'] == 'Debian') {
+ file {'/etc/systemd/system/ocserv.socket.d/':
+ ensure => directory,
+ }
+ ->
+ file {'/etc/systemd/system/ocserv.socket.d/port.conf':
+ content => epp('ocserv/port.conf', {'tcp_port' => $ocserv_tcp_port,
+ 'udp_port' => $ocserv_udp_port,
+ }),
+ }
+ ~>
+ exec {'/bin/systemctl daemon-reload && systemctl restart ocserv.socket':
+ refreshonly => true,
+ }
+ }
+}
diff --git a/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp b/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp
new file mode 100644
index 00000000..b4ca12e7
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp
@@ -0,0 +1,57 @@
+<%- | $tcp_port,
+ $udp_port,
+ $run_as_user,
+ $run_as_group,
+ $socket_file,
+ $chroot_dir,
+ $server_cert,
+ $server_key,
+ $default_domain,
+ $ipv4_network,
+ $dns,
+ $split_dns,
+ $routes,
+| -%>
+auth = "pam"
+listen-host-is-dyndns = true
+# note, those are not used on Debian
+tcp-port = <%= $tcp_port %>
+udp-port = <%= $udp_port %>
+run-as-user = <%= $run_as_user %>
+run-as-group = <%= $run_as_group %>
+socket-file = <%= $socket_file %>
+<% if $chroot_dir { -%>
+chroot-dir = <%= $chroot_dir %>
+<% } -%>
+server-cert = <%= $server_cert %>
+server-key = <%= $server_key %>
+isolate-workers = true
+keepalive = 32400
+dpd = 90
+mobile-dpd = 1800
+switch-to-tcp-timeout = 25
+try-mtu-discovery = false
+compression = true
+tls-priorities = "NORMAL:%SERVER_PRECEDENCE:%COMPAT:-RSA:-VERS-SSL3.0:-ARCFOUR-128"
+auth-timeout = 240
+min-reauth-time = 3
+cookie-timeout = 300
+deny-roaming = false
+rekey-time = 172800
+rekey-method = ssl
+use-utmp = true
+pid-file = /var/run/ocserv.pid
+device = vpns
+predictable-ips = true
+default-domain = <%= $default_domain %>
+ipv4-network = <%= $ipv4_network %>
+#tunnel-all-dns = true
+dns = <%= $dns %>
+split-dns = <%= $split_dns %>
+ping-leases = true
+cisco-client-compat = true
+dtls-psk = false
+dtls-legacy = true
+<% $routes.each | $route | { -%>
+route = <%= $route %>
+<% } %>
diff --git a/personal_infra/puppet/modules/ocserv/templates/port.conf.epp b/personal_infra/puppet/modules/ocserv/templates/port.conf.epp
new file mode 100644
index 00000000..223c9961
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/templates/port.conf.epp
@@ -0,0 +1,8 @@
+<%- | $tcp_port,
+ $udp_port,
+| -%>
+[Socket]
+ListenStream=
+ListenDatagram=
+ListenStream=<%= $tcp_port %>
+ListenDatagram=<%= $udp_port %>
diff --git a/personal_infra/puppet/modules/podman/manifests/init.pp b/personal_infra/puppet/modules/podman/manifests/init.pp
new file mode 100644
index 00000000..17247aa2
--- /dev/null
+++ b/personal_infra/puppet/modules/podman/manifests/init.pp
@@ -0,0 +1,17 @@
+class podman($user, $storage_driver) {
+ package {'podman':}
+
+ file {['/etc/subuid', '/etc/subgid']:
+ content => "${user}:100000:65536\n",
+ }
+
+ exec {"/usr/bin/sed -i 's/driver = \".*\"/driver = \"${storage_driver}\"/g' /etc/containers/storage.conf":
+ require => Package['podman'],
+ unless => "/usr/bin/grep 'driver = \"${storage_driver}\"' /etc/containers/storage.conf",
+ }
+
+ exec {"/usr/bin/sed -i 's|#mount_program = \"/usr/bin/fuse-overlayfs\"|mount_program = \"/usr/bin/fuse-overlayfs\"|g' /etc/containers/storage.conf":
+ require => Package['podman'],
+    unless  => "/usr/bin/grep '^mount_program = \"/usr/bin/fuse-overlayfs\"' /etc/containers/storage.conf",
+ }
+}
diff --git a/personal_infra/puppet/modules/postgres/manifests/init.pp b/personal_infra/puppet/modules/postgres/manifests/init.pp
new file mode 100644
index 00000000..40fd5726
--- /dev/null
+++ b/personal_infra/puppet/modules/postgres/manifests/init.pp
@@ -0,0 +1,26 @@
+class postgres($pg_hba_conf) {
+ package {'pgdg-redhat-repo':
+ source => 'https://download.postgresql.org/pub/repos/yum/reporpms/EL-9-x86_64/pgdg-redhat-repo-latest.noarch.rpm',
+ }
+ ->
+ package {'postgresql15-server':}
+ ->
+ exec {'/usr/pgsql-15/bin/postgresql-15-setup initdb':
+ creates => '/var/lib/pgsql/15/data/PG_VERSION',
+ }
+ ->
+ [
+ file {'/var/lib/pgsql/15/data/pg_hba.conf':
+ # template at /usr/pgsql-15/share/pg_hba.conf.sample
+ content => $pg_hba_conf,
+ },
+ exec {'/bin/sed -i "s/#listen_addresses = \'localhost\'/listen_addresses = \'*\' /" /var/lib/pgsql/15/data/postgresql.conf':
+ unless => '/bin/grep "listen_addresses = \'\\*\'" /var/lib/pgsql/15/data/postgresql.conf',
+ }
+ ]
+ ~>
+ service {'postgresql-15':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/README.md b/personal_infra/puppet/modules/proxmox/README.md
new file mode 100644
index 00000000..5e5f8bc6
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/README.md
@@ -0,0 +1,36 @@
+# Proxmox
+
+## Networking
+
+Configures a public Internet IP, and an internal network with forwarding for containers and virtual machines.
+
+Add the following to your Proxmox host Ansible variables:
+
+```
+network:
+ ip: dotted.ip.notation
+ netmask: 255.255.255.0
+ gateway: dotted.ip.notation
+ proxmox:
+ ip: 10.3.3.1
+ netmask: 255.255.255.0
+ network: 10.3.3.0/24
+```
+
+## Proxy
+
+Class `proxmox::proxy` can handle proxying internal web servers.
+
+```
+class {'proxmox::proxy':
+ mail => lookup('mail.root_mail'),
+ base_hostname => lookup('network.public_hostname'),
+}
+```
+
+This uses the Apache HTTP Server and mod_md to obtain certificates.
+Your hostname must be publicly accessible, because http challenges are used.
+
+You will receive an email when a server restart is required.
+
+The `base_hostname` certificate is injected daily to pveproxy.
diff --git a/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp b/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp
new file mode 100644
index 00000000..f3464c78
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp
@@ -0,0 +1,17 @@
+class proxmox::freeipa {
+ class {'proxmox':}
+
+ file {['/etc/subuid', '/etc/subgid']:
+ content => epp('proxmox/freeipa_subxid', {'freeipa' => lookup('freeipa')}),
+ }
+
+ # TODO
+ service {['sssd-ssh.socket', 'sssd-pam-priv.socket', 'sssd-nss.socket', 'sssd-sudo.socket', 'sssd-pam.socket']:
+ ensure => stopped,
+ enable => mask,
+ }
+ ~>
+ exec {'/usr/bin/systemctl reset-failed':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/init.pp b/personal_infra/puppet/modules/proxmox/manifests/init.pp
new file mode 100644
index 00000000..b3297eb4
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/init.pp
@@ -0,0 +1,38 @@
+class proxmox {
+ file {'/etc/network/interfaces':
+ content => epp('proxmox/interfaces', {
+ "network" => lookup("network"),
+ }),
+ }
+ ~>
+ exec {'/usr/sbin/ifreload -a':
+ refreshonly => true
+ }
+
+ # to prevent Germany/Hetzner abuse complaints
+ service {['rpcbind.target', 'rpcbind.service', 'rpcbind.socket']:
+ ensure => stopped,
+ enable => mask,
+ }
+
+ # TODO: secure this. Right now I don't use VMs, so just disable it
+ service {'spiceproxy':
+ ensure => stopped,
+ enable => mask,
+ }
+
+ file {'/etc/logrotate.d/pve':
+ ensure => absent,
+ }
+ ~>
+ service {'logrotate':}
+
+ file {'/etc/apt/sources.list.d/pve-enterprise.list':
+ ensure => absent,
+ }
+
+ file {'/etc/apt/sources.list.d/pve-no-subscription.list':
+ content => 'deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription
+',
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/proxy.pp b/personal_infra/puppet/modules/proxmox/manifests/proxy.pp
new file mode 100644
index 00000000..cb3c0bd4
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/proxy.pp
@@ -0,0 +1,52 @@
+class proxmox::proxy ($mail, $base_hostname) {
+ package {'apache2':}
+ ->
+ service {'apache2':
+ enable => true,
+ ensure => running,
+ require => File['/usr/local/bin/notify_md_renewal'],
+ }
+
+ $apache_dep = {
+ require => Package['apache2'],
+ notify => Service['apache2'],
+ }
+
+ ['md', 'ssl', 'proxy_http', 'proxy'].each |$mod| {
+ exec {"/usr/sbin/a2enmod $mod":
+ creates => "/etc/apache2/mods-enabled/$mod.load",
+ * => $apache_dep,
+ }
+ }
+
+ file {'/etc/apache2/sites-enabled/test.conf':
+ content => @("EOT")
+ MDomain $base_hostname auto
+ MDCertificateAgreement accepted
+ MDContactEmail $mail
+ MDNotifyCmd /usr/local/bin/notify_md_renewal
+
+ <VirtualHost *:443>
+ ServerName $base_hostname
+ SSLEngine on
+ </VirtualHost>
+ | EOT
+ ,
+ * => $apache_dep
+ }
+
+ file {'/usr/local/bin/notify_md_renewal':
+ content => @("EOT"/$)
+ #!/bin/sh
+
+ systemctl restart apache2
+ pvenode cert set /etc/apache2/md/domains/$base_hostname/pubcert.pem /etc/apache2/md/domains/$base_hostname/privkey.pem --force 1 --restart 1
+
+ for hook in /usr/local/bin/notify_md_renewal_hook_* ; do
+ \$hook
+ done
+ | EOT
+ ,
+ mode => '0755',
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp b/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp
new file mode 100644
index 00000000..b60caf4c
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp
@@ -0,0 +1,53 @@
+define proxmox::proxy_host (String[1] $target, Optional[String[1]] $overwrite_rh_certs = undef) {
+ if $target =~ /^https:/ {
+ $ssl_fragment = @("EOT")
+ SSLProxyEngine on
+ SSLProxyCheckPeerName off
+ | EOT
+ }
+ else {
+ $ssl_fragment = ""
+ }
+
+ file {"/etc/apache2/sites-enabled/$title.conf":
+ content => @("EOT")
+ MDomain $title
+
+ <VirtualHost *:80>
+ ServerName $title
+ Redirect permanent / https://$title/
+ </VirtualHost>
+
+ <VirtualHost *:443>
+ ServerName $title
+ SSLEngine on
+
+ ProxyPass "/" "$target"
+ ProxyPassReverse "/" "$target"
+ ProxyPreservehost On
+ $ssl_fragment
+ </VirtualHost>
+ | EOT
+ ,
+ }
+ ~>
+ Service['apache2']
+
+ if $overwrite_rh_certs {
+ $pveid = lookup("hostvars.'$overwrite_rh_certs'.proxmox.id");
+
+ file {"/usr/local/bin/notify_md_renewal_hook_$overwrite_rh_certs":
+ content => @("EOT"/$)
+ #!/bin/sh
+
+ cp /etc/apache2/md/domains/$title/pubcert.pem /rpool/data/subvol-$pveid-disk-0/etc/pki/tls/certs/localhost.crt
+ cp /etc/apache2/md/domains/$title/privkey.pem /rpool/data/subvol-$pveid-disk-0/etc/pki/tls/private/localhost.key
+ pct exec $pveid systemctl restart httpd
+ | EOT
+ ,
+ mode => '0755',
+ }
+ }
+
+
+}
diff --git a/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp b/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp
new file mode 100644
index 00000000..c72d1d04
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp
@@ -0,0 +1,2 @@
+root:100000:65536
+root:<%= $freeipa['idrange_start'] %>:<%= $freeipa['idrange_size'] %>
diff --git a/personal_infra/puppet/modules/proxmox/templates/interfaces.epp b/personal_infra/puppet/modules/proxmox/templates/interfaces.epp
new file mode 100644
index 00000000..e0bfeceb
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/templates/interfaces.epp
@@ -0,0 +1,18 @@
+auto lo
+iface lo inet loopback
+
+auto eno1
+iface eno1 inet static
+ address <%= $network['ip'] %>
+ netmask <%= $network['netmask'] %>
+ gateway <%= $network['gateway'] %>
+
+auto vmbr0
+iface vmbr0 inet static
+ address <%= $network['proxmox']['ip'] %>
+ netmask <%= $network['proxmox']['netmask'] %>
+ bridge_ports none
+ bridge_stp off
+ bridge_fd 0
+ post-up echo 1 > /proc/sys/net/ipv4/ip_forward
+ post-up iptables -t nat -A POSTROUTING -s '<%= $network['proxmox']['network'] %>' -o eno1 -j MASQUERADE
diff --git a/personal_infra/puppet/modules/root_mail/manifests/init.pp b/personal_infra/puppet/modules/root_mail/manifests/init.pp
new file mode 100644
index 00000000..66cfeb87
--- /dev/null
+++ b/personal_infra/puppet/modules/root_mail/manifests/init.pp
@@ -0,0 +1,41 @@
+class root_mail {
+ package {'postfix':}
+ ->
+ service {'postfix':
+ ensure => running,
+ enable => true,
+ }
+
+ $cron_service = case $facts['os']['family'] {
+ 'Debian': { 'cron' }
+ 'RedHat': { 'crond' }
+ default: { fail($facts['os']['family']) }
+ }
+
+ # if crond doesn't see /usr/bin/sendmail on startup, it won't send mails
+ Package['postfix']
+ ~>
+ service{$cron_service:
+ ensure => running,
+ }
+
+ if($facts['os']['family'] == 'RedHat') {
+ if($facts['os']['release']['major'] == '9') {
+ package {'s-nail':}
+ }
+ else {
+ package {'mailx':}
+ }
+ }
+
+ mailalias {'root':
+ recipient => lookup('mail.root_mail'),
+ require => Package['postfix'],
+ }
+ ~>
+ exec {'/usr/sbin/postalias /etc/aliases':
+ creates => '/etc/aliases.db',
+ }
+ ~>
+ Service['postfix']
+}
diff --git a/personal_infra/puppet/modules/tinc/manifests/init.pp b/personal_infra/puppet/modules/tinc/manifests/init.pp
new file mode 100644
index 00000000..5ae78126
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/manifests/init.pp
@@ -0,0 +1,100 @@
+class tinc($tinc_name, $tinc_location, $tinc_connect_to, $tinc_locations, $tinc_ip, $tinc_netmask, $tinc_other_networks, $firewall = true) {
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2153663
+ if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9') {
+ copr {'tinc':
+ user => 'koalillo',
+ dist => 'epel-9',
+ }
+ ->
+ Package['tinc']
+ }
+
+ package {'tinc':}
+ ->
+ file {'/etc/tinc':
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}":
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}/hosts":
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}/tinc.conf":
+ content => epp('tinc/tinc.conf', {'tinc_name' => $tinc_name,
+ 'tinc_location' => $tinc_location,
+ 'tinc_connect_to' => $tinc_connect_to,
+ }),
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ $tinc_locations.each |$name, $location| {
+ file {"/etc/tinc/${tinc_name}/generate_host_${name}.sh":
+ content => "#!/bin/sh
+
+set -ue
+
+echo Subnet = ${location['subnet']} >/etc/tinc/${tinc_name}/hosts/${name}
+echo Address = ${location['address']} >>/etc/tinc/${tinc_name}/hosts/${name}
+cat /etc/ansible/tinc/public_${location['address']}.pem >>/etc/tinc/${tinc_name}/hosts/${name}
+ ",
+ mode => '755',
+ }
+ ~>
+ exec {"/etc/tinc/${tinc_name}/generate_host_${name}.sh":
+ require => File["/etc/tinc/${tinc_name}/hosts"],
+ notify => Service["tinc@${tinc_name}"],
+ creates => "/etc/tinc/${tinc_name}/hosts/${name}",
+ }
+ }
+
+ service {"tinc@${tinc_name}":
+ ensure => running,
+ enable => true,
+ }
+
+ if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9') {
+ service {"tinc":
+ ensure => running,
+ enable => true,
+ }
+ }
+ exec {"/bin/cp /etc/ansible/tinc/private.pem /etc/tinc/${tinc_name}/rsa_key.priv":
+ creates => "/etc/tinc/${tinc_name}/rsa_key.priv",
+ require => File["/etc/tinc/${tinc_name}"],
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ file {"/etc/tinc/${tinc_name}/tinc-up":
+ content => epp('tinc/tinc-up', {'ip' => $tinc_ip,
+ 'netmask' => $tinc_netmask,
+ 'tinc_other_networks' => $tinc_other_networks,}),
+ require => File["/etc/tinc/${tinc_name}"],
+ mode => '777',
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ if ($facts['os']['family'] == 'RedHat' and $firewall) {
+ exec {'open firewall for tinc':
+      command => '/usr/bin/firewall-cmd --permanent --add-port=655/tcp --add-port=655/udp',
+ unless => '/usr/bin/firewall-cmd --query-port=655/udp',
+ }
+ ~>
+ exec {'reload firewall for tinc':
+ command => '/usr/bin/firewall-cmd --reload',
+ refreshonly => true,
+ }
+ }
+
+ file {'/etc/sysctl.d/tinc.conf':
+ content => "net.ipv4.ip_forward=1\nnet.ipv4.conf.all.proxy_arp=0\n",
+ }
+ ~>
+ exec {'reload sysctl for tinc':
+ command => '/sbin/sysctl --system',
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/tinc/templates/tinc-up.epp b/personal_infra/puppet/modules/tinc/templates/tinc-up.epp
new file mode 100644
index 00000000..7c89098f
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/templates/tinc-up.epp
@@ -0,0 +1,11 @@
+<%- | $ip,
+ $netmask,
+ $tinc_other_networks,
+| -%>
+#!/bin/sh
+
+ifconfig $INTERFACE <%= $ip %> netmask 255.255.255.255
+
+<% $tinc_other_networks.each |$tinc_other_network| { %>
+ route add -net <%= $tinc_other_network %> dev $INTERFACE
+<% } %>
diff --git a/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp b/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp
new file mode 100644
index 00000000..959fb949
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp
@@ -0,0 +1,8 @@
+<%- | $tinc_name,
+ $tinc_location,
+ $tinc_connect_to,
+| -%>
+Name = <%= $tinc_location %>
+<% $tinc_connect_to.each | $tinc_connection | { -%>
+ConnectTo = <%= $tinc_connection %>
+<% } %>
diff --git a/personal_infra/puppet/site/00-common.pp b/personal_infra/puppet/site/00-common.pp
new file mode 100644
index 00000000..b9f2a6f7
--- /dev/null
+++ b/personal_infra/puppet/site/00-common.pp
@@ -0,0 +1,23 @@
+include automatic_updates
+include basic_software
+include root_mail
+
+if $facts['os']['family'] == 'Debian' {
+ class {'debian':}
+}
+
+$nagios_host = $facts['networking']['fqdn']
+
+nagios_host {$nagios_host:
+ use => 'generic-host',
+ address => lookup({name => 'nagios.address', default_value => $facts['networking']['fqdn']}),
+ max_check_attempts => 5,
+ contact_groups => 'admins',
+ hostgroups => 'linux',
+ check_command => 'check-host-alive',
+}
+
+# https://github.com/alexpdp7/ragent/issues/352
+if $facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9' {
+ package {'compat-openssl11':}
+}
diff --git a/personal_infra/puppet/site/01-dns.pp b/personal_infra/puppet/site/01-dns.pp
new file mode 100644
index 00000000..eab766a4
--- /dev/null
+++ b/personal_infra/puppet/site/01-dns.pp
@@ -0,0 +1,10 @@
+$dns_source_hosts = lookup("dns.source_hosts")
+$dns_other_hosts = $dns_source_hosts.filter |$host_name| { $host_name != $facts["networking"]["fqdn"] }
+
+$dns_other_server_defs = $dns_other_hosts.map |$host_name| {
+ {
+ network_name => join([lookup("hostvars.'$host_name'.network.network_name"), lookup('dns.internal_domain')], '.'),
+ reverse_ip_range => lookup("hostvars.'$host_name'.network.self_internal_network"),
+ dns_ip => lookup("hostvars.'$host_name'.network.self_internal_ip"),
+ }
+}
diff --git a/personal_infra/puppet/site/01-ipa.pp b/personal_infra/puppet/site/01-ipa.pp
new file mode 100644
index 00000000..0aa7a6b4
--- /dev/null
+++ b/personal_infra/puppet/site/01-ipa.pp
@@ -0,0 +1,30 @@
+$ipa_client_package = case $facts['os']['family'] {
+ 'Debian': { 'freeipa-client' }
+ 'RedHat': { 'ipa-client' }
+ default: { fail($facts['os']['family']) }
+}
+
+if $facts['os']['family'] == 'Debian' and $facts['os']['release']['major'] == "11" {
+ class {'debian::backports':}
+ ->
+ Package[$ipa_client_package]
+
+ service {['sssd-pac.service', 'sssd-pac.socket']:
+ ensure => stopped,
+ enable => mask,
+ }
+}
+
+package {$ipa_client_package:}
+package {'sudo':}
+
+if 'lxc' in lookup("group_names") {
+ service {['var-lib-nfs-rpc_pipefs.mount', 'chronyd.service', 'sys-kernel-config.mount', 'sys-kernel-debug.mount', 'auth-rpcgss-module.service']:
+ ensure => stopped,
+ enable => mask,
+ }
+ ~>
+ exec {'/usr/bin/systemctl reset-failed':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/site/01-tinc.pp b/personal_infra/puppet/site/01-tinc.pp
new file mode 100644
index 00000000..6acbbd2e
--- /dev/null
+++ b/personal_infra/puppet/site/01-tinc.pp
@@ -0,0 +1,39 @@
+$tinc_hosts = lookup("groups.tinc")
+$tinc_other_hosts = $tinc_hosts.filter |$host_name| { $host_name != $facts["networking"]["fqdn"] }
+
+$tinc_locations = Hash($tinc_hosts.map |$host_name| { [
+ lookup("hostvars.'$host_name'.network.tinc.location"),
+ {
+ subnet => lookup("hostvars.'$host_name'.network.self_internal_network"),
+ address => lookup("hostvars.'$host_name'.network.public_hostname"),
+ }
+] })
+
+$tinc_connect_to = $tinc_other_hosts.map |$host_name| { lookup("hostvars.'$host_name'.network.tinc.location") }
+
+$tinc_other_networks = $tinc_other_hosts.map |$host_name| { lookup("hostvars.'$host_name'.network.self_internal_network") }
+$ocserv_networks = $tinc_hosts.map |$host_name| { lookup("hostvars.'$host_name'.network.self_internal_network") }
+
+if 'tinc' in lookup("group_names") {
+ class {'tinc':
+ tinc_name => lookup("tinc_global.name"),
+ tinc_location => lookup("network.tinc.location"),
+ tinc_connect_to => $tinc_connect_to,
+ tinc_locations => $tinc_locations,
+ tinc_ip => lookup("network.self_internal_ip"),
+ tinc_netmask => lookup("network.self_internal_netmask"),
+ tinc_other_networks => $tinc_other_networks,
+ firewall => !lookup({"name" => "network.disable_firewall", "default_value" => false}),
+ }
+
+ class {'ocserv':
+ ocserv_tcp_port => 444,
+ ocserv_udp_port => 444,
+ ocserv_default_domain => "int.pdp7.net",
+ ocserv_ipv4_network => lookup("network.ocserv.network"),
+ ocserv_dns => lookup("network.self_internal_ip"),
+ ocserv_split_dns => lookup("tinc_global.ocserv_domain"),
+ ocserv_routes => $ocserv_networks,
+ firewall => !lookup({"name" => "network.disable_firewall", "default_value" => false}),
+ }
+}
diff --git a/personal_infra/puppet/site/02-tinc-dns.pp b/personal_infra/puppet/site/02-tinc-dns.pp
new file mode 100644
index 00000000..ba7d57f6
--- /dev/null
+++ b/personal_infra/puppet/site/02-tinc-dns.pp
@@ -0,0 +1,5 @@
+if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9' and 'tinc' in lookup("group_names") and 'dns' in lookup("group_names")) {
+ exec {'/bin/sed -i "s/^bind-interfaces/bind-dynamic #bind-interfaces/" /etc/dnsmasq.conf':
+ unless => '/bin/grep "bind-dynamic #bind-interfaces" /etc/dnsmasq.conf',
+ }
+}
diff --git a/personal_infra/puppet/site/case.ces.int.pdp7.net.pp b/personal_infra/puppet/site/case.ces.int.pdp7.net.pp
new file mode 100644
index 00000000..2b4844cf
--- /dev/null
+++ b/personal_infra/puppet/site/case.ces.int.pdp7.net.pp
@@ -0,0 +1,10 @@
+node 'case.ces.int.pdp7.net' {
+ class {'dns_dhcp':}
+ ->
+ file {'/etc/dnsmasq.d/static.conf':
+ content => 'host-record=router,router.ces.int.pdp7.net,10.17.19.1
+host-record=tplink,tplink.ces.int.pdp7.net,10.17.19.2
+host-record=case.ces.int.pdp7.net,case,10.17.19.3
+',
+ }
+}
diff --git a/personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp b/personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp
new file mode 100644
index 00000000..5453eb4c
--- /dev/null
+++ b/personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp
@@ -0,0 +1,15 @@
+node 'dixie.bcn.int.pdp7.net' {
+ class {'dns_dhcp':}
+ file {'/etc/dnsmasq.d/static.conf':
+ content => "host-record=router,router.bcn.int.pdp7.net,192.168.76.1
+host-record=archerc7,archerc7.bcn.int.pdp7.net,192.168.76.6
+host-record=dixie.bcn.int.pdp7.net,dixie,192.168.76.2
+dhcp-option=121,10.0.0.0/8,192.168.76.2
+",
+ notify => Service["dnsmasq"],
+ }
+
+ class {'backups':
+ sanoid_config => "",
+ }
+}
diff --git a/personal_infra/puppet/site/h1.pdp7.net.pp b/personal_infra/puppet/site/h1.pdp7.net.pp
new file mode 100644
index 00000000..a3d62bbc
--- /dev/null
+++ b/personal_infra/puppet/site/h1.pdp7.net.pp
@@ -0,0 +1,123 @@
+node 'h1.pdp7.net' {
+ class {'proxmox::freeipa':}
+ class {'dns_dhcp':}
+
+ class {'backups':
+ sanoid_config => @("EOT")
+ # pg data
+ [rpool/data/subvol-204-disk-1]
+ use_template = backup
+
+ # nextcloud
+ [rpool/data/subvol-208-disk-1]
+ use_template = backup
+
+ [template_backup]
+ frequently=0
+ hourly=0
+ daily=100000
+ monthly=0
+ yearly=0
+ autosnap=yes
+ | EOT
+ ,
+ }
+
+ # TODO: ugly; tinc scripts require this :(
+ package {'net-tools':}
+
+ # https://lists.fedorahosted.org/archives/list/freeipa-users@lists.fedorahosted.org/thread/EZSM6LQPSNRY4WA52IYVR46RSXIDU3U7/
+ # SSH hack
+ file {'/etc/ssh/sshd_config.d/weak-gss.conf':
+ content => "GSSAPIStrictAcceptorCheck no\n",
+ }
+ ~>
+ service {'sshd':}
+
+ class {'proxmox::proxy':
+ mail => lookup('mail.root_mail'),
+ base_hostname => lookup('network.public_hostname'),
+ }
+
+ proxmox::proxy_host {'idp.pdp7.net':
+ target => 'https://ipsilon.h1.int.pdp7.net/',
+ overwrite_rh_certs => 'ipsilon.h1.int.pdp7.net',
+ }
+
+ proxmox::proxy_host {'weight.pdp7.net':
+ target => 'https://k8s-prod.h1.int.pdp7.net/',
+ }
+
+ proxmox::proxy_host {'blog.pdp7.net':
+ target => 'https://k8s-test.h1.int.pdp7.net/',
+ }
+
+ proxmox::proxy_host {'miniflux.pdp7.net':
+ target => 'http://miniflux.h1.int.pdp7.net:8080/',
+ }
+
+ proxmox::proxy_host {'nextcloud.pdp7.net':
+ target => 'http://nextcloud.h1.int.pdp7.net/',
+ }
+
+ package {'haproxy':}
+ ->
+ file {'/etc/haproxy/haproxy.cfg':
+ content => @("EOT")
+ global
+ log /dev/log local0
+ log /dev/log local1 notice
+ chroot /var/lib/haproxy
+ stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+ stats timeout 30s
+ user haproxy
+ group haproxy
+ daemon
+
+ # Default SSL material locations
+ ca-base /etc/ssl/certs
+ crt-base /etc/ssl/private
+
+ # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
+ ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
+ ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
+
+ defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ errorfile 400 /etc/haproxy/errors/400.http
+ errorfile 403 /etc/haproxy/errors/403.http
+ errorfile 408 /etc/haproxy/errors/408.http
+ errorfile 500 /etc/haproxy/errors/500.http
+ errorfile 502 /etc/haproxy/errors/502.http
+ errorfile 503 /etc/haproxy/errors/503.http
+ errorfile 504 /etc/haproxy/errors/504.http
+
+ frontend gemini
+ bind :1965
+ mode tcp
+ option tcplog
+ default_backend blog
+ # TODO: sni
+ # tcp-request inspect-delay 5s
+ # acl blog req_ssl_sni blog.pdp7.net
+ # use_backend blog if blog
+
+ backend blog
+ mode tcp
+ server blog k8s-test.h1.int.pdp7.net:31965
+ | EOT
+ ,
+ }
+ ~>
+ service {'haproxy':
+ enable => true,
+ ensure => running,
+ }
+}
diff --git a/personal_infra/puppet/site/h2.pdp7.net.pp b/personal_infra/puppet/site/h2.pdp7.net.pp
new file mode 100644
index 00000000..51dda7a1
--- /dev/null
+++ b/personal_infra/puppet/site/h2.pdp7.net.pp
@@ -0,0 +1,9 @@
+node 'h2.pdp7.net' {
+ class {'dns_dhcp':}
+
+ file {'/etc/dnsmasq.d/static.conf':
+ content => "dhcp-host=freeswitch,10.42.42.3,freeswitch
+host-record=h2.h2.int.pdp7.net,10.42.42.1
+",
+ }
+}
diff --git a/personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..ef81ee3f
--- /dev/null
+++ b/personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp
@@ -0,0 +1,2 @@
+node 'ipa8.h1.int.pdp7.net' {
+}
diff --git a/personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..2228f424
--- /dev/null
+++ b/personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp
@@ -0,0 +1,3 @@
+node 'ipa9.h1.int.pdp7.net' {
+ class {'freeipa::server':}
+}
diff --git a/personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..b5f756cf
--- /dev/null
+++ b/personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp
@@ -0,0 +1,3 @@
+node 'ipsilon.h1.int.pdp7.net' {
+ class {'ipsilon':}
+}
diff --git a/personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp b/personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp
new file mode 100644
index 00000000..064af4d4
--- /dev/null
+++ b/personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp
@@ -0,0 +1,10 @@
+node 'maelcum.mad.int.pdp7.net' {
+ class {'dns_dhcp':}
+ file {'/etc/dnsmasq.d/static.conf':
+ content => 'host-record=router,router.mad.int.pdp7.net,10.34.10.1
+dhcp-host=d8:8c:79:1a:11:59,chromecast,10.34.10.3
+host-record=maelcum.mad.int.pdp7.net,maelcum,10.34.10.2
+',
+ notify => Service['dnsmasq'],
+ }
+}
diff --git a/personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp b/personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..c6c0862e
--- /dev/null
+++ b/personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp
@@ -0,0 +1,8 @@
+node 'miniflux.h1.int.pdp7.net' {
+ class {'miniflux':
+ database_url => "host=pg.h1.int.pdp7.net user=miniflux dbname=miniflux sslmode=disable",
+ polling_frequency => 60,
+ batch_size => 100,
+ polling_parser_error_limit => 0,
+ }
+}
diff --git a/personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp b/personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..6db53e62
--- /dev/null
+++ b/personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp
@@ -0,0 +1,16 @@
+node 'nagios.h1.int.pdp7.net' {
+ class {'nagios':}
+ class {'nagios::k8s':}
+
+ $k8s_hosts = lookup("groups.k8s")
+
+ $k8s_hosts.each |String $k8s_host| {
+ nagios_host {$k8s_host:
+ use => 'generic-host',
+ max_check_attempts => 5,
+ contact_groups => 'admins',
+ hostgroups => 'k8s',
+ check_command => 'check-host-alive',
+ }
+ }
+}
diff --git a/personal_infra/puppet/site/nc1.pdp7.net.pp b/personal_infra/puppet/site/nc1.pdp7.net.pp
new file mode 100644
index 00000000..e6939c8e
--- /dev/null
+++ b/personal_infra/puppet/site/nc1.pdp7.net.pp
@@ -0,0 +1,3 @@
+node 'nc1.pdp7.net' {
+ class {'freeipa::server':}
+}
diff --git a/personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp b/personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..29753d5b
--- /dev/null
+++ b/personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp
@@ -0,0 +1,22 @@
+node 'nextcloud.h1.int.pdp7.net' {
+ class {'nextcloud':
+ database_name => 'nextcloud',
+ database_user => 'nextcloud',
+ database_host => 'pg.h1.int.pdp7.net',
+ }
+
+ file {'/var/lib/nextcloud/apps':
+ ensure => 'link',
+ target => '/nextcloud/apps/',
+ }
+
+ file {'/var/lib/nextcloud/data':
+ ensure => 'link',
+ target => '/nextcloud/data/',
+ }
+
+ file {'/etc/nextcloud/config.php':
+ ensure => 'link',
+ target => '/nextcloud/config.php',
+ }
+}
diff --git a/personal_infra/puppet/site/pg.h1.int.pdp7.net.pp b/personal_infra/puppet/site/pg.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..6c5ef035
--- /dev/null
+++ b/personal_infra/puppet/site/pg.h1.int.pdp7.net.pp
@@ -0,0 +1,16 @@
+node 'pg.h1.int.pdp7.net' {
+ class {'postgres':
+ pg_hba_conf => @(EOT)
+ # TYPE DATABASE USER ADDRESS METHOD
+ # "local" is for Unix domain socket connections only
+ local all all peer
+ host weight k8s_prod k8s-prod.h1.int.pdp7.net trust
+ host weight grafana grafana.h2.int.pdp7.net trust
+ host miniflux miniflux miniflux.h1.int.pdp7.net trust
+ host nextcloud nextcloud nextcloud.h1.int.pdp7.net trust
+ | EOT
+ ,
+ }
+
+ package {'postgresql15-contrib':} # hstore for miniflux
+}
diff --git a/personal_infra/puppet/site/ws.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ws.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..d667839f
--- /dev/null
+++ b/personal_infra/puppet/site/ws.h1.int.pdp7.net.pp
@@ -0,0 +1,6 @@
+node 'ws.h1.int.pdp7.net' {
+ class {'podman':
+ user => 'alex',
+ storage_driver => 'zfs',
+ }
+}
diff --git a/personal_infra/requirements.loose b/personal_infra/requirements.loose
new file mode 100644
index 00000000..0b49404e
--- /dev/null
+++ b/personal_infra/requirements.loose
@@ -0,0 +1,4 @@
+ansible
+kubernetes
+boto3
+botocore
diff --git a/personal_infra/requirements.txt b/personal_infra/requirements.txt
new file mode 100644
index 00000000..8eb95cd8
--- /dev/null
+++ b/personal_infra/requirements.txt
@@ -0,0 +1,31 @@
+ansible==8.2.0
+ansible-core==2.15.2
+boto3==1.28.17
+botocore==1.31.17
+cachetools==5.3.1
+certifi==2023.7.22
+cffi==1.15.1
+charset-normalizer==3.2.0
+cryptography==41.0.3
+google-auth==2.22.0
+idna==3.4
+importlib-resources==5.0.7
+Jinja2==3.1.2
+jmespath==1.0.1
+kubernetes==27.2.0
+MarkupSafe==2.1.3
+oauthlib==3.2.2
+packaging==23.1
+pyasn1==0.5.0
+pyasn1-modules==0.3.0
+pycparser==2.21
+python-dateutil==2.8.2
+PyYAML==6.0.1
+requests==2.31.0
+requests-oauthlib==1.3.1
+resolvelib==1.0.1
+rsa==4.9
+s3transfer==0.6.1
+six==1.16.0
+urllib3==1.26.16
+websocket-client==1.6.1
diff --git a/personal_infra/setup_ipa_replicas.md b/personal_infra/setup_ipa_replicas.md
new file mode 100644
index 00000000..95c9321b
--- /dev/null
+++ b/personal_infra/setup_ipa_replicas.md
@@ -0,0 +1,24 @@
+Update and reboot all IPA servers: https://lists.fedorahosted.org/archives/list/freeipa-users@lists.fedorahosted.org/thread/2WMK5QOAI4TYF23UKODW3M6WB65BJCHT/
+
+If the host has a firewall (e.g. physical or virtual, not LXC container):
+
+```
+firewall-cmd --permanent --add-port={80/tcp,443/tcp,389/tcp,636/tcp,88/tcp,88/udp,464/tcp,464/udp,53/tcp,53/udp}
+firewall-cmd --reload
+```
+
+Join the server to IPA:
+
+```
+ipa-client-install -p principal --domain=ipa.pdp7.net -W --mkhomedir --ntp-pool=pool.ntp.org --force-join
+```
+
+Replace `--ntp-pool` with `-N` if this is a host without clock (e.g. an LXC container).
+Remove `--force-join` if you have never added this host to IPA.
+
+```
+ipa-replica-install --ip-address=thishostaddress -n ipa.pdp7.net -P alex --setup-ca --setup-dns --forwarder=upstreamdnsforthishost
+```
+
+FreeIPA doesn't seem to like having different versions. When updating, when you add a new server with a new version, remove the rest of servers.
+You might have issues joining new replicas otherwise.
diff --git a/personal_infra/setup_venv b/personal_infra/setup_venv
new file mode 100755
index 00000000..0ff1e11b
--- /dev/null
+++ b/personal_infra/setup_venv
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+rm -rf .venv
+python3 -m venv .venv
+.venv/bin/pip install -U pip
+.venv/bin/pip install -r requirements.txt
diff --git a/personal_infra/talos-check b/personal_infra/talos-check
new file mode 160000
+Subproject a7b290dd08729299b05c632d4348d0e1c931a59
diff --git a/personal_infra/up.py b/personal_infra/up.py
new file mode 100755
index 00000000..131fc9e6
--- /dev/null
+++ b/personal_infra/up.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+import argparse
+from concurrent import futures
+import pathlib
+import shlex
+import shutil
+import subprocess
+import textwrap
+import yaml
+
+
+"""
+This script performs Puppet catalog compilation without a central server.
+
+It receives the following arguments:
+
+* directory: a working directory. The script expects to find some data, like
+ variables to use in the compilation process, facts, etc. The script also
+ generates intermediate files and output there.
+
+* modulepath: path to your modules directory
+* manifest: path to your site directory
+* host: one or more hosts to compile catalogs for
+
+The script expects the following content on the working directory:
+
+directory/
+ global_vars/*.json: these JSON files will be available to all hosts
+ host_vars/{host}/*.json: these JSON files will be available in each host
+    facts/{host}.yaml: output from "facter -y" for each host
+
+And produces the following files:
+
+directory/
+ output/
+ {host}/
+ catalog.json: the compiled catalog for the host
+ modules: a copy of the module directory
+
+Just ship the {host} directory to each host and run:
+
+$ puppet apply --catalog .../catalog.json --modulepath=.../modules/
+
+Check the apply_catalog Ansible role for example usage.
+
+As we have the catalogs, we can manipulate them. See
+pseudo_resource_exporter.py for an example hack. We can simulate exported
+resources without PuppetDB.
+"""
+
+
+def build_hiera(directory, build_host_dir, host):
+ hiera_data_dir = build_host_dir / "data"
+ hiera_data_dir.mkdir()
+
+ hiera = {
+ "version": 5,
+ "hierarchy": []
+ }
+
+ global_vars_dir = directory / "global_vars"
+
+ for global_var in global_vars_dir.glob("*.json"):
+ shutil.copy(global_var, hiera_data_dir / global_var.name)
+ hiera["hierarchy"].append({
+ "name": global_var.name.removesuffix(".json"),
+ "path": global_var.name,
+ "data_hash": "json_data",
+ })
+
+ host_vars_dir = directory / "host_vars" / host
+
+ for host_var in host_vars_dir.glob("*.json"):
+ shutil.copy(host_var, hiera_data_dir / host_var.name)
+ hiera["hierarchy"].append({
+ "name": host_var.name.removesuffix(".json"),
+ "path": host_var.name,
+ "data_hash": "json_data",
+ })
+
+ hiera_path = build_host_dir / "hiera.yaml"
+ with open(hiera_path, "w") as f:
+ yaml.dump(hiera, f)
+
+ return hiera_path
+
+
+def build_facts(directory, build_host_dir, host):
+ source_facts_dir = directory / "facts"
+
+ with open(source_facts_dir / f"{host}.yaml") as f:
+ facts_yaml_content = f.read()
+
+ dest_facts_dir = build_host_dir / "yaml" / "facts"
+ dest_facts_dir.mkdir(parents=True)
+
+ with open(dest_facts_dir / f"{host}.yaml", "w") as f:
+ f.write("--- !ruby/object:Puppet::Node::Facts\nvalues:\n")
+ f.write(textwrap.indent(facts_yaml_content, " "))
+
+
+def compile_catalog(directory, build_dir, modulepath, manifest, output_dir,
+ host):
+ build_host_dir = build_dir / host
+ build_host_dir.mkdir()
+
+ hiera_path = build_hiera(directory, build_host_dir, host)
+
+ build_facts(directory, build_host_dir, host)
+
+ cmd = [
+ "puppet", "catalog", "compile",
+ f"--modulepath={modulepath}",
+ f"--hiera_config={hiera_path}",
+ f"--manifest={manifest}",
+ "--terminus", "compiler",
+ "--vardir", build_host_dir,
+ "--facts_terminus", "yaml",
+ host
+ ]
+ print(shlex.join(map(str, cmd)))
+ catalog_compile = subprocess.run(
+ cmd, check=True,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ encoding="utf8"
+ )
+ assert not catalog_compile.stderr, catalog_compile.stderr
+
+ catalog_stdout = catalog_compile.stdout
+
+ _, catalog = catalog_stdout.split("\n", 1)
+
+ host_output_dir = output_dir / host
+ host_output_dir.mkdir()
+ with open(host_output_dir / "catalog.json", "w") as f:
+ f.write(catalog)
+
+ shutil.copytree(modulepath, host_output_dir / "modules")
+
+
+def up(directory: pathlib.Path, modulepath, manifest, hosts: list[str]):
+ build_dir = directory / "build"
+ build_dir.mkdir()
+
+ output_dir = build_dir / "output"
+ output_dir.mkdir()
+
+ def _compile_catalog(host):
+ compile_catalog(
+ directory=directory,
+ build_dir=build_dir,
+ modulepath=modulepath,
+ manifest=manifest,
+ output_dir=output_dir,
+ host=host)
+
+ # list because exceptions do not happen unless you iterate over the result
+ list(futures.ThreadPoolExecutor().map(_compile_catalog, hosts))
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("directory")
+ parser.add_argument("modulepath")
+ parser.add_argument("manifest")
+ parser.add_argument("hosts", nargs="+", metavar="host")
+
+ args = parser.parse_args()
+ up(
+ directory=pathlib.Path(args.directory),
+ modulepath=args.modulepath,
+ manifest=args.manifest,
+ hosts=args.hosts
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/programming/python/creating_nice_python_cli_tools.md b/programming/python/creating_nice_python_cli_tools.md
new file mode 100644
index 00000000..53b3f51b
--- /dev/null
+++ b/programming/python/creating_nice_python_cli_tools.md
@@ -0,0 +1,40 @@
+Following this advice can make your tools easy to install by others, pleasant to use, robust, cross-platform, and powerful.
+
+* Use [my suggestions for setting up Python projects](project_setup.md), particularly:
+ * Provide instructions for installing your tool using [pipx](https://github.com/pypa/pipx).
+ Using pipx, people can install and upgrade your script using a simple command that requires no administrative privileges (but it requires having Python and pipx installed).
+ * As you are using [poetry](https://python-poetry.org/), following the indications above:
+ * Use [Poetry's support for specifying scripts](https://python-poetry.org/docs/pyproject/#scripts), so when installing your tool via pipx or other means, your scripts are added to the user's path.
+ * Dependencies you define will be installed automatically along with your application.
+ This reduces the effort users need to use your application if you need third-party libraries.
+ However, I would still advise to avoid unnecessary dependencies (for simple HTTP requests you can use the base library. If you do complex requests, then using a third-party library might be much simpler).
    As you are using pipx, those dependencies will be installed to an isolated virtualenv, so they will not interfere with anything on your system.
+ * As your application is properly packaged, you can split your code into different Python files and use imports without issues.
+* If your application requires secrets, such as credentials or others, consider using:
+ * The standard [getpass](https://docs.python.org/3/library/getpass.html) module.
+ This prompts for a string on the command line, hiding what the user types.
+ * The [keyring](https://pypi.org/project/keyring/) library.
+ This stores secrets using your operating system facilities.
+* Use the [appdirs](https://pypi.org/project/appdirs/) library to obtain "user paths", such as the users directory for configuration, cache, or data.
+ appdirs knows the proper paths for Linux, macOS and Windows.
+ So for example, if your tool caches files and uses appdirs to find the cache directory, you might gain benefits such as cache files being excluded from backups.
+* If your tool requires significant time to complete a process:
+ * Use the [tqdm](https://tqdm.github.io/) library to add a progress bar.
+ * But also consider using the standard [concurrent.futures](https://docs.python.org/3/library/concurrent.futures.html) module to add parallelism if you can.
+ The [map](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor.map) function is particularly easy to use.
+ Use it with a [ThreadPoolExecutor](https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor) if the parallel tasks are IO-bound or invoke other programs, or with [ProcessPoolExecutor](https://docs.python.org/3/library/concurrent.futures.html#processpoolexecutor) if they perform significant CPU work in Python (to avoid the [GIL](https://wiki.python.org/moin/GlobalInterpreterLock)).
+ * Consider using the standard [logging](https://docs.python.org/3/library/logging.html) module with a format that uses a timestamp, so users can inspect how much time is spent in different parts of the program.
+ You can also use logging module to implement flags such as `--debug` and `--verbose`.
+* Although fancier tools exist, the standard [argparse](https://docs.python.org/3/library/argparse.html) module is good enough for most argument parsing.
+ It has decent support for [sub-commands](https://docs.python.org/3/library/argparse.html#sub-commands), and the linked document describes a very nice pattern to define functions for sub-commands, under "One particularly effective way of handling sub-commands..."
+ Provide help text for non-obvious parameters.
+ argparse supports a lot of different argument types with a lot of functionality out of the box, such as enumerated options, integers, and file names.
+ The main reason for using a fancier argument parsing is that argparse does not have autocomplete support, but you can add [argcomplete](https://github.com/kislyuk/argcomplete) to an argparse program with minimal modifications to retrofit autocomplete.
+* Remember that the standard [json](https://docs.python.org/3/library/json.html) module is built-in.
+ You can use it to add a mode to your tool that generates JSON output instead of human-readable output, for easy automation of your tool, maybe using [jq](https://stedolan.github.io/jq/) or [fx](https://github.com/antonmedv/fx).
+* Use the standard [subprocess](https://docs.python.org/3/library/subprocess.html) module to execute other commands.
+ * Remember never to use `shell=True`, so among other things, your tool will work correctly with files using spaces in their names.
+ * Use `check=True` so if the subprocess fails, an exception will be raised.
+    This is likely the best default behavior: although the resulting traceback is a bit ugly, it normally prevents subtle problems, so it is a safe option.
+
+You can find examples for many of those techniques in my [repos](https://github.com/alexpdp7?tab=repositories&q=&type=&language=python&sort=).
diff --git a/programming/python/dependency_handling.md b/programming/python/dependency_handling.md
new file mode 100644
index 00000000..3f1db103
--- /dev/null
+++ b/programming/python/dependency_handling.md
@@ -0,0 +1,116 @@
+# Some brief notes about Python dependency management
+
+This article is mostly written for people who have already used Setuptools and have faced issues derived from its "limitations".
+Specifically, if you have seen files named `requirements.txt` and have wondered how they work, what problem do they solve, and if they are something you should investigate, I hope you find this article interesting.
+
+If you are starting to write Python software and you are looking at an introductory text about distributing your software and using dependencies, I would recommend you to skip directly to using the "new generation" Python packaging tools.
+This way, you can avoid most of the complexities in this post.
+You can also check out the [Python Packaging User Guide](https://packaging.python.org/en/latest/) and [my own prescriptive project setup recommendations](project_setup.md).
+
+Most programs can use third-party libraries to implement parts of their functionality without implementing everything from scratch.
+
+pip is the recommended package installer for Python.
+Python installers include pip, although pip is a component that can be installed separately from Python.
+Some Linux distributions separate pip from the main Python package (for example, Debian has a `python3` package and a `python3-pip` package), but a Python install without `pip` is not really fully functional for many purposes.
+
+pip fetches Python packages from diverse sources and adds them to a Python installation.
+Python packages can specify other packages as dependencies, so when pip installs a package, it also installs the required dependency chain.
+
+The traditional mechanism for packages to specify dependencies is Setuptools and other closely related projects.
+
+## About Setuptools
+
+Setuptools is a build and distribution system based on the distutils module that was part of the base Python library.
+
+Package metadata in Setuptools can be defined in many different ways, such as a `setup.py` file, a `setup.cfg` file, or a `pyproject.toml` file.
+In these files, you list the dependencies for your package, specifying the name of the package and constraints.
+
+Constraints define which version of a dependency you want to use.
+The constraint does not need to be an exact version, it can also be a range of versions, or a constraint such as "lower than version n".
+
+(Constraints additionally can specify other restrictions, such as requiring different versions for different Python versions, and other interesting possibilities.)
+
+When using setuptools and dependencies using setuptools, you quickly can run into problems.
+
+If packages specify exact dependency versions, then there are many chances of packages having conflicting requirements.
+
+If packages do not specify exact dependency versions, then the actual versions that pip installs can vary as new versions of packages are released.
+This can lead to bugs, because code might not work properly when using newer versions of dependencies.
+
+## Version locking and `requirements.txt`
+
+There is a dependency-management approach that can be very effective in many cases.
+
+This approach involves differentiating between "applications" and "libraries".
+
+Libraries are Python packages meant to be used as a dependency by other Python code.
+Applications are Python code that may use other libraries as dependencies, but which no other Python code depends on.
+
+### Specifying dependencies for libraries
+
+Libraries specify coarse but safe dependency requirements.
+
+Suppose that we are developing the foo library.
+The foo library depends on the bar library.
+The bar library uses a versioning scheme similar to semantic versioning.
+When we develop the foo library, we use version 1.2.3 of the bar library.
+
+Then, we specify that the foo library depends on the bar library, with a version constraint like `>=1.2.3, <1.3`.
+This version constraint lets the library be used with the 1.2.4 version, which is likely compatible with the code in the foo library, and may even introduce valuable bug fixes.
+However, the 1.3.0 version of the bar library would not be a valid dependency.
+This is probably a good idea; the 1.3.0 may contain changes that the foo code is incompatible with.
+(When we later create new versions of the foo library, we may want to consider depending on newer versions of the bar library, and possibly update the code so it continues working correctly.)
+
+This helps reduce conflicts.
+As libraries specify coarse dependencies, the chances of two libraries having incompatible requirements are lower.
+However, specifying coarse dependencies probably requires more testing to ensure that if different dependency versions are installed, the library works correctly.
+
+### Specifying dependencies for applications
+
+Applications specify exact dependency requirements.
+
+While libraries are not usually run on their own, applications are executed directly by end users.
+If a library does not work well, then you can temporarily go back to an older version or apply other fixes.
+But if an application does not work correctly, you have worse problems.
+
+If you specify exact dependency versions for an application, users of the application will always use a single combination of dependencies, which makes making things robust easy.
+
+A popular approach is for applications to specify Setuptools requirements with coarse versioning (just like libraries do), but to provide a list of the specific versions used for development and deployment.
+To create this list of dependencies, you can install your application using pip or some other mechanism, then extract a list of the dependency versions that were installed and store it in a file.
+For example, you can do this by executing:
+
+```
+$ pip install . # executed from the root of the application source code
+$ pip freeze >requirements.txt
+```
+
+Later on, if you install the application using the following command:
+
+```
+$ pip install -r requirements.txt
+```
+
+Then you will always install the same set of dependencies, preventing issues by updated dependencies.
+
+Note: pip and other package installers do *not* use `requirements.txt` or any other similar file outside the `setup.cfg` file and the other files defined in Setuptools.
+If you do not install your application explicitly using `pip install -r requirements.txt`, you will probably install a different set of dependencies.
+
+## Beyond version locking
+
+Following the approach above can be enough to use dependencies correctly.
+
+However, maintaining the Setuptools version dependencies and `requirements.txt` is straightforward, but tedious.
+Also, this approach of dependency management is not obvious, and may not be easy to get right completely.
+
+For these reasons, several projects have appeared that implement approaches similar to the one described above, but more automatic and prescriptive.
+These projects often manage automatically a file equivalent to `requirements.txt`, while the developer only specifies coarse dependencies for applications.
+
+Some of these tools are listed by [a page about relevant projects about packaging](https://packaging.python.org/en/latest/key_projects/) maintained by the [Python Packaging Authority](https://www.pypa.io/).
+Look for tools about managing dependencies and packaging.
+
+Thanks to some improvements in the Python ecosystem, pip can nowadays install dependencies using many different packaging tools correctly.
+
+These projects can also offer some other improvements, so I would encourage Python developers to investigate them and try them out.
+
+However, also note that following a correct approach, Setuptools and manual version locking are perfectly valid ways to manage Python code dependencies.
+Also, there are projects such as [pip-tools](https://github.com/jazzband/pip-tools) that complement Setuptools, addressing many of the issues described here, without requiring entirely new packaging tools.
diff --git a/programming/python/project_setup.md b/programming/python/project_setup.md
new file mode 100644
index 00000000..e945be71
--- /dev/null
+++ b/programming/python/project_setup.md
@@ -0,0 +1,117 @@
+There is a significant amount of Python project tooling. This document collects my personal recommendations on how to set up a Python project.
+
+It is not meant to reflect the best or most common practices, just my personal taste.
+
+# Use pipx
+
+Pipx is a tool that installs Python packages to your user environment. It creates an isolated environment for every tool, so if you install multiple packages they won't have version conflicts. It also takes care of adding a module's entrypoints to your user path.
+
+Pipx is useful for two purposes:
+
+* To install tools such as poetry
+* To let other users install your software easily
+
+# Use Poetry
+
+When using third-party dependencies in your Python code, it is highly interesting to avoid installing any project-specific dependency outside the project.
+
+To achieve that, traditionally virtualenvs are used; those are miniature Python installations where you can install any library you want. Virtualenvs need to be explicitly activated to be used, so it is easy to have a virtualenv for each Python project you are working on.
+
+Poetry is a tool that leverages virtualenvs to manage a project's dependencies, managing virtualenvs automatically.
+
+There are many similar tools such as pipenv and there are many multiple ways to specify a project's dependencies (`setup.py`, `requirements.txt`, etc.); Poetry provides a convenient way to do everything.
+
+You can install poetry using pipx.
+
+Commit `poetry.lock` to version control. For runtime dependencies, specify bounded dependency ranges. For development dependencies, use unbounded dependencies.
+
+# Test your code
+
+Write the necessary amount of tests so you can make changes to your code with confidence.
+
+If you find yourself iterating over a piece of code slowly, try to isolate the code you are writing so it can be tested in isolation for faster iteration.
+
+## Use pytest for testing
+
+Python provides *two* testing frameworks in its standard library, but they have some limitations:
+
+* `unittest` is an xUnit-style testing framework which follows non-PEP-8 naming conventions (probably because it copied Java's JUnit), so extra work needs to be done to make your test cases PEP-8 compliant
+* `doctest` is a tool which allows you to run tests embedded in docstrings. For some code, it is great and helps you provide good, up-to-date documentation. However, a significant amount of code is awkward to test using `doctest`.
+
+Use `doctest` whenever you can, but outside that, use `pytest` to write PEP-8-compliant tests.
+
+Ensure that your test suite runs correctly by running `pytest` without any arguments.
+
+Use plain Python's `assert` statements to check assertions in your tests; `pytest` does some magic to provide nice error messages on failed assertions.
+
+## Gate your changes with testing
+
+Set up your version control so changes cannot be made to your main codeline without passing continuous integration tests (and possibly, code review).
+
+# Perform automated code formatting and static checking
+
+## Use Black
+
+Use Black to format your code.
+
+## Use flake8
+
+Use `flake8` to gate changes. Use `flake8-black` to prevent committed code which does not follow Black style.
+
+## Evaluate the use of mypy
+
+If you think it will benefit your codebase, consider integrating mypy as soon as possible.
+
+# Version control
+
+## Use a minimal gitignore file
+
+Keep editor-specific ignores in a personal `excludesfile`. Do not include patterns in gitignore which do not match anything generated by documented and supported development procedures.
+
+## Keep your code together
+
+All the code you modify as part of the project should be kept in a single repository so you can make atomic changes. If you find yourself making changes across multiple repositories and having to coordinate them, consider merging those repositories.
+
+Use git submodules or similar mechanisms to refer to code you modify that must be kept external.
+
+Use git subrepo to publish parts of the repository outside the main repository if needed.
+
+# Support multiple modern versions of Python
+
+Unless you have a specific requirement to support Python 2, don't.
+
+It is reasonable to support multiple versions of Python 3 from 3.4 onwards. Supporting the oldest versions might limit the features you can use (although features from more modern versions have been backported), so evaluate which operating systems and versions you need to support and try to support Python versions readily available for them (in Linux, by using mainline distro repos, for instance).
+
+Even if you are not running your code using the latest versions of Python, try to support all the newest available versions.
+
+Use continuous integration to run your tests in all supported versions of Python.
+
+This implies that development should be possible to do without using a specific version of Python, so pyenv or similar is not strictly needed.
+
+# Use ipython and ipdb
+
+Add ipython and ipdb as development dependencies.
+
+# Versioning
+
+Unless you have a specific requirement to support multiple versions of your code or to distribute to a platform that *requires* versioning (such as pypi), do not explicitly version your code but allow implicit versioning (e.g. it should be possible to identify which Git commit deployed code comes from).
+
+# Documentation
+
+Provide a `README` containing:
+
+* The purpose of the code
+* How to use the code
+* How to develop the code
+
+If the `README` becomes unwieldy, separate usage instructions to `USAGE` and/or development instructions to `HACKING`.
+
+Provide docstrings detailing the external interface of Python modules. Provide internal comments in modules detailing implementation.
+
+Consider the use of Sphinx to render documentation and publish it to the web if developing a library/framework.
+
+# Distribution
+
+If your code can be executed from a command line, consider documenting installation via `pipx`.
+
+If your code has significant binary dependencies, consider publishing a Docker image. Design your Docker images so rebuilding the image on most changes is fast.
diff --git a/programming/python/python_modules_primer.md b/programming/python/python_modules_primer.md
new file mode 100644
index 00000000..8932c19f
--- /dev/null
+++ b/programming/python/python_modules_primer.md
@@ -0,0 +1,269 @@
+# Python Modules Primer
+
+## Prerequisites
+
+These instructions assume a Linux environment.
+A macOS environment is similar, but not identical.
+A Windows environment is more different.
+
+## Previous knowledge
+
+### A refresher on the `PATH` variable
+
+If you execute the following command in your terminal:
+
+```
+$ echo hello
+```
+
+, the shell searches for the `echo` command in the directories listed in your `PATH` environment variable.
+You can display your `PATH` variable by running:
+
+```
+$ echo $PATH
+/home/user/.local/bin:/home/user/bin:/usr/share/Modules/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin
+```
+
+The contents of the `PATH` variable depend on your particular environment.
+
+If you run the following command:
+
+```
+$ which echo
+/usr/bin/echo
+```
+
+The `which` command prints where the shell locates the `echo` command.
+
+### A refresher on shell scripts
+
+If you create a file named `foo.sh` with the following contents:
+
+```
+#!/bin/sh
+
+echo hello
+```
+
+You define a "shell script".
+The first line indicates that this shell script is executed by using the `/bin/sh` command.
+The rest of the file are commands to be executed by the shell command.
+These commands behave as if you typed them into your terminal, so if you execute this script, the command `echo hello` will be executed, printing `hello`.
+
+If you try to run `foo.sh` like you run the `echo` command, by typing its name, it does not work:
+
+```
+$ foo.sh
+bash: foo.sh: command not found...
+```
+
+, because the shell looks for `foo.sh` in the directories listed in the `PATH` variable.
+Unless you created the `foo.sh` file in a directory like `/usr/bin`, the shell will not find the `foo.sh` command.
+
+A solution to this problem is to specify the path to the `foo.sh` file, instead of relying on the `PATH` variable.
+However, if you do this, you face a second problem.
+
+```
+$ ./foo.sh
+bash: ./foo.sh: Permission denied
+```
+
+This happens because only files with the executable permission can be executed in this way.
+To solve this, add the executable permission; then it works:
+
+```
+$ chmod +x foo.sh
+$ ./foo.sh
+hello
+```
+
+## The `import` statement in Python
+
+### Importing from the Python standard library
+
+Run the following commands by using the Python REPL:
+
+```
+$ python3
+Python 3.9.17 (main, Aug 9 2023, 00:00:00)
+[GCC 11.4.1 20230605 (Red Hat 11.4.1-2)] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+>>> import datetime
+>>> datetime.datetime.now()
+datetime.datetime(2023, 9, 11, 21, 53, 16, 331236)
+```
+
+`import` works in a similar way to running a command in the shell.
+Python searches a number of directories looking for the `datetime` module.
+
+To see which directories are searched, run:
+
+```
+$ python3
+>>> import sys
+>>> sys.path
+['', '/usr/lib64/python39.zip', '/usr/lib64/python3.9', '/usr/lib64/python3.9/lib-dynload', '/home/alex/.local/lib/python3.9/site-packages', '/usr/lib64/python3.9/site-packages', '/usr/lib/python3.9/site-packages']
+```
+
+`sys.path` is a list of the directories that the `import` command searches.
+The contents of `sys.path` depend on your operating system and Python installation method.
+
+In my system, the `/usr/lib64/python3.9` directory contains the `datetime.py` module.
+
+```
+$ head /usr/lib64/python3.9/datetime.py
+"""Concrete date/time and related types.
+
+See http://www.iana.org/time-zones/repository/tz-link.html for
+time zone and DST data sources.
+"""
+
+__all__ = ("date", "datetime", "time", "timedelta", "timezone", "tzinfo",
+ "MINYEAR", "MAXYEAR")
+...
+```
+
+`/usr/lib64/python3.9` contains the modules in [the Python standard library](https://docs.python.org/3/library/).
+
+### Importing your Python files
+
+If you create a file with the `a.py` name:
+
+```
+def f():
+ return 2
+```
+
+, and another with the `b.py` name:
+
+```
+import a
+
+print(a.f())
+```
+
+, then:
+
+```
+$ python b.py
+2
+```
+
+
+This works, because `sys.path` contains `''`, which means "the current directory".
+
+(`sys.path` is very similar to the `PATH` variable. However, `sys.path` contains the current directory by default, whereas `PATH` does not.)
+
+When `import a` is executed, then Python searches the directories in `sys.path` for an `a.py` file; it is found when checking the `''` path.
+When `import datetime` is executed, Python searches in the current directory (because `''` comes first in the path), doesn't find it, but then finds it in the following `/usr/lib64/python3.9` directory.
+Python iterates over the `sys.path` directories, and loads the *first* matching file.
+
+## Installing libraries
+
+When writing Python software, sometimes it is enough with the modules included in the standard library.
+However, frequently you want to use other libraries.
+To use Python libraries, you must install them using the `pip` program.
+
+The `pip` program is not part of the `python3` package in some Linux distributions, and comes from the `python3-pip` package.
+
+The `pip` program can download libraries from https://pypi.org/ , the Python package index, and install them.
+`pip` installs libraries to a "Python environment".
+
+Old versions of `pip` defaulted to installing libraries to the "system" Python environment.
+In a Linux system, the system Python environment is located in a directory such as `/usr/lib64/python3.9`.
+By default, normal Linux users cannot write to `/usr`, so installing a package would fail.
+
+Modern versions of `pip` detect that they cannot write to the "system" Python environment, and then redirect the install to the "user" Python environment.
+The "user" Python environment is in a directory such as `~/.local/lib/python3.9`.
+
+You could use a command such as `sudo pip install` to grant `pip` the privileges required to write to `/usr`.
+However, this can make a Linux system unusable.
+Most Linux systems use software that uses the "system" Python environment.
+Altering the "system" Python environment can break such software.
+Do not run `sudo pip install` with root privileges unless you know why you need this.
+
+If you use a modern `pip` (or use the `--user` option), you can install libraries to the "user" Python environment.
+However, this is problematic because a Python environment can only contain a single version of a Python library.
+If you have two different Python programs that require different versions of the same library, then these two programs cannot coexist in the "user" Python environment.
+
+In general, Python virtual environments are used to address this problem.
+
+## Creating Python virtual environments
+
+If you run:
+
+```
+$ python3 -m venv <some path>
+```
+
+This will create a directory with the path you specify, with the following contents:
+
+```
+<some path>
+├── bin
+│   ├── activate
+│   ├── pip
+│   ├── python
+├── include
+├── lib
+│   └── python3.9
+```
+
+The `python` and `pip` commands are copies of the same commands from the "system" Python environment.
+
+But these commands work differently from the "system" Python environment commands:
+
+```
+$ <some path>/bin/python
+>>> import sys
+>>> sys.path
+['', '/usr/lib64/python39.zip', '/usr/lib64/python3.9', '/usr/lib64/python3.9/lib-dynload', '<some path>/lib64/python3.9/site-packages', '<some path>/lib/python3.9/site-packages']
+```
+
+`sys.path` uses the `lib` directories in the virtual environment.
+
+When you use the `pip` program from the virtual environment, it installs the libraries to the virtual environment.
+
+You can create as many virtual environments as you need, and you can install different versions of libraries to each virtual environment.
+
+## Activating Python environments
+
+You can run the `python` and `pip` commands by specifying the full path, like we did when executing the `foo.sh` command earlier.
+
+By default, if you run `python`, the shell will invoke the `python` command from the "system" Python environment because it is in a directory included in the `PATH` variable.
+If you specify the full path, you override this.
+
+To save typing, the `bin` directory of a virtual environment contains an `activate` file.
+The `activate` file is a "special" shell script that must be invoked in one of the following two ways:
+
+```
+$ source <some path>/bin/activate
+```
+
+```
+$ . <some path>/bin/activate
+```
+
+`source` and `.` are synonyms.
+They are special shell commands that are needed for the `activate` command to work correctly.
+
+`activate` alters your path, so that the `bin` directory in your virtual environment comes first in your path.
+
+```
+$ echo $PATH
+/home/user/.local/bin:/home/user/bin:/usr/share/Modules/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin
+$ . <some path>/bin/activate
+(some path) $ echo $PATH
+<some path>/bin:/home/user/.local/bin:/home/user/bin:/usr/share/Modules/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin
+```
+
+, and thus if you run `python`, `<some path>/bin/python` will be executed instead of `/usr/bin/python`.
+
+Besides changing your prompt to indicate the virtual environment is activated, `activate` only alters your `PATH`.
+You never need to use `activate` if you always specify the path to the virtual environment commands.
+
+## Further reading
+
+* [Some brief notes about Python dependency management](dependency_handling.md) continues this explanation, introducing the need for packaging tools.
+* [Installing Python Modules](https://docs.python.org/3/installing/index.html), from the official Python documentation, describes the `pip` program in more depth.
+* [`venv` - Creation of virtual environments](https://docs.python.org/3/library/venv.html), from the official Python documentation, describes virtual environments in more depth.
diff --git a/programming/python/scraping_with_selenium_on_docker.md b/programming/python/scraping_with_selenium_on_docker.md
new file mode 100644
index 00000000..61ba1c12
--- /dev/null
+++ b/programming/python/scraping_with_selenium_on_docker.md
@@ -0,0 +1,7 @@
+Don't use Selenium, use [Playwright](https://playwright.dev/python/):
+
+* Playwright automatically sets up headless browsers.
+* Provides convenient abstractions for locating elements in a page (mostly no XPath required. It can match "intelligently" using text).
+* Has a handy UI tool that records your actions in a browser and writes equivalent *readable* Playwright code.
+
+Further reading: https://new.pythonforengineers.com/blog/web-automation-dont-use-selenium-use-playwright/
diff --git a/scripts/ruscreen b/scripts/ruscreen
new file mode 100755
index 00000000..e9991eeb
--- /dev/null
+++ b/scripts/ruscreen
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+exec autossh -M 0 -t "$@" "screen -RdU"
diff --git a/workstation/README.md b/workstation/README.md
new file mode 100644
index 00000000..c47bb533
--- /dev/null
+++ b/workstation/README.md
@@ -0,0 +1,3 @@
+```
+$ python3 <(curl https://github.com/alexpdp7/alexpdp7/raw/master/workstation/setup.py -L)
+```
diff --git a/workstation/arch-container-builder b/workstation/arch-container-builder
new file mode 160000
+Subproject 644b9991864010fe53a56e5e59ff31ad39f0f45
diff --git a/workstation/build_workstation b/workstation/build_workstation
new file mode 100755
index 00000000..eee907c0
--- /dev/null
+++ b/workstation/build_workstation
@@ -0,0 +1,35 @@
+#!/bin/sh
+
+set -ue
+
+(
+ cd arch-container-builder/
+ ./builder.py \
+ --package ansible \
+ --package kubectl \
+ --package emacs-nox \
+ --package puppet \
+ --package sshpass \
+ --package git \
+ --package rust \
+ --package ipython \
+ --package maven \
+ --package python-pip \
+ --package python-pipx \
+ --package python-poetry \
+ --package python-jmespath \
+ --package python-kubernetes \
+ --package python-botocore \
+ --package python-boto3 \
+ --package certbot \
+ --package bitwarden-cli \
+ --package xclip \
+ --package screen \
+ --package man-db \
+ --package isync \
+ --aur-package mu \
+ --aur-package talosctl \
+ --trusted-key-id FE042E3D4085A811 \
+ workstation
+ # for talos, public key "Andrey Smirnov <andrey.smirnov@siderolabs.com>" FE042E3D4085A811
+)
diff --git a/workstation/setup.py b/workstation/setup.py
new file mode 100755
index 00000000..ee824491
--- /dev/null
+++ b/workstation/setup.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+import pathlib
+import shutil
+import subprocess
+import textwrap
+
+
+def _(t):
+ return textwrap.dedent(t).lstrip()
+
+
+print("Installing some packages...")
+subprocess.run(["sudo", "dnf", "install", "-y", "rclone", "fuse", "git", "podman-docker", "wget"], check=True)
+
+home = pathlib.Path.home()
+
+if not (home / ".config" / "rclone" / "rclone.conf").exists():
+ print(_("""
+ Visit https://nextcloud.pdp7.net/nextcloud/index.php/settings/user/security , create an app password
+ """))
+
+ subprocess.run(["rclone", "config", "create", "nextcloud", "webdav", "url=https://nextcloud.pdp7.net/nextcloud/remote.php/dav/files/alex/", "vendor=nextcloud", "user=alex", "--all"], check=True)
+
+
+(home / "Nextcloud").mkdir(exist_ok=True)
+
+
+nextcloud_service_path = home / ".config" / "systemd" / "user" / "nextcloud.service"
+nextcloud_service_path.parent.mkdir(parents=True, exist_ok=True)
+
+
+with open(nextcloud_service_path, "w", encoding="utf8") as f:
+ f.write(_("""
+ [Unit]
+
+ [Service]
+ ExecStart=/usr/bin/rclone mount --vfs-cache-mode=full --dir-perms 700 --file-perms 600 nextcloud: /home/alex/Nextcloud/
+
+ [Install]
+ WantedBy=default.target
+ """))
+
+subprocess.run(["systemctl", "--user", "enable", "--now", "nextcloud"], check=True)
+
+if not (home / ".ssh").exists():
+ subprocess.run(["ln", "-s", "Nextcloud/_ssh", ".ssh"], check=True)
+
+
+dotfiles_dir = home / "Nextcloud" / "dotfiles"
+
+for dotfile in dotfiles_dir.glob("*"):
+ relative_dotfile = dotfile.relative_to(dotfiles_dir)
+ replaced_dotfile = pathlib.Path.home() / ("." + relative_dotfile.parts[0][1:])
+ if not replaced_dotfile.exists():
+ subprocess.run(["ln", "-s", dotfile, replaced_dotfile], check=True)
+
+
+(home / ".local" / "bin").mkdir(exist_ok=True, parents=True)
+
+(home / "git").mkdir(exist_ok=True, parents=True)
+
+distrobox_dir = home / "git" / "distrobox"
+
+if not distrobox_dir.exists():
+ subprocess.run(["git", "clone", "https://github.com/89luca89/distrobox.git", distrobox_dir], check=True)
+
+subprocess.run([distrobox_dir / "install"], check=True)