aboutsummaryrefslogtreecommitdiff
path: root/personal_infra
diff options
context:
space:
mode:
Diffstat (limited to 'personal_infra')
-rw-r--r--personal_infra/README.md56
-rw-r--r--personal_infra/ansible.cfg7
-rw-r--r--personal_infra/k8s/base/kustomization.yml32
-rw-r--r--personal_infra/playbooks/patch_rpc_svcgssd_service.yaml21
-rw-r--r--personal_infra/playbooks/roles/apply_puppet/tasks/main.yml127
-rw-r--r--personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml17
-rwxr-xr-xpersonal_infra/playbooks/roles/deploy_ragent/files/get.py16
-rw-r--r--personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml45
-rw-r--r--personal_infra/playbooks/roles/deploy_ragent/vars/main.yml24
-rw-r--r--personal_infra/playbooks/roles/join_ipa/handlers/main.yml4
-rw-r--r--personal_infra/playbooks/roles/join_ipa/tasks/main.yml32
-rw-r--r--personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml92
-rw-r--r--personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml8
-rw-r--r--personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml11
-rw-r--r--personal_infra/playbooks/roles/talos/README.md65
-rw-r--r--personal_infra/playbooks/roles/talos/files/get-ip5
-rw-r--r--personal_infra/playbooks/roles/talos/tasks/main.yaml111
-rw-r--r--personal_infra/playbooks/roles/talos/tasks/proxmox.yml19
-rw-r--r--personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml4
-rw-r--r--personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml13
-rw-r--r--personal_infra/playbooks/setup_blog_keys.yaml23
-rw-r--r--personal_infra/playbooks/setup_tinc_keys.yaml27
-rw-r--r--personal_infra/playbooks/site.yaml63
-rw-r--r--personal_infra/podman.md26
-rwxr-xr-xpersonal_infra/pseudo_resource_exporter.py97
-rw-r--r--personal_infra/puppet/modules/automatic_updates/manifests/init.pp33
-rw-r--r--personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp81
-rw-r--r--personal_infra/puppet/modules/backups/manifests/init.pp11
-rw-r--r--personal_infra/puppet/modules/basic_software/manifests/init.pp7
-rw-r--r--personal_infra/puppet/modules/copr/manifests/init.pp20
-rw-r--r--personal_infra/puppet/modules/debian/manifests/backports.pp9
-rw-r--r--personal_infra/puppet/modules/debian/manifests/init.pp5
-rw-r--r--personal_infra/puppet/modules/dns_dhcp/manifests/init.pp36
-rw-r--r--personal_infra/puppet/modules/dns_dhcp/templates/internal.epp30
-rw-r--r--personal_infra/puppet/modules/freeipa/manifests/server.pp13
-rw-r--r--personal_infra/puppet/modules/ipsilon/manifests/init.pp8
m---------personal_infra/puppet/modules/mailalias_core0
-rw-r--r--personal_infra/puppet/modules/miniflux/manifests/init.pp27
l---------personal_infra/puppet/modules/nagios/files/check_talos_version1
-rw-r--r--personal_infra/puppet/modules/nagios/manifests/init.pp100
-rw-r--r--personal_infra/puppet/modules/nagios/manifests/k8s.pp41
-rw-r--r--personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp1373
m---------personal_infra/puppet/modules/nagios_core0
-rw-r--r--personal_infra/puppet/modules/nextcloud/manifests/init.pp79
-rw-r--r--personal_infra/puppet/modules/nextcloud/templates/www.conf.epp439
-rw-r--r--personal_infra/puppet/modules/ocserv/manifests/init.pp97
-rw-r--r--personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp57
-rw-r--r--personal_infra/puppet/modules/ocserv/templates/port.conf.epp8
-rw-r--r--personal_infra/puppet/modules/podman/manifests/init.pp17
-rw-r--r--personal_infra/puppet/modules/postgres/manifests/init.pp26
-rw-r--r--personal_infra/puppet/modules/proxmox/README.md36
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/freeipa.pp17
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/init.pp38
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/proxy.pp52
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp53
-rw-r--r--personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp2
-rw-r--r--personal_infra/puppet/modules/proxmox/templates/interfaces.epp18
-rw-r--r--personal_infra/puppet/modules/root_mail/manifests/init.pp41
-rw-r--r--personal_infra/puppet/modules/tinc/manifests/init.pp100
-rw-r--r--personal_infra/puppet/modules/tinc/templates/tinc-up.epp11
-rw-r--r--personal_infra/puppet/modules/tinc/templates/tinc.conf.epp8
-rw-r--r--personal_infra/puppet/site/00-common.pp23
-rw-r--r--personal_infra/puppet/site/01-dns.pp10
-rw-r--r--personal_infra/puppet/site/01-ipa.pp30
-rw-r--r--personal_infra/puppet/site/01-tinc.pp39
-rw-r--r--personal_infra/puppet/site/02-tinc-dns.pp5
-rw-r--r--personal_infra/puppet/site/case.ces.int.pdp7.net.pp10
-rw-r--r--personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp15
-rw-r--r--personal_infra/puppet/site/h1.pdp7.net.pp123
-rw-r--r--personal_infra/puppet/site/h2.pdp7.net.pp9
-rw-r--r--personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp2
-rw-r--r--personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp3
-rw-r--r--personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp3
-rw-r--r--personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp10
-rw-r--r--personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp8
-rw-r--r--personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp16
-rw-r--r--personal_infra/puppet/site/nc1.pdp7.net.pp3
-rw-r--r--personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp22
-rw-r--r--personal_infra/puppet/site/pg.h1.int.pdp7.net.pp16
-rw-r--r--personal_infra/puppet/site/ws.h1.int.pdp7.net.pp6
-rw-r--r--personal_infra/requirements.loose4
-rw-r--r--personal_infra/requirements.txt31
-rw-r--r--personal_infra/setup_ipa_replicas.md24
-rwxr-xr-xpersonal_infra/setup_venv6
m---------personal_infra/talos-check0
-rwxr-xr-xpersonal_infra/up.py179
86 files changed, 4436 insertions, 0 deletions
diff --git a/personal_infra/README.md b/personal_infra/README.md
new file mode 100644
index 00000000..a3249853
--- /dev/null
+++ b/personal_infra/README.md
@@ -0,0 +1,56 @@
+# Personal infra
+
+This is a collection of files I use to set up my personal infrastructure.
+This is a work in progress, as I am partly redoing how I do configuration management.
+The main source is in a private repo, but I put here as much material as I can make public.
+Inventory, vaults, etc. remain in the private repo.
+
+## Ansible
+
+### Initial setup
+
+Symlink everything in this directory into your root infrastructure directory.
+
+Create an `inventory` file.
+
+Run `./setup_venv` to create a virtual environment.
+
+Create `vault_password` with a vault password.
+
+### Usage
+
+Run `. .venv/bin/activate` to activate the virtual environment.
+
+Run Ansible commands normally.
+
+## Ansible/Puppet integration
+
+I prefer using Ansible for orchestration, and Puppet for configuration management.
+
+* `up.py` compiles Puppet catalogs without a Puppet Server.
+* `pseudo_resource_exporter.py` simulates exported resources on the catalogs generated by `up.py`.
+ You can use this script as a template to implement your own catalog manipulations.
+* `playbooks/roles/apply_puppet/` uses `up.py` to apply Puppet to Ansible hosts.
+ This role collects facts, adds the Ansible inventory to Hiera (so you can use Ansible inventory data to parameterize Puppet), compiles the catalogs, ships them to Ansible nodes, and executes Puppet.
+
+Except for exported resources, which work differently, this setup has most of the benefits of Puppet Server without having to run a Puppet Server and PuppetDB.
+
+Being able to simulate exported resources without a master lets you use the `nagios_core` module without infrastructure.
+With the `nagios_core` module, Puppet code, such as a module which sets up a web server, can define "inline" Puppet monitoring for the managed resources.
+
+## Puppet
+
+For the moment, I'm managing the following distros using this setup.
+
+| Distro | Puppet version |
+| --------------- | ------------------ |
+| Debian 11 (PVE) | Puppet 5.5 |
+| EL8 | Puppet 6.26 (EPEL) |
+| EL9 | Puppet 7.20 (EPEL) |
+
+I perform catalog compilation on my laptop running EL9.
+Although [support across Puppet 5.5-7 is not documented](https://www.puppet.com/docs/puppet/7/platform_lifecycle.html#primary-agent-compatibility), catalogs still seem to be compatible.
+
+## Misc
+
+* [Podman](podman.md)
diff --git a/personal_infra/ansible.cfg b/personal_infra/ansible.cfg
new file mode 100644
index 00000000..c7e2ae61
--- /dev/null
+++ b/personal_infra/ansible.cfg
@@ -0,0 +1,7 @@
+[defaults]
+inventory = inventory
+vault_password_file = vault_password
+callbacks_enabled = ansible.posix.profile_tasks
+
+# TODO: Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host's fingerprint to your known_hosts file to manage this host.
+host_key_checking = False
diff --git a/personal_infra/k8s/base/kustomization.yml b/personal_infra/k8s/base/kustomization.yml
new file mode 100644
index 00000000..05b0b21e
--- /dev/null
+++ b/personal_infra/k8s/base/kustomization.yml
@@ -0,0 +1,32 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.8.0/deploy/static/provider/cloud/deploy.yaml
+ - https://github.com/alexpdp7/talos-check/raw/main/manifest.yaml
+
+patches:
+ - patch: |-
+ - op: add
+ path: "/metadata/annotations/ingressclass.kubernetes.io~1is-default-class"
+ value: true
+ target:
+ kind: IngressClass
+ name: nginx
+ - patch: |
+ apiVersion: v1
+ kind: Namespace
+ metadata:
+ name: ingress-nginx
+ labels:
+ pod-security.kubernetes.io/enforce: privileged
+ - patch: |
+ apiVersion: apps/v1
+ kind: Deployment
+ metadata:
+ name: ingress-nginx-controller
+ namespace: ingress-nginx
+ spec:
+ template:
+ spec:
+ hostNetwork: true
diff --git a/personal_infra/playbooks/patch_rpc_svcgssd_service.yaml b/personal_infra/playbooks/patch_rpc_svcgssd_service.yaml
new file mode 100644
index 00000000..683cb045
--- /dev/null
+++ b/personal_infra/playbooks/patch_rpc_svcgssd_service.yaml
@@ -0,0 +1,21 @@
+---
+- hosts: patch_rpc_svcgssd_service
+ collections: freeipa.ansible_freeipa
+ vars:
+ ansible_user: alex
+ ansible_become: True
+ tasks:
+ - name: del nfs service
+ command: ipa service-del nfs/{{ inventory_hostname }}
+ ignore_errors: True
+ - name: create nfs service
+ command: ipa service-add nfs/{{ inventory_hostname }}
+ - name: clean keytab
+ command: ipa-rmkeytab -p nfs/{{ inventory_hostname }} -k /etc/krb5.keytab
+ ignore_errors: True
+ - name: get keytab
+ command: ipa-getkeytab -p nfs/{{ inventory_hostname }} -k /etc/krb5.keytab
+ - name: restart
+ service:
+ name: rpc-svcgssd.service
+ state: restarted
diff --git a/personal_infra/playbooks/roles/apply_puppet/tasks/main.yml b/personal_infra/playbooks/roles/apply_puppet/tasks/main.yml
new file mode 100644
index 00000000..2d6bdb23
--- /dev/null
+++ b/personal_infra/playbooks/roles/apply_puppet/tasks/main.yml
@@ -0,0 +1,127 @@
+---
+- name: clean puppet build directory
+ local_action:
+ module: file
+ path: "{{ inventory_dir }}/build/puppet"
+ state: absent
+ run_once: True
+ tags: puppet_fast
+- name: create puppet build directories
+ local_action:
+ module: file
+ path: "{{ inventory_dir }}/{{ item }}"
+ state: directory
+ loop:
+ - build/puppet/global_vars
+ - build/puppet/host_vars
+ - build/puppet/facts
+ run_once: True
+ tags: puppet_fast
+- name: create puppet build host vars directories
+ local_action:
+ module: file
+ path: "{{ inventory_dir }}/build/puppet/host_vars/{{ inventory_hostname }}"
+ state: directory
+ tags: puppet_fast
+- name: dump hostvars
+ local_action:
+ module: copy
+ dest: "{{ inventory_dir }}/build/puppet/global_vars/hostvars.json"
+ content: "{'hostvars': {{ hostvars }} }"
+ run_once: True
+ tags: puppet_fast
+- name: dump this
+ local_action:
+ module: copy
+ dest: "{{ inventory_dir }}/build/puppet/host_vars/{{ inventory_hostname }}/this.json"
+ content: "{{ hostvars[inventory_hostname] }}"
+ tags: puppet_fast
+- name: install epel
+ package:
+ name: epel-release
+ when: ansible_distribution_file_variety == 'RedHat'
+- name: install packages
+ package:
+ name:
+ - puppet
+ - unzip
+- name: get facts
+ command: facter -y
+ register: facter_output
+ tags: puppet_fast
+- name: dump facts
+ local_action:
+ module: copy
+ dest: "{{ inventory_dir }}/build/puppet/facts/{{ inventory_hostname }}.yaml"
+ content: "{{ facter_output.stdout }}"
+ delegate_to: 127.0.0.1
+ tags: puppet_fast
+- name: compile puppet catalogs
+ local_action:
+ module: command
+ cmd: "{{ inventory_dir }}/up.py {{ inventory_dir }}/build/puppet {{ inventory_dir }}/puppet/modules {{ inventory_dir }}/puppet/site {% for host in ansible_play_batch %}{{ host }} {% endfor %}"
+ tags: puppet_fast
+ run_once: True
+- name: simulate exported resources
+ local_action:
+ module: command
+ cmd: "./pseudo_resource_exporter.py"
+ chdir: "{{ inventory_dir }}"
+ tags: puppet_fast
+ run_once: True
+- name: package catalog
+ archive:
+ path: "{{ inventory_dir }}/build/puppet/build/output/{{ inventory_hostname }}"
+ dest: "{{ inventory_dir }}/build/puppet/puppet_catalog_{{ inventory_hostname }}.zip"
+ format: zip
+ delegate_to: 127.0.0.1
+ tags: puppet_fast
+- name: create remote temporary directory
+ tempfile:
+ state: directory
+ register: remote_temp
+ tags: puppet_fast
+- name: unpackage catalog
+ unarchive:
+ src: "{{ inventory_dir }}/build/puppet/puppet_catalog_{{ inventory_hostname }}.zip"
+ dest: "{{ remote_temp.path }}"
+ tags: puppet_fast
+- name: preview catalog
+ command: puppet apply --catalog {{ remote_temp.path }}/{{ inventory_hostname }}/catalog.json --noop --test --modulepath={{ remote_temp.path }}/{{ inventory_hostname }}/modules/
+ register: catalog_apply
+ tags: puppet_fast
+- name: display catalog preview stdout
+ debug:
+ msg: "{{ catalog_apply.stdout_lines }}"
+ tags: puppet_fast
+- name: display catalog preview stderr
+ debug:
+ msg: "{{ catalog_apply.stderr_lines }}"
+ tags: puppet_fast
+- name: pause to confirm
+ pause:
+ tags: pause
+- name: apply catalog
+ command: puppet apply --catalog {{ remote_temp.path }}/{{ inventory_hostname }}/catalog.json --modulepath={{ remote_temp.path }}/{{ inventory_hostname }}/modules/
+ register: catalog_apply
+ tags: puppet_fast
+- name: display catalog apply stdout
+ debug:
+ msg: "{{ catalog_apply.stdout_lines }}"
+ tags: puppet_fast
+- name: display catalog apply stderr
+ debug:
+ msg: "{{ catalog_apply.stderr_lines }}"
+ tags: puppet_fast
+- name: clean up remote temporary directory
+ file:
+ state: absent
+ path: "{{ remote_temp.path }}"
+ tags: puppet_fast
+- name: clean up local temporary directory
+ file:
+ state: absent
+ path: "{{ inventory_dir }}/build/puppet/"
+ delegate_to: 127.0.0.1
+ tags: puppet_fast
+ run_once: True
diff --git a/personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml b/personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml
new file mode 100644
index 00000000..11080d89
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ipsilon/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- name: install ipsilon (if this task fails, run kinit as root)
+ command: ipsilon-server-install --hostname {{ ipsilon.hostname }} --ipa yes --openidc yes --admin-user {{ ipsilon.admin_user }} --info-sssd=yes --form=yes --root-instance
+ args:
+ creates: /etc/ipsilon/idp
+- name: fix permissions
+ command: chown -R ipsilon:ipsilon /var/lib/ipsilon/ /etc/ipsilon/
+- name: create public host
+ shell: ipa host-find {{ ipsilon.hostname }} || ipa host-add {{ ipsilon.hostname }}
+- name: create public service
+ shell: ipa service-find HTTP/{{ ipsilon.hostname }} || ipa service-add HTTP/{{ ipsilon.hostname }}
+- name: add public service to keytab
+ shell: klist -k /etc/httpd/conf/http.keytab | grep HTTP/{{ ipsilon.hostname }} || ipa-getkeytab -p HTTP/{{ ipsilon.hostname }} -k /etc/httpd/conf/http.keytab
+- name: restart httpd
+ service:
+ name: httpd
+ state: restarted
diff --git a/personal_infra/playbooks/roles/deploy_ragent/files/get.py b/personal_infra/playbooks/roles/deploy_ragent/files/get.py
new file mode 100755
index 00000000..d0a78618
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ragent/files/get.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3
+
+import json
+import sys
+import urllib.request
+
+run_id = sys.argv[1]
+
+if run_id == "last":
+ runs = json.loads(urllib.request.urlopen("https://api.github.com/repos/alexpdp7/ragent/actions/runs?branch=master").read().decode('utf8'))
+ run_id = runs["workflow_runs"][0]["id"]
+
+run = json.loads(urllib.request.urlopen("https://api.github.com/repos/alexpdp7/ragent/actions/runs/%s" % run_id).read().decode('utf8'))
+artifacts = json.loads(urllib.request.urlopen(run['artifacts_url']).read().decode('utf8'))['artifacts']
+urls = {a['name']: 'https://api.github.com/repos/alexpdp7/ragent/actions/artifacts/%s/zip' % a["id"] for a in artifacts}
+print(json.dumps(urls))
diff --git a/personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml b/personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml
new file mode 100644
index 00000000..72bd5bed
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ragent/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+- name: get url
+ local_action:
+ module: command
+ cmd: "{{ inventory_dir }}/playbooks/roles/deploy_ragent/files/get.py {{ run_id|default('last') }}"
+ run_once: True
+ register: url
+- name: download package
+ uri:
+ url: "{{ url.stdout|from_json|json_query(ragent['package_key'][ansible_os_family][ansible_distribution_major_version][ansible_architecture]) }}"
+ dest: /tmp/ragent_downloaded_package.zip
+ user: " {{ ragent_download['user'] }}"
+ password: "{{ ragent_download['token'] }}"
+ force_basic_auth: yes
+ follow_redirects: all
+- name: unzip
+ unarchive:
+ src: /tmp/ragent_downloaded_package.zip
+ dest: /tmp
+ list_files: yes
+ remote_src: yes
+ register: unzipped
+- name: remove previous
+ package:
+ name: ragent
+ state: absent
+- name: install package
+ command: "{{ ragent['install_command'][ansible_os_family] }} /tmp/{{ unzipped.files[0] }}"
+- name: configure service
+ service:
+ name: ragent
+ enabled: yes
+ state: restarted
+- name: open firewall
+ command: firewall-cmd --permanent --add-port=21488/tcp
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version in ("7", "8", "9") and ansible_virtualization_type != "lxc" and not network.disable_firewall|default(False)
+- name: reload firewall
+ command: firewall-cmd --reload
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version in ("7", "8", "9") and ansible_virtualization_type != "lxc" and not network.disable_firewall|default(False)
+- name: force check
+ community.general.nagios:
+ action: forced_check
+ host: "{{ inventory_hostname }}"
+ service: check_ragent
+ delegate_to: nagios.h1.int.pdp7.net
diff --git a/personal_infra/playbooks/roles/deploy_ragent/vars/main.yml b/personal_infra/playbooks/roles/deploy_ragent/vars/main.yml
new file mode 100644
index 00000000..0ac33330
--- /dev/null
+++ b/personal_infra/playbooks/roles/deploy_ragent/vars/main.yml
@@ -0,0 +1,24 @@
+---
+ragent:
+ package_key:
+ Debian:
+ '10':
+ x86_64: debian_buster
+ armv7l: debian_buster_rpi3
+ '11': # apparently the buster package works for bullseye
+ x86_64: debian_buster
+ armv7l: debian_buster_rpi3
+ '20': # and it also works for Ubuntu 20.04
+ x86_64: debian_buster
+ RedHat:
+ '7':
+ x86_64: el7
+ '8':
+ x86_64: el8
+ aarch64: el8_rpi4
+ '9':
+ x86_64: el8
+ aarch64: el8_rpi4
+ install_command:
+ Debian: "dpkg -i"
+ RedHat: "rpm -i"
diff --git a/personal_infra/playbooks/roles/join_ipa/handlers/main.yml b/personal_infra/playbooks/roles/join_ipa/handlers/main.yml
new file mode 100644
index 00000000..da74d5ee
--- /dev/null
+++ b/personal_infra/playbooks/roles/join_ipa/handlers/main.yml
@@ -0,0 +1,4 @@
+- name: restart_container
+ delegate_to: "{{ proxmox.host }}"
+ command: pct reboot {{ proxmox.id }}
+
diff --git a/personal_infra/playbooks/roles/join_ipa/tasks/main.yml b/personal_infra/playbooks/roles/join_ipa/tasks/main.yml
new file mode 100644
index 00000000..0fd8f5d0
--- /dev/null
+++ b/personal_infra/playbooks/roles/join_ipa/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+- name: join
+ shell: getent passwd admin || ipa-client-install -U {% if 'lxc' in group_names %} -N {% endif %} --domain={{ freeipa.domain }} -w {{ freeipa.join_password|trim }} --mkhomedir -p {{ freeipa.join_user }}
+# proxmox reorders the configuration file and misbehaves quite a bit :(
+- name: proxmox idmaps
+ when: "'lxc' in group_names"
+ block:
+ - name: set id mappings copy out
+ copy:
+ remote_src: yes
+ src: /etc/pve/lxc/{{ proxmox.id }}.conf
+ dest: /tmp/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: read conf
+ slurp:
+ src: /tmp/{{ proxmox.id }}.conf
+ register: proxmox_conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: set id mappings
+ blockinfile:
+ path: /tmp/{{ proxmox.id }}.conf
+ block: |
+ lxc.idmap = u 0 100000 65536
+ lxc.idmap = g 0 100000 65536
+ lxc.idmap = u {{ freeipa.idrange_start }} {{ freeipa.idrange_start }} {{ freeipa.idrange_size }}
+ lxc.idmap = g {{ freeipa.idrange_start }} {{ freeipa.idrange_start }} {{ freeipa.idrange_size }}
+ when: not proxmox_conf['content']|b64decode is search('lxc.idmap') and not proxmox.privileged|default(False)
+ notify: restart_container
+ delegate_to: "{{ proxmox.host }}"
+ - name: set id mappings copy in
+ command: cp /tmp/{{ proxmox.id }}.conf /etc/pve/lxc/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
diff --git a/personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml b/personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml
new file mode 100644
index 00000000..4a5d37b6
--- /dev/null
+++ b/personal_infra/playbooks/roles/proxmox_create_lxc/tasks/main.yml
@@ -0,0 +1,92 @@
+---
+- name: download template
+ command: "pveam download local {{ flavors[proxmox.flavor].template }}_amd64.tar.xz"
+ args:
+ creates: "/var/lib/vz/template/cache/{{ flavors[proxmox.flavor].template }}_amd64.tar.xz"
+ delegate_to: "{{ proxmox.host }}"
+- name: create host
+ command: >
+ pct create {{ proxmox.id }} "/var/lib/vz/template/cache/{{ flavors[proxmox.flavor].template }}_amd64.tar.xz"
+ --hostname {{ inventory_hostname }}
+ --storage local-zfs
+ -net0 name=eth0,bridge=vmbr0,ip=dhcp
+ -onboot 1
+ {% if not proxmox.privileged|default(false) %} -unprivileged {% endif %}
+ {% if proxmox.features|default(None) %} -features {{ proxmox.features }} {% endif %}
+ {% if proxmox.memory|default(None) %} -memory {{ proxmox.memory }} {% endif %}
+ {% for disk in proxmox.disks|default([]) %}
+ --mp{{ disk.index }} volume={{ disk.storage }}:{{ disk.size_gb }},mp={{ disk.path }}
+ {% endfor %}
+ -rootfs local-zfs:{{ proxmox.disk|default(4) }}
+ --password {{ ansible_password|trim }}
+ --nameserver {{ hostvars[proxmox.host].network.self_internal_ip }}
+ --ostype {{ flavors[proxmox.flavor].pct_ostype }}
+ args:
+ creates: "/etc/pve/lxc/{{ proxmox.id }}.conf"
+ delegate_to: "{{ proxmox.host }}"
+- name: allow backups
+ shell: "zfs allow -u backups mount,send,hold,snapshot,destroy rpool/data/$(pct config {{ proxmox.id }} | grep mp{{ item.index }} | cut -d , -f 1 | cut -d : -f 3)"
+ delegate_to: "{{ proxmox.host }}"
+ loop: "{{ proxmox.disks|default([]) }}"
+- name: proxmox extra
+ when: proxmox.extra|default(None)
+ block:
+ - name: set proxmox extra copy out
+ copy:
+ remote_src: yes
+ src: /etc/pve/lxc/{{ proxmox.id }}.conf
+ dest: /tmp/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: read conf
+ slurp:
+ src: /tmp/{{ proxmox.id }}.conf
+ register: proxmox_conf
+ delegate_to: "{{ proxmox.host }}"
+ - name: set proxmox extra
+ lineinfile:
+ path: /tmp/{{ proxmox.id }}.conf
+ line: "{{ item }}"
+ loop: "{{ proxmox.extra }}"
+ delegate_to: "{{ proxmox.host }}"
+ - name: set proxmox extra copy in
+ command: cp /tmp/{{ proxmox.id }}.conf /etc/pve/lxc/{{ proxmox.id }}.conf
+ delegate_to: "{{ proxmox.host }}"
+# https://bugzilla.proxmox.com/show_bug.cgi?id=4515
+- name: set hosts
+ copy:
+ content: |
+ 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
+ ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
+
+ {% if network is defined and network.ip is defined %}
+ {{ network.ip }} {{ inventory_hostname }} {{ inventory_hostname|regex_search('^[^.]*') }}
+ {% endif %}
+ dest: /rpool/data/subvol-{{ proxmox.id }}-disk-0/etc/hosts
+ delegate_to: "{{ proxmox.host }}"
+- name: prevent proxmox from manipulating hosts
+ file:
+ path: /etc/.pve-ignore.hosts
+ state: touch
+ delegate_to: "{{ proxmox.host }}"
+- name: start host
+ shell: "{ pct status {{ proxmox.id }} | grep running ; } || pct start {{ proxmox.id }}"
+ delegate_to: "{{ proxmox.host }}"
+- name: update packages to prevent automatic updates causing issues later. retry until network available
+ command: pct exec {{ proxmox.id }} -- dnf update -y
+ retries: 10
+ delay: 1
+ until: result.rc == 0
+ register: result
+ delegate_to: "{{ proxmox.host }}"
+- name: install ssh
+ command: pct exec {{ proxmox.id }} -- dnf install -y openssh-server
+ delegate_to: "{{ proxmox.host }}"
+- name: permit root password
+ lineinfile:
+ path: /rpool/data/subvol-{{ proxmox.id }}-disk-0/etc/ssh/sshd_config
+ regexp: "^#?PermitRootLogin"
+ line: "PermitRootLogin yes"
+ delegate_to: "{{ proxmox.host }}"
+- name: enable ssh
+ command: pct exec {{ proxmox.id }} -- systemctl enable --now sshd
+ delegate_to: "{{ proxmox.host }}"
diff --git a/personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml b/personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml
new file mode 100644
index 00000000..b9bdcc6a
--- /dev/null
+++ b/personal_infra/playbooks/roles/proxmox_create_lxc/vars/main.yml
@@ -0,0 +1,8 @@
+---
+flavors:
+ el8:
+ template: rockylinux-8-default_20210929
+ pct_ostype: centos
+ el9:
+ template: rockylinux-9-default_20221109
+ pct_ostype: centos
diff --git a/personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml b/personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml
new file mode 100644
index 00000000..dbedc734
--- /dev/null
+++ b/personal_infra/playbooks/roles/proxmox_route_53/tasks/main.yml
@@ -0,0 +1,11 @@
+---
+- name: create DNS entries
+ local_action:
+ module: amazon.aws.route53
+ zone: "{{ network.dns_zone }}"
+ record: "{{ item }}"
+ type: CNAME
+ value: "{{ network.public_hostname }}"
+ wait: true
+ state: present
+ loop: "{{ network.proxmox.proxy_hosts }}"
diff --git a/personal_infra/playbooks/roles/talos/README.md b/personal_infra/playbooks/roles/talos/README.md
new file mode 100644
index 00000000..4ef4e8de
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/README.md
@@ -0,0 +1,65 @@
+# Talos Ansible role
+
+This role helps provision Talos clusters using Ansible.
+Currently, this role only supports VMs on Proxmox and single node clusters.
+
+## Variables
+
+### Host variables
+
+```
+proxmox:
+ id: 123
+ host: inventory_name_of_proxmox_host
+ cores: n
+ memory: in MB
+ disk: in GB
+network:
+ ip: x.y.z.t
+talos_host:
+ talos_cluster: cluster_name
+ install_disk: /dev/vda
+```
+
+### Group variables
+
+```
+talos_clusters:
+ cluster_name: # you can have multiple clusters
+ endpoint: https://host_name:6443
+```
+
+## Talos configuration
+
+Only the Talos secret for the cluster must be stored in version control.
+You must create the secret and vault it:
+
+```
+$ talosctl gen secrets -o talos/${cluster_name}-secrets.yaml
+$ ansible-vault encrypt talos/${cluster_name}-secrets.yaml
+```
+
+## Role
+
+With the above configuration, the role will:
+
+* Create the VM in Proxmox.
+Until Talos includes https://github.com/siderolabs/talos/pull/5897 , [the playbook fishes the IP from the dnsmasq Proxmox instance](tasks/proxmox.yml#L13) by using [this script](files/get-ip).
+* Sets up Talos.
+* Fetches the kubeconfig.
+* Deploys kustomizations in `k8s/base`.
+See [my kustomizations](../../../k8s/base/).
+
+## Updates
+
+To update Talos, update `talosctl`, then:
+
+```
+$ talosctl upgrade --preserve --talosconfig talos/talosconfig-k8s-test.example --nodes k8s-test.example.com --wait
+```
+
+To update K8S:
+
+```
+$ talosctl upgrade-k8s --talosconfig talos/talosconfig-k8s-test.example --nodes k8s-test.example.com
+```
diff --git a/personal_infra/playbooks/roles/talos/files/get-ip b/personal_infra/playbooks/roles/talos/files/get-ip
new file mode 100644
index 00000000..a96dab61
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/files/get-ip
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+set -ueo pipefail
+
+grep $(cat /etc/pve/qemu-server/$1.conf | grep net0: | sed 's/^.*virtio=\([0-9A-F:]*\),.*$/\1/' | tr [:upper:] [:lower:]) /var/lib/misc/dnsmasq.leases | cut -d " " -f 3
diff --git a/personal_infra/playbooks/roles/talos/tasks/main.yaml b/personal_infra/playbooks/roles/talos/tasks/main.yaml
new file mode 100644
index 00000000..44189484
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/tasks/main.yaml
@@ -0,0 +1,111 @@
+---
+- name: generate controlplane patch
+ copy:
+ content: |
+ cluster:
+ allowSchedulingOnControlPlanes: true
+ machine:
+ install:
+ disk: {{ talos_host.install_disk }}
+ network:
+ hostname: {{ inventory_hostname }}
+ nameservers:
+ - {{ hostvars[proxmox.host].network.self_internal_ip }}
+ interfaces:
+ - interface: eth0
+ addresses:
+ - {{ network.ip }}/24
+ routes:
+ - network: 0.0.0.0/0
+ gateway: {{ hostvars[proxmox.host].network.self_internal_ip }}
+ nodeLabels:
+ role: ingress-controller
+ dest: "{{ inventory_dir }}/talos/host-{{ inventory_hostname }}.patch"
+ delegate_to: 127.0.0.1
+
+- name: generate controlplane config
+ shell:
+ cmd: talosctl gen config -t controlplane -o talos/host-{{ inventory_hostname }}.yaml --with-secrets <(ansible-vault view talos/{{ talos_host.talos_cluster }}-secrets.yaml) --config-patch-control-plane @talos/host-{{ inventory_hostname }}.patch {{ talos_host.talos_cluster }} {{ talos_clusters[talos_host.talos_cluster].endpoint }} --force
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+
+- name: generate talosconfig
+ shell:
+ cmd: talosctl gen config -t talosconfig -o talos/talosconfig-{{ talos_host.talos_cluster }} --with-secrets <(ansible-vault view talos/{{ talos_host.talos_cluster }}-secrets.yaml) {{ talos_host.talos_cluster }} {{ talos_clusters[talos_host.talos_cluster].endpoint }} --force
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+
+- name: set talosconfig endpoint
+ shell:
+ cmd: talosctl --talosconfig=talos/talosconfig-{{ talos_host.talos_cluster }} config endpoint {{ inventory_hostname }}
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+
+- name: get kubeconfig
+ command:
+ cmd: talosctl kubeconfig --talosconfig talos/talosconfig-{{ talos_host.talos_cluster }} --nodes {{ inventory_hostname }} -f
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ throttle: 1
+
+- name: check node ready
+ k8s_info:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ kind: Node
+ wait: true
+ wait_condition:
+ status: True
+ type: Ready
+ delegate_to: 127.0.0.1
+ register: nodes
+ ignore_errors: true
+
+- name: setup proxmox hosts
+ import_tasks: proxmox.yml
+ when: "'k8s_proxmox' in group_names and not 'resources' in nodes or nodes.resources|length == 0"
+
+- name: apply config
+ command:
+ cmd: talosctl apply-config --insecure --nodes {{ ip.stdout }} --file talos/host-{{ inventory_hostname }}.yaml
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ when: "not 'resources' in nodes or nodes.resources|length == 0"
+
+- name: bootstrap cluster
+ command:
+ cmd: talosctl bootstrap --nodes {{ inventory_hostname }} --talosconfig talos/talosconfig-{{ talos_host.talos_cluster }}
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ register: bootstrap
+ until: bootstrap.rc == 0
+ retries: 12
+ delay: 1
+ when: "not 'resources' in nodes or nodes.resources|length == 0"
+
+- name: get kubeconfig
+ command:
+ cmd: talosctl kubeconfig --talosconfig talos/talosconfig-{{ talos_host.talos_cluster }} --nodes {{ inventory_hostname }} -f
+ chdir: "{{ inventory_dir }}"
+ delegate_to: 127.0.0.1
+ when: "not 'resources' in nodes or nodes.resources|length == 0"
+ throttle: 1
+
+- name: wait node ready
+ k8s_info:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ kind: Node
+ wait: true
+ wait_condition:
+ status: True
+ type: Ready
+ delegate_to: 127.0.0.1
+ register: nodes
+ until: nodes.resources is defined and nodes.resources|length > 0
+ retries: 35
+ delay: 1
+
+- name: deploy kustomizations
+ k8s:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ definition: "{{ lookup('kubernetes.core.kustomize', dir='k8s/base') }}"
+ delegate_to: 127.0.0.1
diff --git a/personal_infra/playbooks/roles/talos/tasks/proxmox.yml b/personal_infra/playbooks/roles/talos/tasks/proxmox.yml
new file mode 100644
index 00000000..21bc4c83
--- /dev/null
+++ b/personal_infra/playbooks/roles/talos/tasks/proxmox.yml
@@ -0,0 +1,19 @@
+---
+- name: download iso
+ get_url:
+ url: https://github.com/siderolabs/talos/releases/download/v1.4.4/talos-amd64.iso
+ dest: /var/lib/vz/template/iso/talos-amd64-v1.4.4.iso
+ delegate_to: "{{ proxmox.host }}"
+ run_once: True
+- name: create vm
+ command: qm create {{ proxmox.id }} --cdrom local:iso/talos-amd64-v1.4.4.iso --cores 12 --memory 8192 --name {{ inventory_hostname }} --onboot 1 --start 1 --virtio0 local-zfs:32 --cpu host --net0 virtio,bridge=vmbr0
+ delegate_to: "{{ proxmox.host }}"
+ args:
+ creates: /etc/pve/qemu-server/{{ proxmox.id }}.conf
+- name: get ip
+ script: get-ip {{ proxmox.id }}
+ delegate_to: "{{ proxmox.host }}"
+ register: ip
+ until: ip.rc == 0
+ retries: 20
+ delay: 1
diff --git a/personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml b/personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml
new file mode 100644
index 00000000..da205cf4
--- /dev/null
+++ b/personal_infra/playbooks/roles/verify_root_mail/tasks/main.yml
@@ -0,0 +1,4 @@
+- name: send root mail
+ command:
+ cmd: mail -s "ansible test {{ inventory_hostname }}" root
+ stdin: test
diff --git a/personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml b/personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml
new file mode 100644
index 00000000..9aa2dc16
--- /dev/null
+++ b/personal_infra/playbooks/roles/zqxjkcrud/tasks/main.yaml
@@ -0,0 +1,13 @@
+---
+- name: render manifests
+ command: kubectl run --context {{ context }} -q -n default -i --rm builder --image quay.io/alexpdp7/zqxjkcrud:master --restart=Never --image-pull-policy=Always --command -- zqxjkcrud-manifest-builder
+ args:
+ stdin: "{{ definition|to_yaml }}"
+ register: manifestbuild
+ delegate_to: 127.0.0.1
+- k8s:
+ context: "{{ context }}"
+ state: present
+ apply: true
+ definition: "{{ manifestbuild.stdout | from_yaml_all }}"
+ delegate_to: 127.0.0.1
diff --git a/personal_infra/playbooks/setup_blog_keys.yaml b/personal_infra/playbooks/setup_blog_keys.yaml
new file mode 100644
index 00000000..b664bcb8
--- /dev/null
+++ b/personal_infra/playbooks/setup_blog_keys.yaml
@@ -0,0 +1,23 @@
+---
+- hosts: h1.pdp7.net
+ tasks:
+ - name: get public cert
+ ansible.builtin.slurp:
+ src: "/etc/apache2/md/domains/blog.pdp7.net/pubcert.pem"
+ register: public_cert
+ - name: get private key
+ ansible.builtin.slurp:
+ src: "/etc/apache2/md/domains/blog.pdp7.net/privkey.pem"
+ register: private_key
+ - k8s:
+ context: "admin@k8s-test.h1"
+ state: present
+ definition:
+ kind: Secret
+ metadata:
+ namespace: blog
+ name: tls-gemini
+ data:
+ tls.crt: "{{ public_cert.content }}"
+ tls.key: "{{ private_key.content }}"
+ delegate_to: 127.0.0.1
diff --git a/personal_infra/playbooks/setup_tinc_keys.yaml b/personal_infra/playbooks/setup_tinc_keys.yaml
new file mode 100644
index 00000000..bdd67116
--- /dev/null
+++ b/personal_infra/playbooks/setup_tinc_keys.yaml
@@ -0,0 +1,27 @@
+---
+- hosts: tinc
+ tasks:
+ - name: create tinc folder
+ file:
+ path: /etc/ansible/tinc/
+ state: directory
+ recurse: yes
+ - name: generate key
+ command: openssl genrsa -out /etc/ansible/tinc/private.pem 2048
+ args:
+ creates: /etc/ansible/tinc/private.pem
+ - name: generate public
+ command: openssl rsa -in /etc/ansible/tinc/private.pem -outform PEM -pubout -out /etc/ansible/tinc/public_{{ network.public_hostname }}.pem
+ args:
+ creates: /etc/ansible/tinc/public_{{ network.public_hostname }}.pem
+ - name: get public
+ fetch:
+ src: "/etc/ansible/tinc/public_{{ network.public_hostname }}.pem"
+ dest: /tmp/
+ flat: yes
+ - name: distribute public
+ copy:
+ src: "/tmp/public_{{ hostvars[item].network.public_hostname }}.pem"
+ dest: "/etc/ansible/tinc/"
+ with_inventory_hostnames:
+ - tinc
diff --git a/personal_infra/playbooks/site.yaml b/personal_infra/playbooks/site.yaml
new file mode 100644
index 00000000..b9852a44
--- /dev/null
+++ b/personal_infra/playbooks/site.yaml
@@ -0,0 +1,63 @@
+---
+- name: create lxc
+ hosts: lxc
+ gather_facts: false
+ tags: create_lxc
+ roles:
+ - proxmox_create_lxc
+
+- name: complete provision
+ hosts: all,!k8s
+ tags: puppet
+ roles:
+ - apply_puppet
+
+- name: join ipa
+ hosts: join_ipa
+ tags: join_ipa
+ roles:
+ - join_ipa
+
+- name: deploy ragent
+ hosts: all,!k8s
+ tags: deploy_ragent
+ roles:
+ - deploy_ragent
+
+- name: verify root mail
+ hosts: all,!k8s
+ tags: verify_root_mail
+ roles:
+ - verify_root_mail
+
+- name: create k8s
+ hosts: k8s
+ tags: k8s
+ gather_facts: false
+ roles:
+ - talos
+
+- name: deploy ipsilon
+ hosts: ipsilon
+ tags: ipsilon
+ roles:
+ - deploy_ipsilon
+
+- name: proxmox route 53
+ hosts: proxmox
+ tags: proxmox_route_53
+ gather_facts: false
+ roles:
+ - proxmox_route_53
+
+- name: deploy weight
+ hosts: k8s-prod.h1.int.pdp7.net
+ tags:
+ - k8s
+ - weight
+ gather_facts: false
+ roles:
+ - role: zqxjkcrud
+ vars:
+ context: "admin@{{ talos_host.talos_cluster }}"
+ definition: "{{ weight }}"
diff --git a/personal_infra/podman.md b/personal_infra/podman.md
new file mode 100644
index 00000000..703b6edf
--- /dev/null
+++ b/personal_infra/podman.md
@@ -0,0 +1,26 @@
+# Podman
+
+You can create LXC containers in Proxmox (using ZFS) that can run rootless Podman.
+
+The [`proxmox_create_lxc`](playbooks/roles/proxmox_create_lxc/) role can create the LXC container with the necessary options with the following configuration:
+
+```
+proxmox:
+...
+ privileged: true
+ features: fuse=1,nesting=1
+ extra:
+ - "lxc.mount.entry: /dev/net/tun dev/net/tun none bind,create=file"
+ - "lxc.cgroup2.devices.allow: c 10:200 rwm"
+```
+
+The [`podman`](puppet/modules/podman/) Puppet module can add the necessary configuration:
+
+```
+class {'podman':
+ user => 'your_username',
+ storage_driver => 'zfs',
+}
+```
+
+This module configures subuids/subgids, but until you reboot, you will get some warnings using Podman.
diff --git a/personal_infra/pseudo_resource_exporter.py b/personal_infra/pseudo_resource_exporter.py
new file mode 100755
index 00000000..9ef498bb
--- /dev/null
+++ b/personal_infra/pseudo_resource_exporter.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python3
+import json
+import pathlib
+import subprocess
+
+
+"""
+This is an ugly hack.
+
+Puppet exported resources are very nice for generating monitoring configuration
+along with your Puppet resources. As you define something like an Apache
+virtual host, you can create a Nagios service check for it.
+
+But this requires a PuppetDB, and does not play nice with having no central
+Puppet infra.
+
+This script takes the JSON catalog files generated by its sibling script
+up.py and manipulates them: it moves Nagios resources to a specific host
+and does ugly trickery to fool Puppet into accepting that.
+
+This is like exported resources, but you don't need to declare a resource as
+exported.
+"""
+
+
+def load_json(path):
+ with open(path) as f:
+ return json.load(f)
+
+
+def save_json(r, path):
+ with open(path, "w") as f:
+ json.dump(r, f)
+
+
+nagios_catalog_file = pathlib.Path("build/puppet/build/output/nagios.h1.int.pdp7.net/catalog.json")
+
+if nagios_catalog_file.exists():
+ nagios_catalog = load_json(nagios_catalog_file)
+
+ nagios_contacts = [r for r in nagios_catalog["resources"] if r["type"] == "Nagios_contact"]
+ assert len(nagios_contacts) == 1, f"found multiple nagios contacts {nagios_contacts}"
+ nagios_contact = nagios_contacts[0]
+
+
+ail = subprocess.run(["ansible-inventory", "--list"], check=True, stdout=subprocess.PIPE)
+inventory = json.loads(ail.stdout)
+total_hosts_in_inventory = len(inventory["_meta"]["hostvars"].keys())
+k8s_hosts_in_inventory = len(inventory["k8s"]["hosts"])
+puppet_hosts_in_inventory = total_hosts_in_inventory - k8s_hosts_in_inventory
+
+catalog_files = list(pathlib.Path("build/puppet/build/output/").glob("*/catalog.json"))
+
+if nagios_catalog_file.exists():
+ assert len(catalog_files) == puppet_hosts_in_inventory, f"catalogs {catalog_files} quantity different from total hosts in inventory {puppet_hosts_in_inventory}"
+
+
+nagios_resources = []
+nagios_edge_targets = []
+
+
+def is_nagios_resource(r):
+ return r["type"].startswith("Nagios")
+
+
+def is_nagios_edge(e):
+ return e["target"].startswith("Nagios")
+
+
+for catalog_file in catalog_files:
+ if catalog_file == nagios_catalog_file:
+ continue
+ catalog = load_json(catalog_file)
+ nagios_resources += [r for r in catalog["resources"] if is_nagios_resource(r)]
+ catalog["resources"] = [r for r in catalog["resources"] if not is_nagios_resource(r)]
+ nagios_edge_targets += [e["target"] for e in catalog["edges"] if is_nagios_edge(e)]
+ catalog["edges"] = [e for e in catalog["edges"] if not is_nagios_edge(e)]
+ save_json(catalog, catalog_file)
+
+
+if nagios_catalog_file.exists():
+ nagios_contact_position = nagios_catalog["resources"].index(nagios_contact)
+
+ def copy_parameters(r):
+ for p in ["require", "notify", "owner"]:
+ r["parameters"][p] = nagios_contact["parameters"][p]
+ return r
+
+ nagios_catalog["resources"] = (
+ nagios_catalog["resources"][0:nagios_contact_position] +
+ list(map(copy_parameters, nagios_resources)) +
+ nagios_catalog["resources"][nagios_contact_position:]
+ )
+
+ nagios_catalog["edges"] += [{"source": "Class[Nagios]", "target": t} for t in nagios_edge_targets]
+
+ save_json(nagios_catalog, nagios_catalog_file)
diff --git a/personal_infra/puppet/modules/automatic_updates/manifests/init.pp b/personal_infra/puppet/modules/automatic_updates/manifests/init.pp
new file mode 100644
index 00000000..8585b0ae
--- /dev/null
+++ b/personal_infra/puppet/modules/automatic_updates/manifests/init.pp
@@ -0,0 +1,33 @@
+class automatic_updates {
+ if ($facts['os']['family'] == 'Debian') {
+ package {["unattended-upgrades", "apt-listchanges"]:}
+ }
+ elsif ($facts['os']['family'] == 'RedHat') {
+ if ($facts['os']['release']['major'] == '7') {
+ package {'yum-cron':}
+ ->
+ file {"/etc/yum/yum-cron.conf":
+ content => epp('automatic_updates/yum-cron.conf'),
+ }
+ ~>
+ service {'yum-cron':
+ ensure => running,
+ enable => true,
+ }
+ }
+ elsif ($facts['os']['release']['major'] == '8' or $facts['os']['release']['major'] == '9') {
+ package {'dnf-automatic':}
+ ->
+ service {'dnf-automatic-install.timer':
+ ensure => running,
+ enable => true,
+ }
+ }
+ else {
+ fail($facts['os']['release']['major'])
+ }
+ }
+ else {
+ fail($facts['os'])
+ }
+}
diff --git a/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp b/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp
new file mode 100644
index 00000000..bd1ec685
--- /dev/null
+++ b/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp
@@ -0,0 +1,81 @@
+[commands]
+# What kind of update to use:
+# default = yum upgrade
+# security = yum --security upgrade
+# security-severity:Critical = yum --sec-severity=Critical upgrade
+# minimal = yum --bugfix update-minimal
+# minimal-security = yum --security update-minimal
+# minimal-security-severity:Critical = --sec-severity=Critical update-minimal
+update_cmd = default
+
+# Whether a message should be emitted when updates are available,
+# were downloaded, or applied.
+update_messages = yes
+
+# Whether updates should be downloaded when they are available.
+download_updates = yes
+
+# Whether updates should be applied when they are available. Note
+# that download_updates must also be yes for the update to be applied.
+apply_updates = yes
+
+# Maximum amount of time to randomly sleep, in minutes. The program
+# will sleep for a random amount of time between 0 and random_sleep
+# minutes before running. This is useful for e.g. staggering the
+# times that multiple systems will access update servers. If
+# random_sleep is 0 or negative, the program will run immediately.
+# 6*60 = 360
+random_sleep = 360
+
+
+[emitters]
+# Name to use for this system in messages that are emitted. If
+# system_name is None, the hostname will be used.
+system_name = None
+
+# How to send messages. Valid options are stdio and email. If
+# emit_via includes stdio, messages will be sent to stdout; this is useful
+# to have cron send the messages. If emit_via includes email, this
+# program will send email itself according to the configured options.
+# If emit_via is None or left blank, no messages will be sent.
+emit_via = stdio
+
+# The width, in characters, that messages that are emitted should be
+# formatted to.
+output_width = 80
+
+
+[email]
+# The address to send email messages from.
+# NOTE: 'localhost' will be replaced with the value of system_name.
+email_from = root@localhost
+
+# List of addresses to send messages to.
+email_to = root
+
+# Name of the host to connect to to send email messages.
+email_host = localhost
+
+
+[groups]
+# NOTE: This only works when group_command != objects, which is now the default
+# List of groups to update
+group_list = None
+
+# The types of group packages to install
+group_package_types = mandatory, default
+
+[base]
+# This section overrides yum.conf
+
+# Use this to filter Yum core messages
+# -4: critical
+# -3: critical+errors
+# -2: critical+errors+warnings (default)
+debuglevel = -2
+
+# skip_broken = True
+mdpolicy = group:main
+
+# Uncomment to auto-import new gpg keys (dangerous)
+# assumeyes = True
diff --git a/personal_infra/puppet/modules/backups/manifests/init.pp b/personal_infra/puppet/modules/backups/manifests/init.pp
new file mode 100644
index 00000000..f98d598f
--- /dev/null
+++ b/personal_infra/puppet/modules/backups/manifests/init.pp
@@ -0,0 +1,11 @@
+class backups($sanoid_config) {
+ package {'sanoid':}
+
+ file {'/etc/sanoid':
+ ensure => directory,
+ }
+ ->
+ file {'/etc/sanoid/sanoid.conf':
+ content => $sanoid_config,
+ }
+}
diff --git a/personal_infra/puppet/modules/basic_software/manifests/init.pp b/personal_infra/puppet/modules/basic_software/manifests/init.pp
new file mode 100644
index 00000000..fcceefb2
--- /dev/null
+++ b/personal_infra/puppet/modules/basic_software/manifests/init.pp
@@ -0,0 +1,7 @@
+class basic_software {
+ package {['less', 'mlocate', 'traceroute', 'nmap', 'tree', 'tar']:}
+
+ if($facts['os']['family'] == 'RedHat') {
+ package {'which':}
+ }
+}
diff --git a/personal_infra/puppet/modules/copr/manifests/init.pp b/personal_infra/puppet/modules/copr/manifests/init.pp
new file mode 100644
index 00000000..2d0474a5
--- /dev/null
+++ b/personal_infra/puppet/modules/copr/manifests/init.pp
@@ -0,0 +1,20 @@
+define copr (
+ String[1] $user,
+ String[1] $project = $title,
+ String[1] $dist,
+) {
+ file {"/etc/yum.repos.d/_copr:copr.fedorainfracloud.org:$user:$project.repo":
+ content => @("REPO"/$)
+ [copr:copr.fedorainfracloud.org:$user:$project]
+ name=Copr repo for $project owned by $user
+ baseurl=https://download.copr.fedorainfracloud.org/results/$user/$project/$dist-\$basearch/
+ type=rpm-md
+ skip_if_unavailable=True
+ gpgcheck=1
+ gpgkey=https://download.copr.fedorainfracloud.org/results/$user/$project/pubkey.gpg
+ repo_gpgcheck=0
+ enabled=1
+ enabled_metadata=1
+ | - REPO
+ }
+}
diff --git a/personal_infra/puppet/modules/debian/manifests/backports.pp b/personal_infra/puppet/modules/debian/manifests/backports.pp
new file mode 100644
index 00000000..4f33bf22
--- /dev/null
+++ b/personal_infra/puppet/modules/debian/manifests/backports.pp
@@ -0,0 +1,9 @@
+class debian::backports {
+ $codename = $facts['os']['distro']['codename']
+
+ file {'/etc/apt/sources.list.d/backports.list':
+ content => "deb http://deb.debian.org/debian ${codename}-backports main\n",
+ }
+ ~>
+ Exec["/usr/bin/apt update"]
+}
diff --git a/personal_infra/puppet/modules/debian/manifests/init.pp b/personal_infra/puppet/modules/debian/manifests/init.pp
new file mode 100644
index 00000000..fd85713d
--- /dev/null
+++ b/personal_infra/puppet/modules/debian/manifests/init.pp
@@ -0,0 +1,5 @@
+class debian {
+ exec {'/usr/bin/apt update':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp b/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp
new file mode 100644
index 00000000..f7c79724
--- /dev/null
+++ b/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp
@@ -0,0 +1,36 @@
+class dns_dhcp {
+ $domain = lookup('network.dns_dhcp.domain')
+
+ $hostvars = lookup('hostvars')
+ $fixed_dhcp_host_vars = $hostvars.filter |$host, $vars| { $vars['network'] and $vars['network']['dhcp_server'] == $facts["networking"]["fqdn"] }
+ $fixed_dhcp_hosts = Hash($fixed_dhcp_host_vars.map |$host, $vars| { [$host.match(/^[-a-z0-9]+/)[0], $vars['network']['ip'] ] })
+
+ $fixed_host_vars = $hostvars.filter |$host, $vars| { $vars['network'] and $vars['network']['register_dns_server'] == $facts["networking"]["fqdn"] }
+ $fixed_hosts = Hash($fixed_host_vars.map |$host, $vars| { [$host.match(/^[-a-z0-9]+/)[0], $vars['network']['ip'] ] })
+
+
+ package {'dnsmasq':}
+ ->
+ file {'/etc/dnsmasq.d':
+ ensure => directory,
+ purge => true,
+ recurse => true,
+ }
+ file {'/etc/dnsmasq.d/internal':
+ content => epp('dns_dhcp/internal', {
+ 'dns_dhcp' => lookup("network.dns_dhcp"),
+ 'dns_other_server_defs' => $dns_other_server_defs,
+ 'fixed_dhcp_hosts' => $fixed_dhcp_hosts,
+ 'fixed_hosts' => $fixed_hosts,
+ }),
+ }
+ ~>
+ service {'dnsmasq':
+ enable => true,
+ ensure => running,
+ }
+ ->
+ file {'/etc/resolv.conf':
+ content => "domain ${domain}\nsearch ${domain}\nnameserver 127.0.0.1\n",
+ }
+}
diff --git a/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp b/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp
new file mode 100644
index 00000000..9b751855
--- /dev/null
+++ b/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp
@@ -0,0 +1,30 @@
+domain-needed
+no-resolv
+no-hosts
+
+server=<%= $dns_dhcp['upstream_dns'] %>
+<% if $dns_dhcp['domain'] { %>
+local=/<%= $dns_dhcp['domain'] %>/
+domain=<%= $dns_dhcp['domain'] %>
+<% } %>
+
+<% if $dns_dhcp['dhcp_range'] { %>
+dhcp-range=<%= $dns_dhcp['dhcp_range'] %>
+
+dhcp-option=option:router,<%= $dns_dhcp['router'] %>
+<% } %>
+
+interface=<%= join($dns_dhcp['interfaces'], ',') %>
+
+<% $dns_other_server_defs.each |$server_def| { %>
+server=/<%= $server_def['network_name'] %>/<%= $server_def['dns_ip'] %>
+rev-server=<%= $server_def['reverse_ip_range'] %>,<%= $server_def['dns_ip'] %>
+<% } %>
+
+<% $fixed_dhcp_hosts.each |$host, $ip| { %>
+dhcp-host=<%= $host %>,<%= $ip %>,<%= $host %>
+<% } %>
+
+<% $fixed_hosts.each |$host, $ip| { %>
+host-record=<%= $host %>.<%= $dns_dhcp['domain'] %>,<%= $host %>,<%= $ip %>
+<% } %>
diff --git a/personal_infra/puppet/modules/freeipa/manifests/server.pp b/personal_infra/puppet/modules/freeipa/manifests/server.pp
new file mode 100644
index 00000000..6ca10a43
--- /dev/null
+++ b/personal_infra/puppet/modules/freeipa/manifests/server.pp
@@ -0,0 +1,13 @@
+class freeipa::server {
+ package {['ipa-server', 'ipa-server-dns', 'ipa-healthcheck']:}
+ ~>
+ service {'ipa-healthcheck.timer':
+ ensure => running,
+ enable => true,
+ }
+
+ # weak dependency that does not work on LXC
+ package {'low-memory-monitor':
+ ensure => purged,
+ }
+}
diff --git a/personal_infra/puppet/modules/ipsilon/manifests/init.pp b/personal_infra/puppet/modules/ipsilon/manifests/init.pp
new file mode 100644
index 00000000..aa0908aa
--- /dev/null
+++ b/personal_infra/puppet/modules/ipsilon/manifests/init.pp
@@ -0,0 +1,8 @@
+class ipsilon {
+ package {['ipsilon-tools-ipa', 'ipsilon-openidc']:}
+
+ service {'httpd':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/mailalias_core b/personal_infra/puppet/modules/mailalias_core
new file mode 160000
+Subproject e6230faf076a5ed7b474ed67a4c6c0802d0b7b5
diff --git a/personal_infra/puppet/modules/miniflux/manifests/init.pp b/personal_infra/puppet/modules/miniflux/manifests/init.pp
new file mode 100644
index 00000000..179cfc14
--- /dev/null
+++ b/personal_infra/puppet/modules/miniflux/manifests/init.pp
@@ -0,0 +1,27 @@
+class miniflux($database_url, $polling_frequency, $batch_size, $polling_parser_error_limit) {
+ file {'/etc/yum.repos.d/miniflux.repo':
+ content => "[miniflux]
+name=Miniflux Repository
+baseurl=https://repo.miniflux.app/yum/
+enabled=1
+gpgcheck=0
+",
+ }
+ ->
+ package {'miniflux':}
+ ->
+ file {'/etc/miniflux.conf':
+ content => "LISTEN_ADDR=0.0.0.0:8080
+RUN_MIGRATIONS=1
+DATABASE_URL=$database_url
+POLLING_FREQUENCY=$polling_frequency
+BATCH_SIZE=$batch_size
+POLLING_PARSING_ERROR_LIMIT=$polling_parser_error_limit
+",
+ }
+ ~>
+ service {'miniflux':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/files/check_talos_version b/personal_infra/puppet/modules/nagios/files/check_talos_version
new file mode 120000
index 00000000..16932959
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/files/check_talos_version
@@ -0,0 +1 @@
+../../../../talos-check/check_talos_version \ No newline at end of file
diff --git a/personal_infra/puppet/modules/nagios/manifests/init.pp b/personal_infra/puppet/modules/nagios/manifests/init.pp
new file mode 100644
index 00000000..5568fbf9
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/manifests/init.pp
@@ -0,0 +1,100 @@
+class nagios {
+ package {'nagios':}
+ ->
+ service {'nagios':
+ ensure => running,
+ enable => true,
+ }
+
+ file {
+ default:
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ ;
+ '/etc/nagios':
+ ensure => directory,
+ recurse => true,
+ force => true,
+ purge => true,
+ ;
+ '/etc/nagios/nagios.cfg':
+ content => epp('nagios/nagios.cfg'),
+ ;
+ # leave these unaffected
+ ['/etc/nagios/passwd', '/etc/nagios/cgi.cfg', '/etc/nagios/private/resource.cfg', '/etc/nagios/objects', '/etc/nagios/private', '/etc/nagios/objects/commands.cfg', '/etc/nagios/objects/timeperiods.cfg', '/etc/nagios/objects/templates.cfg']:
+ ensure => present,
+ ;
+ }
+
+ nagios_contact {'nagiosadmin':
+ use => 'generic-contact',
+ email => lookup('mail.root_mail'),
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_contactgroup {'admins':
+ members => 'nagiosadmin',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_command {'check_ragent':
+ command_name => 'check_ragent',
+ command_line => '/usr/bin/check_ragent http://$HOSTADDRESS$:21488/ --warning-units dnf-makecache.service --warning-units dnf-automatic-install.service',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_hostgroup {'linux':
+ hostgroup_name => 'linux',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_servicegroup {'ragent':
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_service {'check_ragent':
+ use => 'generic-service',
+ hostgroup_name => 'linux',
+ service_description => 'check_ragent',
+ servicegroups => 'ragent',
+ check_command => 'check_ragent',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_service {'check_ssh':
+ use => 'generic-service',
+ hostgroup_name => 'linux',
+ service_description => 'ssh',
+ check_command => 'check_ssh',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ package {'httpd':}
+ ->
+ service {'httpd':
+ ensure => running,
+ enable => true,
+ }
+
+ if $facts['virtual'] == 'lxc' {
+ file {'/bin/ping':
+ mode => 'u+s',
+ }
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/manifests/k8s.pp b/personal_infra/puppet/modules/nagios/manifests/k8s.pp
new file mode 100644
index 00000000..8eada3c9
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/manifests/k8s.pp
@@ -0,0 +1,41 @@
+class nagios::k8s {
+ file {'/usr/local/bin/check_talos_version':
+ content => file('nagios/check_talos_version'),
+ mode => '0755',
+ links => follow,
+ }
+
+ nagios_command {'check_talos':
+ command_name => 'check_talos',
+ command_line => '/usr/local/bin/check_talos_version http://$HOSTADDRESS$ monitor',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_hostgroup {'k8s':
+ hostgroup_name => 'k8s',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_servicegroup {'talos_check':
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_service {'talos-check':
+ use => 'generic-service',
+ hostgroup_name => 'k8s',
+ service_description => 'check_talos',
+ servicegroups => 'talos_check',
+ check_command => 'check_talos',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp b/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp
new file mode 100644
index 00000000..8e28ceb8
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp
@@ -0,0 +1,1373 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 4.4.9
+#
+# Read the documentation for more information on this configuration
+# file. I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# puppet generated
+cfg_file=/etc/nagios/nagios_contactgroup.cfg
+cfg_file=/etc/nagios/nagios_contact.cfg
+cfg_file=/etc/nagios/nagios_command.cfg
+cfg_file=/etc/nagios/nagios_hostgroup.cfg
+cfg_file=/etc/nagios/nagios_servicegroup.cfg
+cfg_file=/etc/nagios/nagios_host.cfg
+cfg_file=/etc/nagios/nagios_service.cfg
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/spool/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This options determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find our more
+# about how this feature works.
+
+precached_object_file=/var/spool/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios/private/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/spool/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).
+# By default Nagios will check for external commands.
+# If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/spool/nagios/cmd/nagios.cmd
+
+
+
+# QUERY HANDLER INTERFACE
+# This is the socket that is created for the Query Handler interface
+
+#query_socket=/var/spool/nagios/cmd/nagios.qh
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios/nagios.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/spool/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# by loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place,
+# restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1. If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# CURRENT STATES LOGGING OPTION
+# If you don't want Nagios to log all current host and service states
+# after the log has been rotated to the main log file, you can disable this
+# option by setting this value to 0. Default value is 1.
+
+log_current_states=1
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/spool/nagios/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic test
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of service when it predicts that future dependency logic test
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+ochp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts.  Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/spool/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file.  If you
+# If you want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available.  It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios. Nagios is critical to you - make sure you keep it in
+# good shape. Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance
+# with our privacy policy - see https://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates. By default, Nagios will send information on the
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not. Nagios Enterprises uses
+# this data to determine the number of users running specific versions of
+# Nagios. Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# enable_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# enable_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/var/log/nagios/host-perfdata
+#service_perfdata_file=/var/log/nagios/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoids blocking at startup, otherwise you will
+# likely want the default append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESS EMPTY RESULTS
+# These options determine whether the core will process empty perfdata
+# results or not. This is needed for distributed monitoring, and intentionally
+# turned on by default.
+# If you don't require empty perfdata - saving some cpu cycles
+# on unwanted macro calculation - you can turn that off. Be careful!
+# Values: 1 = enable, 0 = disable
+
+#host_perfdata_process_empty_results=1
+#service_perfdata_process_empty_results=1
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host and service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# SERVICE CHECK TIMEOUT STATE
+# This setting determines the state Nagios will report when a
+# service check times out - that is does not respond within
+# service_check_timeout seconds. This can be useful if a
+# machine is running at too high a load and you do not want
+# to consider a failed service check to be critical (the default).
+# Valid settings are:
+# c - Critical (default)
+# u - Unknown
+# w - Warning
+# o - OK
+
+service_check_timeout_state=c
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $LONGHOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $LONGSERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.
+# Enabling this is a very bad idea for anything but very small setups,
+# as it means plugins, notification scripts and eventhandlers may run
+# out of environment space. It will also cause a significant increase
+# in CPU- and memory usage and drastically reduce the number of checks
+# you can run.
+# Values: 1 - Enable environment variable macros
+# 0 - Disable environment variable macros (default)
+
+enable_environment_macros=0
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
+# 4096 = Interprocess communication
+# 8192 = Scheduling
+# 16384 = Workers
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log out will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+
+
+# Should we allow hostgroups to have no hosts, we default this to off since
+# that was the old behavior
+
+allow_empty_hostgroup_assignment=0
+
+
+
+# Normally worker count is dynamically allocated based on 1.5 * number of cpu's
+# with a minimum of 4 workers. This value will override the defaults
+
+#check_workers=3
+
+
+
+# DISABLE SERVICE CHECKS WHEN HOST DOWN
+# This option will disable all service checks if the host is not in an UP state
+#
+# While desirable in some environments, enabling this value can distort report
+# values as the expected quantity of checks will not have been performed
+
+#host_down_disable_service_checks=0
+
+
+
+# SET SERVICE/HOST STATUS WHEN SERVICE CHECK SKIPPED
+# These options will allow you to set the status of a service when its
+# service check is skipped due to one of three reasons:
+# 1) failed dependency check; 2) parent's status; 3) host not up
+# Number 3 can only happen if 'host_down_disable_service_checks' above
+# is set to 1.
+# Valid values for the service* options are:
+# -1 Do not change the service status (default - same as before 4.4)
+# 0 Set the service status to STATE_OK
+# 1 Set the service status to STATE_WARNING
+# 2 Set the service status to STATE_CRITICAL
+# 3 Set the service status to STATE_UNKNOWN
+# The host_skip_check_dependency_status option will allow you to set the
+# status of a host when its check is skipped due to a failed dependency check.
+# Valid values for the host_skip_check_dependency_status are:
+# -1 Do not change the service status (default - same as before 4.4)
+# 0 Set the host status to STATE_UP
+# 1 Set the host status to STATE_DOWN
+# 2 Set the host status to STATE_UNREACHABLE
+# We may add one or more statuses in the future.
+
+#service_skip_check_dependency_status=-1
+#service_skip_check_parent_status=-1
+#service_skip_check_host_down_status=-1
+#host_skip_check_dependency_status=-1
+
+
+
+# LOAD CONTROL OPTIONS
+# To get current defaults based on your system, issue this command to
+# the query handler:
+# echo -e '@core loadctl\0' | nc -U /usr/local/nagios/var/rw/nagios.qh
+#
+# Please note that used incorrectly these options can induce enormous latency.
+#
+# loadctl_options:
+# jobs_max The maximum amount of jobs to run at one time
+# jobs_min The minimum amount of jobs to run at one time
+# jobs_limit The maximum amount of jobs the current load lets us run
+# backoff_limit The minimum backoff_change
+# backoff_change # of jobs to remove from jobs_limit when backing off
+# rampup_limit Minimum rampup_change
+# rampup_change # of jobs to add to jobs_limit when ramping up
+
+#loadctl_options=jobs_max=100;backoff_limit=10;rampup_change=5
diff --git a/personal_infra/puppet/modules/nagios_core b/personal_infra/puppet/modules/nagios_core
new file mode 160000
+Subproject 8dbf9f12383bd29973963a52968b2850d98292f
diff --git a/personal_infra/puppet/modules/nextcloud/manifests/init.pp b/personal_infra/puppet/modules/nextcloud/manifests/init.pp
new file mode 100644
index 00000000..1c41215e
--- /dev/null
+++ b/personal_infra/puppet/modules/nextcloud/manifests/init.pp
@@ -0,0 +1,79 @@
+# Deploys Nextcloud on EL9: application packages come from the koalillo
+# COPR repository, PHP 8.2 from the Remi repository, served via httpd +
+# php-fpm, with background jobs via the nextcloud-cron systemd timer and
+# an hourly preview-generation cron entry.
+#
+# NOTE(review): $database_name/$database_user/$database_host are accepted
+# but never referenced in this class body — presumably consumed elsewhere
+# or reserved for future DB provisioning; confirm before relying on them.
+class nextcloud(
+ $database_name,
+ $database_user,
+ $database_host,
+) {
+
+ # COPR repo that provides the nextcloud-* packages installed below.
+ file {'/etc/yum.repos.d/koalillo-nextcloud-epel-9.repo':
+ content => @("EOT"/$)
+ [copr:copr.fedorainfracloud.org:koalillo:nextcloud-test]
+ name=Copr repo for nextcloud owned by koalillo
+ baseurl=https://download.copr.fedorainfracloud.org/results/koalillo/nextcloud-test/epel-9-\$basearch/
+ type=rpm-md
+ skip_if_unavailable=True
+ gpgcheck=1
+ gpgkey=https://download.copr.fedorainfracloud.org/results/koalillo/nextcloud-test/pubkey.gpg
+ repo_gpgcheck=0
+ enabled=1
+ enabled_metadata=1
+ | EOT
+ ,
+ }
+
+ # Remi repo supplies the php:remi-8.2 dnf module stream enabled next.
+ package {'remi-release':
+ source => 'https://rpms.remirepo.net/enterprise/remi-release-9.rpm',
+ }
+ ->
+ # Enable the PHP 8.2 module stream; 'unless' keeps the exec idempotent.
+ exec {'/usr/bin/dnf module enable -y php:remi-8.2':
+ unless => '/usr/bin/dnf module list --enabled php | grep remi-8.2',
+ }
+
+ # Core Nextcloud packages; both repositories must be in place first.
+ package {['nextcloud-httpd', 'nextcloud-postgresql', 'php82-php-pecl-apcu', 'php-sodium', 'php-opcache',]:
+ require => [
+ Exec['/usr/bin/dnf module enable -y php:remi-8.2'],
+ File['/etc/yum.repos.d/koalillo-nextcloud-epel-9.repo'],
+ ],
+ }
+
+ # Web server; restarted whenever the nextcloud-httpd package changes.
+ service {'httpd':
+ enable => true,
+ ensure => running,
+ subscribe => Package['nextcloud-httpd'],
+ }
+
+ # systemd timer driving Nextcloud's background jobs (cron.php).
+ service {'nextcloud-cron.timer':
+ ensure => running,
+ enable => true,
+ require => Package['nextcloud-httpd'],
+ }
+
+ # php-fpm pool config rendered from this module's template; a content
+ # change refreshes php-fpm via the ~> chain below.
+ file {'/etc/php-fpm.d/www.conf':
+ content => epp("nextcloud/www.conf", {}),
+ }
+ ~>
+ service {'php-fpm':
+ enable => true,
+ ensure => running,
+ subscribe => Package['nextcloud-httpd'],
+ }
+
+ # Symlink (ensure => <target>) enabling the packaged access config that
+ # ships disabled as *.avail; httpd is reloaded when it appears.
+ file {'/etc/httpd/conf.d/z-nextcloud-access.conf':
+ ensure => '/etc/httpd/conf.d/nextcloud-access.conf.avail',
+ require => Package['nextcloud-httpd'],
+ notify => Service['httpd'],
+ }
+
+ package {['php-intl', 'php-bcmath']:}
+
+ # Enable APCu for the CLI SAPI too, so `occ` commands can use the cache.
+ file {'/etc/php.d/99-apcu-cli.ini':
+ content => @("EOT")
+ apc.enable_cli=1
+ | EOT
+ ,
+ }
+
+ # Pre-generate image previews hourly at minute 41 (runs as root, drops
+ # to apache via sudo so files stay owned by the web user).
+ cron {"nextcloud-previews":
+ command => "sudo -u apache php -d memory_limit=512M /usr/share/nextcloud/occ preview:generate-all",
+ minute => "41",
+ }
+}
diff --git a/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp b/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp
new file mode 100644
index 00000000..70db53e4
--- /dev/null
+++ b/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp
@@ -0,0 +1,439 @@
+; Start a new pool named 'www'.
+; the variable $pool can be used in any directive and will be replaced by the
+; pool name ('www' here)
+[www]
+
+; Per pool prefix
+; It only applies on the following directives:
+; - 'access.log'
+; - 'slowlog'
+; - 'listen' (unixsocket)
+; - 'chroot'
+; - 'chdir'
+; - 'php_values'
+; - 'php_admin_values'
+; When not set, the global prefix (or @php_fpm_prefix@) applies instead.
+; Note: This directive can also be relative to the global prefix.
+; Default Value: none
+;prefix = /path/to/pools/$pool
+
+; Unix user/group of processes
+; Note: The user is mandatory. If the group is not set, the default user's group
+; will be used.
+; RPM: apache user chosen to provide access to the same directories as httpd
+user = apache
+; RPM: Keep a group allowed to write in log dir.
+group = apache
+
+; The address on which to accept FastCGI requests.
+; Valid syntaxes are:
+; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
+; a specific port;
+; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
+; a specific port;
+; 'port' - to listen on a TCP socket to all addresses
+; (IPv6 and IPv4-mapped) on a specific port;
+; '/path/to/unix/socket' - to listen on a unix socket.
+; Note: This value is mandatory.
+listen = /run/php-fpm/www.sock
+
+; Set listen(2) backlog.
+; Default Value: 511
+;listen.backlog = 511
+
+; Set permissions for unix socket, if one is used. In Linux, read/write
+; permissions must be set in order to allow connections from a web server.
+; Default Values: user and group are set as the running user
+; mode is set to 0660
+;listen.owner = nobody
+;listen.group = nobody
+;listen.mode = 0660
+
+; When POSIX Access Control Lists are supported you can set them using
+; these options, value is a comma separated list of user/group names.
+; When set, listen.owner and listen.group are ignored
+listen.acl_users = apache,nginx
+;listen.acl_groups =
+
+; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
+; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
+; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
+; must be separated by a comma. If this value is left blank, connections will be
+; accepted from any ip address.
+; Default Value: any
+listen.allowed_clients = 127.0.0.1
+
+; Specify the nice(2) priority to apply to the pool processes (only if set)
+; The value can vary from -19 (highest priority) to 20 (lower priority)
+; Note: - It will only work if the FPM master process is launched as root
+; - The pool processes will inherit the master process priority
+; unless it specified otherwise
+; Default Value: no set
+; process.priority = -19
+
+; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
+; or group is different than the master process user. It allows creating a process
+; core dump and ptrace the process for the pool user.
+; Default Value: no
+; process.dumpable = yes
+
+; Choose how the process manager will control the number of child processes.
+; Possible Values:
+; static - a fixed number (pm.max_children) of child processes;
+; dynamic - the number of child processes are set dynamically based on the
+; following directives. With this process management, there will be
+; always at least 1 children.
+; pm.max_children - the maximum number of children that can
+; be alive at the same time.
+; pm.start_servers - the number of children created on startup.
+; pm.min_spare_servers - the minimum number of children in 'idle'
+; state (waiting to process). If the number
+; of 'idle' processes is less than this
+; number then some children will be created.
+; pm.max_spare_servers - the maximum number of children in 'idle'
+; state (waiting to process). If the number
+; of 'idle' processes is greater than this
+; number then some children will be killed.
+; ondemand - no children are created at startup. Children will be forked when
+; new requests will connect. The following parameter are used:
+; pm.max_children - the maximum number of children that
+; can be alive at the same time.
+; pm.process_idle_timeout - The number of seconds after which
+; an idle process will be killed.
+; Note: This value is mandatory.
+pm = dynamic
+
+; The number of child processes to be created when pm is set to 'static' and the
+; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
+; This value sets the limit on the number of simultaneous requests that will be
+; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
+; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
+; CGI. The below defaults are based on a server without much resources. Don't
+; forget to tweak pm.* to fit your needs.
+; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
+; Note: This value is mandatory.
+pm.max_children = 50
+
+; The number of child processes created on startup.
+; Note: Used only when pm is set to 'dynamic'
+; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
+pm.start_servers = 5
+
+; The desired minimum number of idle server processes.
+; Note: Used only when pm is set to 'dynamic'
+; Note: Mandatory when pm is set to 'dynamic'
+pm.min_spare_servers = 5
+
+; The desired maximum number of idle server processes.
+; Note: Used only when pm is set to 'dynamic'
+; Note: Mandatory when pm is set to 'dynamic'
+pm.max_spare_servers = 35
+
+; The number of seconds after which an idle process will be killed.
+; Note: Used only when pm is set to 'ondemand'
+; Default Value: 10s
+;pm.process_idle_timeout = 10s;
+
+; The number of requests each child process should execute before respawning.
+; This can be useful to work around memory leaks in 3rd party libraries. For
+; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
+; Default Value: 0
+;pm.max_requests = 500
+
+; The URI to view the FPM status page. If this value is not set, no URI will be
+; recognized as a status page. It shows the following informations:
+; pool - the name of the pool;
+; process manager - static, dynamic or ondemand;
+; start time - the date and time FPM has started;
+; start since - number of seconds since FPM has started;
+; accepted conn - the number of request accepted by the pool;
+; listen queue - the number of request in the queue of pending
+; connections (see backlog in listen(2));
+; max listen queue - the maximum number of requests in the queue
+; of pending connections since FPM has started;
+; listen queue len - the size of the socket queue of pending connections;
+; idle processes - the number of idle processes;
+; active processes - the number of active processes;
+; total processes - the number of idle + active processes;
+; max active processes - the maximum number of active processes since FPM
+; has started;
+; max children reached - number of times, the process limit has been reached,
+; when pm tries to start more children (works only for
+; pm 'dynamic' and 'ondemand');
+; Value are updated in real time.
+; Example output:
+; pool: www
+; process manager: static
+; start time: 01/Jul/2011:17:53:49 +0200
+; start since: 62636
+; accepted conn: 190460
+; listen queue: 0
+; max listen queue: 1
+; listen queue len: 42
+; idle processes: 4
+; active processes: 11
+; total processes: 15
+; max active processes: 12
+; max children reached: 0
+;
+; By default the status page output is formatted as text/plain. Passing either
+; 'html', 'xml' or 'json' in the query string will return the corresponding
+; output syntax. Example:
+; http://www.foo.bar/status
+; http://www.foo.bar/status?json
+; http://www.foo.bar/status?html
+; http://www.foo.bar/status?xml
+;
+; By default the status page only outputs short status. Passing 'full' in the
+; query string will also return status for each pool process.
+; Example:
+; http://www.foo.bar/status?full
+; http://www.foo.bar/status?json&full
+; http://www.foo.bar/status?html&full
+; http://www.foo.bar/status?xml&full
+; The Full status returns for each process:
+; pid - the PID of the process;
+; state - the state of the process (Idle, Running, ...);
+; start time - the date and time the process has started;
+; start since - the number of seconds since the process has started;
+; requests - the number of requests the process has served;
+; request duration - the duration in µs of the requests;
+; request method - the request method (GET, POST, ...);
+; request URI - the request URI with the query string;
+; content length - the content length of the request (only with POST);
+; user - the user (PHP_AUTH_USER) (or '-' if not set);
+; script - the main script called (or '-' if not set);
+; last request cpu - the %cpu the last request consumed
+; it's always 0 if the process is not in Idle state
+; because CPU calculation is done when the request
+; processing has terminated;
+; last request memory - the max amount of memory the last request consumed
+; it's always 0 if the process is not in Idle state
+; because memory calculation is done when the request
+; processing has terminated;
+; If the process is in Idle state, then informations are related to the
+; last request the process has served. Otherwise informations are related to
+; the current request being served.
+; Example output:
+; ************************
+; pid: 31330
+; state: Running
+; start time: 01/Jul/2011:17:53:49 +0200
+; start since: 63087
+; requests: 12808
+; request duration: 1250261
+; request method: GET
+; request URI: /test_mem.php?N=10000
+; content length: 0
+; user: -
+; script: /home/fat/web/docs/php/test_mem.php
+; last request cpu: 0.00
+; last request memory: 0
+;
+; Note: There is a real-time FPM status monitoring sample web page available
+; It's available in: @EXPANDED_DATADIR@/fpm/status.html
+;
+; Note: The value must start with a leading slash (/). The value can be
+; anything, but it may not be a good idea to use the .php extension or it
+; may conflict with a real PHP file.
+; Default Value: not set
+;pm.status_path = /status
+
+; The ping URI to call the monitoring page of FPM. If this value is not set, no
+; URI will be recognized as a ping page. This could be used to test from outside
+; that FPM is alive and responding, or to
+; - create a graph of FPM availability (rrd or such);
+; - remove a server from a group if it is not responding (load balancing);
+; - trigger alerts for the operating team (24/7).
+; Note: The value must start with a leading slash (/). The value can be
+; anything, but it may not be a good idea to use the .php extension or it
+; may conflict with a real PHP file.
+; Default Value: not set
+;ping.path = /ping
+
+; This directive may be used to customize the response of a ping request. The
+; response is formatted as text/plain with a 200 response code.
+; Default Value: pong
+;ping.response = pong
+
+; The access log file
+; Default: not set
+;access.log = log/$pool.access.log
+
+; The access log format.
+; The following syntax is allowed
+; %%: the '%' character
+; %C: %CPU used by the request
+; it can accept the following format:
+; - %{user}C for user CPU only
+; - %{system}C for system CPU only
+; - %{total}C for user + system CPU (default)
+; %d: time taken to serve the request
+; it can accept the following format:
+; - %{seconds}d (default)
+; - %{miliseconds}d
+; - %{mili}d
+; - %{microseconds}d
+; - %{micro}d
+; %e: an environment variable (same as $_ENV or $_SERVER)
+; it must be enclosed in braces to specify the name of the env
+; variable. Some examples:
+; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
+; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
+; %f: script filename
+; %l: content-length of the request (for POST request only)
+; %m: request method
+; %M: peak of memory allocated by PHP
+; it can accept the following format:
+; - %{bytes}M (default)
+; - %{kilobytes}M
+; - %{kilo}M
+; - %{megabytes}M
+; - %{mega}M
+; %n: pool name
+; %o: output header
+; it must be associated with embraces to specify the name of the header:
+; - %{Content-Type}o
+; - %{X-Powered-By}o
+; - %{Transfer-Encoding}o
+; - ....
+; %p: PID of the child that serviced the request
+; %P: PID of the parent of the child that serviced the request
+; %q: the query string
+; %Q: the '?' character if query string exists
+; %r: the request URI (without the query string, see %q and %Q)
+; %R: remote IP address
+; %s: status (response code)
+; %t: server time the request was received
+; it can accept a strftime(3) format:
+; %d/%b/%Y:%H:%M:%S %z (default)
+; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
+; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
+; %T: time the log has been written (the request has finished)
+; it can accept a strftime(3) format:
+; %d/%b/%Y:%H:%M:%S %z (default)
+; The strftime(3) format must be encapsulated in a %{<strftime_format>}t tag
+; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
+; %u: remote user
+;
+; Default: "%R - %u %t \"%m %r\" %s"
+;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
+
+; The log file for slow requests
+; Default Value: not set
+; Note: slowlog is mandatory if request_slowlog_timeout is set
+slowlog = /var/log/php-fpm/www-slow.log
+
+; The timeout for serving a single request after which a PHP backtrace will be
+; dumped to the 'slowlog' file. A value of '0s' means 'off'.
+; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
+; Default Value: 0
+;request_slowlog_timeout = 0
+
+; Depth of slow log stack trace.
+; Default Value: 20
+;request_slowlog_trace_depth = 20
+
+; The timeout for serving a single request after which the worker process will
+; be killed. This option should be used when the 'max_execution_time' ini option
+; does not stop script execution for some reason. A value of '0' means 'off'.
+; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
+; Default Value: 0
+;request_terminate_timeout = 0
+
+; Set open file descriptor rlimit.
+; Default Value: system defined value
+;rlimit_files = 1024
+
+; Set max core size rlimit.
+; Possible Values: 'unlimited' or an integer greater or equal to 0
+; Default Value: system defined value
+;rlimit_core = 0
+
+; Chroot to this directory at the start. This value must be defined as an
+; absolute path. When this value is not set, chroot is not used.
+; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
+; of its subdirectories. If the pool prefix is not set, the global prefix
+; will be used instead.
+; Note: chrooting is a great security feature and should be used whenever
+; possible. However, all PHP paths will be relative to the chroot
+; (error_log, sessions.save_path, ...).
+; Default Value: not set
+;chroot =
+
+; Chdir to this directory at the start.
+; Note: relative path can be used.
+; Default Value: current directory or / when chroot
+;chdir = /var/www
+
+; Redirect worker stdout and stderr into main error log. If not set, stdout and
+; stderr will be redirected to /dev/null according to FastCGI specs.
+; Note: on a highly loaded environment, this can cause some delay in the page
+; process time (several ms).
+; Default Value: no
+;catch_workers_output = yes
+
+; Clear environment in FPM workers
+; Prevents arbitrary environment variables from reaching FPM worker processes
+; by clearing the environment in workers before env vars specified in this
+; pool configuration are added.
+; Setting to "no" will make all environment variables available to PHP code
+; via getenv(), $_ENV and $_SERVER.
+; Default Value: yes
+;clear_env = no
+
+; Limits the extensions of the main script FPM will allow to parse. This can
+; prevent configuration mistakes on the web server side. You should only limit
+; FPM to .php extensions to prevent malicious users to use other extensions to
+; execute php code.
+; Note: set an empty value to allow all extensions.
+; Default Value: .php
+;security.limit_extensions = .php .php3 .php4 .php5 .php7
+
+; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
+; the current environment.
+; Default Value: clean env
+;env[HOSTNAME] = $HOSTNAME
+;env[PATH] = /usr/local/bin:/usr/bin:/bin
+;env[TMP] = /tmp
+;env[TMPDIR] = /tmp
+;env[TEMP] = /tmp
+
+; Additional php.ini defines, specific to this pool of workers. These settings
+; overwrite the values previously defined in the php.ini. The directives are the
+; same as the PHP SAPI:
+; php_value/php_flag - you can set classic ini defines which can
+; be overwritten from PHP call 'ini_set'.
+; php_admin_value/php_admin_flag - these directives won't be overwritten by
+; PHP call 'ini_set'
+; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
+
+; Defining 'extension' will load the corresponding shared extension from
+; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
+; overwrite previously defined php.ini values, but will append the new value
+; instead.
+
+; Note: path INI options can be relative and will be expanded with the prefix
+; (pool, global or @prefix@)
+
+; Default Value: nothing is defined by default except the values in php.ini and
+; specified at startup with the -d argument
+;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
+;php_flag[display_errors] = off
+php_admin_value[error_log] = /var/log/php-fpm/www-error.log
+php_admin_flag[log_errors] = on
+php_admin_value[memory_limit] = 512M
+php_admin_value[output_buffering] = off
+
+; Set the following data paths to directories owned by the FPM process user.
+;
+; Do not change the ownership of existing system directories, if the process
+; user does not have write permission, create dedicated directories for this
+; purpose.
+;
+; See warning about choosing the location of these directories on your system
+; at http://php.net/session.save-path
+php_value[session.save_handler] = files
+php_value[session.save_path] = /var/lib/php/session
+php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache
+;php_value[opcache.file_cache] = /var/lib/php/opcache
diff --git a/personal_infra/puppet/modules/ocserv/manifests/init.pp b/personal_infra/puppet/modules/ocserv/manifests/init.pp
new file mode 100644
index 00000000..b9ead95b
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/manifests/init.pp
@@ -0,0 +1,97 @@
+# Installs and configures ocserv (the OpenConnect VPN server) on Debian or
+# RedHat family hosts: renders /etc/ocserv/ocserv.conf from a template,
+# manages the service, and (optionally) opens firewalld on RedHat.
+#
+# Parameters:
+#   $ocserv_tcp_port       - TCP port the VPN listens on
+#   $ocserv_udp_port       - UDP (DTLS) port the VPN listens on
+#   $ocserv_default_domain - default-domain pushed to clients
+#   $ocserv_ipv4_network   - IPv4 pool handed out to clients
+#   $ocserv_dns            - DNS server pushed to clients
+#   $ocserv_split_dns      - split-DNS domain pushed to clients
+#   $ocserv_routes         - list of routes pushed to clients
+#   $firewall              - manage firewalld rules on RedHat (default: true)
+class ocserv($ocserv_tcp_port,
+             $ocserv_udp_port,
+             $ocserv_default_domain,
+             $ocserv_ipv4_network,
+             $ocserv_dns,
+             $ocserv_split_dns,
+             $ocserv_routes,
+             $firewall = true)
+{
+  # Distribution-specific values consumed by the ocserv.conf template.
+  $run_as_user = $facts['os']['family'] ? {
+    'Debian' => 'nobody',
+    'RedHat' => 'ocserv',
+  }
+
+  $run_as_group = $facts['os']['family'] ? {
+    'Debian' => 'daemon',
+    'RedHat' => 'ocserv',
+  }
+
+  $socket_file = $facts['os']['family'] ? {
+    'Debian' => '/var/run/ocserv-socket',
+    'RedHat' => 'ocserv.sock',
+  }
+
+  # undef makes the template omit chroot-dir entirely on Debian.
+  $chroot_dir = $facts['os']['family'] ? {
+    'Debian' => undef,
+    'RedHat' => '/var/lib/ocserv',
+  }
+
+  $server_cert = $facts['os']['family'] ? {
+    'Debian' => '/etc/ssl/certs/ssl-cert-snakeoil.pem',
+    'RedHat' => '/etc/pki/ocserv/public/server.crt',
+  }
+
+  $server_key = $facts['os']['family'] ? {
+    'Debian' => '/etc/ssl/private/ssl-cert-snakeoil.key',
+    'RedHat' => '/etc/pki/ocserv/private/server.key',
+  }
+
+  package {'ocserv':}
+  ->
+  file {'/etc/ocserv/ocserv.conf':
+    content => epp('ocserv/ocserv.conf', {
+      'tcp_port'       => $ocserv_tcp_port,
+      'udp_port'       => $ocserv_udp_port,
+      'run_as_user'    => $run_as_user,
+      'run_as_group'   => $run_as_group,
+      'socket_file'    => $socket_file,
+      'chroot_dir'     => $chroot_dir,
+      'server_cert'    => $server_cert,
+      'server_key'     => $server_key,
+      'default_domain' => $ocserv_default_domain,
+      'ipv4_network'   => $ocserv_ipv4_network,
+      'dns'            => $ocserv_dns,
+      'split_dns'      => $ocserv_split_dns,
+      'routes'         => $ocserv_routes,
+    }),
+  }
+  ~>
+  service {'ocserv':
+    ensure => running,
+    enable => true,
+  }
+
+  if ($facts['os']['family'] == 'RedHat' and $firewall) {
+    # NAT the VPN client pool out through the host.
+    exec {'add masquerade for ocserv':
+      command => '/usr/bin/firewall-cmd --permanent --add-masquerade',
+      unless  => '/usr/bin/firewall-cmd --query-masquerade',
+      notify  => Exec['reload firewall for ocserv'],
+    }
+
+    # BUG FIX: this previously ran '--add-port=444/{tcp,udp}', relying on
+    # shell brace expansion that Puppet's exec does not guarantee, and it
+    # hard-coded port 444 instead of the class's port parameters. Open the
+    # configured ports explicitly, one --add-port flag per protocol.
+    exec {'open firewall for ocserv':
+      command => "/usr/bin/firewall-cmd --permanent --add-port=${ocserv_tcp_port}/tcp --add-port=${ocserv_udp_port}/udp",
+      unless  => "/usr/bin/firewall-cmd --query-port=${ocserv_udp_port}/udp",
+    }
+    ~>
+    exec {'reload firewall for ocserv':
+      command     => '/usr/bin/firewall-cmd --reload',
+      refreshonly => true,
+    }
+  }
+
+  if ($facts['os']['family'] == 'Debian') {
+    # Debian's ocserv is socket-activated; override the unit's listen
+    # ports with a systemd drop-in rather than the daemon config.
+    file {'/etc/systemd/system/ocserv.socket.d/':
+      ensure => directory,
+    }
+    ->
+    file {'/etc/systemd/system/ocserv.socket.d/port.conf':
+      content => epp('ocserv/port.conf', {
+        'tcp_port' => $ocserv_tcp_port,
+        'udp_port' => $ocserv_udp_port,
+      }),
+    }
+    ~>
+    exec {'/bin/systemctl daemon-reload && systemctl restart ocserv.socket':
+      refreshonly => true,
+    }
+  }
+}
diff --git a/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp b/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp
new file mode 100644
index 00000000..b4ca12e7
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp
@@ -0,0 +1,57 @@
+<%- | $tcp_port,
+ $udp_port,
+ $run_as_user,
+ $run_as_group,
+ $socket_file,
+ $chroot_dir,
+ $server_cert,
+ $server_key,
+ $default_domain,
+ $ipv4_network,
+ $dns,
+ $split_dns,
+ $routes,
+| -%>
+# ocserv.conf — rendered by Puppet (ocserv module); do not edit by hand.
+# Authenticate VPN users against the system's PAM stack.
+auth = "pam"
+listen-host-is-dyndns = true
+# note, those are not used on Debian
+tcp-port = <%= $tcp_port %>
+udp-port = <%= $udp_port %>
+run-as-user = <%= $run_as_user %>
+run-as-group = <%= $run_as_group %>
+socket-file = <%= $socket_file %>
+<% if $chroot_dir { -%>
+chroot-dir = <%= $chroot_dir %>
+<% } -%>
+# TLS identity of the server (paths differ per distro, set by the class).
+server-cert = <%= $server_cert %>
+server-key = <%= $server_key %>
+isolate-workers = true
+# Keepalive / dead-peer-detection and session timeout tuning.
+keepalive = 32400
+dpd = 90
+mobile-dpd = 1800
+switch-to-tcp-timeout = 25
+try-mtu-discovery = false
+compression = true
+tls-priorities = "NORMAL:%SERVER_PRECEDENCE:%COMPAT:-RSA:-VERS-SSL3.0:-ARCFOUR-128"
+auth-timeout = 240
+min-reauth-time = 3
+cookie-timeout = 300
+deny-roaming = false
+rekey-time = 172800
+rekey-method = ssl
+use-utmp = true
+pid-file = /var/run/ocserv.pid
+device = vpns
+predictable-ips = true
+# Network configuration pushed to connecting clients.
+default-domain = <%= $default_domain %>
+ipv4-network = <%= $ipv4_network %>
+#tunnel-all-dns = true
+dns = <%= $dns %>
+split-dns = <%= $split_dns %>
+ping-leases = true
+cisco-client-compat = true
+dtls-psk = false
+dtls-legacy = true
+# One 'route' line per entry in the $routes parameter.
+<% $routes.each | $route | { -%>
+route = <%= $route %>
+<% } %>
diff --git a/personal_infra/puppet/modules/ocserv/templates/port.conf.epp b/personal_infra/puppet/modules/ocserv/templates/port.conf.epp
new file mode 100644
index 00000000..223c9961
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/templates/port.conf.epp
@@ -0,0 +1,8 @@
+<%- | $tcp_port,
+ $udp_port,
+| -%>
+[Socket]
+ListenStream=
+ListenDatagram=
+ListenStream=<%= $tcp_port %>
+ListenDatagram=<%= $udp_port %>
diff --git a/personal_infra/puppet/modules/podman/manifests/init.pp b/personal_infra/puppet/modules/podman/manifests/init.pp
new file mode 100644
index 00000000..17247aa2
--- /dev/null
+++ b/personal_infra/puppet/modules/podman/manifests/init.pp
@@ -0,0 +1,17 @@
+class podman($user, $storage_driver) {
+ package {'podman':}
+
+ file {['/etc/subuid', '/etc/subgid']:
+ content => "${user}:100000:65536\n",
+ }
+
+ exec {"/usr/bin/sed -i 's/driver = \".*\"/driver = \"${storage_driver}\"/g' /etc/containers/storage.conf":
+ require => Package['podman'],
+ unless => "/usr/bin/grep 'driver = \"${storage_driver}\"' /etc/containers/storage.conf",
+ }
+
+ exec {"/usr/bin/sed -i 's|#mount_program = \"/usr/bin/fuse-overlayfs\"|mount_program = \"/usr/bin/fuse-overlayfs\"|g' /etc/containers/storage.conf":
+ require => Package['podman'],
+    unless  => "/usr/bin/grep '^mount_program = \"/usr/bin/fuse-overlayfs\"' /etc/containers/storage.conf",
+ }
+}
diff --git a/personal_infra/puppet/modules/postgres/manifests/init.pp b/personal_infra/puppet/modules/postgres/manifests/init.pp
new file mode 100644
index 00000000..40fd5726
--- /dev/null
+++ b/personal_infra/puppet/modules/postgres/manifests/init.pp
@@ -0,0 +1,26 @@
+class postgres($pg_hba_conf) {
+ package {'pgdg-redhat-repo':
+ source => 'https://download.postgresql.org/pub/repos/yum/reporpms/EL-9-x86_64/pgdg-redhat-repo-latest.noarch.rpm',
+ }
+ ->
+ package {'postgresql15-server':}
+ ->
+ exec {'/usr/pgsql-15/bin/postgresql-15-setup initdb':
+ creates => '/var/lib/pgsql/15/data/PG_VERSION',
+ }
+ ->
+ [
+ file {'/var/lib/pgsql/15/data/pg_hba.conf':
+ # template at /usr/pgsql-15/share/pg_hba.conf.sample
+ content => $pg_hba_conf,
+ },
+ exec {'/bin/sed -i "s/#listen_addresses = \'localhost\'/listen_addresses = \'*\' /" /var/lib/pgsql/15/data/postgresql.conf':
+ unless => '/bin/grep "listen_addresses = \'\\*\'" /var/lib/pgsql/15/data/postgresql.conf',
+ }
+ ]
+ ~>
+ service {'postgresql-15':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/README.md b/personal_infra/puppet/modules/proxmox/README.md
new file mode 100644
index 00000000..5e5f8bc6
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/README.md
@@ -0,0 +1,36 @@
+# Proxmox
+
+## Networking
+
+Configures a public Internet IP, and an internal network with forwarding for containers and virtual machines.
+
+Add the following to your Proxmox host Ansible variables:
+
+```
+network:
+ ip: dotted.ip.notation
+ netmask: 255.255.255.0
+ gateway: dotted.ip.notation
+ proxmox:
+ ip: 10.3.3.1
+ netmask: 255.255.255.0
+ network: 10.3.3.0/24
+```
+
+## Proxy
+
+Class `proxmox::proxy` can handle proxying internal web servers.
+
+```
+class {'proxmox::proxy':
+ mail => lookup('mail.root_mail'),
+ base_hostname => lookup('network.public_hostname'),
+}
+```
+
+This uses the Apache HTTP Server and mod_md to obtain certificates.
+Your hostname must be publicly accessible, because http challenges are used.
+
+You receive mails to restart your server when required.
+
+The `base_hostname` certificate is injected daily to pveproxy.
diff --git a/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp b/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp
new file mode 100644
index 00000000..f3464c78
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp
@@ -0,0 +1,17 @@
+class proxmox::freeipa {
+ class {'proxmox':}
+
+ file {['/etc/subuid', '/etc/subgid']:
+ content => epp('proxmox/freeipa_subxid', {'freeipa' => lookup('freeipa')}),
+ }
+
+ # TODO
+ service {['sssd-ssh.socket', 'sssd-pam-priv.socket', 'sssd-nss.socket', 'sssd-sudo.socket', 'sssd-pam.socket']:
+ ensure => stopped,
+ enable => mask,
+ }
+ ~>
+ exec {'/usr/bin/systemctl reset-failed':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/init.pp b/personal_infra/puppet/modules/proxmox/manifests/init.pp
new file mode 100644
index 00000000..b3297eb4
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/init.pp
@@ -0,0 +1,38 @@
+class proxmox {
+ file {'/etc/network/interfaces':
+ content => epp('proxmox/interfaces', {
+ "network" => lookup("network"),
+ }),
+ }
+ ~>
+ exec {'/usr/sbin/ifreload -a':
+ refreshonly => true
+ }
+
+ # to prevent Germany/Hetzner abuse complaints
+ service {['rpcbind.target', 'rpcbind.service', 'rpcbind.socket']:
+ ensure => stopped,
+ enable => mask,
+ }
+
+ # TODO: secure this. Right now I don't use VMs, so just disable it
+ service {'spiceproxy':
+ ensure => stopped,
+ enable => mask,
+ }
+
+ file {'/etc/logrotate.d/pve':
+ ensure => absent,
+ }
+ ~>
+ service {'logrotate':}
+
+ file {'/etc/apt/sources.list.d/pve-enterprise.list':
+ ensure => absent,
+ }
+
+ file {'/etc/apt/sources.list.d/pve-no-subscription.list':
+ content => 'deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription
+',
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/proxy.pp b/personal_infra/puppet/modules/proxmox/manifests/proxy.pp
new file mode 100644
index 00000000..cb3c0bd4
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/proxy.pp
@@ -0,0 +1,52 @@
+class proxmox::proxy ($mail, $base_hostname) {
+ package {'apache2':}
+ ->
+ service {'apache2':
+ enable => true,
+ ensure => running,
+ require => File['/usr/local/bin/notify_md_renewal'],
+ }
+
+ $apache_dep = {
+ require => Package['apache2'],
+ notify => Service['apache2'],
+ }
+
+ ['md', 'ssl', 'proxy_http', 'proxy'].each |$mod| {
+ exec {"/usr/sbin/a2enmod $mod":
+ creates => "/etc/apache2/mods-enabled/$mod.load",
+ * => $apache_dep,
+ }
+ }
+
+ file {'/etc/apache2/sites-enabled/test.conf':
+ content => @("EOT")
+ MDomain $base_hostname auto
+ MDCertificateAgreement accepted
+ MDContactEmail $mail
+ MDNotifyCmd /usr/local/bin/notify_md_renewal
+
+ <VirtualHost *:443>
+ ServerName $base_hostname
+ SSLEngine on
+ </VirtualHost>
+ | EOT
+ ,
+ * => $apache_dep
+ }
+
+ file {'/usr/local/bin/notify_md_renewal':
+ content => @("EOT"/$)
+ #!/bin/sh
+
+ systemctl restart apache2
+ pvenode cert set /etc/apache2/md/domains/$base_hostname/pubcert.pem /etc/apache2/md/domains/$base_hostname/privkey.pem --force 1 --restart 1
+
+ for hook in /usr/local/bin/notify_md_renewal_hook_* ; do
+ \$hook
+ done
+ | EOT
+ ,
+ mode => '0755',
+ }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp b/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp
new file mode 100644
index 00000000..b60caf4c
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp
@@ -0,0 +1,53 @@
+define proxmox::proxy_host (String[1] $target, Optional[String[1]] $overwrite_rh_certs = undef) {
+ if $target =~ /^https:/ {
+ $ssl_fragment = @("EOT")
+ SSLProxyEngine on
+ SSLProxyCheckPeerName off
+ | EOT
+ }
+ else {
+ $ssl_fragment = ""
+ }
+
+ file {"/etc/apache2/sites-enabled/$title.conf":
+ content => @("EOT")
+ MDomain $title
+
+ <VirtualHost *:80>
+ ServerName $title
+ Redirect permanent / https://$title/
+ </VirtualHost>
+
+ <VirtualHost *:443>
+ ServerName $title
+ SSLEngine on
+
+ ProxyPass "/" "$target"
+ ProxyPassReverse "/" "$target"
+      ProxyPreserveHost On
+ $ssl_fragment
+ </VirtualHost>
+ | EOT
+ ,
+ }
+ ~>
+ Service['apache2']
+
+ if $overwrite_rh_certs {
+ $pveid = lookup("hostvars.'$overwrite_rh_certs'.proxmox.id");
+
+ file {"/usr/local/bin/notify_md_renewal_hook_$overwrite_rh_certs":
+ content => @("EOT"/$)
+ #!/bin/sh
+
+ cp /etc/apache2/md/domains/$title/pubcert.pem /rpool/data/subvol-$pveid-disk-0/etc/pki/tls/certs/localhost.crt
+ cp /etc/apache2/md/domains/$title/privkey.pem /rpool/data/subvol-$pveid-disk-0/etc/pki/tls/private/localhost.key
+ pct exec $pveid systemctl restart httpd
+ | EOT
+ ,
+ mode => '0755',
+ }
+ }
+
+
+}
diff --git a/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp b/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp
new file mode 100644
index 00000000..c72d1d04
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp
@@ -0,0 +1,2 @@
+root:100000:65536
+root:<%= $freeipa['idrange_start'] %>:<%= $freeipa['idrange_size'] %>
diff --git a/personal_infra/puppet/modules/proxmox/templates/interfaces.epp b/personal_infra/puppet/modules/proxmox/templates/interfaces.epp
new file mode 100644
index 00000000..e0bfeceb
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/templates/interfaces.epp
@@ -0,0 +1,18 @@
+auto lo
+iface lo inet loopback
+
+auto eno1
+iface eno1 inet static
+ address <%= $network['ip'] %>
+ netmask <%= $network['netmask'] %>
+ gateway <%= $network['gateway'] %>
+
+auto vmbr0
+iface vmbr0 inet static
+ address <%= $network['proxmox']['ip'] %>
+ netmask <%= $network['proxmox']['netmask'] %>
+ bridge_ports none
+ bridge_stp off
+ bridge_fd 0
+ post-up echo 1 > /proc/sys/net/ipv4/ip_forward
+ post-up iptables -t nat -A POSTROUTING -s '<%= $network['proxmox']['network'] %>' -o eno1 -j MASQUERADE
diff --git a/personal_infra/puppet/modules/root_mail/manifests/init.pp b/personal_infra/puppet/modules/root_mail/manifests/init.pp
new file mode 100644
index 00000000..66cfeb87
--- /dev/null
+++ b/personal_infra/puppet/modules/root_mail/manifests/init.pp
@@ -0,0 +1,41 @@
+class root_mail {
+ package {'postfix':}
+ ->
+ service {'postfix':
+ ensure => running,
+ enable => true,
+ }
+
+ $cron_service = case $facts['os']['family'] {
+ 'Debian': { 'cron' }
+ 'RedHat': { 'crond' }
+ default: { fail($facts['os']['family']) }
+ }
+
+ # if crond doesn't see /usr/bin/sendmail on startup, it won't send mails
+ Package['postfix']
+ ~>
+ service{$cron_service:
+ ensure => running,
+ }
+
+ if($facts['os']['family'] == 'RedHat') {
+ if($facts['os']['release']['major'] == '9') {
+ package {'s-nail':}
+ }
+ else {
+ package {'mailx':}
+ }
+ }
+
+ mailalias {'root':
+ recipient => lookup('mail.root_mail'),
+ require => Package['postfix'],
+ }
+ ~>
+ exec {'/usr/sbin/postalias /etc/aliases':
+ creates => '/etc/aliases.db',
+ }
+ ~>
+ Service['postfix']
+}
diff --git a/personal_infra/puppet/modules/tinc/manifests/init.pp b/personal_infra/puppet/modules/tinc/manifests/init.pp
new file mode 100644
index 00000000..5ae78126
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/manifests/init.pp
@@ -0,0 +1,100 @@
+class tinc($tinc_name, $tinc_location, $tinc_connect_to, $tinc_locations, $tinc_ip, $tinc_netmask, $tinc_other_networks, $firewall = true) {
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2153663
+ if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9') {
+ copr {'tinc':
+ user => 'koalillo',
+ dist => 'epel-9',
+ }
+ ->
+ Package['tinc']
+ }
+
+ package {'tinc':}
+ ->
+ file {'/etc/tinc':
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}":
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}/hosts":
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}/tinc.conf":
+ content => epp('tinc/tinc.conf', {'tinc_name' => $tinc_name,
+ 'tinc_location' => $tinc_location,
+ 'tinc_connect_to' => $tinc_connect_to,
+ }),
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ $tinc_locations.each |$name, $location| {
+ file {"/etc/tinc/${tinc_name}/generate_host_${name}.sh":
+ content => "#!/bin/sh
+
+set -ue
+
+echo Subnet = ${location['subnet']} >/etc/tinc/${tinc_name}/hosts/${name}
+echo Address = ${location['address']} >>/etc/tinc/${tinc_name}/hosts/${name}
+cat /etc/ansible/tinc/public_${location['address']}.pem >>/etc/tinc/${tinc_name}/hosts/${name}
+ ",
+ mode => '755',
+ }
+ ~>
+ exec {"/etc/tinc/${tinc_name}/generate_host_${name}.sh":
+ require => File["/etc/tinc/${tinc_name}/hosts"],
+ notify => Service["tinc@${tinc_name}"],
+ creates => "/etc/tinc/${tinc_name}/hosts/${name}",
+ }
+ }
+
+ service {"tinc@${tinc_name}":
+ ensure => running,
+ enable => true,
+ }
+
+ if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9') {
+ service {"tinc":
+ ensure => running,
+ enable => true,
+ }
+ }
+ exec {"/bin/cp /etc/ansible/tinc/private.pem /etc/tinc/${tinc_name}/rsa_key.priv":
+ creates => "/etc/tinc/${tinc_name}/rsa_key.priv",
+ require => File["/etc/tinc/${tinc_name}"],
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ file {"/etc/tinc/${tinc_name}/tinc-up":
+ content => epp('tinc/tinc-up', {'ip' => $tinc_ip,
+ 'netmask' => $tinc_netmask,
+ 'tinc_other_networks' => $tinc_other_networks,}),
+ require => File["/etc/tinc/${tinc_name}"],
+ mode => '777',
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ if ($facts['os']['family'] == 'RedHat' and $firewall) {
+ exec {'open firewall for tinc':
+      command => '/usr/bin/firewall-cmd --permanent --add-port=655/tcp --add-port=655/udp',
+ unless => '/usr/bin/firewall-cmd --query-port=655/udp',
+ }
+ ~>
+ exec {'reload firewall for tinc':
+ command => '/usr/bin/firewall-cmd --reload',
+ refreshonly => true,
+ }
+ }
+
+ file {'/etc/sysctl.d/tinc.conf':
+ content => "net.ipv4.ip_forward=1\nnet.ipv4.conf.all.proxy_arp=0\n",
+ }
+ ~>
+ exec {'reload sysctl for tinc':
+ command => '/sbin/sysctl --system',
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/tinc/templates/tinc-up.epp b/personal_infra/puppet/modules/tinc/templates/tinc-up.epp
new file mode 100644
index 00000000..7c89098f
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/templates/tinc-up.epp
@@ -0,0 +1,11 @@
+<%- | $ip,
+ $netmask,
+ $tinc_other_networks,
+| -%>
+#!/bin/sh
+
+ifconfig $INTERFACE <%= $ip %> netmask 255.255.255.255
+
+<% $tinc_other_networks.each |$tinc_other_network| { %>
+ route add -net <%= $tinc_other_network %> dev $INTERFACE
+<% } %>
diff --git a/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp b/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp
new file mode 100644
index 00000000..959fb949
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp
@@ -0,0 +1,8 @@
+<%- | $tinc_name,
+ $tinc_location,
+ $tinc_connect_to,
+| -%>
+Name = <%= $tinc_location %>
+<% $tinc_connect_to.each | $tinc_connection | { -%>
+ConnectTo = <%= $tinc_connection %>
+<% } %>
diff --git a/personal_infra/puppet/site/00-common.pp b/personal_infra/puppet/site/00-common.pp
new file mode 100644
index 00000000..b9f2a6f7
--- /dev/null
+++ b/personal_infra/puppet/site/00-common.pp
@@ -0,0 +1,23 @@
+include automatic_updates
+include basic_software
+include root_mail
+
+if $facts['os']['family'] == 'Debian' {
+ class {'debian':}
+}
+
+$nagios_host = $facts['networking']['fqdn']
+
+nagios_host {$nagios_host:
+ use => 'generic-host',
+ address => lookup({name => 'nagios.address', default_value => $facts['networking']['fqdn']}),
+ max_check_attempts => 5,
+ contact_groups => 'admins',
+ hostgroups => 'linux',
+ check_command => 'check-host-alive',
+}
+
+# https://github.com/alexpdp7/ragent/issues/352
+if $facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9' {
+ package {'compat-openssl11':}
+}
diff --git a/personal_infra/puppet/site/01-dns.pp b/personal_infra/puppet/site/01-dns.pp
new file mode 100644
index 00000000..eab766a4
--- /dev/null
+++ b/personal_infra/puppet/site/01-dns.pp
@@ -0,0 +1,10 @@
+$dns_source_hosts = lookup("dns.source_hosts")
+$dns_other_hosts = $dns_source_hosts.filter |$host_name| { $host_name != $facts["networking"]["fqdn"] }
+
+$dns_other_server_defs = $dns_other_hosts.map |$host_name| {
+ {
+ network_name => join([lookup("hostvars.'$host_name'.network.network_name"), lookup('dns.internal_domain')], '.'),
+ reverse_ip_range => lookup("hostvars.'$host_name'.network.self_internal_network"),
+ dns_ip => lookup("hostvars.'$host_name'.network.self_internal_ip"),
+ }
+}
diff --git a/personal_infra/puppet/site/01-ipa.pp b/personal_infra/puppet/site/01-ipa.pp
new file mode 100644
index 00000000..0aa7a6b4
--- /dev/null
+++ b/personal_infra/puppet/site/01-ipa.pp
@@ -0,0 +1,30 @@
+$ipa_client_package = case $facts['os']['family'] {
+ 'Debian': { 'freeipa-client' }
+ 'RedHat': { 'ipa-client' }
+ default: { fail($facts['os']['family']) }
+}
+
+if $facts['os']['family'] == 'Debian' and $facts['os']['release']['major'] == "11" {
+ class {'debian::backports':}
+ ->
+ Package[$ipa_client_package]
+
+ service {['sssd-pac.service', 'sssd-pac.socket']:
+ ensure => stopped,
+ enable => mask,
+ }
+}
+
+package {$ipa_client_package:}
+package {'sudo':}
+
+if 'lxc' in lookup("group_names") {
+ service {['var-lib-nfs-rpc_pipefs.mount', 'chronyd.service', 'sys-kernel-config.mount', 'sys-kernel-debug.mount', 'auth-rpcgss-module.service']:
+ ensure => stopped,
+ enable => mask,
+ }
+ ~>
+ exec {'/usr/bin/systemctl reset-failed':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/site/01-tinc.pp b/personal_infra/puppet/site/01-tinc.pp
new file mode 100644
index 00000000..6acbbd2e
--- /dev/null
+++ b/personal_infra/puppet/site/01-tinc.pp
@@ -0,0 +1,39 @@
+$tinc_hosts = lookup("groups.tinc")
+$tinc_other_hosts = $tinc_hosts.filter |$host_name| { $host_name != $facts["networking"]["fqdn"] }
+
+$tinc_locations = Hash($tinc_hosts.map |$host_name| { [
+ lookup("hostvars.'$host_name'.network.tinc.location"),
+ {
+ subnet => lookup("hostvars.'$host_name'.network.self_internal_network"),
+ address => lookup("hostvars.'$host_name'.network.public_hostname"),
+ }
+] })
+
+$tinc_connect_to = $tinc_other_hosts.map |$host_name| { lookup("hostvars.'$host_name'.network.tinc.location") }
+
+$tinc_other_networks = $tinc_other_hosts.map |$host_name| { lookup("hostvars.'$host_name'.network.self_internal_network") }
+$ocserv_networks = $tinc_hosts.map |$host_name| { lookup("hostvars.'$host_name'.network.self_internal_network") }
+
+if 'tinc' in lookup("group_names") {
+ class {'tinc':
+ tinc_name => lookup("tinc_global.name"),
+ tinc_location => lookup("network.tinc.location"),
+ tinc_connect_to => $tinc_connect_to,
+ tinc_locations => $tinc_locations,
+ tinc_ip => lookup("network.self_internal_ip"),
+ tinc_netmask => lookup("network.self_internal_netmask"),
+ tinc_other_networks => $tinc_other_networks,
+ firewall => !lookup({"name" => "network.disable_firewall", "default_value" => false}),
+ }
+
+ class {'ocserv':
+ ocserv_tcp_port => 444,
+ ocserv_udp_port => 444,
+ ocserv_default_domain => "int.pdp7.net",
+ ocserv_ipv4_network => lookup("network.ocserv.network"),
+ ocserv_dns => lookup("network.self_internal_ip"),
+ ocserv_split_dns => lookup("tinc_global.ocserv_domain"),
+ ocserv_routes => $ocserv_networks,
+ firewall => !lookup({"name" => "network.disable_firewall", "default_value" => false}),
+ }
+}
diff --git a/personal_infra/puppet/site/02-tinc-dns.pp b/personal_infra/puppet/site/02-tinc-dns.pp
new file mode 100644
index 00000000..ba7d57f6
--- /dev/null
+++ b/personal_infra/puppet/site/02-tinc-dns.pp
@@ -0,0 +1,5 @@
+if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9' and 'tinc' in lookup("group_names") and 'dns' in lookup("group_names")) {
+ exec {'/bin/sed -i "s/^bind-interfaces/bind-dynamic #bind-interfaces/" /etc/dnsmasq.conf':
+ unless => '/bin/grep "bind-dynamic #bind-interfaces" /etc/dnsmasq.conf',
+ }
+}
diff --git a/personal_infra/puppet/site/case.ces.int.pdp7.net.pp b/personal_infra/puppet/site/case.ces.int.pdp7.net.pp
new file mode 100644
index 00000000..2b4844cf
--- /dev/null
+++ b/personal_infra/puppet/site/case.ces.int.pdp7.net.pp
@@ -0,0 +1,10 @@
+node 'case.ces.int.pdp7.net' {
+ class {'dns_dhcp':}
+ ->
+ file {'/etc/dnsmasq.d/static.conf':
+ content => 'host-record=router,router.ces.int.pdp7.net,10.17.19.1
+host-record=tplink,tplink.ces.int.pdp7.net,10.17.19.2
+host-record=case.ces.int.pdp7.net,case,10.17.19.3
+',
+ }
+}
diff --git a/personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp b/personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp
new file mode 100644
index 00000000..5453eb4c
--- /dev/null
+++ b/personal_infra/puppet/site/dixie.bcn.int.pdp7.net.pp
@@ -0,0 +1,15 @@
+node 'dixie.bcn.int.pdp7.net' {
+ class {'dns_dhcp':}
+ file {'/etc/dnsmasq.d/static.conf':
+ content => "host-record=router,router.bcn.int.pdp7.net,192.168.76.1
+host-record=archerc7,archerc7.bcn.int.pdp7.net,192.168.76.6
+host-record=dixie.bcn.int.pdp7.net,dixie,192.168.76.2
+dhcp-option=121,10.0.0.0/8,192.168.76.2
+",
+ notify => Service["dnsmasq"],
+ }
+
+ class {'backups':
+ sanoid_config => "",
+ }
+}
diff --git a/personal_infra/puppet/site/h1.pdp7.net.pp b/personal_infra/puppet/site/h1.pdp7.net.pp
new file mode 100644
index 00000000..a3d62bbc
--- /dev/null
+++ b/personal_infra/puppet/site/h1.pdp7.net.pp
@@ -0,0 +1,123 @@
+node 'h1.pdp7.net' {
+ class {'proxmox::freeipa':}
+ class {'dns_dhcp':}
+
+ class {'backups':
+ sanoid_config => @("EOT")
+ # pg data
+ [rpool/data/subvol-204-disk-1]
+ use_template = backup
+
+ # nextcloud
+ [rpool/data/subvol-208-disk-1]
+ use_template = backup
+
+ [template_backup]
+ frequently=0
+ hourly=0
+ daily=100000
+ monthly=0
+ yearly=0
+ autosnap=yes
+ | EOT
+ ,
+ }
+
+ # TODO: ugly; tinc scripts require this :(
+ package {'net-tools':}
+
+ # https://lists.fedorahosted.org/archives/list/freeipa-users@lists.fedorahosted.org/thread/EZSM6LQPSNRY4WA52IYVR46RSXIDU3U7/
+ # SSH hack
+ file {'/etc/ssh/sshd_config.d/weak-gss.conf':
+ content => "GSSAPIStrictAcceptorCheck no\n",
+ }
+ ~>
+ service {'sshd':}
+
+ class {'proxmox::proxy':
+ mail => lookup('mail.root_mail'),
+ base_hostname => lookup('network.public_hostname'),
+ }
+
+ proxmox::proxy_host {'idp.pdp7.net':
+ target => 'https://ipsilon.h1.int.pdp7.net/',
+ overwrite_rh_certs => 'ipsilon.h1.int.pdp7.net',
+ }
+
+ proxmox::proxy_host {'weight.pdp7.net':
+ target => 'https://k8s-prod.h1.int.pdp7.net/',
+ }
+
+ proxmox::proxy_host {'blog.pdp7.net':
+ target => 'https://k8s-test.h1.int.pdp7.net/',
+ }
+
+ proxmox::proxy_host {'miniflux.pdp7.net':
+ target => 'http://miniflux.h1.int.pdp7.net:8080/',
+ }
+
+ proxmox::proxy_host {'nextcloud.pdp7.net':
+ target => 'http://nextcloud.h1.int.pdp7.net/',
+ }
+
+ package {'haproxy':}
+ ->
+ file {'/etc/haproxy/haproxy.cfg':
+ content => @("EOT")
+ global
+ log /dev/log local0
+ log /dev/log local1 notice
+ chroot /var/lib/haproxy
+ stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
+ stats timeout 30s
+ user haproxy
+ group haproxy
+ daemon
+
+ # Default SSL material locations
+ ca-base /etc/ssl/certs
+ crt-base /etc/ssl/private
+
+ # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
+ ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
+ ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
+ ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
+
+ defaults
+ log global
+ mode http
+ option httplog
+ option dontlognull
+ timeout connect 5000
+ timeout client 50000
+ timeout server 50000
+ errorfile 400 /etc/haproxy/errors/400.http
+ errorfile 403 /etc/haproxy/errors/403.http
+ errorfile 408 /etc/haproxy/errors/408.http
+ errorfile 500 /etc/haproxy/errors/500.http
+ errorfile 502 /etc/haproxy/errors/502.http
+ errorfile 503 /etc/haproxy/errors/503.http
+ errorfile 504 /etc/haproxy/errors/504.http
+
+ frontend gemini
+ bind :1965
+ mode tcp
+ option tcplog
+ default_backend blog
+ # TODO: sni
+ # tcp-request inspect-delay 5s
+ # acl blog req_ssl_sni blog.pdp7.net
+ # use_backend blog if blog
+
+ backend blog
+ mode tcp
+ server blog k8s-test.h1.int.pdp7.net:31965
+ | EOT
+ ,
+ }
+ ~>
+ service {'haproxy':
+ enable => true,
+ ensure => running,
+ }
+}
diff --git a/personal_infra/puppet/site/h2.pdp7.net.pp b/personal_infra/puppet/site/h2.pdp7.net.pp
new file mode 100644
index 00000000..51dda7a1
--- /dev/null
+++ b/personal_infra/puppet/site/h2.pdp7.net.pp
@@ -0,0 +1,9 @@
+node 'h2.pdp7.net' {
+ class {'dns_dhcp':}
+
+ file {'/etc/dnsmasq.d/static.conf':
+ content => "dhcp-host=freeswitch,10.42.42.3,freeswitch
+host-record=h2.h2.int.pdp7.net,10.42.42.1
+",
+ }
+}
diff --git a/personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..ef81ee3f
--- /dev/null
+++ b/personal_infra/puppet/site/ipa8.h1.int.pdp7.net.pp
@@ -0,0 +1,2 @@
+node 'ipa8.h1.int.pdp7.net' {
+}
diff --git a/personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..2228f424
--- /dev/null
+++ b/personal_infra/puppet/site/ipa9.h1.int.pdp7.net.pp
@@ -0,0 +1,3 @@
+node 'ipa9.h1.int.pdp7.net' {
+ class {'freeipa::server':}
+}
diff --git a/personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..b5f756cf
--- /dev/null
+++ b/personal_infra/puppet/site/ipsilon.h1.int.pdp7.net.pp
@@ -0,0 +1,3 @@
+node 'ipsilon.h1.int.pdp7.net' {
+ class {'ipsilon':}
+}
diff --git a/personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp b/personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp
new file mode 100644
index 00000000..064af4d4
--- /dev/null
+++ b/personal_infra/puppet/site/maelcum.mad.int.pdp7.net.pp
@@ -0,0 +1,10 @@
+node 'maelcum.mad.int.pdp7.net' {
+ class {'dns_dhcp':}
+ file {'/etc/dnsmasq.d/static.conf':
+ content => 'host-record=router,router.mad.int.pdp7.net,10.34.10.1
+dhcp-host=d8:8c:79:1a:11:59,chromecast,10.34.10.3
+host-record=maelcum.mad.int.pdp7.net,maelcum,10.34.10.2
+',
+ notify => Service['dnsmasq'],
+ }
+}
diff --git a/personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp b/personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..c6c0862e
--- /dev/null
+++ b/personal_infra/puppet/site/miniflux.h1.int.pdp7.net.pp
@@ -0,0 +1,8 @@
+node 'miniflux.h1.int.pdp7.net' {
+ class {'miniflux':
+ database_url => "host=pg.h1.int.pdp7.net user=miniflux dbname=miniflux sslmode=disable",
+ polling_frequency => 60,
+ batch_size => 100,
+ polling_parser_error_limit => 0,
+ }
+}
diff --git a/personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp b/personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..6db53e62
--- /dev/null
+++ b/personal_infra/puppet/site/nagios.h1.int.pdp7.net.pp
@@ -0,0 +1,16 @@
+node 'nagios.h1.int.pdp7.net' {
+ class {'nagios':}
+ class {'nagios::k8s':}
+
+ $k8s_hosts = lookup("groups.k8s")
+
+ $k8s_hosts.each |String $k8s_host| {
+ nagios_host {$k8s_host:
+ use => 'generic-host',
+ max_check_attempts => 5,
+ contact_groups => 'admins',
+ hostgroups => 'k8s',
+ check_command => 'check-host-alive',
+ }
+ }
+}
diff --git a/personal_infra/puppet/site/nc1.pdp7.net.pp b/personal_infra/puppet/site/nc1.pdp7.net.pp
new file mode 100644
index 00000000..e6939c8e
--- /dev/null
+++ b/personal_infra/puppet/site/nc1.pdp7.net.pp
@@ -0,0 +1,3 @@
+node 'nc1.pdp7.net' {
+ class {'freeipa::server':}
+}
diff --git a/personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp b/personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..29753d5b
--- /dev/null
+++ b/personal_infra/puppet/site/nextcloud.h1.int.pdp7.net.pp
@@ -0,0 +1,22 @@
+node 'nextcloud.h1.int.pdp7.net' {
+ class {'nextcloud':
+ database_name => 'nextcloud',
+ database_user => 'nextcloud',
+ database_host => 'pg.h1.int.pdp7.net',
+ }
+
+ file {'/var/lib/nextcloud/apps':
+ ensure => 'link',
+ target => '/nextcloud/apps/',
+ }
+
+ file {'/var/lib/nextcloud/data':
+ ensure => 'link',
+ target => '/nextcloud/data/',
+ }
+
+ file {'/etc/nextcloud/config.php':
+ ensure => 'link',
+ target => '/nextcloud/config.php',
+ }
+}
diff --git a/personal_infra/puppet/site/pg.h1.int.pdp7.net.pp b/personal_infra/puppet/site/pg.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..6c5ef035
--- /dev/null
+++ b/personal_infra/puppet/site/pg.h1.int.pdp7.net.pp
@@ -0,0 +1,16 @@
+node 'pg.h1.int.pdp7.net' {
+ class {'postgres':
+ pg_hba_conf => @(EOT)
+ # TYPE DATABASE USER ADDRESS METHOD
+ # "local" is for Unix domain socket connections only
+ local all all peer
+ host weight k8s_prod k8s-prod.h1.int.pdp7.net trust
+ host weight grafana grafana.h2.int.pdp7.net trust
+ host miniflux miniflux miniflux.h1.int.pdp7.net trust
+ host nextcloud nextcloud nextcloud.h1.int.pdp7.net trust
+ | EOT
+ ,
+ }
+
+ package {'postgresql15-contrib':} # hstore for miniflux
+}
diff --git a/personal_infra/puppet/site/ws.h1.int.pdp7.net.pp b/personal_infra/puppet/site/ws.h1.int.pdp7.net.pp
new file mode 100644
index 00000000..d667839f
--- /dev/null
+++ b/personal_infra/puppet/site/ws.h1.int.pdp7.net.pp
@@ -0,0 +1,6 @@
+node 'ws.h1.int.pdp7.net' {
+ class {'podman':
+ user => 'alex',
+ storage_driver => 'zfs',
+ }
+}
diff --git a/personal_infra/requirements.loose b/personal_infra/requirements.loose
new file mode 100644
index 00000000..0b49404e
--- /dev/null
+++ b/personal_infra/requirements.loose
@@ -0,0 +1,4 @@
+ansible
+kubernetes
+boto3
+botocore
diff --git a/personal_infra/requirements.txt b/personal_infra/requirements.txt
new file mode 100644
index 00000000..8eb95cd8
--- /dev/null
+++ b/personal_infra/requirements.txt
@@ -0,0 +1,31 @@
+ansible==8.2.0
+ansible-core==2.15.2
+boto3==1.28.17
+botocore==1.31.17
+cachetools==5.3.1
+certifi==2023.7.22
+cffi==1.15.1
+charset-normalizer==3.2.0
+cryptography==41.0.3
+google-auth==2.22.0
+idna==3.4
+importlib-resources==5.0.7
+Jinja2==3.1.2
+jmespath==1.0.1
+kubernetes==27.2.0
+MarkupSafe==2.1.3
+oauthlib==3.2.2
+packaging==23.1
+pyasn1==0.5.0
+pyasn1-modules==0.3.0
+pycparser==2.21
+python-dateutil==2.8.2
+PyYAML==6.0.1
+requests==2.31.0
+requests-oauthlib==1.3.1
+resolvelib==1.0.1
+rsa==4.9
+s3transfer==0.6.1
+six==1.16.0
+urllib3==1.26.16
+websocket-client==1.6.1
diff --git a/personal_infra/setup_ipa_replicas.md b/personal_infra/setup_ipa_replicas.md
new file mode 100644
index 00000000..95c9321b
--- /dev/null
+++ b/personal_infra/setup_ipa_replicas.md
@@ -0,0 +1,24 @@
+Update and reboot all IPA servers: https://lists.fedorahosted.org/archives/list/freeipa-users@lists.fedorahosted.org/thread/2WMK5QOAI4TYF23UKODW3M6WB65BJCHT/
+
+If the host has a firewall (e.g. physical or virtual, not LXC container):
+
+```
+firewall-cmd --permanent --add-port={80/tcp,443/tcp,389/tcp,636/tcp,88/tcp,88/udp,464/tcp,464/udp,53/tcp,53/udp,123/udp}
+firewall-cmd --reload
+```
+
+Join the server to IPA:
+
+```
+ipa-client-install -p principal --domain=ipa.pdp7.net -W --mkhomedir --ntp-pool=pool.ntp.org --force-join
+```
+
+Replace `--ntp-pool` with `-N` if this is a host without clock (e.g. an LXC container).
+Remove `--force-join` if you have never added this host to IPA.
+
+```
+ipa-replica-install --ip-address=thishostaddress -n ipa.pdp7.net -P alex --setup-ca --setup-dns --forwarder=upstreamdnsforthishost
+```
+
+FreeIPA doesn't seem to like having different versions. When updating, when you add a new server with a new version, remove the rest of servers.
+You might have issues joining new replicas otherwise.
diff --git a/personal_infra/setup_venv b/personal_infra/setup_venv
new file mode 100755
index 00000000..0ff1e11b
--- /dev/null
+++ b/personal_infra/setup_venv
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+rm -rf .venv
+python3 -m venv .venv
+.venv/bin/pip install -U pip
+.venv/bin/pip install -r requirements.txt
diff --git a/personal_infra/talos-check b/personal_infra/talos-check
new file mode 160000
+Subproject a7b290dd08729299b05c632d4348d0e1c931a59
diff --git a/personal_infra/up.py b/personal_infra/up.py
new file mode 100755
index 00000000..131fc9e6
--- /dev/null
+++ b/personal_infra/up.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python3
+import argparse
+from concurrent import futures
+import pathlib
+import shlex
+import shutil
+import subprocess
+import textwrap
+import yaml
+
+
+"""
+This script performs Puppet catalog compilation without a central server.
+
+It receives the following arguments:
+
+* directory: a working directory. The script expects to find some data, like
+ variables to use in the compilation process, facts, etc. The script also
+ generates intermediate files and output there.
+
+* modulepath: path to your modules directory
+* manifest: path to your site directory
+* host: the hosts to compile catalogs to
+
+The script expects the following content on the working directory:
+
+directory/
+ global_vars/*.json: these JSON files will be available to all hosts
+ host_vars/{host}/*.json: these JSON files will be available in each host
+  facts/{host}.yaml: output from "facter -y" for each host
+
+And produces the following files:
+
+directory/
+  build/output/
+ {host}/
+ catalog.json: the compiled catalog for the host
+ modules: a copy of the module directory
+
+Just ship the {host} directory to each host and run:
+
+$ puppet apply --catalog .../catalog.json --modulepath=.../modules/
+
+Check the apply_catalog Ansible role for example usage.
+
+As we have the catalogs, we can manipulate them. See
+pseudo_resource_exporter.py for an example hack. We can simulate exported
+resources without PuppetDB.
+"""
+
+
def build_hiera(directory, build_host_dir, host):
    """Generate a per-host hiera.yaml plus its data directory.

    Copies every ``global_vars/*.json`` file and every
    ``host_vars/{host}/*.json`` file from *directory* into
    ``build_host_dir/data`` and registers each one as a ``json_data``
    layer of a version-5 hiera hierarchy (global layers first, then
    host layers, matching the original ordering).

    Args:
        directory: working directory containing ``global_vars`` and
            ``host_vars`` subdirectories.
        build_host_dir: per-host build directory; a fresh ``data``
            subdirectory is created inside it.
        host: hostname whose ``host_vars`` should be included.

    Returns:
        Path to the generated ``hiera.yaml``.
    """
    hiera_data_dir = build_host_dir / "data"
    hiera_data_dir.mkdir()

    hiera = {
        "version": 5,
        "hierarchy": []
    }

    var_dirs = [
        directory / "global_vars",
        directory / "host_vars" / host,
    ]
    for var_dir in var_dirs:
        # sorted() so the hierarchy (and therefore hiera lookup
        # precedence within each layer group) is deterministic between
        # runs; Path.glob yields files in arbitrary order.
        for var_file in sorted(var_dir.glob("*.json")):
            shutil.copy(var_file, hiera_data_dir / var_file.name)
            hiera["hierarchy"].append({
                "name": var_file.name.removesuffix(".json"),
                "path": var_file.name,
                "data_hash": "json_data",
            })

    hiera_path = build_host_dir / "hiera.yaml"
    with open(hiera_path, "w") as f:
        yaml.dump(hiera, f)

    return hiera_path
+
+
def build_facts(directory, build_host_dir, host):
    """Stage facter output for *host* in Puppet's yaml facts-cache layout.

    Reads ``facts/{host}.yaml`` from *directory* (raw ``facter -y``
    output) and rewrites it under ``build_host_dir/yaml/facts`` wrapped
    in the ``!ruby/object:Puppet::Node::Facts`` envelope that the yaml
    facts terminus expects.
    """
    raw_facts = (directory / "facts" / f"{host}.yaml").read_text()

    facts_cache_dir = build_host_dir / "yaml" / "facts"
    facts_cache_dir.mkdir(parents=True)

    envelope = "--- !ruby/object:Puppet::Node::Facts\nvalues:\n"
    # Nest the original YAML document under the "values:" key.
    indented = textwrap.indent(raw_facts, "  ")
    (facts_cache_dir / f"{host}.yaml").write_text(envelope + indented)
+
+
def compile_catalog(directory, build_dir, modulepath, manifest, output_dir,
                    host):
    """Compile the Puppet catalog for *host* and stage it for shipping.

    Builds the per-host hiera config and facts cache, runs
    ``puppet catalog compile`` against them, then writes the catalog and
    a copy of the module tree to ``output_dir/{host}``.

    Raises:
        subprocess.CalledProcessError: if puppet exits non-zero.
        RuntimeError: if puppet wrote anything to stderr.
    """
    build_host_dir = build_dir / host
    build_host_dir.mkdir()

    hiera_path = build_hiera(directory, build_host_dir, host)

    build_facts(directory, build_host_dir, host)

    cmd = [
        "puppet", "catalog", "compile",
        f"--modulepath={modulepath}",
        f"--hiera_config={hiera_path}",
        f"--manifest={manifest}",
        "--terminus", "compiler",
        "--vardir", build_host_dir,
        "--facts_terminus", "yaml",
        host
    ]
    print(shlex.join(map(str, cmd)))
    catalog_compile = subprocess.run(
        cmd, check=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf8"
    )
    # Treat any stderr output (warnings included) as fatal.  This was a
    # bare `assert`, which is silently stripped under `python -O`; raise
    # explicitly so the check always runs.
    if catalog_compile.stderr:
        raise RuntimeError(
            f"puppet wrote to stderr while compiling {host}: "
            f"{catalog_compile.stderr}"
        )

    catalog_stdout = catalog_compile.stdout

    # Drop the first stdout line — appears to be puppet's informational
    # notice; the remainder is the catalog JSON itself.
    _, catalog = catalog_stdout.split("\n", 1)

    host_output_dir = output_dir / host
    host_output_dir.mkdir()
    with open(host_output_dir / "catalog.json", "w") as f:
        f.write(catalog)

    shutil.copytree(modulepath, host_output_dir / "modules")
+
+
def up(directory: pathlib.Path, modulepath, manifest, hosts: list[str]):
    """Compile catalogs for *hosts* in parallel.

    Creates ``directory/build`` and ``directory/build/output`` (both must
    not already exist) and compiles one catalog per host concurrently.
    Compilation is dominated by the puppet subprocess, so threads are
    sufficient — no process pool needed.
    """
    build_dir = directory / "build"
    build_dir.mkdir()

    output_dir = build_dir / "output"
    output_dir.mkdir()

    def _compile_catalog(host):
        compile_catalog(
            directory=directory,
            build_dir=build_dir,
            modulepath=modulepath,
            manifest=manifest,
            output_dir=output_dir,
            host=host)

    # Use the executor as a context manager so worker threads are joined
    # even when a compilation raises; list() forces iteration so worker
    # exceptions actually propagate to the caller.
    with futures.ThreadPoolExecutor() as executor:
        list(executor.map(_compile_catalog, hosts))
+
+
def main():
    """Command-line entry point: parse arguments and compile catalogs."""
    parser = argparse.ArgumentParser()
    for positional in ("directory", "modulepath", "manifest"):
        parser.add_argument(positional)
    parser.add_argument("hosts", nargs="+", metavar="host")

    options = parser.parse_args()

    up(
        directory=pathlib.Path(options.directory),
        modulepath=options.modulepath,
        manifest=options.manifest,
        hosts=options.hosts,
    )


if __name__ == "__main__":
    main()