aboutsummaryrefslogtreecommitdiff
path: root/personal_infra/puppet/modules
diff options
context:
space:
mode:
Diffstat (limited to 'personal_infra/puppet/modules')
-rw-r--r--personal_infra/puppet/modules/automatic_updates/manifests/init.pp33
-rw-r--r--personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp81
-rw-r--r--personal_infra/puppet/modules/backups/manifests/init.pp11
-rw-r--r--personal_infra/puppet/modules/basic_software/manifests/init.pp7
-rw-r--r--personal_infra/puppet/modules/copr/manifests/init.pp20
-rw-r--r--personal_infra/puppet/modules/debian/manifests/backports.pp9
-rw-r--r--personal_infra/puppet/modules/debian/manifests/init.pp5
-rw-r--r--personal_infra/puppet/modules/dns_dhcp/manifests/init.pp36
-rw-r--r--personal_infra/puppet/modules/dns_dhcp/templates/internal.epp30
-rw-r--r--personal_infra/puppet/modules/freeipa/manifests/server.pp13
-rw-r--r--personal_infra/puppet/modules/ipsilon/manifests/init.pp8
m---------personal_infra/puppet/modules/mailalias_core0
-rw-r--r--personal_infra/puppet/modules/miniflux/manifests/init.pp27
l---------personal_infra/puppet/modules/nagios/files/check_talos_version1
-rw-r--r--personal_infra/puppet/modules/nagios/manifests/init.pp100
-rw-r--r--personal_infra/puppet/modules/nagios/manifests/k8s.pp41
-rw-r--r--personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp1373
m---------personal_infra/puppet/modules/nagios_core0
-rw-r--r--personal_infra/puppet/modules/nextcloud/manifests/init.pp79
-rw-r--r--personal_infra/puppet/modules/nextcloud/templates/www.conf.epp439
-rw-r--r--personal_infra/puppet/modules/ocserv/manifests/init.pp97
-rw-r--r--personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp57
-rw-r--r--personal_infra/puppet/modules/ocserv/templates/port.conf.epp8
-rw-r--r--personal_infra/puppet/modules/podman/manifests/init.pp17
-rw-r--r--personal_infra/puppet/modules/postgres/manifests/init.pp26
-rw-r--r--personal_infra/puppet/modules/proxmox/README.md36
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/freeipa.pp17
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/init.pp38
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/proxy.pp52
-rw-r--r--personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp53
-rw-r--r--personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp2
-rw-r--r--personal_infra/puppet/modules/proxmox/templates/interfaces.epp18
-rw-r--r--personal_infra/puppet/modules/root_mail/manifests/init.pp41
-rw-r--r--personal_infra/puppet/modules/tinc/manifests/init.pp100
-rw-r--r--personal_infra/puppet/modules/tinc/templates/tinc-up.epp11
-rw-r--r--personal_infra/puppet/modules/tinc/templates/tinc.conf.epp8
36 files changed, 2894 insertions, 0 deletions
diff --git a/personal_infra/puppet/modules/automatic_updates/manifests/init.pp b/personal_infra/puppet/modules/automatic_updates/manifests/init.pp
new file mode 100644
index 00000000..8585b0ae
--- /dev/null
+++ b/personal_infra/puppet/modules/automatic_updates/manifests/init.pp
@@ -0,0 +1,33 @@
+class automatic_updates {
+ if ($facts['os']['family'] == 'Debian') {
+ package {["unattended-upgrades", "apt-listchanges"]:}
+ }
+ elsif ($facts['os']['family'] == 'RedHat') {
+ if ($facts['os']['release']['major'] == '7') {
+ package {'yum-cron':}
+ ->
+ file {"/etc/yum/yum-cron.conf":
+ content => epp('automatic_updates/yum-cron.conf'),
+ }
+ ~>
+ service {'yum-cron':
+ ensure => running,
+ enable => true,
+ }
+ }
+ elsif ($facts['os']['release']['major'] == '8' or $facts['os']['release']['major'] == '9') {
+ package {'dnf-automatic':}
+ ->
+ service {'dnf-automatic-install.timer':
+ ensure => running,
+ enable => true,
+ }
+ }
+ else {
+ fail($facts['os']['release']['major'])
+ }
+ }
+ else {
+ fail($facts['os'])
+ }
+}
diff --git a/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp b/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp
new file mode 100644
index 00000000..bd1ec685
--- /dev/null
+++ b/personal_infra/puppet/modules/automatic_updates/templates/yum-cron.conf.epp
@@ -0,0 +1,81 @@
+[commands]
+# What kind of update to use:
+# default = yum upgrade
+# security = yum --security upgrade
+# security-severity:Critical = yum --sec-severity=Critical upgrade
+# minimal = yum --bugfix update-minimal
+# minimal-security = yum --security update-minimal
+# minimal-security-severity:Critical = --sec-severity=Critical update-minimal
+update_cmd = default
+
+# Whether a message should be emitted when updates are available,
+# were downloaded, or applied.
+update_messages = yes
+
+# Whether updates should be downloaded when they are available.
+download_updates = yes
+
+# Whether updates should be applied when they are available. Note
+# that download_updates must also be yes for the update to be applied.
+apply_updates = yes
+
+# Maximum amount of time to randomly sleep, in minutes. The program
+# will sleep for a random amount of time between 0 and random_sleep
+# minutes before running. This is useful for e.g. staggering the
+# times that multiple systems will access update servers. If
+# random_sleep is 0 or negative, the program will run immediately.
+# 6*60 = 360
+random_sleep = 360
+
+
+[emitters]
+# Name to use for this system in messages that are emitted. If
+# system_name is None, the hostname will be used.
+system_name = None
+
+# How to send messages. Valid options are stdio and email. If
+# emit_via includes stdio, messages will be sent to stdout; this is useful
+# to have cron send the messages. If emit_via includes email, this
+# program will send email itself according to the configured options.
+# If emit_via is None or left blank, no messages will be sent.
+emit_via = stdio
+
+# The width, in characters, that messages that are emitted should be
+# formatted to.
+output_width = 80
+
+
+[email]
+# The address to send email messages from.
+# NOTE: 'localhost' will be replaced with the value of system_name.
+email_from = root@localhost
+
+# List of addresses to send messages to.
+email_to = root
+
+# Name of the host to connect to to send email messages.
+email_host = localhost
+
+
+[groups]
+# NOTE: This only works when group_command != objects, which is now the default
+# List of groups to update
+group_list = None
+
+# The types of group packages to install
+group_package_types = mandatory, default
+
+[base]
+# This section overrides yum.conf
+
+# Use this to filter Yum core messages
+# -4: critical
+# -3: critical+errors
+# -2: critical+errors+warnings (default)
+debuglevel = -2
+
+# skip_broken = True
+mdpolicy = group:main
+
+# Uncomment to auto-import new gpg keys (dangerous)
+# assumeyes = True
diff --git a/personal_infra/puppet/modules/backups/manifests/init.pp b/personal_infra/puppet/modules/backups/manifests/init.pp
new file mode 100644
index 00000000..f98d598f
--- /dev/null
+++ b/personal_infra/puppet/modules/backups/manifests/init.pp
@@ -0,0 +1,11 @@
+class backups($sanoid_config) {
+ package {'sanoid':}
+
+ file {'/etc/sanoid':
+ ensure => directory,
+ }
+ ->
+ file {'/etc/sanoid/sanoid.conf':
+ content => $sanoid_config,
+ }
+}
diff --git a/personal_infra/puppet/modules/basic_software/manifests/init.pp b/personal_infra/puppet/modules/basic_software/manifests/init.pp
new file mode 100644
index 00000000..fcceefb2
--- /dev/null
+++ b/personal_infra/puppet/modules/basic_software/manifests/init.pp
@@ -0,0 +1,7 @@
+class basic_software {
+ package {['less', 'mlocate', 'traceroute', 'nmap', 'tree', 'tar']:}
+
+ if($facts['os']['family'] == 'RedHat') {
+ package {'which':}
+ }
+}
diff --git a/personal_infra/puppet/modules/copr/manifests/init.pp b/personal_infra/puppet/modules/copr/manifests/init.pp
new file mode 100644
index 00000000..2d0474a5
--- /dev/null
+++ b/personal_infra/puppet/modules/copr/manifests/init.pp
@@ -0,0 +1,20 @@
+define copr (
+ String[1] $user,
+ String[1] $project = $title,
+ String[1] $dist,
+) {
+ file {"/etc/yum.repos.d/_copr:copr.fedorainfracloud.org:$user:$project.repo":
+ content => @("REPO"/$)
+ [copr:copr.fedorainfracloud.org:$user:$project]
+ name=Copr repo for $project owned by $user
+ baseurl=https://download.copr.fedorainfracloud.org/results/$user/$project/$dist-\$basearch/
+ type=rpm-md
+ skip_if_unavailable=True
+ gpgcheck=1
+ gpgkey=https://download.copr.fedorainfracloud.org/results/$user/$project/pubkey.gpg
+ repo_gpgcheck=0
+ enabled=1
+ enabled_metadata=1
+ | - REPO
+ }
+}
diff --git a/personal_infra/puppet/modules/debian/manifests/backports.pp b/personal_infra/puppet/modules/debian/manifests/backports.pp
new file mode 100644
index 00000000..4f33bf22
--- /dev/null
+++ b/personal_infra/puppet/modules/debian/manifests/backports.pp
@@ -0,0 +1,9 @@
+class debian::backports {
+ $codename = $facts['os']['distro']['codename']
+
+ file {'/etc/apt/sources.list.d/backports.list':
+ content => "deb http://deb.debian.org/debian ${codename}-backports main\n",
+ }
+ ~>
+ Exec["/usr/bin/apt update"]
+}
diff --git a/personal_infra/puppet/modules/debian/manifests/init.pp b/personal_infra/puppet/modules/debian/manifests/init.pp
new file mode 100644
index 00000000..fd85713d
--- /dev/null
+++ b/personal_infra/puppet/modules/debian/manifests/init.pp
@@ -0,0 +1,5 @@
+class debian {
+ exec {'/usr/bin/apt update':
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp b/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp
new file mode 100644
index 00000000..f7c79724
--- /dev/null
+++ b/personal_infra/puppet/modules/dns_dhcp/manifests/init.pp
@@ -0,0 +1,36 @@
+class dns_dhcp {
+ $domain = lookup('network.dns_dhcp.domain')
+
+ $hostvars = lookup('hostvars')
+ $fixed_dhcp_host_vars = $hostvars.filter |$host, $vars| { $vars['network'] and $vars['network']['dhcp_server'] == $facts["networking"]["fqdn"] }
+ $fixed_dhcp_hosts = Hash($fixed_dhcp_host_vars.map |$host, $vars| { [$host.match(/^[-a-z0-9]+/)[0], $vars['network']['ip'] ] })
+
+ $fixed_host_vars = $hostvars.filter |$host, $vars| { $vars['network'] and $vars['network']['register_dns_server'] == $facts["networking"]["fqdn"] }
+ $fixed_hosts = Hash($fixed_host_vars.map |$host, $vars| { [$host.match(/^[-a-z0-9]+/)[0], $vars['network']['ip'] ] })
+
+
+ package {'dnsmasq':}
+ ->
+ file {'/etc/dnsmasq.d':
+ ensure => directory,
+ purge => true,
+ recurse => true,
+ }
+ file {'/etc/dnsmasq.d/internal':
+ content => epp('dns_dhcp/internal', {
+ 'dns_dhcp' => lookup("network.dns_dhcp"),
+ 'dns_other_server_defs' => $dns_other_server_defs,
+ 'fixed_dhcp_hosts' => $fixed_dhcp_hosts,
+ 'fixed_hosts' => $fixed_hosts,
+ }),
+ }
+ ~>
+ service {'dnsmasq':
+ enable => true,
+ ensure => running,
+ }
+ ->
+ file {'/etc/resolv.conf':
+ content => "domain ${domain}\nsearch ${domain}\nnameserver 127.0.0.1\n",
+ }
+}
diff --git a/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp b/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp
new file mode 100644
index 00000000..9b751855
--- /dev/null
+++ b/personal_infra/puppet/modules/dns_dhcp/templates/internal.epp
@@ -0,0 +1,30 @@
+domain-needed
+no-resolv
+no-hosts
+
+server=<%= $dns_dhcp['upstream_dns'] %>
+<% if $dns_dhcp['domain'] { %>
+local=/<%= $dns_dhcp['domain'] %>/
+domain=<%= $dns_dhcp['domain'] %>
+<% } %>
+
+<% if $dns_dhcp['dhcp_range'] { %>
+dhcp-range=<%= $dns_dhcp['dhcp_range'] %>
+
+dhcp-option=option:router,<%= $dns_dhcp['router'] %>
+<% } %>
+
+interface=<%= join($dns_dhcp['interfaces'], ',') %>
+
+<% $dns_other_server_defs.each |$server_def| { %>
+server=/<%= $server_def['network_name'] %>/<%= $server_def['dns_ip'] %>
+rev-server=<%= $server_def['reverse_ip_range'] %>,<%= $server_def['dns_ip'] %>
+<% } %>
+
+<% $fixed_dhcp_hosts.each |$host, $ip| { %>
+dhcp-host=<%= $host %>,<%= $ip %>,<%= $host %>
+<% } %>
+
+<% $fixed_hosts.each |$host, $ip| { %>
+host-record=<%= $host %>.<%= $dns_dhcp['domain'] %>,<%= $host %>,<%= $ip %>
+<% } %>
diff --git a/personal_infra/puppet/modules/freeipa/manifests/server.pp b/personal_infra/puppet/modules/freeipa/manifests/server.pp
new file mode 100644
index 00000000..6ca10a43
--- /dev/null
+++ b/personal_infra/puppet/modules/freeipa/manifests/server.pp
@@ -0,0 +1,13 @@
+class freeipa::server {
+ package {['ipa-server', 'ipa-server-dns', 'ipa-healthcheck']:}
+ ~>
+ service {'ipa-healthcheck.timer':
+ ensure => running,
+ enable => true,
+ }
+
+  # weak dependency that does not work on LXC
+ package {'low-memory-monitor':
+ ensure => purged,
+ }
+}
diff --git a/personal_infra/puppet/modules/ipsilon/manifests/init.pp b/personal_infra/puppet/modules/ipsilon/manifests/init.pp
new file mode 100644
index 00000000..aa0908aa
--- /dev/null
+++ b/personal_infra/puppet/modules/ipsilon/manifests/init.pp
@@ -0,0 +1,8 @@
+class ipsilon {
+ package {['ipsilon-tools-ipa', 'ipsilon-openidc']:}
+
+ service {'httpd':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/mailalias_core b/personal_infra/puppet/modules/mailalias_core
new file mode 160000
+Subproject e6230faf076a5ed7b474ed67a4c6c0802d0b7b5
diff --git a/personal_infra/puppet/modules/miniflux/manifests/init.pp b/personal_infra/puppet/modules/miniflux/manifests/init.pp
new file mode 100644
index 00000000..179cfc14
--- /dev/null
+++ b/personal_infra/puppet/modules/miniflux/manifests/init.pp
@@ -0,0 +1,27 @@
+class miniflux($database_url, $polling_frequency, $batch_size, $polling_parser_error_limit) {
+ file {'/etc/yum.repos.d/miniflux.repo':
+ content => "[miniflux]
+name=Miniflux Repository
+baseurl=https://repo.miniflux.app/yum/
+enabled=1
+gpgcheck=0
+",
+ }
+ ->
+ package {'miniflux':}
+ ->
+ file {'/etc/miniflux.conf':
+ content => "LISTEN_ADDR=0.0.0.0:8080
+RUN_MIGRATIONS=1
+DATABASE_URL=$database_url
+POLLING_FREQUENCY=$polling_frequency
+BATCH_SIZE=$batch_size
+POLLING_PARSING_ERROR_LIMIT=$polling_parser_error_limit
+",
+ }
+ ~>
+ service {'miniflux':
+ ensure => running,
+ enable => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/files/check_talos_version b/personal_infra/puppet/modules/nagios/files/check_talos_version
new file mode 120000
index 00000000..16932959
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/files/check_talos_version
@@ -0,0 +1 @@
+../../../../talos-check/check_talos_version \ No newline at end of file
diff --git a/personal_infra/puppet/modules/nagios/manifests/init.pp b/personal_infra/puppet/modules/nagios/manifests/init.pp
new file mode 100644
index 00000000..5568fbf9
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/manifests/init.pp
@@ -0,0 +1,100 @@
+class nagios {
+ package {'nagios':}
+ ->
+ service {'nagios':
+ ensure => running,
+ enable => true,
+ }
+
+ file {
+ default:
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ ;
+ '/etc/nagios':
+ ensure => directory,
+ recurse => true,
+ force => true,
+ purge => true,
+ ;
+ '/etc/nagios/nagios.cfg':
+ content => epp('nagios/nagios.cfg'),
+ ;
+ # leave these unaffected
+ ['/etc/nagios/passwd', '/etc/nagios/cgi.cfg', '/etc/nagios/private/resource.cfg', '/etc/nagios/objects', '/etc/nagios/private', '/etc/nagios/objects/commands.cfg', '/etc/nagios/objects/timeperiods.cfg', '/etc/nagios/objects/templates.cfg']:
+ ensure => present,
+ ;
+ }
+
+ nagios_contact {'nagiosadmin':
+ use => 'generic-contact',
+ email => lookup('mail.root_mail'),
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_contactgroup {'admins':
+ members => 'nagiosadmin',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_command {'check_ragent':
+ command_name => 'check_ragent',
+ command_line => '/usr/bin/check_ragent http://$HOSTADDRESS$:21488/ --warning-units dnf-makecache.service --warning-units dnf-automatic-install.service',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_hostgroup {'linux':
+ hostgroup_name => 'linux',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_servicegroup {'ragent':
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_service {'check_ragent':
+ use => 'generic-service',
+ hostgroup_name => 'linux',
+ service_description => 'check_ragent',
+ servicegroups => 'ragent',
+ check_command => 'check_ragent',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_service {'check_ssh':
+ use => 'generic-service',
+ hostgroup_name => 'linux',
+ service_description => 'ssh',
+ check_command => 'check_ssh',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ package {'httpd':}
+ ->
+ service {'httpd':
+ ensure => running,
+ enable => true,
+ }
+
+ if $facts['virtual'] == 'lxc' {
+ file {'/bin/ping':
+ mode => 'u+s',
+ }
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/manifests/k8s.pp b/personal_infra/puppet/modules/nagios/manifests/k8s.pp
new file mode 100644
index 00000000..8eada3c9
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/manifests/k8s.pp
@@ -0,0 +1,41 @@
+class nagios::k8s {
+ file {'/usr/local/bin/check_talos_version':
+ content => file('nagios/check_talos_version'),
+ mode => '0755',
+ links => follow,
+ }
+
+ nagios_command {'check_talos':
+ command_name => 'check_talos',
+ command_line => '/usr/local/bin/check_talos_version http://$HOSTADDRESS$ monitor',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+
+ nagios_hostgroup {'k8s':
+ hostgroup_name => 'k8s',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_servicegroup {'talos_check':
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ ensure => present,
+ }
+
+ nagios_service {'talos-check':
+ use => 'generic-service',
+ hostgroup_name => 'k8s',
+ service_description => 'check_talos',
+ servicegroups => 'talos_check',
+ check_command => 'check_talos',
+ require => Package['nagios'],
+ notify => Service['nagios'],
+ owner => 'nagios',
+ }
+}
diff --git a/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp b/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp
new file mode 100644
index 00000000..8e28ceb8
--- /dev/null
+++ b/personal_infra/puppet/modules/nagios/templates/nagios.cfg.epp
@@ -0,0 +1,1373 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 4.4.9
+#
+# Read the documentation for more information on this configuration
+# file. I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+#
+##############################################################################
+
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes. This should be the first option specified
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# puppet generated
+cfg_file=/etc/nagios/nagios_contactgroup.cfg
+cfg_file=/etc/nagios/nagios_contact.cfg
+cfg_file=/etc/nagios/nagios_command.cfg
+cfg_file=/etc/nagios/nagios_hostgroup.cfg
+cfg_file=/etc/nagios/nagios_servicegroup.cfg
+cfg_file=/etc/nagios/nagios_host.cfg
+cfg_file=/etc/nagios/nagios_service.cfg
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts. The CGIs read object definitions from
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/spool/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This options determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file. You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find our more
+# about how this feature works.
+
+precached_object_file=/var/spool/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions. The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file=/etc/nagios/private/resource.cfg
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored. Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+# restarts.
+
+status_file=/var/spool/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.
+# You can either supply a username or a UID.
+
+nagios_user=nagios
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.
+# You can either supply a group name or a GID.
+
+nagios_group=nagios
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).
+# By default Nagios will check for external commands.
+# If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody'). Permissions should be set at the
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/spool/nagios/cmd/nagios.cmd
+
+
+
+# QUERY HANDLER INTERFACE
+# This is the socket that is created for the Query Handler interface
+
+#query_socket=/var/spool/nagios/cmd/nagios.qh
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file=/var/run/nagios/nagios.pid
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc. This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/spool/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values: 0 = Broker nothing
+# -1 = Broker everything
+# <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# by loaded by Nagios at startup. Use multiple directives if you want
+# to load more than one module. Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory. This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem. And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+# 1. Shutdown Nagios, replace the module file, restart Nagios
+# 2. Delete the original module file, move the new module file into place,
+# restart Nagios
+#
+# Example:
+#
+# broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+# n = None - don't rotate the log
+# h = Hourly rotation (top of the hour)
+# d = Daily rotation (midnight every day)
+# w = Weekly rotation (midnight on Saturday evening)
+# m = Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1. If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0. If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0. If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0. If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1. If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option. In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# CURRENT STATES LOGGING OPTION
+# If you don't want Nagios to log all current host and service states
+# after log has been rotated to the main log file, you can disable this
+# option by setting this value to 0. Default value is 1.
+
+log_current_states=1
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0. If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0. If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)! This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed. Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts. Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks. Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+# s = Use "smart" interleave factor calculation
+# x = Use an interleave factor of x, where x is a
+# number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring. The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+# n = None - don't use any delay between checks
+# d = Use a "dumb" delay of 1 second between checks
+# s = Use "smart" inter-check delay calculation
+# x.xx = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed. Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized. A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that a single
+# check result reaper event will be allowed to run before
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!
+
+check_result_path=/var/spool/nagios/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid. Files older than this
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks. Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed. These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+# 0 = Disable predictive checks
+# 1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state
+# information when checking host and service dependencies. Normally
+# Nagios will only use the latest hard host or service state when
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+# 0 = Don't use soft state dependencies (default)
+# 1 = Use soft state dependencies
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time. This can help balance the load on
+# the monitoring server.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks. This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled. Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off. Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands. All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+ochp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down. Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor. This is useful for
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts. Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down. The state
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/spool/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular interval, but it will still save retention
+# data before shutting down or restarting. If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set
+# program status variables based on the values saved in the
+# retention file. If you want to use retained program status
+# information, set this value to 1. If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file. If you
+# want to use retained scheduling info, set this
+# value to 1. If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options. For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options. For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files. Setting this to 60 means
+# that each interval is one minute long (60 seconds). Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available. It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios. Nagios is critical to you - make sure you keep it in
+# good shape. Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance
+# with our privacy policy - see https://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates. By default, Nagios will send information on the
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not. Nagios Enterprises uses
+# this data to determine the number of users running specific version of
+# Nagios. Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default). Otherwise set this value to 1 to
+# enable the aggressive check option. Read the docs for more info
+# on what aggressive host check is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts. If this option is
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in. Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started. Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks. If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below). Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed. These commands are executed only if the
+# enable_performance_data option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/var/log/nagios/host-perfdata
+#service_perfdata_file=/var/log/nagios/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files. The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text. A newline is automatically added after each write
+# to the performance data file. Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoids blocking at startup, otherwise you will
+# likely want the default append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below. A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files. The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESS EMPTY RESULTS
+# These options determine whether the core will process empty perfdata
+# results or not. This is needed for distributed monitoring, and intentionally
+# turned on by default.
+# If you don't require empty perfdata - saving some cpu cycles
+# on unwanted macro calculation - you can turn that off. Be careful!
+# Values: 1 = enable, 0 = disable
+
+#host_perfdata_process_empty_results=1
+#service_perfdata_process_empty_results=1
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_services option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below. Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option. Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios. This command is executed only if the
+# obsess_over_hosts option (above) is set to 1. The command
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios. This option is useful
+# if you have distributed or failover monitoring setup. In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts. If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance. Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT. By default, a passive host check
+# result will put a host into a HARD state type. This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically
+# check for orphaned host and service checks. Since service checks are
+# not rescheduled until the results of their previous execution
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled. A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks. Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results. If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# SERVICE CHECK TIMEOUT STATE
+# This setting determines the state Nagios will report when a
+# service check times out - that is, does not respond within
+# service_check_timeout seconds. This can be useful if a
+# machine is running at too high a load and you do not want
+# to consider a failed service check to be critical (the default).
+# Valid settings are:
+# c - Critical (default)
+# u - Unknown
+# w - Warning
+# o - OK
+
+service_check_timeout_state=c
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results. Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enabled freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results. If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# and detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently. When Nagios detects that a
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping. Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+# 0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does. This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+# us (MM-DD-YYYY HH:MM:SS)
+# euro (DD-MM-YYYY HH:MM:SS)
+# iso8601 (YYYY-MM-DD HH:MM:SS)
+# strict-iso8601 (YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in. If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path
+# to include your timezone. Example:
+#
+# <Directory "/usr/local/nagios/sbin/">
+# SetEnv TZ "Australia/Brisbane"
+# ...
+# </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc. This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+# $HOSTOUTPUT$
+# $LONGHOSTOUTPUT$
+# $HOSTPERFDATA$
+# $HOSTACKAUTHOR$
+# $HOSTACKCOMMENT$
+# $SERVICEOUTPUT$
+# $LONGSERVICEOUTPUT$
+# $SERVICEPERFDATA$
+# $SERVICEACKAUTHOR$
+# $SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files. Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression
+# matching takes place in the object config files. This option
+# only has an effect if regular expression matching is enabled
+# (see above). If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?). If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon. Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes. Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+# 0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+# 0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.
+# Enabling this is a very bad idea for anything but very small setups,
+# as it means plugins, notification scripts and eventhandlers may run
+# out of environment space. It will also cause a significant increase
+# in CPU- and memory usage and drastically reduce the number of checks
+# you can run.
+# Values: 1 - Enable environment variable macros
+# 0 - Disable environment variable macros (default)
+
+enable_environment_macros=0
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processed used to execute system commands and host/
+# service checks). If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+# 0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks). Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems. Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this. If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+# 0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file. OR values together to log multiple
+# types of information.
+# Values:
+# -1 = Everything
+# 0 = Nothing
+# 1 = Functions
+# 2 = Configuration
+# 4 = Process information
+# 8 = Scheduled events
+# 16 = Host/service checks
+# 32 = Notifications
+# 64 = Event broker
+# 128 = External commands
+# 256 = Commands
+# 512 = Scheduled downtime
+# 1024 = Comments
+# 2048 = Macros
+# 4096 = Interprocess communication
+# 8192 = Scheduling
+# 16384 = Workers
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+# 1 = More detailed
+# 2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file. If
+# the file grows larger than this size, it will be renamed with a .old
+# extension. If a file already exists with a .old extension it will
+# automatically be deleted. This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+
+
+# Should we allow hostgroups to have no hosts, we default this to off since
+# that was the old behavior
+
+allow_empty_hostgroup_assignment=0
+
+
+
+# Normally worker count is dynamically allocated based on 1.5 * number of cpu's
+# with a minimum of 4 workers. This value will override the defaults
+
+#check_workers=3
+
+
+
+# DISABLE SERVICE CHECKS WHEN HOST DOWN
+# This option will disable all service checks if the host is not in an UP state
+#
+# While desirable in some environments, enabling this value can distort report
+# values as the expected quantity of checks will not have been performed
+
+#host_down_disable_service_checks=0
+
+
+
+# SET SERVICE/HOST STATUS WHEN SERVICE CHECK SKIPPED
+# These options will allow you to set the status of a service when its
+# service check is skipped due to one of three reasons:
+# 1) failed dependency check; 2) parent's status; 3) host not up
+# Number 3 can only happen if 'host_down_disable_service_checks' above
+# is set to 1.
+# Valid values for the service* options are:
+# -1 Do not change the service status (default - same as before 4.4)
+# 0 Set the service status to STATE_OK
+# 1 Set the service status to STATE_WARNING
+# 2 Set the service status to STATE_CRITICAL
+# 3 Set the service status to STATE_UNKNOWN
+# The host_skip_check_dependency_status option will allow you to set the
+# status of a host when its check is skipped due to a failed dependency check.
+# Valid values for the host_skip_check_dependency_status are:
+# -1 Do not change the host status (default - same as before 4.4)
+# 0 Set the host status to STATE_UP
+# 1 Set the host status to STATE_DOWN
+# 2 Set the host status to STATE_UNREACHABLE
+# We may add one or more statuses in the future.
+
+#service_skip_check_dependency_status=-1
+#service_skip_check_parent_status=-1
+#service_skip_check_host_down_status=-1
+#host_skip_check_dependency_status=-1
+
+
+
+# LOAD CONTROL OPTIONS
+# To get current defaults based on your system, issue this command to
+# the query handler:
+# echo -e '@core loadctl\0' | nc -U /usr/local/nagios/var/rw/nagios.qh
+#
+# Please note that used incorrectly these options can induce enormous latency.
+#
+# loadctl_options:
+# jobs_max The maximum amount of jobs to run at one time
+# jobs_min The minimum amount of jobs to run at one time
+# jobs_limit The maximum amount of jobs the current load lets us run
+# backoff_limit The minimum backoff_change
+# backoff_change # of jobs to remove from jobs_limit when backing off
+# rampup_limit Minimum rampup_change
+# rampup_change # of jobs to add to jobs_limit when ramping up
+
+#loadctl_options=jobs_max=100;backoff_limit=10;rampup_change=5
diff --git a/personal_infra/puppet/modules/nagios_core b/personal_infra/puppet/modules/nagios_core
new file mode 160000
+Subproject 8dbf9f12383bd29973963a52968b2850d98292f
diff --git a/personal_infra/puppet/modules/nextcloud/manifests/init.pp b/personal_infra/puppet/modules/nextcloud/manifests/init.pp
new file mode 100644
index 00000000..1c41215e
--- /dev/null
+++ b/personal_infra/puppet/modules/nextcloud/manifests/init.pp
@@ -0,0 +1,79 @@
+# Installs Nextcloud on EL9 from the koalillo COPR repo, serving PHP 8.2
+# (Remi module stream) through php-fpm behind Apache httpd.
+# NOTE(review): the database_* parameters are not referenced anywhere in this
+# class body -- presumably consumed via lookup/templates elsewhere; confirm
+# before relying on them.
+class nextcloud(
+  $database_name,
+  $database_user,
+  $database_host,
+) {
+
+  # COPR repository that carries the nextcloud-* packages installed below.
+  file {'/etc/yum.repos.d/koalillo-nextcloud-epel-9.repo':
+    content => @("EOT"/$)
+      [copr:copr.fedorainfracloud.org:koalillo:nextcloud-test]
+      name=Copr repo for nextcloud owned by koalillo
+      baseurl=https://download.copr.fedorainfracloud.org/results/koalillo/nextcloud-test/epel-9-\$basearch/
+      type=rpm-md
+      skip_if_unavailable=True
+      gpgcheck=1
+      gpgkey=https://download.copr.fedorainfracloud.org/results/koalillo/nextcloud-test/pubkey.gpg
+      repo_gpgcheck=0
+      enabled=1
+      enabled_metadata=1
+      | EOT
+    ,
+  }
+
+  # Remi repo provides the php:remi-8.2 module stream enabled next.
+  # NOTE(review): no provider is set for this URL-sourced rpm; confirm the
+  # default dnf provider honours 'source' here, otherwise add provider => rpm.
+  package {'remi-release':
+    source => 'https://rpms.remirepo.net/enterprise/remi-release-9.rpm',
+  }
+  ->
+  # Enable the PHP 8.2 module stream (skipped once already enabled).
+  exec {'/usr/bin/dnf module enable -y php:remi-8.2':
+    unless => '/usr/bin/dnf module list --enabled php | grep remi-8.2',
+  }
+
+  # Nextcloud itself plus the PHP extensions it needs; requires both the COPR
+  # repo file and the enabled module stream.
+  package {['nextcloud-httpd', 'nextcloud-postgresql', 'php82-php-pecl-apcu', 'php-sodium', 'php-opcache',]:
+    require => [
+      Exec['/usr/bin/dnf module enable -y php:remi-8.2'],
+      File['/etc/yum.repos.d/koalillo-nextcloud-epel-9.repo'],
+    ],
+  }
+
+  # Web server; restarted when the nextcloud-httpd package changes.
+  service {'httpd':
+    enable => true,
+    ensure => running,
+    subscribe => Package['nextcloud-httpd'],
+  }
+
+  # systemd timer shipped by the package that runs Nextcloud background jobs.
+  service {'nextcloud-cron.timer':
+    ensure => running,
+    enable => true,
+    require => Package['nextcloud-httpd'],
+  }
+
+  # php-fpm pool config (templates/www.conf.epp); restart php-fpm on change.
+  file {'/etc/php-fpm.d/www.conf':
+    content => epp("nextcloud/www.conf", {}),
+  }
+  ~>
+  service {'php-fpm':
+    enable => true,
+    ensure => running,
+    subscribe => Package['nextcloud-httpd'],
+  }
+
+  # ensure => <path> creates a symlink: activates the packaged .avail access
+  # config; the 'z-' prefix makes it load after the other conf.d snippets.
+  file {'/etc/httpd/conf.d/z-nextcloud-access.conf':
+    ensure => '/etc/httpd/conf.d/nextcloud-access.conf.avail',
+    require => Package['nextcloud-httpd'],
+    notify => Service['httpd'],
+  }
+
+  package {['php-intl', 'php-bcmath']:}
+
+  # Enable APCu for CLI PHP so occ/cron invocations can use the local cache.
+  file {'/etc/php.d/99-apcu-cli.ini':
+    content => @("EOT")
+      apc.enable_cli=1
+      | EOT
+    ,
+  }
+
+  # Pre-generate image previews hourly (root crontab; sudo drops to apache).
+  cron {"nextcloud-previews":
+    command => "sudo -u apache php -d memory_limit=512M /usr/share/nextcloud/occ preview:generate-all",
+    minute => "41",
+  }
+}
diff --git a/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp b/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp
new file mode 100644
index 00000000..70db53e4
--- /dev/null
+++ b/personal_infra/puppet/modules/nextcloud/templates/www.conf.epp
@@ -0,0 +1,439 @@
+; Start a new pool named 'www'.
+; the variable $pool can be used in any directive and will be replaced by the
+; pool name ('www' here)
+[www]
+
+; Per pool prefix
+; It only applies on the following directives:
+; - 'access.log'
+; - 'slowlog'
+; - 'listen' (unixsocket)
+; - 'chroot'
+; - 'chdir'
+; - 'php_values'
+; - 'php_admin_values'
+; When not set, the global prefix (or @php_fpm_prefix@) applies instead.
+; Note: This directive can also be relative to the global prefix.
+; Default Value: none
+;prefix = /path/to/pools/$pool
+
+; Unix user/group of processes
+; Note: The user is mandatory. If the group is not set, the default user's group
+; will be used.
+; RPM: apache user chosen to provide access to the same directories as httpd
+user = apache
+; RPM: Keep a group allowed to write in log dir.
+group = apache
+
+; The address on which to accept FastCGI requests.
+; Valid syntaxes are:
+; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific IPv4 address on
+; a specific port;
+; '[ip:6:addr:ess]:port' - to listen on a TCP socket to a specific IPv6 address on
+; a specific port;
+; 'port' - to listen on a TCP socket to all addresses
+; (IPv6 and IPv4-mapped) on a specific port;
+; '/path/to/unix/socket' - to listen on a unix socket.
+; Note: This value is mandatory.
+listen = /run/php-fpm/www.sock
+
+; Set listen(2) backlog.
+; Default Value: 511
+;listen.backlog = 511
+
+; Set permissions for unix socket, if one is used. In Linux, read/write
+; permissions must be set in order to allow connections from a web server.
+; Default Values: user and group are set as the running user
+; mode is set to 0660
+;listen.owner = nobody
+;listen.group = nobody
+;listen.mode = 0660
+
+; When POSIX Access Control Lists are supported you can set them using
+; these options, value is a comma separated list of user/group names.
+; When set, listen.owner and listen.group are ignored
+listen.acl_users = apache,nginx
+;listen.acl_groups =
+
+; List of addresses (IPv4/IPv6) of FastCGI clients which are allowed to connect.
+; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
+; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
+; must be separated by a comma. If this value is left blank, connections will be
+; accepted from any ip address.
+; Default Value: any
+listen.allowed_clients = 127.0.0.1
+
+; Specify the nice(2) priority to apply to the pool processes (only if set)
+; The value can vary from -19 (highest priority) to 20 (lower priority)
+; Note: - It will only work if the FPM master process is launched as root
+; - The pool processes will inherit the master process priority
+; unless it specified otherwise
+; Default Value: not set
+; process.priority = -19
+
+; Set the process dumpable flag (PR_SET_DUMPABLE prctl) even if the process user
+; or group is different than the master process user. It allows creating process
+; core dump and ptrace the process for the pool user.
+; Default Value: no
+; process.dumpable = yes
+
+; Choose how the process manager will control the number of child processes.
+; Possible Values:
+; static - a fixed number (pm.max_children) of child processes;
+; dynamic - the number of child processes are set dynamically based on the
+; following directives. With this process management, there will be
+; always at least 1 children.
+; pm.max_children - the maximum number of children that can
+; be alive at the same time.
+; pm.start_servers - the number of children created on startup.
+; pm.min_spare_servers - the minimum number of children in 'idle'
+; state (waiting to process). If the number
+; of 'idle' processes is less than this
+; number then some children will be created.
+; pm.max_spare_servers - the maximum number of children in 'idle'
+; state (waiting to process). If the number
+; of 'idle' processes is greater than this
+; number then some children will be killed.
+; ondemand - no children are created at startup. Children will be forked when
+; new requests will connect. The following parameter are used:
+; pm.max_children - the maximum number of children that
+; can be alive at the same time.
+; pm.process_idle_timeout - The number of seconds after which
+; an idle process will be killed.
+; Note: This value is mandatory.
+pm = dynamic
+
+; The number of child processes to be created when pm is set to 'static' and the
+; maximum number of child processes when pm is set to 'dynamic' or 'ondemand'.
+; This value sets the limit on the number of simultaneous requests that will be
+; served. Equivalent to the ApacheMaxClients directive with mpm_prefork.
+; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
+; CGI. The below defaults are based on a server without much resources. Don't
+; forget to tweak pm.* to fit your needs.
+; Note: Used when pm is set to 'static', 'dynamic' or 'ondemand'
+; Note: This value is mandatory.
+pm.max_children = 50
+
+; The number of child processes created on startup.
+; Note: Used only when pm is set to 'dynamic'
+; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
+pm.start_servers = 5
+
+; The desired minimum number of idle server processes.
+; Note: Used only when pm is set to 'dynamic'
+; Note: Mandatory when pm is set to 'dynamic'
+pm.min_spare_servers = 5
+
+; The desired maximum number of idle server processes.
+; Note: Used only when pm is set to 'dynamic'
+; Note: Mandatory when pm is set to 'dynamic'
+pm.max_spare_servers = 35
+
+; The number of seconds after which an idle process will be killed.
+; Note: Used only when pm is set to 'ondemand'
+; Default Value: 10s
+;pm.process_idle_timeout = 10s;
+
+; The number of requests each child process should execute before respawning.
+; This can be useful to work around memory leaks in 3rd party libraries. For
+; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
+; Default Value: 0
+;pm.max_requests = 500
+
+; The URI to view the FPM status page. If this value is not set, no URI will be
+; recognized as a status page. It shows the following informations:
+; pool - the name of the pool;
+; process manager - static, dynamic or ondemand;
+; start time - the date and time FPM has started;
+; start since - number of seconds since FPM has started;
+; accepted conn - the number of request accepted by the pool;
+; listen queue - the number of request in the queue of pending
+; connections (see backlog in listen(2));
+; max listen queue - the maximum number of requests in the queue
+; of pending connections since FPM has started;
+; listen queue len - the size of the socket queue of pending connections;
+; idle processes - the number of idle processes;
+; active processes - the number of active processes;
+; total processes - the number of idle + active processes;
+; max active processes - the maximum number of active processes since FPM
+; has started;
+; max children reached - number of times, the process limit has been reached,
+; when pm tries to start more children (works only for
+; pm 'dynamic' and 'ondemand');
+; Value are updated in real time.
+; Example output:
+; pool: www
+; process manager: static
+; start time: 01/Jul/2011:17:53:49 +0200
+; start since: 62636
+; accepted conn: 190460
+; listen queue: 0
+; max listen queue: 1
+; listen queue len: 42
+; idle processes: 4
+; active processes: 11
+; total processes: 15
+; max active processes: 12
+; max children reached: 0
+;
+; By default the status page output is formatted as text/plain. Passing either
+; 'html', 'xml' or 'json' in the query string will return the corresponding
+; output syntax. Example:
+; http://www.foo.bar/status
+; http://www.foo.bar/status?json
+; http://www.foo.bar/status?html
+; http://www.foo.bar/status?xml
+;
+; By default the status page only outputs short status. Passing 'full' in the
+; query string will also return status for each pool process.
+; Example:
+; http://www.foo.bar/status?full
+; http://www.foo.bar/status?json&full
+; http://www.foo.bar/status?html&full
+; http://www.foo.bar/status?xml&full
+; The Full status returns for each process:
+; pid - the PID of the process;
+; state - the state of the process (Idle, Running, ...);
+; start time - the date and time the process has started;
+; start since - the number of seconds since the process has started;
+; requests - the number of requests the process has served;
+; request duration - the duration in µs of the requests;
+; request method - the request method (GET, POST, ...);
+; request URI - the request URI with the query string;
+; content length - the content length of the request (only with POST);
+; user - the user (PHP_AUTH_USER) (or '-' if not set);
+; script - the main script called (or '-' if not set);
+; last request cpu - the %cpu the last request consumed
+; it's always 0 if the process is not in Idle state
+; because CPU calculation is done when the request
+; processing has terminated;
+; last request memory - the max amount of memory the last request consumed
+; it's always 0 if the process is not in Idle state
+; because memory calculation is done when the request
+; processing has terminated;
+; If the process is in Idle state, then informations are related to the
+; last request the process has served. Otherwise informations are related to
+; the current request being served.
+; Example output:
+; ************************
+; pid: 31330
+; state: Running
+; start time: 01/Jul/2011:17:53:49 +0200
+; start since: 63087
+; requests: 12808
+; request duration: 1250261
+; request method: GET
+; request URI: /test_mem.php?N=10000
+; content length: 0
+; user: -
+; script: /home/fat/web/docs/php/test_mem.php
+; last request cpu: 0.00
+; last request memory: 0
+;
+; Note: There is a real-time FPM status monitoring sample web page available
+; It's available in: @EXPANDED_DATADIR@/fpm/status.html
+;
+; Note: The value must start with a leading slash (/). The value can be
+; anything, but it may not be a good idea to use the .php extension or it
+; may conflict with a real PHP file.
+; Default Value: not set
+;pm.status_path = /status
+
+; The ping URI to call the monitoring page of FPM. If this value is not set, no
+; URI will be recognized as a ping page. This could be used to test from outside
+; that FPM is alive and responding, or to
+; - create a graph of FPM availability (rrd or such);
+; - remove a server from a group if it is not responding (load balancing);
+; - trigger alerts for the operating team (24/7).
+; Note: The value must start with a leading slash (/). The value can be
+; anything, but it may not be a good idea to use the .php extension or it
+; may conflict with a real PHP file.
+; Default Value: not set
+;ping.path = /ping
+
+; This directive may be used to customize the response of a ping request. The
+; response is formatted as text/plain with a 200 response code.
+; Default Value: pong
+;ping.response = pong
+
+; The access log file
+; Default: not set
+;access.log = log/$pool.access.log
+
+; The access log format.
+; The following syntax is allowed
+; %%: the '%' character
+; %C: %CPU used by the request
+; it can accept the following format:
+; - %{user}C for user CPU only
+; - %{system}C for system CPU only
+; - %{total}C for user + system CPU (default)
+; %d: time taken to serve the request
+; it can accept the following format:
+; - %{seconds}d (default)
+; - %{miliseconds}d
+; - %{mili}d
+; - %{microseconds}d
+; - %{micro}d
+; %e: an environment variable (same as $_ENV or $_SERVER)
+; it must be associated with embraces to specify the name of the env
+; variable. Some exemples:
+; - server specifics like: %{REQUEST_METHOD}e or %{SERVER_PROTOCOL}e
+; - HTTP headers like: %{HTTP_HOST}e or %{HTTP_USER_AGENT}e
+; %f: script filename
+; %l: content-length of the request (for POST request only)
+; %m: request method
+; %M: peak of memory allocated by PHP
+; it can accept the following format:
+; - %{bytes}M (default)
+; - %{kilobytes}M
+; - %{kilo}M
+; - %{megabytes}M
+; - %{mega}M
+; %n: pool name
+; %o: output header
+; it must be associated with embraces to specify the name of the header:
+; - %{Content-Type}o
+; - %{X-Powered-By}o
+; - %{Transfert-Encoding}o
+; - ....
+; %p: PID of the child that serviced the request
+; %P: PID of the parent of the child that serviced the request
+; %q: the query string
+; %Q: the '?' character if query string exists
+; %r: the request URI (without the query string, see %q and %Q)
+; %R: remote IP address
+; %s: status (response code)
+; %t: server time the request was received
+; it can accept a strftime(3) format:
+; %d/%b/%Y:%H:%M:%S %z (default)
+; The strftime(3) format must be encapsuled in a %{<strftime_format>}t tag
+; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
+; %T: time the log has been written (the request has finished)
+; it can accept a strftime(3) format:
+; %d/%b/%Y:%H:%M:%S %z (default)
+; The strftime(3) format must be encapsuled in a %{<strftime_format>}t tag
+; e.g. for a ISO8601 formatted timestring, use: %{%Y-%m-%dT%H:%M:%S%z}t
+; %u: remote user
+;
+; Default: "%R - %u %t \"%m %r\" %s"
+;access.format = "%R - %u %t \"%m %r%Q%q\" %s %f %{mili}d %{kilo}M %C%%"
+
+; The log file for slow requests
+; Default Value: not set
+; Note: slowlog is mandatory if request_slowlog_timeout is set
+slowlog = /var/log/php-fpm/www-slow.log
+
+; The timeout for serving a single request after which a PHP backtrace will be
+; dumped to the 'slowlog' file. A value of '0s' means 'off'.
+; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
+; Default Value: 0
+;request_slowlog_timeout = 0
+
+; Depth of slow log stack trace.
+; Default Value: 20
+;request_slowlog_trace_depth = 20
+
+; The timeout for serving a single request after which the worker process will
+; be killed. This option should be used when the 'max_execution_time' ini option
+; does not stop script execution for some reason. A value of '0' means 'off'.
+; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
+; Default Value: 0
+;request_terminate_timeout = 0
+
+; Set open file descriptor rlimit.
+; Default Value: system defined value
+;rlimit_files = 1024
+
+; Set max core size rlimit.
+; Possible Values: 'unlimited' or an integer greater or equal to 0
+; Default Value: system defined value
+;rlimit_core = 0
+
+; Chroot to this directory at the start. This value must be defined as an
+; absolute path. When this value is not set, chroot is not used.
+; Note: you can prefix with '$prefix' to chroot to the pool prefix or one
+; of its subdirectories. If the pool prefix is not set, the global prefix
+; will be used instead.
+; Note: chrooting is a great security feature and should be used whenever
+; possible. However, all PHP paths will be relative to the chroot
+; (error_log, sessions.save_path, ...).
+; Default Value: not set
+;chroot =
+
+; Chdir to this directory at the start.
+; Note: relative path can be used.
+; Default Value: current directory or / when chroot
+;chdir = /var/www
+
+; Redirect worker stdout and stderr into main error log. If not set, stdout and
+; stderr will be redirected to /dev/null according to FastCGI specs.
+; Note: on highloaded environement, this can cause some delay in the page
+; process time (several ms).
+; Default Value: no
+;catch_workers_output = yes
+
+; Clear environment in FPM workers
+; Prevents arbitrary environment variables from reaching FPM worker processes
+; by clearing the environment in workers before env vars specified in this
+; pool configuration are added.
+; Setting to "no" will make all environment variables available to PHP code
+; via getenv(), $_ENV and $_SERVER.
+; Default Value: yes
+;clear_env = no
+
+; Limits the extensions of the main script FPM will allow to parse. This can
+; prevent configuration mistakes on the web server side. You should only limit
+; FPM to .php extensions to prevent malicious users to use other extensions to
+; execute php code.
+; Note: set an empty value to allow all extensions.
+; Default Value: .php
+;security.limit_extensions = .php .php3 .php4 .php5 .php7
+
+; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
+; the current environment.
+; Default Value: clean env
+;env[HOSTNAME] = $HOSTNAME
+;env[PATH] = /usr/local/bin:/usr/bin:/bin
+;env[TMP] = /tmp
+;env[TMPDIR] = /tmp
+;env[TEMP] = /tmp
+
+; Additional php.ini defines, specific to this pool of workers. These settings
+; overwrite the values previously defined in the php.ini. The directives are the
+; same as the PHP SAPI:
+; php_value/php_flag - you can set classic ini defines which can
+; be overwritten from PHP call 'ini_set'.
+; php_admin_value/php_admin_flag - these directives won't be overwritten by
+; PHP call 'ini_set'
+; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
+
+; Defining 'extension' will load the corresponding shared extension from
+; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
+; overwrite previously defined php.ini values, but will append the new value
+; instead.
+
+; Note: path INI options can be relative and will be expanded with the prefix
+; (pool, global or @prefix@)
+
+; Default Value: nothing is defined by default except the values in php.ini and
+; specified at startup with the -d argument
+;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
+;php_flag[display_errors] = off
+php_admin_value[error_log] = /var/log/php-fpm/www-error.log
+php_admin_flag[log_errors] = on
+php_admin_value[memory_limit] = 512M
+php_admin_value[output_buffering] = off
+
+; Set the following data paths to directories owned by the FPM process user.
+;
+; Do not change the ownership of existing system directories, if the process
+; user does not have write permission, create dedicated directories for this
+; purpose.
+;
+; See warning about choosing the location of these directories on your system
+; at http://php.net/session.save-path
+php_value[session.save_handler] = files
+php_value[session.save_path] = /var/lib/php/session
+php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache
+;php_value[opcache.file_cache] = /var/lib/php/opcache
diff --git a/personal_infra/puppet/modules/ocserv/manifests/init.pp b/personal_infra/puppet/modules/ocserv/manifests/init.pp
new file mode 100644
index 00000000..b9ead95b
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/manifests/init.pp
@@ -0,0 +1,97 @@
+# Installs and configures ocserv (OpenConnect VPN server) on Debian or RedHat.
+# OS-specific users, paths and certificates are selected below and passed to
+# templates/ocserv.conf.epp; optionally opens firewalld on RedHat.
+class ocserv($ocserv_tcp_port,
+             $ocserv_udp_port,
+             $ocserv_default_domain,
+             $ocserv_ipv4_network,
+             $ocserv_dns,
+             $ocserv_split_dns,
+             $ocserv_routes,
+             $firewall = true)
+{
+  # Unprivileged user/group the workers drop to.
+  $run_as_user = $facts['os']['family'] ? {
+    'Debian' => 'nobody',
+    'RedHat' => 'ocserv',
+  }
+
+  $run_as_group = $facts['os']['family'] ? {
+    'Debian' => 'daemon',
+    'RedHat' => 'ocserv',
+  }
+
+  $socket_file = $facts['os']['family'] ? {
+    'Debian' => '/var/run/ocserv-socket',
+    'RedHat' => 'ocserv.sock',
+  }
+
+  # Only the RedHat package sets up a chroot; undef omits the directive.
+  $chroot_dir = $facts['os']['family'] ? {
+    'Debian' => undef,
+    'RedHat' => '/var/lib/ocserv',
+  }
+
+  $server_cert = $facts['os']['family']? {
+    'Debian' => '/etc/ssl/certs/ssl-cert-snakeoil.pem',
+    'RedHat' => '/etc/pki/ocserv/public/server.crt',
+  }
+
+  $server_key = $facts['os']['family'] ? {
+    'Debian' => '/etc/ssl/private/ssl-cert-snakeoil.key',
+    'RedHat' => '/etc/pki/ocserv/private/server.key',
+  }
+
+  package {'ocserv':}
+  ->
+  file {'/etc/ocserv/ocserv.conf':
+    content => epp('ocserv/ocserv.conf', {'tcp_port' => $ocserv_tcp_port,
+                                          'udp_port' => $ocserv_udp_port,
+                                          'run_as_user' => $run_as_user,
+                                          'run_as_group' => $run_as_group,
+                                          'socket_file' => $socket_file,
+                                          'chroot_dir' => $chroot_dir,
+                                          'server_cert' => $server_cert,
+                                          'server_key' => $server_key,
+                                          'default_domain' => $ocserv_default_domain,
+                                          'ipv4_network' => $ocserv_ipv4_network,
+                                          'dns' => $ocserv_dns,
+                                          'split_dns' => $ocserv_split_dns,
+                                          'routes' => $ocserv_routes,
+                                          }),
+  }
+  ~>
+  service {'ocserv':
+    enable => true,
+    ensure => running,
+  }
+
+  if ($facts['os']['family'] == 'RedHat' and $firewall) {
+    # NAT the VPN clients' traffic out of the host.
+    exec {'add masquerade for ocserv':
+      command => '/usr/bin/firewall-cmd --permanent --add-masquerade',
+      # query the permanent config so this stays idempotent before a reload
+      unless  => '/usr/bin/firewall-cmd --permanent --query-masquerade',
+      notify  => Exec['reload firewall for ocserv'],
+    }
+
+    # Open the configured VPN ports. Fixed: this previously hard-coded 444
+    # (ignoring the $ocserv_*_port parameters) and used {tcp,udp} brace
+    # expansion, which the default (shell-less) exec provider never performs.
+    exec {'open firewall for ocserv':
+      # shell provider: the unless clause chains two queries with '&&'
+      provider => shell,
+      command  => "/usr/bin/firewall-cmd --permanent --add-port=${ocserv_tcp_port}/tcp --add-port=${ocserv_udp_port}/udp",
+      unless   => "/usr/bin/firewall-cmd --permanent --query-port=${ocserv_tcp_port}/tcp && /usr/bin/firewall-cmd --permanent --query-port=${ocserv_udp_port}/udp",
+    }
+    ~>
+    exec {'reload firewall for ocserv':
+      command     => '/usr/bin/firewall-cmd --reload',
+      refreshonly => true,
+    }
+  }
+
+  if ($facts['os']['family'] == 'Debian') {
+    # Debian uses socket activation: override the packaged socket's ports via
+    # a drop-in (templates/port.conf.epp).
+    file {'/etc/systemd/system/ocserv.socket.d/':
+      ensure => directory,
+    }
+    ->
+    file {'/etc/systemd/system/ocserv.socket.d/port.conf':
+      content => epp('ocserv/port.conf', {'tcp_port' => $ocserv_tcp_port,
+                                          'udp_port' => $ocserv_udp_port,
+                                          }),
+    }
+    ~>
+    # Fixed: '&&' requires a shell; the default exec provider does not spawn one.
+    exec {'/bin/systemctl daemon-reload && systemctl restart ocserv.socket':
+      provider    => shell,
+      refreshonly => true,
+    }
+  }
+}
diff --git a/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp b/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp
new file mode 100644
index 00000000..b4ca12e7
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/templates/ocserv.conf.epp
@@ -0,0 +1,57 @@
+<%- | $tcp_port,
+      $udp_port,
+      $run_as_user,
+      $run_as_group,
+      $socket_file,
+      $chroot_dir,
+      $server_cert,
+      $server_key,
+      $default_domain,
+      $ipv4_network,
+      $dns,
+      $split_dns,
+      $routes,
+| -%>
+# ocserv.conf rendered by Puppet (ocserv module) -- local edits will be lost.
+auth = "pam"
+listen-host-is-dyndns = true
+# note, those are not used on Debian
+tcp-port = <%= $tcp_port %>
+udp-port = <%= $udp_port %>
+run-as-user = <%= $run_as_user %>
+run-as-group = <%= $run_as_group %>
+socket-file = <%= $socket_file %>
+<% if $chroot_dir { -%>
+chroot-dir = <%= $chroot_dir %>
+<% } -%>
+server-cert = <%= $server_cert %>
+server-key = <%= $server_key %>
+isolate-workers = true
+keepalive = 32400
+dpd = 90
+mobile-dpd = 1800
+switch-to-tcp-timeout = 25
+try-mtu-discovery = false
+compression = true
+tls-priorities = "NORMAL:%SERVER_PRECEDENCE:%COMPAT:-RSA:-VERS-SSL3.0:-ARCFOUR-128"
+auth-timeout = 240
+min-reauth-time = 3
+cookie-timeout = 300
+deny-roaming = false
+rekey-time = 172800
+rekey-method = ssl
+use-utmp = true
+pid-file = /var/run/ocserv.pid
+device = vpns
+predictable-ips = true
+default-domain = <%= $default_domain %>
+ipv4-network = <%= $ipv4_network %>
+#tunnel-all-dns = true
+dns = <%= $dns %>
+split-dns = <%= $split_dns %>
+ping-leases = true
+cisco-client-compat = true
+dtls-psk = false
+dtls-legacy = true
+# one route directive per entry in $routes
+<% $routes.each | $route | { -%>
+route = <%= $route %>
+<% } %>
diff --git a/personal_infra/puppet/modules/ocserv/templates/port.conf.epp b/personal_infra/puppet/modules/ocserv/templates/port.conf.epp
new file mode 100644
index 00000000..223c9961
--- /dev/null
+++ b/personal_infra/puppet/modules/ocserv/templates/port.conf.epp
@@ -0,0 +1,8 @@
+<%- | $tcp_port,
+      $udp_port,
+| -%>
+# systemd drop-in overriding the ports of the packaged ocserv.socket.
+# The empty assignments clear the list-type Listen* settings inherited from
+# the packaged unit before the configured ports are added.
+[Socket]
+ListenStream=
+ListenDatagram=
+ListenStream=<%= $tcp_port %>
+ListenDatagram=<%= $udp_port %>
diff --git a/personal_infra/puppet/modules/podman/manifests/init.pp b/personal_infra/puppet/modules/podman/manifests/init.pp
new file mode 100644
index 00000000..17247aa2
--- /dev/null
+++ b/personal_infra/puppet/modules/podman/manifests/init.pp
@@ -0,0 +1,17 @@
+# Installs podman and configures rootless operation for $user with the
+# requested storage driver.
+class podman($user, $storage_driver) {
+  package {'podman':}
+
+  # Rootless containers need a subordinate uid/gid range for the user.
+  # NOTE: this replaces the whole file, clobbering any other entries.
+  file {['/etc/subuid', '/etc/subgid']:
+    content => "${user}:100000:65536\n",
+  }
+
+  # Force the configured storage driver (skipped once the line matches).
+  exec {"/usr/bin/sed -i 's/driver = \".*\"/driver = \"${storage_driver}\"/g' /etc/containers/storage.conf":
+    require => Package['podman'],
+    unless  => "/usr/bin/grep 'driver = \"${storage_driver}\"' /etc/containers/storage.conf",
+  }
+
+  # Uncomment mount_program so fuse-overlayfs is used for rootless overlay.
+  # Fixed: the unless previously grepped for the *commented* line (inverted
+  # logic) and omitted the file argument, so grep read stdin and the sed ran
+  # on every agent run. Now we skip once the uncommented line is present.
+  exec {"/usr/bin/sed -i 's|#mount_program = \"/usr/bin/fuse-overlayfs\"|mount_program = \"/usr/bin/fuse-overlayfs\"|g' /etc/containers/storage.conf":
+    require => Package['podman'],
+    unless  => "/usr/bin/grep '^mount_program = \"/usr/bin/fuse-overlayfs\"' /etc/containers/storage.conf",
+  }
+}
diff --git a/personal_infra/puppet/modules/postgres/manifests/init.pp b/personal_infra/puppet/modules/postgres/manifests/init.pp
new file mode 100644
index 00000000..40fd5726
--- /dev/null
+++ b/personal_infra/puppet/modules/postgres/manifests/init.pp
@@ -0,0 +1,26 @@
+# Installs PostgreSQL 15 from the PGDG repo on EL9, initialises the cluster,
+# and manages pg_hba.conf plus listen_addresses.
+class postgres($pg_hba_conf) {
+  # PGDG repository rpm, installed straight from the upstream URL.
+  # NOTE(review): no provider is set; confirm the default dnf provider
+  # honours 'source' here, otherwise add provider => rpm.
+  package {'pgdg-redhat-repo':
+    source => 'https://download.postgresql.org/pub/repos/yum/reporpms/EL-9-x86_64/pgdg-redhat-repo-latest.noarch.rpm',
+  }
+  ->
+  package {'postgresql15-server':}
+  ->
+  # Initialise the data directory once; PG_VERSION marks an existing cluster.
+  exec {'/usr/pgsql-15/bin/postgresql-15-setup initdb':
+    creates => '/var/lib/pgsql/15/data/PG_VERSION',
+  }
+  ->
+  [
+    # Client authentication policy is supplied verbatim by the caller.
+    file {'/var/lib/pgsql/15/data/pg_hba.conf':
+      # template at /usr/pgsql-15/share/pg_hba.conf.sample
+      content => $pg_hba_conf,
+    },
+    # Listen on all interfaces (skipped once already uncommented).
+    exec {'/bin/sed -i "s/#listen_addresses = \'localhost\'/listen_addresses = \'*\' /" /var/lib/pgsql/15/data/postgresql.conf':
+      unless => '/bin/grep "listen_addresses = \'\\*\'" /var/lib/pgsql/15/data/postgresql.conf',
+    }
+  ]
+  ~>
+  # Restart (not just ensure) when the config resources above change.
+  service {'postgresql-15':
+    ensure => running,
+    enable => true,
+  }
+}
diff --git a/personal_infra/puppet/modules/proxmox/README.md b/personal_infra/puppet/modules/proxmox/README.md
new file mode 100644
index 00000000..5e5f8bc6
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/README.md
@@ -0,0 +1,36 @@
+# Proxmox
+
+## Networking
+
+Configures a public Internet IP, and an internal network with forwarding for containers and virtual machines.
+
+Add the following to your Proxmox host Ansible variables:
+
+```
+network:
+ ip: dotted.ip.notation
+ netmask: 255.255.255.0
+ gateway: dotted.ip.notation
+ proxmox:
+ ip: 10.3.3.1
+ netmask: 255.255.255.0
+ network: 10.3.3.0/24
+```
+
+## Proxy
+
+Class `proxmox::proxy` can handle proxying internal web servers.
+
+```
+class {'proxmox::proxy':
+ mail => lookup('mail.root_mail'),
+ base_hostname => lookup('network.public_hostname'),
+}
+```
+
+This uses the Apache HTTP Server and mod_md to obtain certificates.
+Your hostname must be publicly accessible, because http challenges are used.
+
+You will receive an email whenever the server needs to be restarted.
+
+The `base_hostname` certificate is injected daily to pveproxy.
diff --git a/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp b/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp
new file mode 100644
index 00000000..f3464c78
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/freeipa.pp
@@ -0,0 +1,17 @@
+# Prepares a Proxmox host for running a FreeIPA container.
+class proxmox::freeipa {
+  class {'proxmox':}
+
+  # Map the FreeIPA ID range (from the 'freeipa' hiera hash) into root's
+  # subordinate uid/gid ranges so the unprivileged container can use it.
+  file {['/etc/subuid', '/etc/subgid']:
+    content => epp('proxmox/freeipa_subxid', {'freeipa' => lookup('freeipa')}),
+  }
+
+  # TODO
+  # Stop and mask the host's sssd sockets -- presumably so they do not
+  # interfere with the FreeIPA container (confirm); reset-failed clears any
+  # failed units left behind.
+  service {['sssd-ssh.socket', 'sssd-pam-priv.socket', 'sssd-nss.socket', 'sssd-sudo.socket', 'sssd-pam.socket']:
+    ensure => stopped,
+    enable => mask,
+  }
+  ~>
+  exec {'/usr/bin/systemctl reset-failed':
+    refreshonly => true,
+  }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/init.pp b/personal_infra/puppet/modules/proxmox/manifests/init.pp
new file mode 100644
index 00000000..b3297eb4
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/init.pp
@@ -0,0 +1,38 @@
+# Base Proxmox VE host configuration: networking, service hardening and
+# apt repository setup.
+class proxmox {
+  # Render /etc/network/interfaces from the 'network' hiera hash (public IP
+  # plus internal bridge; see templates/interfaces.epp) and apply live.
+  file {'/etc/network/interfaces':
+    content => epp('proxmox/interfaces', {
+      "network" => lookup("network"),
+    }),
+  }
+  ~>
+  exec {'/usr/sbin/ifreload -a':
+    refreshonly => true
+  }
+
+  # to prevent Germany/Hetzner abuse complaints
+  service {['rpcbind.target', 'rpcbind.service', 'rpcbind.socket']:
+    ensure => stopped,
+    enable => mask,
+  }
+
+  # TODO: secure this. Right now I don't use VMs, so just disable it
+  service {'spiceproxy':
+    ensure => stopped,
+    enable => mask,
+  }
+
+  # Drop the PVE-shipped logrotate snippet.
+  # NOTE(review): the chained service declares no ensure/enable, so removal
+  # only sends it a refresh -- confirm that is the intended effect.
+  file {'/etc/logrotate.d/pve':
+    ensure => absent,
+  }
+  ~>
+  service {'logrotate':}
+
+  # Swap the subscription-only enterprise repo for the no-subscription one.
+  file {'/etc/apt/sources.list.d/pve-enterprise.list':
+    ensure => absent,
+  }
+
+  file {'/etc/apt/sources.list.d/pve-no-subscription.list':
+    content => 'deb http://download.proxmox.com/debian/pve bullseye pve-no-subscription
+',
+  }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/proxy.pp b/personal_infra/puppet/modules/proxmox/manifests/proxy.pp
new file mode 100644
index 00000000..cb3c0bd4
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/proxy.pp
@@ -0,0 +1,52 @@
+# Apache-based HTTPS reverse proxy in front of Proxmox: mod_md obtains ACME
+# certificates for $base_hostname (http challenges, so the name must be
+# publicly reachable); $mail is the ACME contact address.
+class proxmox::proxy ($mail, $base_hostname) {
+  package {'apache2':}
+  ->
+  service {'apache2':
+    enable => true,
+    ensure => running,
+    require => File['/usr/local/bin/notify_md_renewal'],
+  }
+
+  # Shared metaparameters splatted into the resources below.
+  $apache_dep = {
+    require => Package['apache2'],
+    notify => Service['apache2'],
+  }
+
+  # Enable the Apache modules needed for mod_md TLS termination and proxying.
+  ['md', 'ssl', 'proxy_http', 'proxy'].each |$mod| {
+    exec {"/usr/sbin/a2enmod $mod":
+      creates => "/etc/apache2/mods-enabled/$mod.load",
+      * => $apache_dep,
+    }
+  }
+
+  # mod_md driver vhost: managed domain, ACME agreement, renewal hook.
+  # ($base_hostname/$mail interpolate inside the double-quoted heredoc.)
+  file {'/etc/apache2/sites-enabled/test.conf':
+    content => @("EOT")
+      MDomain $base_hostname auto
+      MDCertificateAgreement accepted
+      MDContactEmail $mail
+      MDNotifyCmd /usr/local/bin/notify_md_renewal
+
+      <VirtualHost *:443>
+        ServerName $base_hostname
+        SSLEngine on
+      </VirtualHost>
+      | EOT
+    ,
+    * => $apache_dep
+  }
+
+  # Renewal hook: restart apache, push the fresh cert/key into pveproxy, then
+  # run any installed notify_md_renewal_hook_* scripts (the /$ heredoc flag
+  # lets \$hook stay a literal shell variable).
+  file {'/usr/local/bin/notify_md_renewal':
+    content => @("EOT"/$)
+      #!/bin/sh
+
+      systemctl restart apache2
+      pvenode cert set /etc/apache2/md/domains/$base_hostname/pubcert.pem /etc/apache2/md/domains/$base_hostname/privkey.pem --force 1 --restart 1
+
+      for hook in /usr/local/bin/notify_md_renewal_hook_* ; do
+        \$hook
+      done
+      | EOT
+    ,
+    mode => '0755',
+  }
+}
diff --git a/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp b/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp
new file mode 100644
index 00000000..b60caf4c
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/manifests/proxy_host.pp
@@ -0,0 +1,53 @@
+define proxmox::proxy_host (String[1] $target, Optional[String[1]] $overwrite_rh_certs = undef) {
+ if $target =~ /^https:/ {
+ $ssl_fragment = @("EOT")
+ SSLProxyEngine on
+ SSLProxyCheckPeerName off
+ | EOT
+ }
+ else {
+    $ssl_fragment = ''
+ }
+
+ file {"/etc/apache2/sites-enabled/$title.conf":
+ content => @("EOT")
+ MDomain $title
+
+ <VirtualHost *:80>
+ ServerName $title
+ Redirect permanent / https://$title/
+ </VirtualHost>
+
+ <VirtualHost *:443>
+ ServerName $title
+ SSLEngine on
+
+ ProxyPass "/" "$target"
+ ProxyPassReverse "/" "$target"
+      ProxyPreserveHost On
+ $ssl_fragment
+ </VirtualHost>
+ | EOT
+ ,
+ }
+ ~>
+ Service['apache2']
+
+ if $overwrite_rh_certs {
+    $pveid = lookup("hostvars.'${overwrite_rh_certs}'.proxmox.id")
+
+ file {"/usr/local/bin/notify_md_renewal_hook_$overwrite_rh_certs":
+ content => @("EOT"/$)
+ #!/bin/sh
+
+ cp /etc/apache2/md/domains/$title/pubcert.pem /rpool/data/subvol-$pveid-disk-0/etc/pki/tls/certs/localhost.crt
+ cp /etc/apache2/md/domains/$title/privkey.pem /rpool/data/subvol-$pveid-disk-0/etc/pki/tls/private/localhost.key
+ pct exec $pveid systemctl restart httpd
+ | EOT
+ ,
+ mode => '0755',
+ }
+ }
+
+
+}
diff --git a/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp b/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp
new file mode 100644
index 00000000..c72d1d04
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/templates/freeipa_subxid.epp
@@ -0,0 +1,2 @@
+root:100000:65536
+root:<%= $freeipa['idrange_start'] %>:<%= $freeipa['idrange_size'] %>
diff --git a/personal_infra/puppet/modules/proxmox/templates/interfaces.epp b/personal_infra/puppet/modules/proxmox/templates/interfaces.epp
new file mode 100644
index 00000000..e0bfeceb
--- /dev/null
+++ b/personal_infra/puppet/modules/proxmox/templates/interfaces.epp
@@ -0,0 +1,18 @@
+auto lo
+iface lo inet loopback
+
+auto eno1
+iface eno1 inet static
+ address <%= $network['ip'] %>
+ netmask <%= $network['netmask'] %>
+ gateway <%= $network['gateway'] %>
+
+auto vmbr0
+iface vmbr0 inet static
+ address <%= $network['proxmox']['ip'] %>
+ netmask <%= $network['proxmox']['netmask'] %>
+ bridge_ports none
+ bridge_stp off
+ bridge_fd 0
+ post-up echo 1 > /proc/sys/net/ipv4/ip_forward
+ post-up iptables -t nat -A POSTROUTING -s '<%= $network['proxmox']['network'] %>' -o eno1 -j MASQUERADE
diff --git a/personal_infra/puppet/modules/root_mail/manifests/init.pp b/personal_infra/puppet/modules/root_mail/manifests/init.pp
new file mode 100644
index 00000000..66cfeb87
--- /dev/null
+++ b/personal_infra/puppet/modules/root_mail/manifests/init.pp
@@ -0,0 +1,41 @@
+class root_mail {
+ package {'postfix':}
+ ->
+ service {'postfix':
+ ensure => running,
+ enable => true,
+ }
+
+ $cron_service = case $facts['os']['family'] {
+ 'Debian': { 'cron' }
+ 'RedHat': { 'crond' }
+ default: { fail($facts['os']['family']) }
+ }
+
+ # if crond doesn't see /usr/bin/sendmail on startup, it won't send mails
+ Package['postfix']
+ ~>
+ service{$cron_service:
+ ensure => running,
+ }
+
+ if($facts['os']['family'] == 'RedHat') {
+ if($facts['os']['release']['major'] == '9') {
+ package {'s-nail':}
+ }
+ else {
+ package {'mailx':}
+ }
+ }
+
+ mailalias {'root':
+ recipient => lookup('mail.root_mail'),
+ require => Package['postfix'],
+ }
+ ~>
+ exec {'/usr/sbin/postalias /etc/aliases':
+ creates => '/etc/aliases.db',
+ }
+ ~>
+ Service['postfix']
+}
diff --git a/personal_infra/puppet/modules/tinc/manifests/init.pp b/personal_infra/puppet/modules/tinc/manifests/init.pp
new file mode 100644
index 00000000..5ae78126
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/manifests/init.pp
@@ -0,0 +1,100 @@
+class tinc($tinc_name, $tinc_location, $tinc_connect_to, $tinc_locations, $tinc_ip, $tinc_netmask, $tinc_other_networks, $firewall = true) {
+ # https://bugzilla.redhat.com/show_bug.cgi?id=2153663
+ if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9') {
+ copr {'tinc':
+ user => 'koalillo',
+ dist => 'epel-9',
+ }
+ ->
+ Package['tinc']
+ }
+
+ package {'tinc':}
+ ->
+ file {'/etc/tinc':
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}":
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}/hosts":
+ ensure => directory,
+ }
+ ->
+ file {"/etc/tinc/${tinc_name}/tinc.conf":
+ content => epp('tinc/tinc.conf', {'tinc_name' => $tinc_name,
+ 'tinc_location' => $tinc_location,
+ 'tinc_connect_to' => $tinc_connect_to,
+ }),
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+ $tinc_locations.each |$name, $location| {
+    file {"/etc/tinc/${tinc_name}/generate_host_${name}.sh":
+      content => "#!/bin/sh
+
+set -ue
+
+echo Subnet = ${location['subnet']} >/etc/tinc/${tinc_name}/hosts/${name}
+echo Address = ${location['address']} >>/etc/tinc/${tinc_name}/hosts/${name}
+cat /etc/ansible/tinc/public_${location['address']}.pem >>/etc/tinc/${tinc_name}/hosts/${name}
+",
+      mode    => '0755',
+    }
+ ~>
+ exec {"/etc/tinc/${tinc_name}/generate_host_${name}.sh":
+ require => File["/etc/tinc/${tinc_name}/hosts"],
+ notify => Service["tinc@${tinc_name}"],
+ creates => "/etc/tinc/${tinc_name}/hosts/${name}",
+ }
+ }
+
+ service {"tinc@${tinc_name}":
+ ensure => running,
+ enable => true,
+ }
+
+ if($facts['os']['family'] == 'RedHat' and $facts['os']['release']['major'] == '9') {
+    service {'tinc':
+ ensure => running,
+ enable => true,
+ }
+ }
+ exec {"/bin/cp /etc/ansible/tinc/private.pem /etc/tinc/${tinc_name}/rsa_key.priv":
+ creates => "/etc/tinc/${tinc_name}/rsa_key.priv",
+ require => File["/etc/tinc/${tinc_name}"],
+ notify => Service["tinc@${tinc_name}"],
+ }
+
+  file {"/etc/tinc/${tinc_name}/tinc-up":
+    content => epp('tinc/tinc-up', {'ip' => $tinc_ip,
+                                    'netmask' => $tinc_netmask,
+                                    'tinc_other_networks' => $tinc_other_networks,}),
+    require => File["/etc/tinc/${tinc_name}"],
+    mode    => '0755',
+    notify  => Service["tinc@${tinc_name}"],
+  }
+
+ if ($facts['os']['family'] == 'RedHat' and $firewall) {
+    exec {'open firewall for tinc':
+      command => '/usr/bin/firewall-cmd --permanent --add-port=655/tcp --add-port=655/udp',
+      unless  => '/usr/bin/firewall-cmd --query-port=655/udp',
+    }
+ ~>
+ exec {'reload firewall for tinc':
+ command => '/usr/bin/firewall-cmd --reload',
+ refreshonly => true,
+ }
+ }
+
+ file {'/etc/sysctl.d/tinc.conf':
+ content => "net.ipv4.ip_forward=1\nnet.ipv4.conf.all.proxy_arp=0\n",
+ }
+ ~>
+ exec {'reload sysctl for tinc':
+ command => '/sbin/sysctl --system',
+ refreshonly => true,
+ }
+}
diff --git a/personal_infra/puppet/modules/tinc/templates/tinc-up.epp b/personal_infra/puppet/modules/tinc/templates/tinc-up.epp
new file mode 100644
index 00000000..7c89098f
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/templates/tinc-up.epp
@@ -0,0 +1,11 @@
+<%- | $ip,
+ $netmask,
+ $tinc_other_networks,
+| -%>
+#!/bin/sh
+
+ifconfig $INTERFACE <%= $ip %> netmask <%= $netmask %>
+
+<% $tinc_other_networks.each |$tinc_other_network| { %>
+ route add -net <%= $tinc_other_network %> dev $INTERFACE
+<% } %>
diff --git a/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp b/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp
new file mode 100644
index 00000000..959fb949
--- /dev/null
+++ b/personal_infra/puppet/modules/tinc/templates/tinc.conf.epp
@@ -0,0 +1,8 @@
+<%- | $tinc_name,
+ $tinc_location,
+ $tinc_connect_to,
+| -%>
+Name = <%= $tinc_location %>
+<% $tinc_connect_to.each | $tinc_connection | { -%>
+ConnectTo = <%= $tinc_connection %>
+<% } %>