From f84218c9e773ba2a65eda1243d7762f008e98d44 Mon Sep 17 00:00:00 2001 From: cuqmbr Date: Mon, 23 Jun 2025 18:26:15 +0300 Subject: [PATCH] initial commit --- .gitmodules | 3 + ansible/.python-version | 1 + ansible/00_init.yml | 14 + ansible/05_bastion.yml | 14 + ansible/10_monitoring.yml | 18 ++ ansible/15_postgresql.yml | 15 + ansible/20_main_page.yml | 17 ++ ansible/21_searxng.yml | 15 + ansible/30_load_balancer.yml | 18 ++ ansible/ansible.cfg | 5 + ansible/inventories/dev/group_vars/all.yml | 23 ++ .../inventories/dev/group_vars/bastion.yml | 17 ++ .../dev/group_vars/load_balancers.yml | 84 ++++++ .../inventories/dev/group_vars/main_page.yml | 66 +++++ .../inventories/dev/group_vars/monitoring.yml | 278 ++++++++++++++++++ .../inventories/dev/group_vars/postgresql.yml | 87 ++++++ .../inventories/dev/group_vars/searxng.yml | 111 +++++++ ansible/inventories/dev/hosts.yml | 21 ++ ansible/notes.txt | 1 + ansible/roles/fluent_bit/defaults/main.yml | 22 ++ .../roles/fluent_bit/files/fluent-bit.service | 16 + ansible/roles/fluent_bit/handlers/main.yml | 10 + ansible/roles/fluent_bit/meta/main.yml | 10 + .../fluent_bit/molecule/default/converge.yml | 14 + .../fluent_bit/molecule/default/molecule.yml | 12 + ansible/roles/fluent_bit/tasks/main.yml | 53 ++++ .../fluent_bit/templates/fluent-bit.yml.j2 | 4 + ansible/roles/grafana_loki/defaults/main.yml | 56 ++++ ansible/roles/grafana_loki/handlers/main.yml | 6 + ansible/roles/grafana_loki/meta/main.yml | 10 + .../molecule/default/converge.yml | 14 + .../molecule/default/molecule.yml | 12 + ansible/roles/grafana_loki/tasks/main.yml | 23 ++ .../roles/grafana_loki/templates/loki.yml.j2 | 4 + .../files/grafana-server.service | 30 ++ ansible/roles/grafana_server/meta/main.yml | 10 + .../molecule/default/converge.yml | 14 + .../molecule/default/molecule.yml | 12 + ansible/roles/grafana_server/tasks/main.yml | 25 ++ ansible/roles/hugo/defaults/main.yml | 7 + ansible/roles/hugo/meta/main.yml | 10 + .../roles/hugo/molecule/default/converge.yml | 14 + .../roles/hugo/molecule/default/molecule.yml | 12 + ansible/roles/hugo/tasks/main.yml | 73 +++++ ansible/roles/init/defaults/main.yml | 31 ++ ansible/roles/init/files/sshd_config | 8 + ansible/roles/init/handlers/main.yml | 6 + ansible/roles/init/meta/main.yml | 10 + .../roles/init/molecule/default/converge.yml | 14 + .../roles/init/molecule/default/molecule.yml | 12 + ansible/roles/init/tasks/configure_users.yml | 29 ++ ansible/roles/init/tasks/main.yml | 50 ++++ .../roles/init/templates/authorized_keys.j2 | 3 + ansible/roles/init/templates/doas.conf.j2 | 3 + ansible/roles/nginx/defaults/main.yml | 30 ++ ansible/roles/nginx/handlers/main.yml | 6 + ansible/roles/nginx/meta/main.yml | 10 + .../roles/nginx/molecule/default/converge.yml | 42 +++ .../roles/nginx/molecule/default/molecule.yml | 12 + ansible/roles/nginx/tasks/main.yml | 23 ++ ansible/roles/nginx/templates/nginx.conf.j2 | 110 +++++++ ansible/roles/postgresql | 1 + .../prometheus_alertmanager/defaults/main.yml | 119 ++++++++ .../prometheus_alertmanager/handlers/main.yml | 6 + .../prometheus_alertmanager/meta/main.yml | 10 + .../molecule/default/converge.yml | 14 + .../molecule/default/molecule.yml | 12 + .../prometheus_alertmanager/tasks/main.yml | 31 ++ .../templates/alertmanager.yml.j2 | 4 + .../prometheus_nginx_exporter/meta/main.yml | 9 + .../molecule/default/converge.yml | 17 ++ .../molecule/default/molecule.yml | 12 + .../prometheus_nginx_exporter/tasks/main.yml | 12 + .../prometheus_node_exporter/meta/main.yml | 10 + 
.../molecule/default/converge.yml | 14 + .../molecule/default/molecule.yml | 12 + .../prometheus_node_exporter/tasks/main.yml | 12 + .../roles/prometheus_server/defaults/main.yml | 59 ++++ .../files/prometheus.service | 16 + .../roles/prometheus_server/handlers/main.yml | 6 + ansible/roles/prometheus_server/meta/main.yml | 10 + .../molecule/default/converge.yml | 14 + .../molecule/default/molecule.yml | 12 + .../roles/prometheus_server/tasks/main.yml | 52 ++++ .../templates/alerting_rules.yml.j2 | 4 + .../templates/prometheus.yml.j2 | 4 + ansible/roles/searxng/defaults/main.yml | 49 +++ ansible/roles/searxng/handlers/main.yml | 10 + ansible/roles/searxng/meta/main.yml | 10 + .../searxng/molecule/default/converge.yml | 14 + .../searxng/molecule/default/molecule.yml | 12 + ansible/roles/searxng/tasks/main.yml | 120 ++++++++ .../searxng/templates/searxng.service.j2 | 18 ++ .../roles/searxng/templates/settings.yml.j2 | 4 + terraform/.gitignore | 5 + terraform/.terraform.lock.hcl | 9 + terraform/bastion.tf | 110 +++++++ terraform/firewall_ipsets.tf | 35 +++ terraform/firewall_security_groups.tf | 51 ++++ terraform/forgejo.tf.disabled | 109 +++++++ terraform/load-balancer.tf | 132 +++++++++ terraform/main-page.tf | 109 +++++++ terraform/main.tf | 16 + terraform/monitoring.tf | 137 +++++++++ terraform/notes.txt | 15 + terraform/postgresql.tf | 100 +++++++ terraform/searxng.tf | 109 +++++++ terraform/test.tf.disabled | 109 +++++++ terraform/variables.tf | 33 +++ 109 files changed, 3468 insertions(+) create mode 100644 .gitmodules create mode 100644 ansible/.python-version create mode 100644 ansible/00_init.yml create mode 100644 ansible/05_bastion.yml create mode 100644 ansible/10_monitoring.yml create mode 100644 ansible/15_postgresql.yml create mode 100644 ansible/20_main_page.yml create mode 100644 ansible/21_searxng.yml create mode 100644 ansible/30_load_balancer.yml create mode 100644 ansible/ansible.cfg create mode 100644 ansible/inventories/dev/group_vars/all.yml create mode 100644 ansible/inventories/dev/group_vars/bastion.yml create mode 100644 ansible/inventories/dev/group_vars/load_balancers.yml create mode 100644 ansible/inventories/dev/group_vars/main_page.yml create mode 100644 ansible/inventories/dev/group_vars/monitoring.yml create mode 100644 ansible/inventories/dev/group_vars/postgresql.yml create mode 100644 ansible/inventories/dev/group_vars/searxng.yml create mode 100644 ansible/inventories/dev/hosts.yml create mode 100644 ansible/notes.txt create mode 100644 ansible/roles/fluent_bit/defaults/main.yml create mode 100644 ansible/roles/fluent_bit/files/fluent-bit.service create mode 100644 ansible/roles/fluent_bit/handlers/main.yml create mode 100644 ansible/roles/fluent_bit/meta/main.yml create mode 100644 ansible/roles/fluent_bit/molecule/default/converge.yml create mode 100644 ansible/roles/fluent_bit/molecule/default/molecule.yml create mode 100644 ansible/roles/fluent_bit/tasks/main.yml create mode 100644 ansible/roles/fluent_bit/templates/fluent-bit.yml.j2 create mode 100644 ansible/roles/grafana_loki/defaults/main.yml create mode 100644 ansible/roles/grafana_loki/handlers/main.yml create mode 100644 ansible/roles/grafana_loki/meta/main.yml create mode 100644 ansible/roles/grafana_loki/molecule/default/converge.yml create mode 100644 ansible/roles/grafana_loki/molecule/default/molecule.yml create mode 100644 ansible/roles/grafana_loki/tasks/main.yml create mode 100644 ansible/roles/grafana_loki/templates/loki.yml.j2 create mode 100644 
ansible/roles/grafana_server/files/grafana-server.service create mode 100644 ansible/roles/grafana_server/meta/main.yml create mode 100644 ansible/roles/grafana_server/molecule/default/converge.yml create mode 100644 ansible/roles/grafana_server/molecule/default/molecule.yml create mode 100644 ansible/roles/grafana_server/tasks/main.yml create mode 100644 ansible/roles/hugo/defaults/main.yml create mode 100644 ansible/roles/hugo/meta/main.yml create mode 100644 ansible/roles/hugo/molecule/default/converge.yml create mode 100644 ansible/roles/hugo/molecule/default/molecule.yml create mode 100644 ansible/roles/hugo/tasks/main.yml create mode 100644 ansible/roles/init/defaults/main.yml create mode 100644 ansible/roles/init/files/sshd_config create mode 100644 ansible/roles/init/handlers/main.yml create mode 100644 ansible/roles/init/meta/main.yml create mode 100644 ansible/roles/init/molecule/default/converge.yml create mode 100644 ansible/roles/init/molecule/default/molecule.yml create mode 100644 ansible/roles/init/tasks/configure_users.yml create mode 100644 ansible/roles/init/tasks/main.yml create mode 100644 ansible/roles/init/templates/authorized_keys.j2 create mode 100644 ansible/roles/init/templates/doas.conf.j2 create mode 100644 ansible/roles/nginx/defaults/main.yml create mode 100644 ansible/roles/nginx/handlers/main.yml create mode 100644 ansible/roles/nginx/meta/main.yml create mode 100644 ansible/roles/nginx/molecule/default/converge.yml create mode 100644 ansible/roles/nginx/molecule/default/molecule.yml create mode 100644 ansible/roles/nginx/tasks/main.yml create mode 100644 ansible/roles/nginx/templates/nginx.conf.j2 create mode 160000 ansible/roles/postgresql create mode 100644 ansible/roles/prometheus_alertmanager/defaults/main.yml create mode 100644 ansible/roles/prometheus_alertmanager/handlers/main.yml create mode 100644 ansible/roles/prometheus_alertmanager/meta/main.yml create mode 100644 ansible/roles/prometheus_alertmanager/molecule/default/converge.yml create mode 100644 ansible/roles/prometheus_alertmanager/molecule/default/molecule.yml create mode 100644 ansible/roles/prometheus_alertmanager/tasks/main.yml create mode 100644 ansible/roles/prometheus_alertmanager/templates/alertmanager.yml.j2 create mode 100644 ansible/roles/prometheus_nginx_exporter/meta/main.yml create mode 100644 ansible/roles/prometheus_nginx_exporter/molecule/default/converge.yml create mode 100644 ansible/roles/prometheus_nginx_exporter/molecule/default/molecule.yml create mode 100644 ansible/roles/prometheus_nginx_exporter/tasks/main.yml create mode 100644 ansible/roles/prometheus_node_exporter/meta/main.yml create mode 100644 ansible/roles/prometheus_node_exporter/molecule/default/converge.yml create mode 100644 ansible/roles/prometheus_node_exporter/molecule/default/molecule.yml create mode 100644 ansible/roles/prometheus_node_exporter/tasks/main.yml create mode 100644 ansible/roles/prometheus_server/defaults/main.yml create mode 100644 ansible/roles/prometheus_server/files/prometheus.service create mode 100644 ansible/roles/prometheus_server/handlers/main.yml create mode 100644 ansible/roles/prometheus_server/meta/main.yml create mode 100644 ansible/roles/prometheus_server/molecule/default/converge.yml create mode 100644 ansible/roles/prometheus_server/molecule/default/molecule.yml create mode 100644 ansible/roles/prometheus_server/tasks/main.yml create mode 100644 ansible/roles/prometheus_server/templates/alerting_rules.yml.j2 create mode 100644 
ansible/roles/prometheus_server/templates/prometheus.yml.j2 create mode 100644 ansible/roles/searxng/defaults/main.yml create mode 100644 ansible/roles/searxng/handlers/main.yml create mode 100644 ansible/roles/searxng/meta/main.yml create mode 100644 ansible/roles/searxng/molecule/default/converge.yml create mode 100644 ansible/roles/searxng/molecule/default/molecule.yml create mode 100644 ansible/roles/searxng/tasks/main.yml create mode 100644 ansible/roles/searxng/templates/searxng.service.j2 create mode 100644 ansible/roles/searxng/templates/settings.yml.j2 create mode 100644 terraform/.gitignore create mode 100644 terraform/.terraform.lock.hcl create mode 100644 terraform/bastion.tf create mode 100644 terraform/firewall_ipsets.tf create mode 100644 terraform/firewall_security_groups.tf create mode 100644 terraform/forgejo.tf.disabled create mode 100644 terraform/load-balancer.tf create mode 100644 terraform/main-page.tf create mode 100644 terraform/main.tf create mode 100644 terraform/monitoring.tf create mode 100644 terraform/notes.txt create mode 100644 terraform/postgresql.tf create mode 100644 terraform/searxng.tf create mode 100644 terraform/test.tf.disabled create mode 100644 terraform/variables.tf diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..1bd88fb --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "ansible/roles/postgresql"] + path = ansible/roles/postgresql + url = https://github.com/geerlingguy/ansible-role-postgresql.git diff --git a/ansible/.python-version b/ansible/.python-version new file mode 100644 index 0000000..56bb660 --- /dev/null +++ b/ansible/.python-version @@ -0,0 +1 @@ +3.12.7 diff --git a/ansible/00_init.yml b/ansible/00_init.yml new file mode 100644 index 0000000..42ac786 --- /dev/null +++ b/ansible/00_init.yml @@ -0,0 +1,14 @@ +--- + +- hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - role: roles/init + - role: roles/prometheus_node_exporter diff --git a/ansible/05_bastion.yml b/ansible/05_bastion.yml new file mode 100644 index 0000000..5c9936d --- /dev/null +++ b/ansible/05_bastion.yml @@ -0,0 +1,14 @@ +--- + +- hosts: bastion + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - role: roles/fluent_bit + - role: roles/prometheus_node_exporter diff --git a/ansible/10_monitoring.yml b/ansible/10_monitoring.yml new file mode 100644 index 0000000..06dd019 --- /dev/null +++ b/ansible/10_monitoring.yml @@ -0,0 +1,18 @@ +--- + +- hosts: monitoring + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - role: roles/fluent_bit + - role: roles/grafana_loki + - role: roles/prometheus_server + - role: roles/prometheus_node_exporter + - role: roles/prometheus_alertmanager + - role: roles/grafana_server diff --git a/ansible/15_postgresql.yml b/ansible/15_postgresql.yml new file mode 100644 index 0000000..6224246 --- /dev/null +++ b/ansible/15_postgresql.yml @@ -0,0 +1,15 @@ +--- + +- hosts: postgresql + # gather_facts: false + + pre_tasks: + - name: Update apt cache. 
+ ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - role: roles/fluent_bit + - role: roles/prometheus_node_exporter + - role: roles/postgresql diff --git a/ansible/20_main_page.yml b/ansible/20_main_page.yml new file mode 100644 index 0000000..cf9ce93 --- /dev/null +++ b/ansible/20_main_page.yml @@ -0,0 +1,17 @@ +--- + +- hosts: main_page + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - role: roles/init + - role: roles/fluent_bit + - role: roles/prometheus_node_exporter + - role: roles/hugo + - role: roles/nginx diff --git a/ansible/21_searxng.yml b/ansible/21_searxng.yml new file mode 100644 index 0000000..ac2af3f --- /dev/null +++ b/ansible/21_searxng.yml @@ -0,0 +1,15 @@ +--- + +- hosts: searxng + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - role: roles/fluent_bit + - role: roles/prometheus_node_exporter + - role: roles/searxng diff --git a/ansible/30_load_balancer.yml b/ansible/30_load_balancer.yml new file mode 100644 index 0000000..c786a46 --- /dev/null +++ b/ansible/30_load_balancer.yml @@ -0,0 +1,18 @@ +--- + +- hosts: load_balancers + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + - name: Include nginx role. + ansible.builtin.include_role: + name: roles/nginx + + roles: + - role: roles/fluent_bit + - role: roles/prometheus_node_exporter + - role: roles/prometheus_nginx_exporter diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg new file mode 100644 index 0000000..29a1116 --- /dev/null +++ b/ansible/ansible.cfg @@ -0,0 +1,5 @@ +[defaults] +nocows=True + +[ssh_connection] +ssh_args = -o StrictHostKeyChecking=accept-new -o ConnectTimeout=300 -o ConnectionAttempts=5 -o PreferredAuthentications=publickey diff --git a/ansible/inventories/dev/group_vars/all.yml b/ansible/inventories/dev/group_vars/all.yml new file mode 100644 index 0000000..a76575a --- /dev/null +++ b/ansible/inventories/dev/group_vars/all.yml @@ -0,0 +1,23 @@ +--- + +users: + - name: admin + password_hash: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 30623138653735643561343061356531373430393662383764633038383238383837626636393432 + 3138653539356430306266663864343563616332656131310a343632323363653665646363366437 + 66643430626437333461656231303339656435346261336238313036306431396333643965666631 + 3665393163623266320a373838313538626438623330393533353931336331623464613664633430 + 32303734396634376431383936643431313561303864343930393363623130663236666636353637 + 63613237383666656263316661333031643032323266636464313839653065316138343035346161 + 64313037336666353136383462333832373031623637636630326330313832333265386632343139 + 30306638356434376635346637346134653064613236326333656566383137353166393063333563 + 32623638343263313463313062303465626439356461613235656661623364656138 + ssh_public_keys: + - "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0" + opendoas_settings: "permit persist admin as root" + - name: ansible + password_hash: "" + ssh_public_keys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0" + opendoas_settings: "permit nopass ansible" diff --git a/ansible/inventories/dev/group_vars/bastion.yml b/ansible/inventories/dev/group_vars/bastion.yml new file mode 100644 index 0000000..1dce274 --- /dev/null +++ b/ansible/inventories/dev/group_vars/bastion.yml @@ -0,0 +1,17 @@ +--- + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: info + http_server: false + pipeline: + inputs: + - name: systemd + tag: systemd + outputs: + - name: loki + host: 192.168.0.252 + labels: env=common,hostname=bastion,service_name=systemd + match: systemd diff --git a/ansible/inventories/dev/group_vars/load_balancers.yml b/ansible/inventories/dev/group_vars/load_balancers.yml new file mode 100644 index 0000000..e9cc6c8 --- /dev/null +++ b/ansible/inventories/dev/group_vars/load_balancers.yml @@ -0,0 +1,84 @@ +--- + +nginx_settings: + server_tokens: false + gzip: true + ssl_protocols: + - TLSv1.2 + - TLSv1.3 + load_balancers: + http: + - upstream: + name: main-page + servers: + - 192.168.0.10:80 + server: + listen_port: 80 + names: + - dev.cuqmbr.xyz + - dev.cuqmbr.home + - upstream: + name: searxng + servers: + - 192.168.0.15:8888 + server: + listen_port: 80 + names: + - searxng.dev.cuqmbr.xyz + - searxng.dev.cuqmbr.home + # - upstream: + # name: prometheus + # servers: + # - 192.168.0.252:9090 + # server: + # listen_port: 80 + # names: + # - prometheus.dev.cuqmbr.xyz + # - prometheus.dev.cuqmbr.home + - upstream: + name: grafana + servers: + - 192.168.0.252:3000 + server: + listen_port: 80 + names: + - monitoring.dev.cuqmbr.xyz + - monitoring.dev.cuqmbr.home + statements: + - proxy_set_header Host $http_host + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: info + http_server: false + pipeline: + inputs: + - name: systemd + tag: systemd_input + filters: + - name: rewrite_tag + match: systemd_input + rule: $_SYSTEMD_UNIT ^(nginx.service)$ nginx false + - name: rewrite_tag + match: systemd_input + rule: $_SYSTEMD_UNIT ^(nginx.service.+|(?!nginx.service).*)$ systemd false + - name: record_modifier + match: nginx + allowlist_key: + - MESSAGE + # - name: record_modifier + # match: systemd_tag + # allowlist_key: + # - _SYSTEMD_UNIT + # - MESSAGE + outputs: + - name: loki + host: 192.168.0.252 + labels: 
"env=common,hostname=load-balancer,service_name=nginx" + match: nginx + - name: loki + host: 192.168.0.252 + labels: "env=common,hostname=load-balancer,service_name=systemd" + match: systemd diff --git a/ansible/inventories/dev/group_vars/main_page.yml b/ansible/inventories/dev/group_vars/main_page.yml new file mode 100644 index 0000000..9c59665 --- /dev/null +++ b/ansible/inventories/dev/group_vars/main_page.yml @@ -0,0 +1,66 @@ +--- + +users: + - name: admin + password_hash: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 30623138653735643561343061356531373430393662383764633038383238383837626636393432 + 3138653539356430306266663864343563616332656131310a343632323363653665646363366437 + 66643430626437333461656231303339656435346261336238313036306431396333643965666631 + 3665393163623266320a373838313538626438623330393533353931336331623464613664633430 + 32303734396634376431383936643431313561303864343930393363623130663236666636353637 + 63613237383666656263316661333031643032323266636464313839653065316138343035346161 + 64313037336666353136383462333832373031623637636630326330313832333265386632343139 + 30306638356434376635346637346134653064613236326333656566383137353166393063333563 + 32623638343263313463313062303465626439356461613235656661623364656138 + ssh_public_keys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0" + opendoas_settings: "permit persist admin as root" + - name: ansible + password_hash: "" + ssh_public_keys: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0" + opendoas_settings: "permit nopass ansible" + - name: gitea-actions + password_hash: "" + ssh_public_keys: + - "ssh-ed25519 \ + AAAAC3NzaC1lZDI1NTE5AAAAIJv1DR3s3q6MOpe8S1FWZ/+TLok4AwNfu/h3Ugmh6lIa \ + cuqmbr.xyz_gitea-actions" + opendoas_settings: "permit nopass gitea-actions" + +hugo_settings: + hugo_version: 0.147.9 + homedir: /opt/hugo + git_repo: https://gitea.cuqmbr.xyz/cuqmbr/cuqmbr.xyz.git + git_commit: 5b894854d47b41996b1901fa257f8c2cad9224f9 + +nginx_settings: + server_tokens: false + gzip: false + ssl_protocols: + - TLSv1.2 + - TLSv1.3 + statics: + - root: /var/www/hugo + index: index.html + listen_port: 80 + names: + - dev.cuqmbr.xyz + - dev.cuqmbr.home + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: info + http_server: false + pipeline: + inputs: + - name: systemd + tag: systemd + outputs: + - name: loki + host: 192.168.0.252 + labels: "env=dev,hostname=main-page,service_name=systemd" + match: systemd diff --git a/ansible/inventories/dev/group_vars/monitoring.yml 
b/ansible/inventories/dev/group_vars/monitoring.yml new file mode 100644 index 0000000..31c4cd9 --- /dev/null +++ b/ansible/inventories/dev/group_vars/monitoring.yml @@ -0,0 +1,278 @@ +--- + +prometheus_options: + global: + + alerting: + alertmanagers: + - static_configs: + - targets: + - 192.168.0.252:9093 + + rule_files: + - alerting_rules/*.yml + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - 192.168.0.252:9090 + + - job_name: node + static_configs: + - targets: + # main-page + - 192.168.0.10:9100 + labels: + env: dev + hostname: main-page + - targets: + # searxng + - 192.168.0.15:9100 + labels: + env: dev + hostname: searxng + - targets: + # bastion + - 192.168.0.254:9100 + labels: + env: common + hostname: bastion + - targets: + # load-balancer + - 192.168.0.253:9100 + labels: + env: common + hostname: load-balancer + - targets: + # monitoring + - 192.168.0.252:9100 + labels: + env: common + hostname: monitoring + + - job_name: nginx + static_configs: + - targets: + # load-balancer + - 192.168.0.253:9113 + labels: + env: common + hostname: monitoring + + + +prometheus_alertmanager_options: + global: + smtp_smarthost: mail.cuqmbr.xyz:587 + smtp_require_tls: true + smtp_from: '"Homelab Alertmanager" ' + smtp_auth_username: no-reply + smtp_auth_password: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 31393866316539633838303936366464613935393933333338336531656239333361653664346637 + 3665316532336339633432303036626339363239343065630a326361306233656632653134643966 + 39663138303439323636666665653364396132333532383463626337653061356461643734336363 + 6266353533656566330a346536333836356131343832616631666330653462613436313062643330 + 61616664646439643839366630396137616533393664323965366630363566333632 + + templates: + - /etc/prometheus/alertmanager_templates/*.tmpl + + route: + group_by: + - env + - hostname + + group_wait: 30s + group_interval: 5m + repeat_interval: 1d + receiver: default + + receivers: + - name: default + email_configs: + - to: notifications@cuqmbr.xyz + + + +prometheus_alerting_rules: + groups: + - name: DefaultMetrics + rules: + - alert: HostOutOfMemory + expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10) + for: 2m + labels: + severity: warning + annotations: + summary: Host out of memory (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Node memory is filling up (< 10% left)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + # You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly + - alert: HostMemoryIsUnderutilized + expr: min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8 + for: 0m + labels: + severity: info + annotations: + summary: Host Memory is underutilized (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{'{{'}} $labels.instance {{'}}'}})\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + # Please add ignored mountpoints in node_exporter parameters like + # "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)". + # Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users. 
+ - alert: HostOutOfDiskSpace + expr: (node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0) + for: 2m + labels: + severity: critical + annotations: + summary: Host out of disk space (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Disk is almost full (< 10% left)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostOutOfInodes + expr: (node_filesystem_files_free / node_filesystem_files < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0) + for: 2m + labels: + severity: critical + annotations: + summary: Host out of inodes (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostHighCpuLoad + expr: (avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .90 + for: 10m + labels: + severity: warning + annotations: + summary: Host high CPU load (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "CPU load is > 80%\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + # You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly + - alert: HostCpuIsUnderutilized + expr: (min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8 + for: 1w + labels: + severity: info + annotations: + summary: Host CPU is underutilized (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostCpuHighIowait + expr: avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10 + for: 0m + labels: + severity: warning + annotations: + summary: Host CPU high iowait (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "CPU iowait > 10%. 
Your CPU is idling waiting for storage to respond.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostSwapIsFillingUp + expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80) + for: 2m + labels: + severity: warning + annotations: + summary: Host swap is filling up (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Swap is filling up (>80%)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + # - alert: HostSystemdServiceCrashed + # expr: (node_systemd_unit_state{state="failed"} == 1) + # for: 0m + # labels: + # severity: warning + # annotations: + # summary: Host systemd service crashed (instance {{'{{'}} $labels.instance {{'}}'}}) + # description: "systemd service crashed\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostOomKillDetected + expr: (increase(node_vmstat_oom_kill[1m]) > 0) + for: 0m + labels: + severity: warning + annotations: + summary: Host OOM kill detected (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "OOM kill detected\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostClockSkew + expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0)) + for: 10m + labels: + severity: warning + annotations: + summary: Host clock skew (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + - alert: HostClockNotSynchronising + expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16) + for: 2m + labels: + severity: warning + annotations: + summary: Host clock not synchronising (instance {{'{{'}} $labels.instance {{'}}'}}) + description: "Clock not synchronising. 
Ensure NTP is configured on this host.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}" + + + +loki_options: + auth_enabled: false + + server: + http_listen_port: 3100 + grpc_listen_port: 9096 + log_level: info + grpc_server_max_concurrent_streams: 1000 + + common: + instance_addr: 127.0.0.1 + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + replication_factor: 1 + ring: + kvstore: + store: inmemory + + query_range: + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 100 + + limits_config: + metric_aggregation_enabled: true + + schema_config: + configs: + - from: 2020-10-24 + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h + + pattern_ingester: + enabled: true + metric_aggregation: + loki_address: localhost:3100 + + ruler: + alertmanager_url: http://localhost:9093 + + frontend: + encoding: protobuf + + analytics: + reporting_enabled: false + + + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: info + http_server: false + pipeline: + inputs: + - name: systemd + tag: systemd + outputs: + - name: loki + host: 192.168.0.252 + labels: env=common,hostname=monitoring,service_name=systemd + match: systemd diff --git a/ansible/inventories/dev/group_vars/postgresql.yml b/ansible/inventories/dev/group_vars/postgresql.yml new file mode 100644 index 0000000..8efad85 --- /dev/null +++ b/ansible/inventories/dev/group_vars/postgresql.yml @@ -0,0 +1,87 @@ +--- + +postgresql_global_config_options: + - option: unix_socket_directories + value: '{{ postgresql_unix_socket_directories | join(",") }}' + - option: log_directory + value: 'log' + - option: listen_addresses + value: "*" + +postgresql_auth_method: scram-sha-256 + +postgresql_hba_entries: + - {type: local, database: all, user: postgres, auth_method: peer} + - {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: "{{ postgresql_auth_method }}"} + - {type: host, database: forgejo_db, user: forgejo, address: '192.168.0.20/32', auth_method: "{{ postgresql_auth_method }}"} + - {type: host, database: test_db, user: test, address: '0.0.0.0/0', auth_method: "{{ postgresql_auth_method }}"} + +postgresql_databases: + - name: forgejo_db + owner: forgejo + # state: absent + +postgresql_users: + - name: forgejo + password: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 63306634323739306638666538376262643231306337343036313737373735303264356238663335 + 6430623539346236303539333764666137613762623330640a643834326436363631626537396264 + 31653265343035626439376134633839376432313962323163626436633466386165386332333737 + 6339386339303566310a623236323630376665623664656563376430643836666433656433386434 + 62623536376461323563616237316232366633663834333365633334646264313831376661366436 + 61313538333965313062313138383935663739303935643331333238363463386537383238616466 + 62343232326661346563353236373163373463383431646334623537616231396137393663376332 + 35373132333865306634316433663539396632373638626130343331623138643063333561636532 + 66653139663830353632326639393835343137336235626261353130656336653962303665646664 + 63303735393638336137666234383363383764313533323031303533343562336230613434316432 + 383632343762373735633664313431613064 + encrypted: true + # state: absent + +postgresql_privs: + - db: forgejo_db + roles: forgejo + privs: ALL + type: database + # state: absent + +postgres_users_no_log: false + + + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: 
info + http_server: false + pipeline: + inputs: + - name: systemd + tag: systemd_input + filters: + - name: rewrite_tag + match: systemd_input + rule: $_SYSTEMD_UNIT ^(postgresql.service)$ postgresql false + - name: rewrite_tag + match: systemd_input + rule: $_SYSTEMD_UNIT ^(postgresql.service.+|(?!postgresql.service).*)$ systemd false + - name: record_modifier + match: postgresql + allowlist_key: + - MESSAGE + # - name: record_modifier + # match: systemd_tag + # allowlist_key: + # - _SYSTEMD_UNIT + # - MESSAGE + outputs: + - name: loki + host: 192.168.0.252 + labels: "env=dev,hostname=postgresql,service_name=postgresql" + match: postgresql + - name: loki + host: 192.168.0.252 + labels: "env=dev,hostname=postgresql,service_name=systemd" + match: systemd diff --git a/ansible/inventories/dev/group_vars/searxng.yml b/ansible/inventories/dev/group_vars/searxng.yml new file mode 100644 index 0000000..f99563d --- /dev/null +++ b/ansible/inventories/dev/group_vars/searxng.yml @@ -0,0 +1,111 @@ +--- + +searxng_homedir: /opt/searxng + +searxng_git_commit: e52e9bb4b699e39d9ce51874ea339d4773717389 + +searxng_settings: + use_default_settings: true + + general: + debug: false + instance_name: "cuqmbr's SearXNG" + donation_url: "https://cuqmbr.xyz/en/donate" + contact_url: "https://cuqmbr.xyz/en/contact" + enable_metrics: true + + search: + safe_search: 0 + autocomplete: "" + formats: + - html + - csv + - json + + server: + base_url: "https://searxng.dev.cuqmbr.xyz" + bind_address: "0.0.0.0" + port: 8888 + secret_key: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 36303663616233326563336237336164383966613633373735363562346533663933393936643036 + 6237626332643263386530306139383866353739616261650a376236663962643962653335313237 + 38313232363839383030373338643666333135613838366363363565643530336331613464386236 + 3039376137306339310a346139613363303433366362336539316632346232636663346664336334 + 35346366376262316134636262393262386364356336376333383664313637366630376463303232 + 64383765663032616633346231653563613065653961646666346461613732646233363266373065 + 33326563383238613135616431323661373165383431646337653361633065626638313937393361 + 62303634643662313637 + image_proxy: true + method: "POST" + default_http_headers: + X-Content-Type-Options: nosniff + X-Download-Options: noopen + X-Robots-Tag: noindex, nofollow + Referrer-Policy: no-referrer + limiter: false + + ui: + static_use_hash: true + results_on_new_tab: true + + categories_as_tabs: + general: + images: + videos: + files: + + plugins: + searx.plugins.calculator.SXNGPlugin: + active: true + searx.plugins.hash_plugin.SXNGPlugin: + active: true + searx.plugins.self_info.SXNGPlugin: + active: true + searx.plugins.tracker_url_remover.SXNGPlugin: + active: true + searx.plugins.unit_converter.SXNGPlugin: + active: true + searx.plugins.ahmia_filter.SXNGPlugin: + active: true + searx.plugins.hostnames.SXNGPlugin: + active: true + searx.plugins.tor_check.SXNGPlugin: + active: true + + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: info + http_server: false + pipeline: + inputs: + - name: systemd + tag: systemd_input + filters: + - name: rewrite_tag + match: systemd_input + rule: $_SYSTEMD_UNIT ^(searxng.service)$ searxng false + - name: rewrite_tag + match: systemd_input + rule: $_SYSTEMD_UNIT ^(searxng.service.+|(?!searxng.service).*)$ systemd false + - name: record_modifier + match: searxng + allowlist_key: + - MESSAGE + # - name: record_modifier + # match: systemd + # allowlist_key: + # - _SYSTEMD_UNIT + # - MESSAGE + outputs: + - name: 
loki + host: 192.168.0.252 + labels: "env=dev,hostname=searxng,service_name=searxng" + match: searxng + - name: loki + host: 192.168.0.252 + labels: "env=dev,hostname=searxng,service_name=systemd" + match: systemd diff --git a/ansible/inventories/dev/hosts.yml b/ansible/inventories/dev/hosts.yml new file mode 100644 index 0000000..154bc45 --- /dev/null +++ b/ansible/inventories/dev/hosts.yml @@ -0,0 +1,21 @@ +--- +all: + children: + bastion: + hosts: + 192.168.0.254: + load_balancers: + hosts: + 192.168.0.253: + monitoring: + hosts: + 192.168.0.252: + postgresql: + hosts: + 192.168.0.3: + main_page: + hosts: + 192.168.0.10: + searxng: + hosts: + 192.168.0.15: diff --git a/ansible/notes.txt b/ansible/notes.txt new file mode 100644 index 0000000..32034a9 --- /dev/null +++ b/ansible/notes.txt @@ -0,0 +1 @@ +export user="ansible"; ansible-playbook -u "${user}" --ssh-common-args "-o ProxyCommand='ssh -p 22 -W %h:%p -q ${user}@bastion.cuqmbr.home'" -J -b --become-method doas -i inventories/hosts.yml 10_monitoring.yml diff --git a/ansible/roles/fluent_bit/defaults/main.yml b/ansible/roles/fluent_bit/defaults/main.yml new file mode 100644 index 0000000..2fbddca --- /dev/null +++ b/ansible/roles/fluent_bit/defaults/main.yml @@ -0,0 +1,22 @@ +--- + +fluentbit_settings: + service: + flush: 1 + daemon: false + log_level: info + http_server: false + # + # parsers: + # + # plugins: + # + pipeline: + inputs: + - name: cpu + tag: cpu.local + interval_sec: 15 + + outputs: + - name: stdout + match: "*" diff --git a/ansible/roles/fluent_bit/files/fluent-bit.service b/ansible/roles/fluent_bit/files/fluent-bit.service new file mode 100644 index 0000000..819fc95 --- /dev/null +++ b/ansible/roles/fluent_bit/files/fluent-bit.service @@ -0,0 +1,16 @@ +[Unit] +Description=Fluent Bit +Documentation=https://docs.fluentbit.io/manual/ +Requires=network.target +After=network.target + +[Service] +Type=simple +EnvironmentFile=-/etc/sysconfig/fluent-bit +EnvironmentFile=-/etc/default/fluent-bit +ExecStart=/opt/fluent-bit/bin/fluent-bit -c /etc/fluent-bit/fluent-bit.yaml +Restart=always + +[Install] +WantedBy=multi-user.target + diff --git a/ansible/roles/fluent_bit/handlers/main.yml b/ansible/roles/fluent_bit/handlers/main.yml new file mode 100644 index 0000000..eee65b8 --- /dev/null +++ b/ansible/roles/fluent_bit/handlers/main.yml @@ -0,0 +1,10 @@ +--- + +- name: Restart fluent-bit service. + ansible.builtin.service: + name: fluent-bit + state: restarted + +- name: Reload systemd daemon. + ansible.builtin.systemd_service: + daemon_reload: true diff --git a/ansible/roles/fluent_bit/meta/main.yml b/ansible/roles/fluent_bit/meta/main.yml new file mode 100644 index 0000000..bb1c7e3 --- /dev/null +++ b/ansible/roles/fluent_bit/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: fluent_bit + author: cuqmbr-homelab + description: Install Fluent Bit. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/fluent_bit/molecule/default/converge.yml b/ansible/roles/fluent_bit/molecule/default/converge.yml new file mode 100644 index 0000000..01e53e7 --- /dev/null +++ b/ansible/roles/fluent_bit/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. 
+      ansible.builtin.apt:
+        update_cache: true
+        cache_valid_time: 86400
+
+  roles:
+    - fluent_bit
diff --git a/ansible/roles/fluent_bit/molecule/default/molecule.yml b/ansible/roles/fluent_bit/molecule/default/molecule.yml
new file mode 100644
index 0000000..86bdb63
--- /dev/null
+++ b/ansible/roles/fluent_bit/molecule/default/molecule.yml
@@ -0,0 +1,12 @@
+---
+driver:
+  name: docker
+platforms:
+  - name: cuqmbr-homelab.fluent-bit_debian-12
+    image: docker.io/geerlingguy/docker-debian12-ansible:latest
+    pre_build_image: true
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:rw
+    cgroupns_mode: host
+    privileged: true
diff --git a/ansible/roles/fluent_bit/tasks/main.yml b/ansible/roles/fluent_bit/tasks/main.yml
new file mode 100644
index 0000000..8c5b230
--- /dev/null
+++ b/ansible/roles/fluent_bit/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+
+- name: Create apt keys installation directory.
+  ansible.builtin.file:
+    path: /etc/apt/keyrings
+    state: directory
+    mode: "0755"
+
+- name: Add Fluent Bit apt key.
+  ansible.builtin.get_url:
+    url: https://packages.fluentbit.io/fluentbit.key
+    dest: /etc/apt/keyrings/fluentbit.asc
+    mode: "0444"
+
+- name: Add Fluent Bit apt repository.
+  ansible.builtin.apt_repository:
+    repo: "deb [signed-by=/etc/apt/keyrings/fluentbit.asc] \
+      https://packages.fluentbit.io/debian/bookworm bookworm stable main"
+    filename: fluentbit
+    state: present
+    update_cache: true
+
+- name: Install fluent-bit package using apt.
+  ansible.builtin.apt:
+    name: fluent-bit
+    state: present
+
+- name: Install fluent-bit systemd service file.
+  ansible.builtin.copy:
+    src: fluent-bit.service
+    dest: /lib/systemd/system/fluent-bit.service
+    owner: root
+    group: root
+    mode: "0644"
+  notify:
+    - Reload systemd daemon.
+    - Restart fluent-bit service.
+
+- name: Enable and start fluent-bit service.
+  ansible.builtin.service:
+    name: fluent-bit
+    state: started
+    enabled: true
+
+- name: Install fluent-bit configuration file.
+  ansible.builtin.template:
+    src: fluent-bit.yml.j2
+    dest: /etc/fluent-bit/fluent-bit.yaml
+    owner: root
+    group: root
+    mode: "0644"
+  notify:
+    - Restart fluent-bit service.
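The fluent-bit.yml.j2 template installed by the last task (its diff follows) simply dumps fluentbit_settings through to_nice_yaml, so for the bastion group_vars defined earlier in this patch the rendered /etc/fluent-bit/fluent-bit.yaml should come out roughly as below (a sketch of the expected result only; to_nice_yaml may reorder keys alphabetically):

---
# Managed with Ansible

service:
  flush: 1
  daemon: false
  log_level: info
  http_server: false
pipeline:
  inputs:
    - name: systemd
      tag: systemd
  outputs:
    - name: loki
      host: 192.168.0.252
      labels: env=common,hostname=bastion,service_name=systemd
      match: systemd

Because the template task notifies "Restart fluent-bit service.", a change made only in group_vars is picked up on the next play run.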
diff --git a/ansible/roles/fluent_bit/templates/fluent-bit.yml.j2 b/ansible/roles/fluent_bit/templates/fluent-bit.yml.j2 new file mode 100644 index 0000000..fdd5ae9 --- /dev/null +++ b/ansible/roles/fluent_bit/templates/fluent-bit.yml.j2 @@ -0,0 +1,4 @@ +--- +# Managed with Ansible + +{{ fluentbit_settings | ansible.builtin.to_nice_yaml(indent=2, width=80) }} diff --git a/ansible/roles/grafana_loki/defaults/main.yml b/ansible/roles/grafana_loki/defaults/main.yml new file mode 100644 index 0000000..3571e45 --- /dev/null +++ b/ansible/roles/grafana_loki/defaults/main.yml @@ -0,0 +1,56 @@ +--- + +loki_options: + auth_enabled: false + + server: + http_listen_port: 3100 + grpc_listen_port: 9096 + log_level: debug + grpc_server_max_concurrent_streams: 1000 + + common: + instance_addr: 127.0.0.1 + path_prefix: /tmp/loki + storage: + filesystem: + chunks_directory: /tmp/loki/chunks + rules_directory: /tmp/loki/rules + replication_factor: 1 + ring: + kvstore: + store: inmemory + + query_range: + results_cache: + cache: + embedded_cache: + enabled: true + max_size_mb: 100 + + limits_config: + metric_aggregation_enabled: true + + schema_config: + configs: + - from: 2020-10-24 + store: tsdb + object_store: filesystem + schema: v13 + index: + prefix: index_ + period: 24h + + pattern_ingester: + enabled: true + metric_aggregation: + loki_address: localhost:3100 + + ruler: + alertmanager_url: http://localhost:9093 + + frontend: + encoding: protobuf + + analytics: + reporting_enabled: false diff --git a/ansible/roles/grafana_loki/handlers/main.yml b/ansible/roles/grafana_loki/handlers/main.yml new file mode 100644 index 0000000..b0616ce --- /dev/null +++ b/ansible/roles/grafana_loki/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Restart Grafana Loki service. + ansible.builtin.service: + name: loki + state: restarted diff --git a/ansible/roles/grafana_loki/meta/main.yml b/ansible/roles/grafana_loki/meta/main.yml new file mode 100644 index 0000000..cf3eca3 --- /dev/null +++ b/ansible/roles/grafana_loki/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: grafana_loki + author: cuqmbr-homelab + description: Install Grafana Loki. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/grafana_loki/molecule/default/converge.yml b/ansible/roles/grafana_loki/molecule/default/converge.yml new file mode 100644 index 0000000..1a44e6f --- /dev/null +++ b/ansible/roles/grafana_loki/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. 
+ ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - grafana_loki diff --git a/ansible/roles/grafana_loki/molecule/default/molecule.yml b/ansible/roles/grafana_loki/molecule/default/molecule.yml new file mode 100644 index 0000000..2c1856f --- /dev/null +++ b/ansible/roles/grafana_loki/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.grafana-loki_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/grafana_loki/tasks/main.yml b/ansible/roles/grafana_loki/tasks/main.yml new file mode 100644 index 0000000..83b6c1f --- /dev/null +++ b/ansible/roles/grafana_loki/tasks/main.yml @@ -0,0 +1,23 @@ +--- + +- name: Install Grafana Loki from mirror. + ansible.builtin.apt: + deb: "https://github.com/grafana/loki/releases\ + /download/v3.4.3/loki_3.4.3_amd64.deb" + state: present + +- name: Install Grafana Loki config. + ansible.builtin.template: + src: loki.yml.j2 + dest: /etc/loki/config.yml + owner: root + group: root + mode: "0644" + notify: + - Restart Grafana Loki service. + +- name: Enable and start Grafana Loki service. + ansible.builtin.service: + name: loki + state: started + enabled: true diff --git a/ansible/roles/grafana_loki/templates/loki.yml.j2 b/ansible/roles/grafana_loki/templates/loki.yml.j2 new file mode 100644 index 0000000..069d544 --- /dev/null +++ b/ansible/roles/grafana_loki/templates/loki.yml.j2 @@ -0,0 +1,4 @@ +--- +# Managed with Ansible + +{{ loki_options | ansible.builtin.to_nice_yaml(indent=2, width=80) }} diff --git a/ansible/roles/grafana_server/files/grafana-server.service b/ansible/roles/grafana_server/files/grafana-server.service new file mode 100644 index 0000000..d3529bb --- /dev/null +++ b/ansible/roles/grafana_server/files/grafana-server.service @@ -0,0 +1,30 @@ +[Unit] +Description=Grafana instance +Documentation=http://docs.grafana.org +Wants=network-online.target +After=network-online.target +After=postgresql.service mariadb.service mysql.service influxdb.service + +[Service] +EnvironmentFile=/etc/default/grafana-server +User=grafana +Group=grafana +Type=simple +Restart=on-failure +WorkingDirectory=/usr/share/grafana +RuntimeDirectory=grafana +RuntimeDirectoryMode=0750 +ExecStart=/usr/share/grafana/bin/grafana server \ + --config=${CONF_FILE} \ + --pidfile=${PID_FILE_DIR}/grafana-server.pid \ + --packaging=deb \ + cfg:default.paths.logs=${LOG_DIR} \ + cfg:default.paths.data=${DATA_DIR} \ + cfg:default.paths.plugins=${PLUGINS_DIR} \ + cfg:default.paths.provisioning=${PROVISIONING_CFG_DIR} + +LimitNOFILE=10000 +TimeoutStopSec=20 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/roles/grafana_server/meta/main.yml b/ansible/roles/grafana_server/meta/main.yml new file mode 100644 index 0000000..c9a9911 --- /dev/null +++ b/ansible/roles/grafana_server/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: grafana_server + author: cuqmbr-homelab + description: Install Grafana Dashboard. 
+ # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/grafana_server/molecule/default/converge.yml b/ansible/roles/grafana_server/molecule/default/converge.yml new file mode 100644 index 0000000..cff9d58 --- /dev/null +++ b/ansible/roles/grafana_server/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - grafana_server diff --git a/ansible/roles/grafana_server/molecule/default/molecule.yml b/ansible/roles/grafana_server/molecule/default/molecule.yml new file mode 100644 index 0000000..637a563 --- /dev/null +++ b/ansible/roles/grafana_server/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.grafana-server_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/grafana_server/tasks/main.yml b/ansible/roles/grafana_server/tasks/main.yml new file mode 100644 index 0000000..98377db --- /dev/null +++ b/ansible/roles/grafana_server/tasks/main.yml @@ -0,0 +1,25 @@ +--- + +- name: Install Grafana from mirror. + ansible.builtin.apt: + deb: "https://mirrors.tuna.tsinghua.edu.cn\ + /grafana/apt/pool/main/g/grafana/grafana_11.6.0_amd64.deb" + state: present + +- name: Install Grafana systemd service file. + ansible.builtin.copy: + src: grafana-server.service + dest: /lib/systemd/system/grafana-server.service + owner: root + group: root + mode: "0644" + +- name: Reload systemd daemon. + ansible.builtin.systemd_service: + daemon_reload: true + +- name: Enable and start grafana service. + ansible.builtin.service: + name: grafana-server + enabled: true + state: started diff --git a/ansible/roles/hugo/defaults/main.yml b/ansible/roles/hugo/defaults/main.yml new file mode 100644 index 0000000..b0a806a --- /dev/null +++ b/ansible/roles/hugo/defaults/main.yml @@ -0,0 +1,7 @@ +--- + +hugo_settings: + hugo_version: 0.147.9 + homedir: /opt/hugo + git_repo: https://gitea.cuqmbr.xyz/cuqmbr/cuqmbr.xyz.git + git_commit: 5b894854d47b41996b1901fa257f8c2cad9224f9 diff --git a/ansible/roles/hugo/meta/main.yml b/ansible/roles/hugo/meta/main.yml new file mode 100644 index 0000000..2f305a0 --- /dev/null +++ b/ansible/roles/hugo/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: hugo + author: cuqmbr-homelab + description: Deploy Hugo (gohugo.io) site generator from git repo. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/hugo/molecule/default/converge.yml b/ansible/roles/hugo/molecule/default/converge.yml new file mode 100644 index 0000000..0d10888 --- /dev/null +++ b/ansible/roles/hugo/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. 
+ ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - hugo diff --git a/ansible/roles/hugo/molecule/default/molecule.yml b/ansible/roles/hugo/molecule/default/molecule.yml new file mode 100644 index 0000000..b082bdf --- /dev/null +++ b/ansible/roles/hugo/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.hugo_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/hugo/tasks/main.yml b/ansible/roles/hugo/tasks/main.yml new file mode 100644 index 0000000..a54724d --- /dev/null +++ b/ansible/roles/hugo/tasks/main.yml @@ -0,0 +1,73 @@ +--- + +- name: Install hugo deb package from github. + ansible.builtin.apt: + deb: "https://github.com/gohugoio/hugo/releases/download\ + /v{{ hugo_settings.hugo_version}}/\ + hugo_{{ hugo_settings.hugo_version }}_linux-amd64.deb" + state: present + +- name: Install dependencies using apt. + ansible.builtin.apt: + name: git + state: present + +- name: Set hugo_source, hugo_compiled and hugo_deploy variables. + ansible.builtin.set_fact: + hugo_source: "{{ hugo_settings.homedir }}/src" + hugo_compiled: "{{ hugo_settings.homedir }}/compiled" + hugo_deploy: /var/www/hugo + +- name: Clone hugo site git repository. + ansible.builtin.git: + clone: true + repo: "{{ hugo_settings.git_repo }}" + force: true + recursive: true + single_branch: true + depth: 1 + dest: "{{ hugo_source }}" + version: "{{ hugo_settings.git_commit }}" + +- name: Create hugo site build directory. + ansible.builtin.file: + state: directory + owner: root + group: root + mode: "0775" + path: "{{ hugo_compiled }}" + +- name: Remove old compiled files. + ansible.builtin.file: + path: "{{ hugo_compiled }}" + state: absent + changed_when: false + +- name: Build hugo site. + ansible.builtin.shell: + chdir: "{{ hugo_source }}" + cmd: "hugo -d {{ hugo_compiled }}" + +- name: Create hugo site deployment directory. + ansible.builtin.file: + state: directory + owner: root + group: root + mode: "0775" + path: "{{ hugo_deploy }}" + +- name: Remove old deployed files. + ansible.builtin.file: + path: "{{ hugo_deploy }}" + state: absent + changed_when: false + +- name: Install new program files. 
+ ansible.builtin.copy: + remote_src: true + src: "{{ hugo_compiled }}/" + dest: "{{ hugo_deploy }}" + owner: root + group: root + mode: "0775" + changed_when: false diff --git a/ansible/roles/init/defaults/main.yml b/ansible/roles/init/defaults/main.yml new file mode 100644 index 0000000..58b68f8 --- /dev/null +++ b/ansible/roles/init/defaults/main.yml @@ -0,0 +1,31 @@ +--- + +# mkpasswd --method=SHA-512 --stdin +# default: 0000 + +# ansible-vault encrypt_string --ask-vault-password --name 'password_hash' +# default vault password: 0000 + +users: + - name: admin + password_hash: !vault | + $ANSIBLE_VAULT;1.1;AES256 + 62386435663164656266626631323436353938336333326339333562633063383636653838373161 + 6637303930616363646630653532623738353961373032300a333264633165396663653739333664 + 37386336313137656463643437303331643965663737373035616638363430353730613036343566 + 3864336137386465330a343834663733653365323634333663666566613330393662613365646630 + 31633162323864633337386462623936326437303131383130343538346231643537303462376465 + 65396430373433326262383636353162333632343632383433616236646631663765396339323037 + 32306630363465376161343939663032666530353031316433656464643366353066346465613034 + 66363462313665666261336263336632376166306163323261663633626163396665613266366230 + 38313133663139346635323062393731303134616566373436623538353430333932 + ssh_public_keys: + - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILYtmWkAnyDE\ + +VpRy8S41AFZJPQzb4SdAGqaLW9KDTt4 example@key" + opendoas_settings: "permit persist admin as root" + - name: ansible + password_hash: "!" + ssh_public_keys: + - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILYtmWkAnyDE\ + +VpRy8S41AFZJPQzb4SdAGqaLW9KDTt4 example@key" + opendoas_settings: "permit nopass ansible" diff --git a/ansible/roles/init/files/sshd_config b/ansible/roles/init/files/sshd_config new file mode 100644 index 0000000..cde0e2f --- /dev/null +++ b/ansible/roles/init/files/sshd_config @@ -0,0 +1,8 @@ +Include /etc/ssh/sshd_config.d/*.conf + +AuthorizedKeysFile .ssh/authorized_keys + +PasswordAuthentication no +PermitRootLogin prohibit-password + +Subsystem sftp /usr/lib/ssh/sftp-server diff --git a/ansible/roles/init/handlers/main.yml b/ansible/roles/init/handlers/main.yml new file mode 100644 index 0000000..f69d149 --- /dev/null +++ b/ansible/roles/init/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Restart sshd service. + ansible.builtin.service: + name: sshd + state: restarted diff --git a/ansible/roles/init/meta/main.yml b/ansible/roles/init/meta/main.yml new file mode 100644 index 0000000..3f3d53a --- /dev/null +++ b/ansible/roles/init/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: init + author: cuqmbr-homelab + description: Initialize newly created Debian server. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/init/molecule/default/converge.yml b/ansible/roles/init/molecule/default/converge.yml new file mode 100644 index 0000000..d0683b6 --- /dev/null +++ b/ansible/roles/init/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. 
+ ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - init diff --git a/ansible/roles/init/molecule/default/molecule.yml b/ansible/roles/init/molecule/default/molecule.yml new file mode 100644 index 0000000..70e1e6d --- /dev/null +++ b/ansible/roles/init/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.init_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/init/tasks/configure_users.yml b/ansible/roles/init/tasks/configure_users.yml new file mode 100644 index 0000000..20b8747 --- /dev/null +++ b/ansible/roles/init/tasks/configure_users.yml @@ -0,0 +1,29 @@ +--- + +- name: Create user. + ansible.builtin.user: + state: present + name: "{{ item.name }}" + password: "{{ item.password_hash }}" + create_home: true + shell: /bin/bash + +- name: Create ~/.ssh directory. + ansible.builtin.file: + path: "/home/{{ item.name }}/.ssh" + state: directory + owner: "{{ item.name }}" + group: "{{ item.name }}" + mode: "0700" + +- name: Set variable for template. + ansible.builtin.set_fact: + ssh_public_keys: "{{ item.ssh_public_keys }}" + +- name: Create authorized_keys. + ansible.builtin.template: + src: authorized_keys.j2 + dest: "/home/{{ item.name }}/.ssh/authorized_keys" + owner: "{{ item.name }}" + group: "{{ item.name }}" + mode: "0600" diff --git a/ansible/roles/init/tasks/main.yml b/ansible/roles/init/tasks/main.yml new file mode 100644 index 0000000..cfaf5df --- /dev/null +++ b/ansible/roles/init/tasks/main.yml @@ -0,0 +1,50 @@ +--- + +- name: Configure users. + ansible.builtin.include_tasks: + file: configure_users.yml + loop: "{{ users }}" + + +- name: Install opendoas. + ansible.builtin.apt: + name: + - opendoas + state: present + +- name: Set opendoas_settings variable. + block: + + - name: Initialize the variable with an empty list. + ansible.builtin.set_fact: + opendoas_settings: [] + + - name: Append settings from each user to the list. + ansible.builtin.set_fact: + opendoas_settings: "{{ opendoas_settings + [item.opendoas_settings] }}" + loop: "{{ users }}" + +- name: Install opendoas config. + ansible.builtin.template: + src: doas.conf.j2 + dest: /etc/doas.conf + owner: root + group: root + mode: "0644" + + +- name: Install openssh-server. + ansible.builtin.apt: + name: + - openssh-server + state: present + +- name: Install sshd config. + ansible.builtin.copy: + src: sshd_config + dest: /etc/ssh/sshd_config + owner: root + group: root + mode: "0644" + notify: + - Restart sshd service. 
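+
+# For reference: doas.conf.j2 writes one line per user taken from
+# opendoas_settings, so with the role defaults the rendered /etc/doas.conf
+# would contain:
+#
+#   permit persist admin as root
+#   permit nopass ansible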
diff --git a/ansible/roles/init/templates/authorized_keys.j2 b/ansible/roles/init/templates/authorized_keys.j2 new file mode 100644 index 0000000..6271205 --- /dev/null +++ b/ansible/roles/init/templates/authorized_keys.j2 @@ -0,0 +1,3 @@ +{% for item in ssh_public_keys %} +{{ item }} +{% endfor %} diff --git a/ansible/roles/init/templates/doas.conf.j2 b/ansible/roles/init/templates/doas.conf.j2 new file mode 100644 index 0000000..23b93a8 --- /dev/null +++ b/ansible/roles/init/templates/doas.conf.j2 @@ -0,0 +1,3 @@ +{% for line in opendoas_settings %} +{{ line }} +{% endfor %} diff --git a/ansible/roles/nginx/defaults/main.yml b/ansible/roles/nginx/defaults/main.yml new file mode 100644 index 0000000..d4e5aed --- /dev/null +++ b/ansible/roles/nginx/defaults/main.yml @@ -0,0 +1,30 @@ +--- + +nginx_settings: + server_tokens: false + gzip: true + ssl_protocols: + - TLSv1.2 + - TLSv1.3 + # load_balancers: + # http: + # - upstream: + # name: searxng + # servers: + # - 192.168.0.10:8888 + # server: + # listen_port: 80 + # names: + # - searxng.cuqmbr.xyz + # statements: + # - proxy_set_header Host $http_host + # statics: + # - root: /var/www/website + # index: index.html + # listen_port: 80 + # names: + # - static.cuqmbr.xyz + # statements: + # - proxy_set_header Host $http_host + +# TODO: Add https configuration diff --git a/ansible/roles/nginx/handlers/main.yml b/ansible/roles/nginx/handlers/main.yml new file mode 100644 index 0000000..e884a2e --- /dev/null +++ b/ansible/roles/nginx/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Reload nginx service. + ansible.builtin.service: + name: nginx + state: reloaded diff --git a/ansible/roles/nginx/meta/main.yml b/ansible/roles/nginx/meta/main.yml new file mode 100644 index 0000000..95d22f9 --- /dev/null +++ b/ansible/roles/nginx/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: nginx + author: cuqmbr-homelab + description: Initialize newly created server. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/nginx/molecule/default/converge.yml b/ansible/roles/nginx/molecule/default/converge.yml new file mode 100644 index 0000000..850e5e3 --- /dev/null +++ b/ansible/roles/nginx/molecule/default/converge.yml @@ -0,0 +1,42 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + vars: + nginx_settings: + server_tokens: false + gzip: true + ssl_protocols: + - TLSv1.2 + - TLSv1.3 + load_balancers: + http: + - upstream: + name: searxng + servers: + - 192.168.0.10:8888 + server: + listen_port: 80 + names: + - searxng.cuqmbr.xyz + statements: + - proxy_set_header Host $http_host + # statics: + # - root: /var/www/website + # index: index.html + # listen_port: 8080 + # names: + # - static.cuqmbr.xyz + # statements: + # - proxy_set_header Host $http_host + + pre_tasks: + - name: Update apt cache. 
+ ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - nginx diff --git a/ansible/roles/nginx/molecule/default/molecule.yml b/ansible/roles/nginx/molecule/default/molecule.yml new file mode 100644 index 0000000..3b7f58a --- /dev/null +++ b/ansible/roles/nginx/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.nginx_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/nginx/tasks/main.yml b/ansible/roles/nginx/tasks/main.yml new file mode 100644 index 0000000..e8aaada --- /dev/null +++ b/ansible/roles/nginx/tasks/main.yml @@ -0,0 +1,23 @@ +--- + +- name: Install needed packages. + ansible.builtin.apt: + pkg: + - nginx + state: present + +- name: Enable and start nginx service. + ansible.builtin.service: + name: nginx + state: started + enabled: true + +- name: Install nginx configuration file. + ansible.builtin.template: + src: nginx.conf.j2 + dest: /etc/nginx/nginx.conf + owner: root + group: root + mode: "0644" + notify: + - Reload nginx service. diff --git a/ansible/roles/nginx/templates/nginx.conf.j2 b/ansible/roles/nginx/templates/nginx.conf.j2 new file mode 100644 index 0000000..54a3ef7 --- /dev/null +++ b/ansible/roles/nginx/templates/nginx.conf.j2 @@ -0,0 +1,110 @@ +user www-data; +worker_processes auto; +pid /run/nginx.pid; + +events { + worker_connections 768; + # multi_accept on; +} + +http { + + ## + # Basic Settings + ## + + sendfile on; + tcp_nopush on; + types_hash_max_size 2048; + server_tokens {{ nginx_settings.server_tokens | ternary('on', 'off') }}; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + + ## + # SSL Settings + ## + + ssl_protocols {{ nginx_settings.ssl_protocols|join(' ') }}; + ssl_prefer_server_ciphers on; + + ## + # Logging Settings + ## + + access_log syslog:server=unix:/dev/log; + error_log syslog:server=unix:/dev/log; + + ## + # Gzip Settings + ## + + gzip {{ nginx_settings.gzip | ternary('on', 'off') }}; + gzip_comp_level 6; + gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; + gzip_buffers 16 8k; + gzip_http_version 1.1; + + ## + # Virtual Host Configs + ## + + # include /etc/nginx/conf.d/*.conf; + # include /etc/nginx/sites-enabled/*; + + # Prometheus Nginx Exporter + server { + listen 127.0.0.1:8080; + location /stub_status { + stub_status; + } + } + + +{% if nginx_settings.load_balancers is not undefined %} + ## + # Load Balancers + ## + +{% for http_load_balancer in nginx_settings.load_balancers.http %} + upstream {{ http_load_balancer.upstream.name }} { +{% for server in http_load_balancer.upstream.servers %} + server {{ server }}; +{% endfor %} + } + + server { + listen {{ http_load_balancer.server.listen_port }}; + server_name {{ http_load_balancer.server.names|join(' ') }}; + location / { + proxy_pass http://{{ http_load_balancer.upstream.name }}; +{% if http_load_balancer.server.statements is not undefined %} +{% for statement in http_load_balancer.server.statements %} + {{ statement }}; +{% endfor %} +{% endif %} + } + } + +{% endfor %} +{% endif %} +{% if nginx_settings.statics is not undefined %} + + ## + # Static Servings + ## + +{% for static in nginx_settings.statics %} + server { + listen {{ static.listen_port }}; + server_name {{ 
static.names|join(' ') }}; + root {{ static.root }}; + index {{ static.index }}; + location / { + try_files $uri $uri/ =404; + } + } + +{% endfor %} +{% endif %} +} diff --git a/ansible/roles/postgresql b/ansible/roles/postgresql new file mode 160000 index 0000000..845a175 --- /dev/null +++ b/ansible/roles/postgresql @@ -0,0 +1 @@ +Subproject commit 845a175a0de0308334a188bdf7283a6c4999b5f2 diff --git a/ansible/roles/prometheus_alertmanager/defaults/main.yml b/ansible/roles/prometheus_alertmanager/defaults/main.yml new file mode 100644 index 0000000..4ad63f2 --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/defaults/main.yml @@ -0,0 +1,119 @@ +--- + +prometheus_alertmanager_options: + # Sample configuration. + # See https://prometheus.io/docs/alerting/configuration/ for documentation. + + global: + # The smarthost and SMTP sender used for mail notifications. + smtp_smarthost: 'localhost:25' + smtp_from: 'alertmanager@example.org' + smtp_auth_username: 'alertmanager' + smtp_auth_password: 'password' + + # The directory from which notification templates are read. + templates: + - '/etc/prometheus/alertmanager_templates/*.tmpl' + + # The root route on which each incoming alert enters. + route: + # The labels by which incoming alerts are grouped together. For example, + # multiple alerts coming in for cluster=A and alertname=LatencyHigh would + # be batched into a single group. + group_by: ['alertname', 'cluster', 'service'] + + # When a new group of alerts is created by an incoming alert, wait at + # least 'group_wait' to send the initial notification. + # This way ensures that you get multiple alerts for the same group that + # start firing shortly after another are batched together + # on the first notification. + group_wait: 30s + + # When the first notification was sent, wait 'group_interval' + # to send a batch of new alerts that started firing for that group. + group_interval: 5m + + # If an alert has successfully been sent, wait 'repeat_interval' to + # resend them. + repeat_interval: 3h + + # A default receiver + receiver: team-X-mails + + # All the above attributes are inherited by all child + # routes and can overwritten on each. + + # The child route trees. + routes: + # This routes performs a regular expression match on alert labels to + # catch alerts that are related to a list of services. + - match_re: + service: ^(foo1|foo2|baz)$ + receiver: team-X-mails + # The service has a sub-route for critical alerts, any alerts + # that do not match, i.e. severity != critical, fall-back to the + # parent node and are sent to 'team-X-mails' + routes: + - match: + severity: critical + receiver: team-X-pager + - match: + service: files + receiver: team-Y-mails + + routes: + - match: + severity: critical + receiver: team-Y-pager + + # This route handles all alerts coming from a database service. + # If there's no team to handle it, it defaults to the DB team. + - match: + service: database + receiver: team-DB-pager + # Also group alerts by affected database. + group_by: [alertname, cluster, database] + routes: + - match: + owner: team-X + receiver: team-X-pager + - match: + owner: team-Y + receiver: team-Y-pager + + + # Inhibition rules allow to mute a set of alerts given that another alert is + # firing. + # We use this to mute any warning-level notifications if the same alert is + # already critical. + inhibit_rules: + - source_match: + severity: 'critical' + target_match: + severity: 'warning' + # Apply inhibition if the alertname is the same. 
+ equal: ['alertname', 'cluster', 'service'] + + + receivers: + - name: 'team-X-mails' + email_configs: + - to: 'team-X+alerts@example.org' + + - name: 'team-X-pager' + email_configs: + - to: 'team-X+alerts-critical@example.org' + pagerduty_configs: + - service_key: + + - name: 'team-Y-mails' + email_configs: + - to: 'team-Y+alerts@example.org' + + - name: 'team-Y-pager' + pagerduty_configs: + - service_key: + + - name: 'team-DB-pager' + pagerduty_configs: + - service_key: diff --git a/ansible/roles/prometheus_alertmanager/handlers/main.yml b/ansible/roles/prometheus_alertmanager/handlers/main.yml new file mode 100644 index 0000000..5132fdc --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Restart prometheus-alertmanager service. + ansible.builtin.service: + name: prometheus-alertmanager + state: restarted diff --git a/ansible/roles/prometheus_alertmanager/meta/main.yml b/ansible/roles/prometheus_alertmanager/meta/main.yml new file mode 100644 index 0000000..797fd19 --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: prometheus_alertmanager + author: cuqmbr-homelab + description: Install Prometheus Alertmanager. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/prometheus_alertmanager/molecule/default/converge.yml b/ansible/roles/prometheus_alertmanager/molecule/default/converge.yml new file mode 100644 index 0000000..37f44ca --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - prometheus_alertmanager diff --git a/ansible/roles/prometheus_alertmanager/molecule/default/molecule.yml b/ansible/roles/prometheus_alertmanager/molecule/default/molecule.yml new file mode 100644 index 0000000..201d55a --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.prometheus-alertmanager_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/prometheus_alertmanager/tasks/main.yml b/ansible/roles/prometheus_alertmanager/tasks/main.yml new file mode 100644 index 0000000..e4e874d --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/tasks/main.yml @@ -0,0 +1,31 @@ +--- + +- name: Install prometheus-alertmanager apt package. + ansible.builtin.apt: + name: prometheus-alertmanager + state: present + +- name: Decrypt secrets in config file. + ansible.builtin.set_fact: + prometheus_alertmanager_options: >- + {{ + prometheus_alertmanager_options | + combine(prometheus_alertmanager_options, recursive=true) + }} + no_log: true + +- name: Install prometheus-alertmanager config. + ansible.builtin.template: + src: alertmanager.yml.j2 + dest: /etc/prometheus/alertmanager.yml + owner: root + group: root + mode: "0444" + notify: + - Restart prometheus-alertmanager service. + +- name: Enable and start prometheus-alertmanager service. 
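+  # Ensures the service is enabled and running after install; configuration
+  # changes are applied through the restart handler notified by the template
+  # task above.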
+ ansible.builtin.service: + name: prometheus-alertmanager + state: started + enabled: true diff --git a/ansible/roles/prometheus_alertmanager/templates/alertmanager.yml.j2 b/ansible/roles/prometheus_alertmanager/templates/alertmanager.yml.j2 new file mode 100644 index 0000000..d67799b --- /dev/null +++ b/ansible/roles/prometheus_alertmanager/templates/alertmanager.yml.j2 @@ -0,0 +1,4 @@ +--- +# Managed with Ansible + +{{ prometheus_alertmanager_options | ansible.builtin.to_nice_yaml(indent=2, width=80) }} diff --git a/ansible/roles/prometheus_nginx_exporter/meta/main.yml b/ansible/roles/prometheus_nginx_exporter/meta/main.yml new file mode 100644 index 0000000..fbb8fb8 --- /dev/null +++ b/ansible/roles/prometheus_nginx_exporter/meta/main.yml @@ -0,0 +1,9 @@ +galaxy_info: + role_name: prometheus_nginx_exporter + author: cuqmbr-homelab + description: Install Prometheus Nginx Exporter. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/prometheus_nginx_exporter/molecule/default/converge.yml b/ansible/roles/prometheus_nginx_exporter/molecule/default/converge.yml new file mode 100644 index 0000000..62b007d --- /dev/null +++ b/ansible/roles/prometheus_nginx_exporter/molecule/default/converge.yml @@ -0,0 +1,17 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + - name: Run cuqmbr-homelab.nginx role. + ansible.builtin.include_role: + name: ../../nginx + + roles: + - prometheus_nginx_exporter diff --git a/ansible/roles/prometheus_nginx_exporter/molecule/default/molecule.yml b/ansible/roles/prometheus_nginx_exporter/molecule/default/molecule.yml new file mode 100644 index 0000000..1ff3b0b --- /dev/null +++ b/ansible/roles/prometheus_nginx_exporter/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.prometheus-nginx-exporter_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/prometheus_nginx_exporter/tasks/main.yml b/ansible/roles/prometheus_nginx_exporter/tasks/main.yml new file mode 100644 index 0000000..93e2c31 --- /dev/null +++ b/ansible/roles/prometheus_nginx_exporter/tasks/main.yml @@ -0,0 +1,12 @@ +--- + +- name: Install prometheus-nginx-exporter apt package. + ansible.builtin.apt: + name: prometheus-nginx-exporter + state: present + +- name: Enable and start prometheus-nginx-exporter service. + ansible.builtin.service: + name: prometheus-nginx-exporter + state: started + enabled: true diff --git a/ansible/roles/prometheus_node_exporter/meta/main.yml b/ansible/roles/prometheus_node_exporter/meta/main.yml new file mode 100644 index 0000000..7356a9e --- /dev/null +++ b/ansible/roles/prometheus_node_exporter/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: prometheus_node_exporter + author: cuqmbr-homelab + description: Install Prometheus Node Exporter. 
+ # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/prometheus_node_exporter/molecule/default/converge.yml b/ansible/roles/prometheus_node_exporter/molecule/default/converge.yml new file mode 100644 index 0000000..63b071a --- /dev/null +++ b/ansible/roles/prometheus_node_exporter/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - prometheus_node_exporter diff --git a/ansible/roles/prometheus_node_exporter/molecule/default/molecule.yml b/ansible/roles/prometheus_node_exporter/molecule/default/molecule.yml new file mode 100644 index 0000000..aecb941 --- /dev/null +++ b/ansible/roles/prometheus_node_exporter/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.prometheus-node-exporter_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/prometheus_node_exporter/tasks/main.yml b/ansible/roles/prometheus_node_exporter/tasks/main.yml new file mode 100644 index 0000000..7f8638e --- /dev/null +++ b/ansible/roles/prometheus_node_exporter/tasks/main.yml @@ -0,0 +1,12 @@ +--- + +- name: Install prometheus-node-exporter apt package. + ansible.builtin.apt: + name: prometheus-node-exporter + state: present + +- name: Enable and start prometheus-node-exporter service. + ansible.builtin.service: + name: prometheus-node-exporter + state: started + enabled: true diff --git a/ansible/roles/prometheus_server/defaults/main.yml b/ansible/roles/prometheus_server/defaults/main.yml new file mode 100644 index 0000000..1cb1af5 --- /dev/null +++ b/ansible/roles/prometheus_server/defaults/main.yml @@ -0,0 +1,59 @@ +--- + +prometheus_options: + global: + # Set the scrape interval to every 15 seconds. Default is every 1 minute. + scrape_interval: 15s + # Evaluate rules every 15 seconds. The default is every 1 minute. + evaluation_interval: 15s + # scrape_timeout is set to the global default (10s). + + # Attach these labels to any time series or alerts when communicating with + # external systems (federation, remote storage, Alertmanager). + external_labels: + monitor: 'example' + + # Alertmanager configuration + alerting: + alertmanagers: + - static_configs: + - targets: ['localhost:9093'] + + # Load rules and evaluate them according to the global 'evaluation_interval'. + rule_files: + - alerting_rules/*.yml + + # A scrape configuration containing exactly one endpoint to scrape: + # Here it's Prometheus itself. + scrape_configs: + # The job name is added as a label `job=`. + - job_name: 'prometheus' + + # Override the global default and scrape targets from this job. + scrape_interval: 5s + scrape_timeout: 5s + + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + + static_configs: + - targets: ['localhost:9090'] + + - job_name: node + # If prometheus-node-exporter is installed, grab stats about the local + # machine by default. 
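+      # Scrape jobs for the other hosts are expected to be supplied by
+      # overriding prometheus_options from group_vars rather than by editing
+      # these role defaults.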
+ static_configs: + - targets: ['localhost:9100'] + +prometheus_alerting_rules: + groups: + - name: ExampleRedisGroup + rules: + - alert: ExampleRedisDown + expr: redis_up{} == 0 + for: 2m + labels: + severity: critical + annotations: + summary: "Redis instance down" + description: "Whatever" diff --git a/ansible/roles/prometheus_server/files/prometheus.service b/ansible/roles/prometheus_server/files/prometheus.service new file mode 100644 index 0000000..b83bf5b --- /dev/null +++ b/ansible/roles/prometheus_server/files/prometheus.service @@ -0,0 +1,16 @@ +[Unit] +Description=Monitoring system and time series database +Documentation=https://prometheus.io/docs/introduction/overview/ man:prometheus(1) +After=time-sync.target + +[Service] +Restart=on-failure +User=prometheus +EnvironmentFile=/etc/default/prometheus +ExecStart=/usr/bin/prometheus $ARGS +ExecReload=/bin/kill -HUP $MAINPID +TimeoutStopSec=20s +SendSIGKILL=no + +[Install] +WantedBy=multi-user.target diff --git a/ansible/roles/prometheus_server/handlers/main.yml b/ansible/roles/prometheus_server/handlers/main.yml new file mode 100644 index 0000000..b6d6acf --- /dev/null +++ b/ansible/roles/prometheus_server/handlers/main.yml @@ -0,0 +1,6 @@ +--- + +- name: Reload prometheus service. + ansible.builtin.service: + name: prometheus + state: reloaded diff --git a/ansible/roles/prometheus_server/meta/main.yml b/ansible/roles/prometheus_server/meta/main.yml new file mode 100644 index 0000000..2c7d5c1 --- /dev/null +++ b/ansible/roles/prometheus_server/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: prometheus_server + author: cuqmbr-homelab + description: Install Prometheus Server. + # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/prometheus_server/molecule/default/converge.yml b/ansible/roles/prometheus_server/molecule/default/converge.yml new file mode 100644 index 0000000..4cc12f4 --- /dev/null +++ b/ansible/roles/prometheus_server/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - prometheus_server diff --git a/ansible/roles/prometheus_server/molecule/default/molecule.yml b/ansible/roles/prometheus_server/molecule/default/molecule.yml new file mode 100644 index 0000000..e964df6 --- /dev/null +++ b/ansible/roles/prometheus_server/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.prometheus-server_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/prometheus_server/tasks/main.yml b/ansible/roles/prometheus_server/tasks/main.yml new file mode 100644 index 0000000..e4052c1 --- /dev/null +++ b/ansible/roles/prometheus_server/tasks/main.yml @@ -0,0 +1,52 @@ +--- + +- name: Install prometheus apt package. + ansible.builtin.apt: + name: prometheus + state: present + +- name: Install prometheus systemd service file. + ansible.builtin.copy: + src: prometheus.service + dest: /lib/systemd/system/prometheus.service + owner: root + group: root + mode: "0644" + +- name: Reload systemd daemon. 
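+  # The packaged unit file is replaced above, so systemd must re-read its unit
+  # files before the service is started further down.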
+ ansible.builtin.systemd_service: + daemon_reload: true + +- name: Install prometheus config. + ansible.builtin.template: + src: prometheus.yml.j2 + dest: /etc/prometheus/prometheus.yml + owner: root + group: root + mode: "0644" + notify: + - Reload prometheus service. + +- name: Create alerting rules directory. + ansible.builtin.file: + path: /etc/prometheus/alerting_rules + state: directory + owner: root + group: root + mode: "0755" + +- name: Install alerting rules config. + ansible.builtin.template: + src: alerting_rules.yml.j2 + dest: /etc/prometheus/alerting_rules/alerting_rules.yml + owner: root + group: root + mode: "0644" + notify: + - Reload prometheus service. + +- name: Enable and start prometheus service. + ansible.builtin.service: + name: prometheus + state: started + enabled: true diff --git a/ansible/roles/prometheus_server/templates/alerting_rules.yml.j2 b/ansible/roles/prometheus_server/templates/alerting_rules.yml.j2 new file mode 100644 index 0000000..e9dce36 --- /dev/null +++ b/ansible/roles/prometheus_server/templates/alerting_rules.yml.j2 @@ -0,0 +1,4 @@ +--- +# Managed with Ansible + +{{ prometheus_alerting_rules | ansible.builtin.to_nice_yaml(indent=2) }} diff --git a/ansible/roles/prometheus_server/templates/prometheus.yml.j2 b/ansible/roles/prometheus_server/templates/prometheus.yml.j2 new file mode 100644 index 0000000..491e7e2 --- /dev/null +++ b/ansible/roles/prometheus_server/templates/prometheus.yml.j2 @@ -0,0 +1,4 @@ +--- +# Managed with Ansible + +{{ prometheus_options | ansible.builtin.to_nice_yaml(indent=2) }} diff --git a/ansible/roles/searxng/defaults/main.yml b/ansible/roles/searxng/defaults/main.yml new file mode 100644 index 0000000..6e4e21b --- /dev/null +++ b/ansible/roles/searxng/defaults/main.yml @@ -0,0 +1,49 @@ +--- + +searxng_homedir: /opt/searxng + +searxng_git_commit: c185d076894ebbdb5db921c448c240d04915847b + +searxng_settings: + # SearXNG settings + + use_default_settings: true + + general: + debug: false + instance_name: "cuqmbr's SearXNG" + + search: + safe_search: 2 + autocomplete: 'duckduckgo' + + server: + secret_key: "ultrasecretkey_change_me" + limiter: false + image_proxy: true + # public URL of the instance, to ensure correct inbound links. + # Is overwritten by ${SEARXNG_URL}. + base_url: http://example.com/location + + # redis: + # URL to connect redis database. Is overwritten by ${SEARXNG_REDIS_URL}. + # url: unix:///usr/local/searxng-redis/run/redis.sock?db=0 + + ui: + static_use_hash: true + + # preferences: + # lock: + # - autocomplete + # - method + + enabled_plugins: + - 'Hash plugin' + - 'Self Information' + - 'Tracker URL remover' + - 'Ahmia blacklist' + # - 'Hostnames plugin' # see 'hostnames' configuration below + # - 'Open Access DOI rewrite' + + # plugins: + # - only_show_green_results diff --git a/ansible/roles/searxng/handlers/main.yml b/ansible/roles/searxng/handlers/main.yml new file mode 100644 index 0000000..9c69653 --- /dev/null +++ b/ansible/roles/searxng/handlers/main.yml @@ -0,0 +1,10 @@ +--- + +- name: Reload systemd daemon. + ansible.builtin.systemd_service: + daemon_reload: true + +- name: Restart searxng service. + ansible.builtin.systemd_service: + name: searxng + state: restarted diff --git a/ansible/roles/searxng/meta/main.yml b/ansible/roles/searxng/meta/main.yml new file mode 100644 index 0000000..9cc5a6d --- /dev/null +++ b/ansible/roles/searxng/meta/main.yml @@ -0,0 +1,10 @@ +--- +galaxy_info: + role_name: searxng + author: cuqmbr-homelab + description: Install SearxNG. 
+ # issue_tracker_url: http://example.com/issue/tracker + license: MIT + min_ansible_version: "2.1" + galaxy_tags: [] +dependencies: [] diff --git a/ansible/roles/searxng/molecule/default/converge.yml b/ansible/roles/searxng/molecule/default/converge.yml new file mode 100644 index 0000000..e293207 --- /dev/null +++ b/ansible/roles/searxng/molecule/default/converge.yml @@ -0,0 +1,14 @@ +--- + +- name: Converge + hosts: all + gather_facts: false + + pre_tasks: + - name: Update apt cache. + ansible.builtin.apt: + update_cache: true + cache_valid_time: 86400 + + roles: + - searxng diff --git a/ansible/roles/searxng/molecule/default/molecule.yml b/ansible/roles/searxng/molecule/default/molecule.yml new file mode 100644 index 0000000..0327124 --- /dev/null +++ b/ansible/roles/searxng/molecule/default/molecule.yml @@ -0,0 +1,12 @@ +--- +driver: + name: docker +platforms: + - name: cuqmbr-homelab.searxng_debian-12 + image: docker.io/geerlingguy/docker-debian12-ansible:latest + pre_build_image: true + command: ${MOLECULE_DOCKER_COMMAND:-""} + volumes: + - /sys/fs/cgroup:/sys/fs/cgroup:rw + cgroupns_mode: host + privileged: true diff --git a/ansible/roles/searxng/tasks/main.yml b/ansible/roles/searxng/tasks/main.yml new file mode 100644 index 0000000..b7d9979 --- /dev/null +++ b/ansible/roles/searxng/tasks/main.yml @@ -0,0 +1,120 @@ +--- + +- name: Install dependencies. + ansible.builtin.apt: + name: + - python3-dev + - python3-babel + - python3-venv + - uwsgi + - uwsgi-plugin-python3 + - git + - build-essential + - libxslt-dev + - zlib1g-dev + - libffi-dev + - libssl-dev + state: present + +- name: Create searxng user. + ansible.builtin.user: + state: present + name: searxng + password: ! + system: true + create_home: true + home: "{{ searxng_homedir }}" + shell: /usr/sbin/nologin + +- name: Set searxng_source and searxng_pyenv variable. + ansible.builtin.set_fact: + searxng_source: "{{ searxng_homedir }}/src" + searxng_pyenv: "{{ searxng_homedir }}/pyenv" + searxng_compiled: "{{ searxng_homedir }}/compiled" + +- name: Clone searxng git repository. + ansible.builtin.git: + clone: true + repo: https://github.com/searxng/searxng.git + dest: "{{ searxng_source }}" + depth: 1 + version: "{{ searxng_git_commit }}" + force: true + single_branch: true + +- name: Install pip dependencies. + ansible.builtin.pip: + virtualenv: "{{ searxng_pyenv }}" + virtualenv_command: python3 -m venv + name: + - pip + - setuptools + - wheel + - pyyaml + state: present + +- name: Compile searxng. + ansible.builtin.pip: + virtualenv: "{{ searxng_pyenv }}" + requirements: "{{ searxng_source }}/requirements.txt" + extra_args: "--use-pep517 --no-build-isolation \ + -e {{ searxng_source }}" + state: present + +- name: Remove old program files. + ansible.builtin.file: + path: "{{ searxng_compiled }}" + state: absent + changed_when: false + +- name: Install new program files. + ansible.builtin.copy: + remote_src: true + src: "{{ searxng_source }}/" + dest: "{{ searxng_compiled }}" + owner: searxng + group: searxng + mode: "0775" + changed_when: false + +- name: Create searxng settings directory. + ansible.builtin.file: + state: directory + owner: searxng + group: searxng + mode: "0775" + path: /etc/searxng + +- name: Decrypt secrets in settings file. + ansible.builtin.set_fact: + searxng_settings: >- + {{ searxng_settings | combine(searxng_settings, recursive=true) }} + no_log: true + +- name: Install searxng settings file. 
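+  # settings.yml is rendered from searxng_settings; secrets such as
+  # server.secret_key are expected to be overridden with vaulted values in
+  # group_vars (see the "Decrypt secrets" task above), not kept in defaults.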
+ ansible.builtin.template: + src: settings.yml.j2 + dest: /etc/searxng/settings.yml + owner: searxng + group: searxng + mode: "0600" + notify: + - Reload systemd daemon. + - Restart searxng service. + +- name: Create systemd unit file. + ansible.builtin.template: + src: searxng.service.j2 + dest: /etc/systemd/system/searxng.service + owner: root + group: root + mode: "0444" + notify: + - Reload systemd daemon. + - Restart searxng service. + +- name: Enable and start searxng service. + ansible.builtin.service: + name: searxng + enabled: true + state: started diff --git a/ansible/roles/searxng/templates/searxng.service.j2 b/ansible/roles/searxng/templates/searxng.service.j2 new file mode 100644 index 0000000..8645294 --- /dev/null +++ b/ansible/roles/searxng/templates/searxng.service.j2 @@ -0,0 +1,18 @@ +[Unit] +Description=SearxNG +After=multi-user.target + +[Service] +Type=simple +User=searxng +Group=searxng +WorkingDirectory={{ searxng_compiled }} +ExecStart={{ searxng_pyenv }}/bin/python {{ searxng_compiled }}/searx/webapp.py +Environment="SEARXNG_SETTINGS_PATH=/etc/searxng/settings.yml" + +Restart=always + +RestartSec=2 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/roles/searxng/templates/settings.yml.j2 b/ansible/roles/searxng/templates/settings.yml.j2 new file mode 100644 index 0000000..fdb81c4 --- /dev/null +++ b/ansible/roles/searxng/templates/settings.yml.j2 @@ -0,0 +1,4 @@ +--- +# Managed with Ansible + +{{ searxng_settings | ansible.builtin.to_nice_yaml(indent=2, width=80) }} diff --git a/terraform/.gitignore b/terraform/.gitignore new file mode 100644 index 0000000..a774974 --- /dev/null +++ b/terraform/.gitignore @@ -0,0 +1,5 @@ +terraform.tfstate +terraform.tfstate.* +.terraform.tfstate.lock.info +.terraform +terraform.tfvars diff --git a/terraform/.terraform.lock.hcl b/terraform/.terraform.lock.hcl new file mode 100644 index 0000000..27ec51a --- /dev/null +++ b/terraform/.terraform.lock.hcl @@ -0,0 +1,9 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. 
+ +provider "local/bpg/proxmox" { + version = "0.78.2" + hashes = [ + "h1:N/p0BJCms7y2MBJmYjoWXFtxocN55PKYz1ulwzPTO00=", + ] +} diff --git a/terraform/bastion.tf b/terraform/bastion.tf new file mode 100644 index 0000000..bbd829e --- /dev/null +++ b/terraform/bastion.tf @@ -0,0 +1,110 @@ +resource "proxmox_virtual_environment_container" "bastion" { + node_name = "pve" + + vm_id = 6000 + + tags = ["dev", "prod", "common", "bastion"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 512 + } + + disk { + datastore_id = var.datastore_id + size = 4 + } + + network_interface { + bridge = var.external_network_bridge_name + name = "eth-ext" + firewall = true + enabled = true + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "bastion" + ip_config { + ipv4 { + address = "dhcp" + } + } + ip_config { + ipv4 { + address = "192.168.0.254/24" + # gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + # TODO: make into a variable + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 100 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "bastion" { + depends_on = [proxmox_virtual_environment_container.bastion] + + node_name = proxmox_virtual_environment_container.bastion.node_name + vm_id = proxmox_virtual_environment_container.bastion.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "bastion" { + depends_on = [proxmox_virtual_environment_container.bastion] + + node_name = proxmox_virtual_environment_container.bastion.node_name + vm_id = proxmox_virtual_environment_container.bastion.vm_id + + rule { + type = "in" + action = "ACCEPT" + dport = "22" + proto = "tcp" + } + + rule { + type = "in" + action = "ACCEPT" + dport = "8" + proto = "icmp" + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." + } +} diff --git a/terraform/firewall_ipsets.tf b/terraform/firewall_ipsets.tf new file mode 100644 index 0000000..64efe26 --- /dev/null +++ b/terraform/firewall_ipsets.tf @@ -0,0 +1,35 @@ +resource "proxmox_virtual_environment_firewall_ipset" "loggers" { + + name = "loggers" + comment = "Nodes that send logs to Monitoring Node." + + cidr { + name = "192.168.0.254" + comment = "bastion" + } + + cidr { + name = "192.168.0.253" + comment = "load-balancer" + } + + cidr { + name = "192.168.0.252" + comment = "monitoring" + } + + cidr { + name = "192.168.0.3" + comment = "postgresql" + } + + cidr { + name = "192.168.0.10" + comment = "main-page" + } + + cidr { + name = "192.168.0.15" + comment = "searxng" + } +} diff --git a/terraform/firewall_security_groups.tf b/terraform/firewall_security_groups.tf new file mode 100644 index 0000000..5d6c2ea --- /dev/null +++ b/terraform/firewall_security_groups.tf @@ -0,0 +1,51 @@ +resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_node_exporter" { + name = "prom-node-exp" + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." 
+ + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0] + proto = "tcp" + dport = "9100" + action = "ACCEPT" + } +} + +resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_nginx_exporter" { + name = "prom-nginx-exp" + comment = "Allow Prometheus server to pull Prometheus nginx exporter from Monitoring Node." + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0] + proto = "tcp" + dport = "9113" + action = "ACCEPT" + } +} + +resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_server_exporter" { + name = "prom-srv-exp" + comment = "Allow Prometheus server to pull Prometheus default exporter from Monitoring Node." + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0] + proto = "tcp" + dport = "9090" + action = "ACCEPT" + } +} + +resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_alertmanager" { + name = "prom-alert" + comment = "Access Prometheus Alertmanager from Monitoring Node." + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0] + proto = "tcp" + dport = "9093" + action = "ACCEPT" + } +} diff --git a/terraform/forgejo.tf.disabled b/terraform/forgejo.tf.disabled new file mode 100644 index 0000000..d5214a7 --- /dev/null +++ b/terraform/forgejo.tf.disabled @@ -0,0 +1,109 @@ +resource "proxmox_virtual_environment_container" "forgejo" { + node_name = "pve" + + vm_id = 1200 + + tags = ["dev"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 1536 + } + + disk { + datastore_id = var.datastore_id + size = 32 + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "forgejo" + ip_config { + ipv4 { + address = "192.168.0.12/24" + gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 500 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "forgejo" { + depends_on = [proxmox_virtual_environment_container.forgejo] + + node_name = proxmox_virtual_environment_container.forgejo.node_name + vm_id = proxmox_virtual_environment_container.forgejo.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "forgejo" { + depends_on = [proxmox_virtual_environment_container.forgejo] + + node_name = proxmox_virtual_environment_container.forgejo.node_name + vm_id = proxmox_virtual_environment_container.forgejo.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + comment = "Ping." 
+ } + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "3000" + action = "ACCEPT" + comment = "Forgejo." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." + } +} diff --git a/terraform/load-balancer.tf b/terraform/load-balancer.tf new file mode 100644 index 0000000..ec88fa5 --- /dev/null +++ b/terraform/load-balancer.tf @@ -0,0 +1,132 @@ +resource "proxmox_virtual_environment_container" "load_balancer" { + node_name = "pve" + + vm_id = 6010 + + tags = ["dev", "prod", "common", "load-balancer"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 512 + } + + disk { + datastore_id = var.datastore_id + size = 4 + } + + network_interface { + bridge = var.external_network_bridge_name + name = "eth-ext" + firewall = true + enabled = true + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "load-balancer" + ip_config { + ipv4 { + address = "dhcp" + } + } + ip_config { + ipv4 { + address = "192.168.0.253/24" + # gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 1000 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "load_balancer" { + depends_on = [proxmox_virtual_environment_container.load_balancer] + + node_name = proxmox_virtual_environment_container.load_balancer.node_name + vm_id = proxmox_virtual_environment_container.load_balancer.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "load_balancer" { + depends_on = [proxmox_virtual_environment_container.load_balancer] + + node_name = proxmox_virtual_environment_container.load_balancer.node_name + vm_id = proxmox_virtual_environment_container.load_balancer.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + } + + rule { + type = "in" + action = "ACCEPT" + dport = "80" + proto = "tcp" + comment = "Ping." + } + + rule { + type = "in" + proto = "tcp" + dport = "443" + action = "ACCEPT" + comment = "HTTPS." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_nginx_exporter.name + comment = "Allow Prometheus server to pull Prometheus nginx exporter from Monitoring Node." 
+ } +} diff --git a/terraform/main-page.tf b/terraform/main-page.tf new file mode 100644 index 0000000..52023bb --- /dev/null +++ b/terraform/main-page.tf @@ -0,0 +1,109 @@ +resource "proxmox_virtual_environment_container" "main_page" { + node_name = "pve" + + vm_id = 1010 + + tags = ["dev"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 512 + } + + disk { + datastore_id = var.datastore_id + size = 4 + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "main-page" + ip_config { + ipv4 { + address = "192.168.0.10/24" + gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 500 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "main_page" { + depends_on = [proxmox_virtual_environment_container.main_page] + + node_name = proxmox_virtual_environment_container.main_page.node_name + vm_id = proxmox_virtual_environment_container.main_page.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "main_page" { + depends_on = [proxmox_virtual_environment_container.main_page] + + node_name = proxmox_virtual_environment_container.main_page.node_name + vm_id = proxmox_virtual_environment_container.main_page.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + comment = "Ping." + } + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "80" + action = "ACCEPT" + comment = "Nginx Static Serving." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." 
+ } +} diff --git a/terraform/main.tf b/terraform/main.tf new file mode 100644 index 0000000..bd94ca8 --- /dev/null +++ b/terraform/main.tf @@ -0,0 +1,16 @@ +terraform { + backend "local" { + path = "./terraform.tfstate" + } + required_providers { + proxmox = { + source = "local/bpg/proxmox" + } + } +} + +provider "proxmox" { + endpoint = var.virtual_environment_endpoint + api_token = var.virtual_environment_api_token + insecure = true +} diff --git a/terraform/monitoring.tf b/terraform/monitoring.tf new file mode 100644 index 0000000..56c26f9 --- /dev/null +++ b/terraform/monitoring.tf @@ -0,0 +1,137 @@ +resource "proxmox_virtual_environment_container" "monitoring" { + node_name = "pve" + + vm_id = 6020 + + tags = ["dev", "prod", "common", "monitoring"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 3072 + } + + disk { + datastore_id = var.datastore_id + size = 64 + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "monitoring" + ip_config { + ipv4 { + address = "192.168.0.252/24" + gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 0 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "monitoring" { + depends_on = [proxmox_virtual_environment_container.monitoring] + + node_name = proxmox_virtual_environment_container.monitoring.node_name + vm_id = proxmox_virtual_environment_container.monitoring.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "monitoring" { + depends_on = [proxmox_virtual_environment_container.monitoring] + + node_name = proxmox_virtual_environment_container.monitoring.node_name + vm_id = proxmox_virtual_environment_container.monitoring.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + comment = "Ping." + } + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "3000" + action = "ACCEPT" + comment = "Grafana Server from Load Balancer." + } + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "9090" + action = "ACCEPT" + comment = "Prometheus Server from Load Balancer." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_server_exporter.name + comment = "Allow Prometheus server to pull Prometheus default exporter from Monitoring Node." 
+ } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_alertmanager.name + comment = "Access Prometheus Alertmanager from Monitoring Node." + } + + rule { + type = "in" + source = "+${proxmox_virtual_environment_firewall_ipset.loggers.name}" + proto = "tcp" + dport = "3100" + action = "ACCEPT" + comment = "Access Grafana Loki from logging nodes." + } +} diff --git a/terraform/notes.txt b/terraform/notes.txt new file mode 100644 index 0000000..9483a39 --- /dev/null +++ b/terraform/notes.txt @@ -0,0 +1,15 @@ +VM/CT IDs: + - first digit: + - environment number + - 1 - development + - 5 - production + - 6 - common + - second-third digit: + - node types + - 0 - load balancers, databases, gateways + - 1-9 - other services + - fourth digits: + - service number + - reserve an interval for services + +enable nesting to prevent freezes on user change diff --git a/terraform/postgresql.tf b/terraform/postgresql.tf new file mode 100644 index 0000000..d17efee --- /dev/null +++ b/terraform/postgresql.tf @@ -0,0 +1,100 @@ +resource "proxmox_virtual_environment_container" "postgresql" { + node_name = "pve" + + vm_id = 1030 + + tags = ["dev", "database"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 512 + } + + disk { + datastore_id = var.datastore_id + size = 8 + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "postgresql" + ip_config { + ipv4 { + address = "192.168.0.3/24" + gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 100 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "postgresql" { + depends_on = [proxmox_virtual_environment_container.postgresql] + + node_name = proxmox_virtual_environment_container.postgresql.node_name + vm_id = proxmox_virtual_environment_container.postgresql.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "postgresql" { + depends_on = [proxmox_virtual_environment_container.postgresql] + + node_name = proxmox_virtual_environment_container.postgresql.node_name + vm_id = proxmox_virtual_environment_container.postgresql.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + comment = "Ping." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." 
+ } +} diff --git a/terraform/searxng.tf b/terraform/searxng.tf new file mode 100644 index 0000000..f0355d8 --- /dev/null +++ b/terraform/searxng.tf @@ -0,0 +1,109 @@ +resource "proxmox_virtual_environment_container" "searxng" { + node_name = "pve" + + vm_id = 1020 + + tags = ["dev"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 512 + } + + disk { + datastore_id = var.datastore_id + size = 4 + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "searxng" + ip_config { + ipv4 { + address = "192.168.0.15/24" + gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 500 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "searxng" { + depends_on = [proxmox_virtual_environment_container.searxng] + + node_name = proxmox_virtual_environment_container.searxng.node_name + vm_id = proxmox_virtual_environment_container.searxng.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "searxng" { + depends_on = [proxmox_virtual_environment_container.searxng] + + node_name = proxmox_virtual_environment_container.searxng.node_name + vm_id = proxmox_virtual_environment_container.searxng.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + comment = "Ping." + } + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "8888" + action = "ACCEPT" + comment = "SearxNG." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." 
+ } +} diff --git a/terraform/test.tf.disabled b/terraform/test.tf.disabled new file mode 100644 index 0000000..4094fea --- /dev/null +++ b/terraform/test.tf.disabled @@ -0,0 +1,109 @@ +resource "proxmox_virtual_environment_container" "test" { + node_name = "pve" + + vm_id = 1201 + + tags = ["dev"] + + unprivileged = true + + cpu { + cores = 1 + } + + memory { + dedicated = 1536 + } + + disk { + datastore_id = var.datastore_id + size = 10 + } + + network_interface { + bridge = var.development_network_bridge_name + name = "eth-dev" + firewall = true + enabled = true + } + + initialization { + hostname = "test" + ip_config { + ipv4 { + address = "192.168.0.100/24" + gateway = "192.168.0.1" + } + } + user_account { + keys = [var.ssh_public_key] + } + } + + operating_system { + template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst" + type = "debian" + } + + started = true + + startup { + order = 500 + up_delay = 0 + down_delay = 0 + } + + features { + nesting = true + } +} + +resource "proxmox_virtual_environment_firewall_options" "test" { + depends_on = [proxmox_virtual_environment_container.test] + + node_name = proxmox_virtual_environment_container.test.node_name + vm_id = proxmox_virtual_environment_container.test.vm_id + + enabled = true + dhcp = true + input_policy = "DROP" + output_policy = "ACCEPT" +} + +resource "proxmox_virtual_environment_firewall_rules" "test" { + depends_on = [proxmox_virtual_environment_container.test] + + node_name = proxmox_virtual_environment_container.test.node_name + vm_id = proxmox_virtual_environment_container.test.vm_id + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "22" + action = "ACCEPT" + comment = "SSH from Bastion." + } + + rule { + type = "in" + proto = "icmp" + dport = "8" + action = "ACCEPT" + comment = "Ping." + } + + rule { + type = "in" + source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0] + proto = "tcp" + dport = "3000" + action = "ACCEPT" + comment = "test." + } + + rule { + security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name + comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node." + } +} diff --git a/terraform/variables.tf b/terraform/variables.tf new file mode 100644 index 0000000..6e731aa --- /dev/null +++ b/terraform/variables.tf @@ -0,0 +1,33 @@ +# Connection Settings + +variable "virtual_environment_endpoint" { + description = "Proxmox Virtual Envirnment Endpoint e.g. https://pve.domain.tld:8006/." + type = string +} + +variable "virtual_environment_api_token" { + description = "Tocket to access PVE API on behalf of the user." + type = string + sensitive = true +} + +variable "ssh_public_key" { + description = "SSH public key to place into authorized_keys of a root user in new vm/ct." + type = string + sensitive = true +} + + +# Variables + +variable "datastore_id" { + type = string +} + +variable "external_network_bridge_name" { + type = string +} + +variable "development_network_bridge_name" { + type = string +}
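+
+# Illustrative only -- real values belong in the git-ignored terraform.tfvars,
+# for example (placeholder values, not the actual environment):
+#
+#   virtual_environment_endpoint    = "https://pve.example.com:8006/"
+#   virtual_environment_api_token   = "user@pam!token=xxxxxxxx"
+#   ssh_public_key                  = "ssh-ed25519 AAAA... example@key"
+#   datastore_id                    = "local-lvm"
+#   external_network_bridge_name    = "vmbr0"
+#   development_network_bridge_name = "vmbr1"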