
initial commit

cuqmbr 2025-06-23 18:26:15 +03:00
commit f84218c9e7
Signed by: cuqmbr
GPG Key ID: 0AA446880C766199
109 changed files with 3468 additions and 0 deletions

.gitmodules vendored Normal file

@@ -0,0 +1,3 @@
[submodule "ansible/roles/postgresql"]
path = ansible/roles/postgresql
url = https://github.com/geerlingguy/ansible-role-postgresql.git

ansible/.python-version Normal file

@@ -0,0 +1 @@
3.12.7

ansible/00_init.yml Normal file

@@ -0,0 +1,14 @@
---
- hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- role: roles/init
- role: roles/prometheus_node_exporter

ansible/05_bastion.yml Normal file

@@ -0,0 +1,14 @@
---
- hosts: bastion
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- role: roles/fluent_bit
- role: roles/prometheus_node_exporter

ansible/10_monitoring.yml Normal file

@@ -0,0 +1,18 @@
---
- hosts: monitoring
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- role: roles/fluent_bit
- role: roles/grafana_loki
- role: roles/prometheus_server
- role: roles/prometheus_node_exporter
- role: roles/prometheus_alertmanager
- role: roles/grafana_server

ansible/15_postgresql.yml Normal file

@@ -0,0 +1,15 @@
---
- hosts: postgresql
# gather_facts: false  # kept enabled here: the postgresql role relies on gathered facts
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- role: roles/fluent_bit
- role: roles/prometheus_node_exporter
- role: roles/postgresql

ansible/20_main_page.yml Normal file

@@ -0,0 +1,17 @@
---
- hosts: main_page
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- role: roles/init
- role: roles/fluent_bit
- role: roles/prometheus_node_exporter
- role: roles/hugo
- role: roles/nginx

ansible/21_searxng.yml Normal file

@@ -0,0 +1,15 @@
---
- hosts: searxng
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- role: roles/fluent_bit
- role: roles/prometheus_node_exporter
- role: roles/searxng

@@ -0,0 +1,18 @@
---
- hosts: load_balancers
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
- name: Include nginx role.
ansible.builtin.include_role:
name: roles/nginx
roles:
- role: roles/fluent_bit
- role: roles/prometheus_node_exporter
- role: roles/prometheus_nginx_exporter

ansible/ansible.cfg Normal file

@@ -0,0 +1,5 @@
[defaults]
nocows=True
[ssh_connection]
ssh_args = -o StrictHostKeyChecking=accept-new -o ConnectTimeout=300 -o ConnectionAttempts=5 -o PreferredAuthentications=publickey

@@ -0,0 +1,23 @@
---
users:
- name: admin
password_hash: !vault |
$ANSIBLE_VAULT;1.1;AES256
30623138653735643561343061356531373430393662383764633038383238383837626636393432
3138653539356430306266663864343563616332656131310a343632323363653665646363366437
66643430626437333461656231303339656435346261336238313036306431396333643965666631
3665393163623266320a373838313538626438623330393533353931336331623464613664633430
32303734396634376431383936643431313561303864343930393363623130663236666636353637
63613237383666656263316661333031643032323266636464313839653065316138343035346161
64313037336666353136383462333832373031623637636630326330313832333265386632343139
30306638356434376635346637346134653064613236326333656566383137353166393063333563
32623638343263313463313062303465626439356461613235656661623364656138
ssh_public_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0"
opendoas_settings: "permit persist admin as root"
- name: ansible
password_hash: ""
ssh_public_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0"
opendoas_settings: "permit nopass ansible"

@@ -0,0 +1,17 @@
---
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
pipeline:
inputs:
- name: systemd
tag: systemd
outputs:
- name: loki
host: 192.168.0.252
labels: env=common,hostname=bastion,service_name=systemd
match: systemd

@@ -0,0 +1,84 @@
---
nginx_settings:
server_tokens: false
gzip: true
ssl_protocols:
- TLSv1.2
- TLSv1.3
load_balancers:
http:
- upstream:
name: main-page
servers:
- 192.168.0.10:80
server:
listen_port: 80
names:
- dev.cuqmbr.xyz
- dev.cuqmbr.home
- upstream:
name: searxng
servers:
- 192.168.0.15:8888
server:
listen_port: 80
names:
- searxng.dev.cuqmbr.xyz
- searxng.dev.cuqmbr.home
# - upstream:
# name: prometheus
# servers:
# - 192.168.0.252:9090
# server:
# listen_port: 80
# names:
# - prometheus.dev.cuqmbr.xyz
# - prometheus.dev.cuqmbr.home
- upstream:
name: grafana
servers:
- 192.168.0.252:3000
server:
listen_port: 80
names:
- monitoring.dev.cuqmbr.xyz
- monitoring.dev.cuqmbr.home
statements:
- proxy_set_header Host $http_host
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
pipeline:
inputs:
- name: systemd
tag: systemd_input
filters:
- name: rewrite_tag
match: systemd_input
rule: $_SYSTEMD_UNIT ^(nginx.service)$ nginx false
# The second alternative is a negative lookahead: anything that is not
# exactly nginx.service keeps the generic systemd tag.
- name: rewrite_tag
match: systemd_input
rule: $_SYSTEMD_UNIT ^(nginx.service.+|(?!nginx.service).*)$ systemd false
- name: record_modifier
match: nginx
allowlist_key:
- MESSAGE
# - name: record_modifier
# match: systemd_tag
# allowlist_key:
# - _SYSTEMD_UNIT
# - MESSAGE
outputs:
- name: loki
host: 192.168.0.252
labels: "env=common,hostname=load-balancer,service_name=nginx"
match: nginx
- name: loki
host: 192.168.0.252
labels: "env=common,hostname=load-balancer,service_name=systemd"
match: systemd

@@ -0,0 +1,66 @@
---
users:
- name: admin
password_hash: !vault |
$ANSIBLE_VAULT;1.1;AES256
30623138653735643561343061356531373430393662383764633038383238383837626636393432
3138653539356430306266663864343563616332656131310a343632323363653665646363366437
66643430626437333461656231303339656435346261336238313036306431396333643965666631
3665393163623266320a373838313538626438623330393533353931336331623464613664633430
32303734396634376431383936643431313561303864343930393363623130663236666636353637
63613237383666656263316661333031643032323266636464313839653065316138343035346161
64313037336666353136383462333832373031623637636630326330313832333265386632343139
30306638356434376635346637346134653064613236326333656566383137353166393063333563
32623638343263313463313062303465626439356461613235656661623364656138
ssh_public_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0"
opendoas_settings: "permit persist admin as root"
- name: ansible
password_hash: ""
ssh_public_keys:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDKNzJdo6/c7uXrg0lqVwyXOhcNxO/BnylyJeqoBe4rAO5fhjwWLsvMAeCEmYa/3i8ITSvurFEou7BELo25vM58dNfGQHig52LrA/GU/jwDAhHyTXP3AvqqgIFa0ysMaHasYny6oqXi+eb2w/KimtgOhe5/oUdNBe/KgqZ+hP3qlTchxBl5MEzZIKgXTXQeYJpYYrnFb0l/R8qSkFBJv2xzxVJxEamN71SG7OIsi9m14D6hd2pNDHDDqHgKBVbN5irxDuJAzHN5upzfziXiYCOusud23tX6/nNv8t03CbB7FW0OxaCGhAjbavTFAf164L9GM7j76BGsLwWSh2HhG9G9lKs2bEI3IQudllMc6p9N6j2FhMOCKK6YYekdAOVc3ozTFc73VLkXtN8pnTC8OCSavthSt5jOUd0qTsQGH91lWlEkVe0bWi+s9nggfeWFM7HMVmqsR1jYlOXoi5s7xYwKLUdeUjRk3/rkzIFoOxquE5sVVuNDRNCaqcpPVY4k0gE= openpgp:0x8880F3E0"
opendoas_settings: "permit nopass ansible"
- name: gitea-actions
password_hash: ""
ssh_public_keys:
- "ssh-ed25519 \
AAAAC3NzaC1lZDI1NTE5AAAAIJv1DR3s3q6MOpe8S1FWZ/+TLok4AwNfu/h3Ugmh6lIa \
cuqmbr.xyz_gitea-actions"
opendoas_settings: "permit nopass gitea-actions"
hugo_settings:
hugo_version: 0.147.9
homedir: /opt/hugo
git_repo: https://gitea.cuqmbr.xyz/cuqmbr/cuqmbr.xyz.git
git_commit: 5b894854d47b41996b1901fa257f8c2cad9224f9
nginx_settings:
server_tokens: false
gzip: false
ssl_protocols:
- TLSv1.2
- TLSv1.3
statics:
- root: /var/www/hugo
index: index.html
listen_port: 80
names:
- dev.cuqmbr.xyz
- dev.cuqmbr.home
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
pipeline:
inputs:
- name: systemd
tag: systemd
outputs:
- name: loki
host: 192.168.0.252
labels: "env=dev,hostname=main-page,service_name=systemd"
match: systemd

@@ -0,0 +1,278 @@
---
prometheus_options:
global:
alerting:
alertmanagers:
- static_configs:
- targets:
- 192.168.0.252:9093
rule_files:
- alerting_rules/*.yml
scrape_configs:
- job_name: prometheus
static_configs:
- targets:
- 192.168.0.252:9090
- job_name: node
static_configs:
- targets:
# main-page
- 192.168.0.10:9100
labels:
env: dev
hostname: main-page
- targets:
# searxng
- 192.168.0.15:9100
labels:
env: dev
hostname: searxng
- targets:
# bastion
- 192.168.0.254:9100
labels:
env: common
hostname: bastion
- targets:
# load-balancer
- 192.168.0.253:9100
labels:
env: common
hostname: load-balancer
- targets:
# monitoring
- 192.168.0.252:9100
labels:
env: common
hostname: monitoring
- job_name: nginx
static_configs:
- targets:
# load-balancer
- 192.168.0.253:9113
labels:
env: common
hostname: load-balancer
prometheus_alertmanager_options:
global:
smtp_smarthost: mail.cuqmbr.xyz:587
smtp_require_tls: true
smtp_from: '"Homelab Alertmanager" <no-reply@cuqmbr.xyz>'
smtp_auth_username: no-reply
smtp_auth_password: !vault |
$ANSIBLE_VAULT;1.1;AES256
31393866316539633838303936366464613935393933333338336531656239333361653664346637
3665316532336339633432303036626339363239343065630a326361306233656632653134643966
39663138303439323636666665653364396132333532383463626337653061356461643734336363
6266353533656566330a346536333836356131343832616631666330653462613436313062643330
61616664646439643839366630396137616533393664323965366630363566333632
templates:
- /etc/prometheus/alertmanager_templates/*.tmpl
route:
group_by:
- env
- hostname
group_wait: 30s
group_interval: 5m
repeat_interval: 1d
receiver: default
receivers:
- name: default
email_configs:
- to: notifications@cuqmbr.xyz
prometheus_alerting_rules:
groups:
- name: DefaultMetrics
rules:
- alert: HostOutOfMemory
expr: (node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes < .10)
for: 2m
labels:
severity: warning
annotations:
summary: Host out of memory (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Node memory is filling up (< 10% left)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
# You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly
- alert: HostMemoryIsUnderutilized
expr: min_over_time(node_memory_MemFree_bytes[1w]) > node_memory_MemTotal_bytes * .8
for: 0m
labels:
severity: info
annotations:
summary: Host Memory is underutilized (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Node memory usage is < 20% for 1 week. Consider reducing memory space. (instance {{'{{'}} $labels.instance {{'}}'}})\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
# Please add ignored mountpoints in node_exporter parameters like
# "--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|run)($|/)".
# Same rule using "node_filesystem_free_bytes" will fire when disk fills for non-root users.
- alert: HostOutOfDiskSpace
expr: (node_filesystem_avail_bytes{fstype!~"^(fuse.*|tmpfs|cifs|nfs)"} / node_filesystem_size_bytes < .10 and on (instance, device, mountpoint) node_filesystem_readonly == 0)
for: 2m
labels:
severity: critical
annotations:
summary: Host out of disk space (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Disk is almost full (< 10% left)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostOutOfInodes
expr: (node_filesystem_files_free / node_filesystem_files < .10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0)
for: 2m
labels:
severity: critical
annotations:
summary: Host out of inodes (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Disk is almost running out of available inodes (< 10% left)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostHighCpuLoad
expr: (avg by (instance) (rate(node_cpu_seconds_total{mode!="idle"}[2m]))) > .90
for: 10m
labels:
severity: warning
annotations:
summary: Host high CPU load (instance {{'{{'}} $labels.instance {{'}}'}})
description: "CPU load is > 80%\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
# You may want to increase the alert manager 'repeat_interval' for this type of alert to daily or weekly
- alert: HostCpuIsUnderutilized
expr: (min by (instance) (rate(node_cpu_seconds_total{mode="idle"}[1h]))) > 0.8
for: 1w
labels:
severity: info
annotations:
summary: Host CPU is underutilized (instance {{'{{'}} $labels.instance {{'}}'}})
description: "CPU load has been < 20% for 1 week. Consider reducing the number of CPUs.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostCpuHighIowait
expr: avg by (instance) (rate(node_cpu_seconds_total{mode="iowait"}[5m])) > .10
for: 0m
labels:
severity: warning
annotations:
summary: Host CPU high iowait (instance {{'{{'}} $labels.instance {{'}}'}})
description: "CPU iowait > 10%. Your CPU is idling waiting for storage to respond.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostSwapIsFillingUp
expr: ((1 - (node_memory_SwapFree_bytes / node_memory_SwapTotal_bytes)) * 100 > 80)
for: 2m
labels:
severity: warning
annotations:
summary: Host swap is filling up (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Swap is filling up (>80%)\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
# - alert: HostSystemdServiceCrashed
# expr: (node_systemd_unit_state{state="failed"} == 1)
# for: 0m
# labels:
# severity: warning
# annotations:
# summary: Host systemd service crashed (instance {{'{{'}} $labels.instance {{'}}'}})
# description: "systemd service crashed\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostOomKillDetected
expr: (increase(node_vmstat_oom_kill[1m]) > 0)
for: 0m
labels:
severity: warning
annotations:
summary: Host OOM kill detected (instance {{'{{'}} $labels.instance {{'}}'}})
description: "OOM kill detected\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostClockSkew
expr: ((node_timex_offset_seconds > 0.05 and deriv(node_timex_offset_seconds[5m]) >= 0) or (node_timex_offset_seconds < -0.05 and deriv(node_timex_offset_seconds[5m]) <= 0))
for: 10m
labels:
severity: warning
annotations:
summary: Host clock skew (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Clock skew detected. Clock is out of sync. Ensure NTP is configured correctly on this host.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
- alert: HostClockNotSynchronising
expr: (min_over_time(node_timex_sync_status[1m]) == 0 and node_timex_maxerror_seconds >= 16)
for: 2m
labels:
severity: warning
annotations:
summary: Host clock not synchronising (instance {{'{{'}} $labels.instance {{'}}'}})
description: "Clock not synchronising. Ensure NTP is configured on this host.\n VALUE = {{'{{'}} $value {{'}}'}}\n LABELS = {{'{{'}} $labels {{'}}'}}"
loki_options:
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
log_level: info
grpc_server_max_concurrent_streams: 1000
common:
instance_addr: 127.0.0.1
path_prefix: /tmp/loki
storage:
filesystem:
chunks_directory: /tmp/loki/chunks
rules_directory: /tmp/loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
limits_config:
metric_aggregation_enabled: true
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
pattern_ingester:
enabled: true
metric_aggregation:
loki_address: localhost:3100
ruler:
alertmanager_url: http://localhost:9093
frontend:
encoding: protobuf
analytics:
reporting_enabled: false
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
pipeline:
inputs:
- name: systemd
tag: systemd
outputs:
- name: loki
host: 192.168.0.252
labels: env=common,hostname=monitoring,service_name=systemd
match: systemd

@@ -0,0 +1,87 @@
---
postgresql_global_config_options:
- option: unix_socket_directories
value: '{{ postgresql_unix_socket_directories | join(",") }}'
- option: log_directory
value: 'log'
- option: listen_addresses
value: "*"
postgresql_auth_method: scram-sha-256
postgresql_hba_entries:
- {type: local, database: all, user: postgres, auth_method: peer}
- {type: host, database: all, user: all, address: '127.0.0.1/32', auth_method: "{{ postgresql_auth_method }}"}
- {type: host, database: forgejo_db, user: forgejo, address: '192.168.0.20/32', auth_method: "{{ postgresql_auth_method }}"}
- {type: host, database: test_db, user: test, address: '0.0.0.0/0', auth_method: "{{ postgresql_auth_method }}"}
postgresql_databases:
- name: forgejo_db
owner: forgejo
# state: absent
postgresql_users:
- name: forgejo
password: !vault |
$ANSIBLE_VAULT;1.1;AES256
63306634323739306638666538376262643231306337343036313737373735303264356238663335
6430623539346236303539333764666137613762623330640a643834326436363631626537396264
31653265343035626439376134633839376432313962323163626436633466386165386332333737
6339386339303566310a623236323630376665623664656563376430643836666433656433386434
62623536376461323563616237316232366633663834333365633334646264313831376661366436
61313538333965313062313138383935663739303935643331333238363463386537383238616466
62343232326661346563353236373163373463383431646334623537616231396137393663376332
35373132333865306634316433663539396632373638626130343331623138643063333561636532
66653139663830353632326639393835343137336235626261353130656336653962303665646664
63303735393638336137666234383363383764313533323031303533343562336230613434316432
383632343762373735633664313431613064
encrypted: true
# state: absent
postgresql_privs:
- db: forgejo_db
roles: forgejo
privs: ALL
type: database
# state: absent
postgres_users_no_log: false
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
pipeline:
inputs:
- name: systemd
tag: systemd_input
filters:
- name: rewrite_tag
match: systemd_input
rule: $_SYSTEMD_UNIT ^(postgresql.service)$ postgresql false
- name: rewrite_tag
match: systemd_input
rule: $_SYSTEMD_UNIT ^(postgresql.service.+|(?!postgresql.service).*)$ systemd false
- name: record_modifier
match: postgresql
allowlist_key:
- MESSAGE
# - name: record_modifier
# match: systemd_tag
# allowlist_key:
# - _SYSTEMD_UNIT
# - MESSAGE
outputs:
- name: loki
host: 192.168.0.252
labels: "env=dev,hostname=postgresql,service_name=postgresql"
match: postgresql
- name: loki
host: 192.168.0.252
labels: "env=dev,hostname=postgresql,service_name=systemd"
match: systemd

@@ -0,0 +1,111 @@
---
searxng_homedir: /opt/searxng
searxng_git_commit: e52e9bb4b699e39d9ce51874ea339d4773717389
searxng_settings:
use_default_settings: true
general:
debug: false
instance_name: "cuqmbr's SearXNG"
donation_url: "https://cuqmbr.xyz/en/donate"
contact_url: "https://cuqmbr.xyz/en/contact"
enable_metrics: true
search:
safe_search: 0
autocomplete: ""
formats:
- html
- csv
- json
server:
base_url: "https://searxng.dev.cuqmbr.xyz"
bind_address: "0.0.0.0"
port: 8888
secret_key: !vault |
$ANSIBLE_VAULT;1.1;AES256
36303663616233326563336237336164383966613633373735363562346533663933393936643036
6237626332643263386530306139383866353739616261650a376236663962643962653335313237
38313232363839383030373338643666333135613838366363363565643530336331613464386236
3039376137306339310a346139613363303433366362336539316632346232636663346664336334
35346366376262316134636262393262386364356336376333383664313637366630376463303232
64383765663032616633346231653563613065653961646666346461613732646233363266373065
33326563383238613135616431323661373165383431646337653361633065626638313937393361
62303634643662313637
image_proxy: true
method: "POST"
default_http_headers:
X-Content-Type-Options: nosniff
X-Download-Options: noopen
X-Robots-Tag: noindex, nofollow
Referrer-Policy: no-referrer
limiter: false
ui:
static_use_hash: true
results_on_new_tab: true
categories_as_tabs:
general:
images:
videos:
files:
plugins:
searx.plugins.calculator.SXNGPlugin:
active: true
searx.plugins.hash_plugin.SXNGPlugin:
active: true
searx.plugins.self_info.SXNGPlugin:
active: true
searx.plugins.tracker_url_remover.SXNGPlugin:
active: true
searx.plugins.unit_converter.SXNGPlugin:
active: true
searx.plugins.ahmia_filter.SXNGPlugin:
active: true
searx.plugins.hostnames.SXNGPlugin:
active: true
searx.plugins.tor_check.SXNGPlugin:
active: true
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
pipeline:
inputs:
- name: systemd
tag: systemd_input
filters:
- name: rewrite_tag
match: systemd_input
rule: $_SYSTEMD_UNIT ^(searxng.service)$ searxng false
- name: rewrite_tag
match: systemd_input
rule: $_SYSTEMD_UNIT ^(searxng.service.+|(?!searxng.service).*)$ systemd false
- name: record_modifier
match: searxng
allowlist_key:
- MESSAGE
# - name: record_modifier
# match: systemd
# allowlist_key:
# - _SYSTEMD_UNIT
# - MESSAGE
outputs:
- name: loki
host: 192.168.0.252
labels: "env=dev,hostname=searxng,service_name=searxng"
match: searxng
- name: loki
host: 192.168.0.252
labels: "env=dev,hostname=searxng,service_name=systemd"
match: systemd

@@ -0,0 +1,21 @@
---
all:
children:
bastion:
hosts:
192.168.0.254:
load_balancers:
hosts:
192.168.0.253:
monitoring:
hosts:
192.168.0.252:
postgresql:
hosts:
192.168.0.3:
main_page:
hosts:
192.168.0.10:
searxng:
hosts:
192.168.0.15:

ansible/notes.txt Normal file

@@ -0,0 +1 @@
export user="ansible"; ansible-playbook -u "${user}" --ssh-common-args "-o ProxyCommand='ssh -p 22 -W %h:%p -q ${user}@bastion.cuqmbr.home'" -J -b --become-method doas -i inventories/hosts.yml 10_monitoring.yml

@@ -0,0 +1,22 @@
---
fluentbit_settings:
service:
flush: 1
daemon: false
log_level: info
http_server: false
#
# parsers:
#
# plugins:
#
pipeline:
inputs:
- name: cpu
tag: cpu.local
interval_sec: 15
outputs:
- name: stdout
match: "*"

@@ -0,0 +1,16 @@
[Unit]
Description=Fluent Bit
Documentation=https://docs.fluentbit.io/manual/
Requires=network.target
After=network.target
[Service]
Type=simple
EnvironmentFile=-/etc/sysconfig/fluent-bit
EnvironmentFile=-/etc/default/fluent-bit
ExecStart=/opt/fluent-bit/bin/fluent-bit -c /etc/fluent-bit/fluent-bit.yaml
Restart=always
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,10 @@
---
- name: Restart fluent-bit service.
ansible.builtin.service:
name: fluent-bit
state: restarted
- name: Reload systemd daemon.
ansible.builtin.systemd_service:
daemon_reload: true

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: fluent_bit
author: cuqmbr-homelab
description: Install Fluent Bit.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- fluent_bit

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.fluent-bit_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,53 @@
---
- name: Create apt keys installation directory.
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
mode: "0755"
- name: Add Fluentbit apt key.
ansible.builtin.get_url:
url: https://packages.fluentbit.io/fluentbit.key
dest: /etc/apt/keyrings/fluentbit.asc
mode: "0444"
- name: Add Fluentbit apt repository.
ansible.builtin.apt_repository:
repo: "deb [signed-by=/etc/apt/keyrings/fluentbit.asc] \
https://packages.fluentbit.io/debian/bookworm bookworm stable main"
filename: fluentbit
state: present
update_cache: true
- name: Install fluent-bit package using apt.
ansible.builtin.apt:
name: fluent-bit
state: present
- name: Install fluent-bit systemd service file.
ansible.builtin.copy:
src: fluent-bit.service
dest: /lib/systemd/system/fluent-bit.service
owner: root
group: root
mode: "0644"
notify:
- Reload systemd daemon.
- Restart fluent-bit service.
- name: Enable and start fluent-bit service.
ansible.builtin.service:
name: fluent-bit
state: started
enabled: true
- name: Install fluent-bit configuration file.
ansible.builtin.template:
src: fluent-bit.yml.j2
dest: /etc/fluent-bit/fluent-bit.yaml
owner: root
group: root
mode: "0644"
notify:
- Restart fluent-bit service.

@@ -0,0 +1,4 @@
---
# Managed with Ansible
{{ fluentbit_settings | ansible.builtin.to_nice_yaml(indent=2, width=80) }}
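For reference, with the role defaults above this template renders roughly the following /etc/fluent-bit/fluent-bit.yaml (a sketch; to_nice_yaml sorts keys alphabetically):

---
# Managed with Ansible
pipeline:
  inputs:
  - interval_sec: 15
    name: cpu
    tag: cpu.local
  outputs:
  - match: '*'
    name: stdout
service:
  daemon: false
  flush: 1
  http_server: false
  log_level: info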

@@ -0,0 +1,56 @@
---
loki_options:
auth_enabled: false
server:
http_listen_port: 3100
grpc_listen_port: 9096
log_level: debug
grpc_server_max_concurrent_streams: 1000
common:
instance_addr: 127.0.0.1
path_prefix: /tmp/loki
storage:
filesystem:
chunks_directory: /tmp/loki/chunks
rules_directory: /tmp/loki/rules
replication_factor: 1
ring:
kvstore:
store: inmemory
query_range:
results_cache:
cache:
embedded_cache:
enabled: true
max_size_mb: 100
limits_config:
metric_aggregation_enabled: true
schema_config:
configs:
- from: 2020-10-24
store: tsdb
object_store: filesystem
schema: v13
index:
prefix: index_
period: 24h
pattern_ingester:
enabled: true
metric_aggregation:
loki_address: localhost:3100
ruler:
alertmanager_url: http://localhost:9093
frontend:
encoding: protobuf
analytics:
reporting_enabled: false

@@ -0,0 +1,6 @@
---
- name: Restart Grafana Loki service.
ansible.builtin.service:
name: loki
state: restarted

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: grafana_loki
author: cuqmbr-homelab
description: Install Grafana Loki.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- grafana_loki

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.grafana-loki_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,23 @@
---
- name: Install Grafana Loki from mirror.
ansible.builtin.apt:
deb: "https://github.com/grafana/loki/releases\
/download/v3.4.3/loki_3.4.3_amd64.deb"
state: present
- name: Install Grafana Loki config.
ansible.builtin.template:
src: loki.yml.j2
dest: /etc/loki/config.yml
owner: root
group: root
mode: "0644"
notify:
- Restart Grafana Loki service.
- name: Enable and start Grafana Loki service.
ansible.builtin.service:
name: loki
state: started
enabled: true

@@ -0,0 +1,4 @@
---
# Managed with Ansible
{{ loki_options | ansible.builtin.to_nice_yaml(indent=2, width=80) }}

@@ -0,0 +1,30 @@
[Unit]
Description=Grafana instance
Documentation=http://docs.grafana.org
Wants=network-online.target
After=network-online.target
After=postgresql.service mariadb.service mysql.service influxdb.service
[Service]
EnvironmentFile=/etc/default/grafana-server
User=grafana
Group=grafana
Type=simple
Restart=on-failure
WorkingDirectory=/usr/share/grafana
RuntimeDirectory=grafana
RuntimeDirectoryMode=0750
ExecStart=/usr/share/grafana/bin/grafana server \
--config=${CONF_FILE} \
--pidfile=${PID_FILE_DIR}/grafana-server.pid \
--packaging=deb \
cfg:default.paths.logs=${LOG_DIR} \
cfg:default.paths.data=${DATA_DIR} \
cfg:default.paths.plugins=${PLUGINS_DIR} \
cfg:default.paths.provisioning=${PROVISIONING_CFG_DIR}
LimitNOFILE=10000
TimeoutStopSec=20
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: grafana_server
author: cuqmbr-homelab
description: Install Grafana Dashboard.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- grafana_server

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.grafana-server_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,25 @@
---
- name: Install Grafana from mirror.
ansible.builtin.apt:
deb: "https://mirrors.tuna.tsinghua.edu.cn\
/grafana/apt/pool/main/g/grafana/grafana_11.6.0_amd64.deb"
state: present
- name: Install Grafana systemd service file.
ansible.builtin.copy:
src: grafana-server.service
dest: /lib/systemd/system/grafana-server.service
owner: root
group: root
mode: "0644"
- name: Reload systemd daemon.
ansible.builtin.systemd_service:
daemon_reload: true
- name: Enable and start grafana service.
ansible.builtin.service:
name: grafana-server
enabled: true
state: started

@@ -0,0 +1,7 @@
---
hugo_settings:
hugo_version: 0.147.9
homedir: /opt/hugo
git_repo: https://gitea.cuqmbr.xyz/cuqmbr/cuqmbr.xyz.git
git_commit: 5b894854d47b41996b1901fa257f8c2cad9224f9

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: hugo
author: cuqmbr-homelab
description: Deploy Hugo (gohugo.io) site generator from git repo.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- hugo

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.hugo_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,73 @@
---
- name: Install hugo deb package from github.
ansible.builtin.apt:
deb: "https://github.com/gohugoio/hugo/releases/download\
/v{{ hugo_settings.hugo_version }}/\
hugo_{{ hugo_settings.hugo_version }}_linux-amd64.deb"
state: present
- name: Install dependencies using apt.
ansible.builtin.apt:
name: git
state: present
- name: Set hugo_source, hugo_compiled and hugo_deploy variables.
ansible.builtin.set_fact:
hugo_source: "{{ hugo_settings.homedir }}/src"
hugo_compiled: "{{ hugo_settings.homedir }}/compiled"
hugo_deploy: /var/www/hugo
- name: Clone hugo site git repository.
ansible.builtin.git:
clone: true
repo: "{{ hugo_settings.git_repo }}"
force: true
recursive: true
single_branch: true
depth: 1
dest: "{{ hugo_source }}"
version: "{{ hugo_settings.git_commit }}"
- name: Create hugo site build directory.
ansible.builtin.file:
state: directory
owner: root
group: root
mode: "0775"
path: "{{ hugo_compiled }}"
- name: Remove old compiled files.
ansible.builtin.file:
path: "{{ hugo_compiled }}"
state: absent
changed_when: false
- name: Build hugo site.
ansible.builtin.shell:
chdir: "{{ hugo_source }}"
cmd: "hugo -d {{ hugo_compiled }}"
- name: Create hugo site deployment directory.
ansible.builtin.file:
state: directory
owner: root
group: root
mode: "0775"
path: "{{ hugo_deploy }}"
- name: Remove old deployed files.
ansible.builtin.file:
path: "{{ hugo_deploy }}"
state: absent
changed_when: false
- name: Install new program files.
ansible.builtin.copy:
remote_src: true
src: "{{ hugo_compiled }}/"
dest: "{{ hugo_deploy }}"
owner: root
group: root
mode: "0775"
changed_when: false

@@ -0,0 +1,31 @@
---
# mkpasswd --method=SHA-512 --stdin
# default: 0000
# ansible-vault encrypt_string --ask-vault-password --name 'password_hash'
# default vault password: 0000
users:
- name: admin
password_hash: !vault |
$ANSIBLE_VAULT;1.1;AES256
62386435663164656266626631323436353938336333326339333562633063383636653838373161
6637303930616363646630653532623738353961373032300a333264633165396663653739333664
37386336313137656463643437303331643965663737373035616638363430353730613036343566
3864336137386465330a343834663733653365323634333663666566613330393662613365646630
31633162323864633337386462623936326437303131383130343538346231643537303462376465
65396430373433326262383636353162333632343632383433616236646631663765396339323037
32306630363465376161343939663032666530353031316433656464643366353066346465613034
66363462313665666261336263336632376166306163323261663633626163396665613266366230
38313133663139346635323062393731303134616566373436623538353430333932
ssh_public_keys:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILYtmWkAnyDE\
+VpRy8S41AFZJPQzb4SdAGqaLW9KDTt4 example@key"
opendoas_settings: "permit persist admin as root"
- name: ansible
password_hash: "!"
ssh_public_keys:
- "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILYtmWkAnyDE\
+VpRy8S41AFZJPQzb4SdAGqaLW9KDTt4 example@key"
opendoas_settings: "permit nopass ansible"

@@ -0,0 +1,8 @@
Include /etc/ssh/sshd_config.d/*.conf
AuthorizedKeysFile .ssh/authorized_keys
PasswordAuthentication no
PermitRootLogin prohibit-password
Subsystem sftp /usr/lib/ssh/sftp-server

@@ -0,0 +1,6 @@
---
- name: Restart sshd service.
ansible.builtin.service:
name: sshd
state: restarted

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: init
author: cuqmbr-homelab
description: Initialize newly created Debian server.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- init

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.init_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,29 @@
---
- name: Create user.
ansible.builtin.user:
state: present
name: "{{ item.name }}"
password: "{{ item.password_hash }}"
create_home: true
shell: /bin/bash
- name: Create ~/.ssh directory.
ansible.builtin.file:
path: "/home/{{ item.name }}/.ssh"
state: directory
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: "0700"
- name: Set variable for template.
ansible.builtin.set_fact:
ssh_public_keys: "{{ item.ssh_public_keys }}"
- name: Create authorized_keys.
ansible.builtin.template:
src: authorized_keys.j2
dest: "/home/{{ item.name }}/.ssh/authorized_keys"
owner: "{{ item.name }}"
group: "{{ item.name }}"
mode: "0600"

@@ -0,0 +1,50 @@
---
- name: Configure users.
ansible.builtin.include_tasks:
file: configure_users.yml
loop: "{{ users }}"
- name: Install opendoas.
ansible.builtin.apt:
name:
- opendoas
state: present
- name: Set opendoas_settings variable.
block:
- name: Initialize the variable with an empty list.
ansible.builtin.set_fact:
opendoas_settings: []
- name: Append settings from each user to the list.
ansible.builtin.set_fact:
opendoas_settings: "{{ opendoas_settings + [item.opendoas_settings] }}"
loop: "{{ users }}"
- name: Install opendoas config.
ansible.builtin.template:
src: doas.conf.j2
dest: /etc/doas.conf
owner: root
group: root
mode: "0644"
- name: Install openssh-server.
ansible.builtin.apt:
name:
- openssh-server
state: present
- name: Install sshd config.
ansible.builtin.copy:
src: sshd_config
dest: /etc/ssh/sshd_config
owner: root
group: root
mode: "0644"
notify:
- Restart sshd service.

@@ -0,0 +1,3 @@
{% for item in ssh_public_keys %}
{{ item }}
{% endfor %}

@@ -0,0 +1,3 @@
{% for line in opendoas_settings %}
{{ line }}
{% endfor %}
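With the two users from the init role defaults above, the rendered /etc/doas.conf comes out as simply:

permit persist admin as root
permit nopass ansible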

@@ -0,0 +1,30 @@
---
nginx_settings:
server_tokens: false
gzip: true
ssl_protocols:
- TLSv1.2
- TLSv1.3
# load_balancers:
# http:
# - upstream:
# name: searxng
# servers:
# - 192.168.0.10:8888
# server:
# listen_port: 80
# names:
# - searxng.cuqmbr.xyz
# statements:
# - proxy_set_header Host $http_host
# statics:
# - root: /var/www/website
# index: index.html
# listen_port: 80
# names:
# - static.cuqmbr.xyz
# statements:
# - proxy_set_header Host $http_host
# TODO: Add https configuration

@@ -0,0 +1,6 @@
---
- name: Reload nginx service.
ansible.builtin.service:
name: nginx
state: reloaded

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: nginx
author: cuqmbr-homelab
description: Initialize newly created server.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,42 @@
---
- name: Converge
hosts: all
gather_facts: false
vars:
nginx_settings:
server_tokens: false
gzip: true
ssl_protocols:
- TLSv1.2
- TLSv1.3
load_balancers:
http:
- upstream:
name: searxng
servers:
- 192.168.0.10:8888
server:
listen_port: 80
names:
- searxng.cuqmbr.xyz
statements:
- proxy_set_header Host $http_host
# statics:
# - root: /var/www/website
# index: index.html
# listen_port: 8080
# names:
# - static.cuqmbr.xyz
# statements:
# - proxy_set_header Host $http_host
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- nginx

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.nginx_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,23 @@
---
- name: Install needed packages.
ansible.builtin.apt:
pkg:
- nginx
state: present
- name: Enable and start nginx service.
ansible.builtin.service:
name: nginx
state: started
enabled: true
- name: Install nginx configuration file.
ansible.builtin.template:
src: nginx.conf.j2
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: "0644"
notify:
- Reload nginx service.

@@ -0,0 +1,110 @@
user www-data;
worker_processes auto;
pid /run/nginx.pid;
events {
worker_connections 768;
# multi_accept on;
}
http {
##
# Basic Settings
##
sendfile on;
tcp_nopush on;
types_hash_max_size 2048;
server_tokens {{ nginx_settings.server_tokens | ternary('on', 'off') }};
include /etc/nginx/mime.types;
default_type application/octet-stream;
##
# SSL Settings
##
ssl_protocols {{ nginx_settings.ssl_protocols|join(' ') }};
ssl_prefer_server_ciphers on;
##
# Logging Settings
##
access_log syslog:server=unix:/dev/log;
error_log syslog:server=unix:/dev/log;
##
# Gzip Settings
##
gzip {{ nginx_settings.gzip | ternary('on', 'off') }};
gzip_comp_level 6;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;
gzip_buffers 16 8k;
gzip_http_version 1.1;
##
# Virtual Host Configs
##
# include /etc/nginx/conf.d/*.conf;
# include /etc/nginx/sites-enabled/*;
# Prometheus Nginx Exporter
server {
listen 127.0.0.1:8080;
location /stub_status {
stub_status;
}
}
{% if nginx_settings.load_balancers is not undefined %}
##
# Load Balancers
##
{% for http_load_balancer in nginx_settings.load_balancers.http %}
upstream {{ http_load_balancer.upstream.name }} {
{% for server in http_load_balancer.upstream.servers %}
server {{ server }};
{% endfor %}
}
server {
listen {{ http_load_balancer.server.listen_port }};
server_name {{ http_load_balancer.server.names|join(' ') }};
location / {
proxy_pass http://{{ http_load_balancer.upstream.name }};
{% if http_load_balancer.server.statements is not undefined %}
{% for statement in http_load_balancer.server.statements %}
{{ statement }};
{% endfor %}
{% endif %}
}
}
{% endfor %}
{% endif %}
{% if nginx_settings.statics is not undefined %}
##
# Static Servings
##
{% for static in nginx_settings.statics %}
server {
listen {{ static.listen_port }};
server_name {{ static.names|join(' ') }};
root {{ static.root }};
index {{ static.index }};
location / {
try_files $uri $uri/ =404;
}
}
{% endfor %}
{% endif %}
}
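As a concrete example, the load_balancers entry from the molecule converge playbook above renders to roughly this upstream/server pair (a sketch):

upstream searxng {
    server 192.168.0.10:8888;
}
server {
    listen 80;
    server_name searxng.cuqmbr.xyz;
    location / {
        proxy_pass http://searxng;
        proxy_set_header Host $http_host;
    }
}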

@@ -0,0 +1 @@
Subproject commit 845a175a0de0308334a188bdf7283a6c4999b5f2

@@ -0,0 +1,119 @@
---
prometheus_alertmanager_options:
# Sample configuration.
# See https://prometheus.io/docs/alerting/configuration/ for documentation.
global:
# The smarthost and SMTP sender used for mail notifications.
smtp_smarthost: 'localhost:25'
smtp_from: 'alertmanager@example.org'
smtp_auth_username: 'alertmanager'
smtp_auth_password: 'password'
# The directory from which notification templates are read.
templates:
- '/etc/prometheus/alertmanager_templates/*.tmpl'
# The root route on which each incoming alert enters.
route:
# The labels by which incoming alerts are grouped together. For example,
# multiple alerts coming in for cluster=A and alertname=LatencyHigh would
# be batched into a single group.
group_by: ['alertname', 'cluster', 'service']
# When a new group of alerts is created by an incoming alert, wait at
# least 'group_wait' to send the initial notification.
# This way ensures that you get multiple alerts for the same group that
# start firing shortly after another are batched together
# on the first notification.
group_wait: 30s
# When the first notification was sent, wait 'group_interval'
# to send a batch of new alerts that started firing for that group.
group_interval: 5m
# If an alert has successfully been sent, wait 'repeat_interval' to
# resend them.
repeat_interval: 3h
# A default receiver
receiver: team-X-mails
# All the above attributes are inherited by all child
# routes and can be overwritten on each.
# The child route trees.
routes:
# This route performs a regular expression match on alert labels to
# catch alerts that are related to a list of services.
- match_re:
service: ^(foo1|foo2|baz)$
receiver: team-X-mails
# The service has a sub-route for critical alerts, any alerts
# that do not match, i.e. severity != critical, fall-back to the
# parent node and are sent to 'team-X-mails'
routes:
- match:
severity: critical
receiver: team-X-pager
- match:
service: files
receiver: team-Y-mails
routes:
- match:
severity: critical
receiver: team-Y-pager
# This route handles all alerts coming from a database service.
# If there's no team to handle it, it defaults to the DB team.
- match:
service: database
receiver: team-DB-pager
# Also group alerts by affected database.
group_by: [alertname, cluster, database]
routes:
- match:
owner: team-X
receiver: team-X-pager
- match:
owner: team-Y
receiver: team-Y-pager
# Inhibition rules allow muting a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
- source_match:
severity: 'critical'
target_match:
severity: 'warning'
# Apply inhibition if the alertname is the same.
equal: ['alertname', 'cluster', 'service']
receivers:
- name: 'team-X-mails'
email_configs:
- to: 'team-X+alerts@example.org'
- name: 'team-X-pager'
email_configs:
- to: 'team-X+alerts-critical@example.org'
pagerduty_configs:
- service_key: <team-X-key>
- name: 'team-Y-mails'
email_configs:
- to: 'team-Y+alerts@example.org'
- name: 'team-Y-pager'
pagerduty_configs:
- service_key: <team-Y-key>
- name: 'team-DB-pager'
pagerduty_configs:
- service_key: <team-DB-key>

@@ -0,0 +1,6 @@
---
- name: Restart prometheus-alertmanager service.
ansible.builtin.service:
name: prometheus-alertmanager
state: restarted

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: prometheus_alertmanager
author: cuqmbr-homelab
description: Install Prometheus Alertmanager.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- prometheus_alertmanager

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.prometheus-alertmanager_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,31 @@
---
- name: Install prometheus-alertmanager apt package.
ansible.builtin.apt:
name: prometheus-alertmanager
state: present
# Combining the dict with itself forces Ansible to resolve the !vault
# values, so the rendered config contains plaintext rather than ciphertext.
- name: Decrypt secrets in config file.
ansible.builtin.set_fact:
prometheus_alertmanager_options: >-
{{
prometheus_alertmanager_options |
combine(prometheus_alertmanager_options, recursive=true)
}}
no_log: true
- name: Install prometheus-alertmanager config.
ansible.builtin.template:
src: alertmanager.yml.j2
dest: /etc/prometheus/alertmanager.yml
owner: root
group: root
mode: "0444"
notify:
- Restart prometheus-alertmanager service.
- name: Enable and start prometheus-alertmanager service.
ansible.builtin.service:
name: prometheus-alertmanager
state: started
enabled: true

@@ -0,0 +1,4 @@
---
# Managed with Ansible
{{ prometheus_alertmanager_options | ansible.builtin.to_nice_yaml(indent=2, width=80) }}

@@ -0,0 +1,9 @@
galaxy_info:
role_name: prometheus_nginx_exporter
author: cuqmbr-homelab
description: Install Prometheus Nginx Exporter.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,17 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
- name: Run cuqmbr-homelab.nginx role.
ansible.builtin.include_role:
name: ../../nginx
roles:
- prometheus_nginx_exporter

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.prometheus-nginx-exporter_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,12 @@
---
- name: Install prometheus-nginx-exporter apt package.
ansible.builtin.apt:
name: prometheus-nginx-exporter
state: present
- name: Enable and start prometheus-nginx-exporter service.
ansible.builtin.service:
name: prometheus-nginx-exporter
state: started
enabled: true

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: prometheus_node_exporter
author: cuqmbr-homelab
description: Install Prometheus Node Exporter.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- prometheus_node_exporter

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.prometheus-node-exporter_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,12 @@
---
- name: Install prometheus-node-exporter apt package.
ansible.builtin.apt:
name: prometheus-node-exporter
state: present
- name: Enable and start prometheus-node-exporter service.
ansible.builtin.service:
name: prometheus-node-exporter
state: started
enabled: true

@@ -0,0 +1,59 @@
---
prometheus_options:
global:
# Set the scrape interval to every 15 seconds. Default is every 1 minute.
scrape_interval: 15s
# Evaluate rules every 15 seconds. The default is every 1 minute.
evaluation_interval: 15s
# scrape_timeout is set to the global default (10s).
# Attach these labels to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
monitor: 'example'
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets: ['localhost:9093']
# Load rules and evaluate them according to the global 'evaluation_interval'.
rule_files:
- alerting_rules/*.yml
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>`.
- job_name: 'prometheus'
# Override the global default and scrape targets from this job.
scrape_interval: 5s
scrape_timeout: 5s
# metrics_path defaults to '/metrics'
# scheme defaults to 'http'.
static_configs:
- targets: ['localhost:9090']
- job_name: node
# If prometheus-node-exporter is installed, grab stats about the local
# machine by default.
static_configs:
- targets: ['localhost:9100']
prometheus_alerting_rules:
groups:
- name: ExampleRedisGroup
rules:
- alert: ExampleRedisDown
expr: redis_up{} == 0
for: 2m
labels:
severity: critical
annotations:
summary: "Redis instance down"
description: "Whatever"

@@ -0,0 +1,16 @@
[Unit]
Description=Monitoring system and time series database
Documentation=https://prometheus.io/docs/introduction/overview/ man:prometheus(1)
After=time-sync.target
[Service]
Restart=on-failure
User=prometheus
EnvironmentFile=/etc/default/prometheus
ExecStart=/usr/bin/prometheus $ARGS
ExecReload=/bin/kill -HUP $MAINPID
TimeoutStopSec=20s
SendSIGKILL=no
[Install]
WantedBy=multi-user.target

@@ -0,0 +1,6 @@
---
- name: Reload prometheus service.
ansible.builtin.service:
name: prometheus
state: reloaded

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: prometheus_server
author: cuqmbr-homelab
description: Install Prometheus Server.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- prometheus_server

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.prometheus-server_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,52 @@
---
- name: Install prometheus apt package.
ansible.builtin.apt:
name: prometheus
state: present
- name: Install prometheus systemd service file.
ansible.builtin.copy:
src: prometheus.service
dest: /lib/systemd/system/prometheus.service
owner: root
group: root
mode: "0644"
- name: Reload systemd daemon.
ansible.builtin.systemd_service:
daemon_reload: true
- name: Install prometheus config.
ansible.builtin.template:
src: prometheus.yml.j2
dest: /etc/prometheus/prometheus.yml
owner: root
group: root
mode: "0644"
notify:
- Reload prometheus service.
- name: Create alerting rules directory.
ansible.builtin.file:
path: /etc/prometheus/alerting_rules
state: directory
owner: root
group: root
mode: "0755"
- name: Install alerting rules config.
ansible.builtin.template:
src: alerting_rules.yml.j2
dest: /etc/prometheus/alerting_rules/alerting_rules.yml
owner: root
group: root
mode: "0644"
notify:
- Reload prometheus service.
- name: Enable and start prometheus service.
ansible.builtin.service:
name: prometheus
state: started
enabled: true

@@ -0,0 +1,4 @@
---
# Managed with Ansible
{{ prometheus_alerting_rules | ansible.builtin.to_nice_yaml(indent=2) }}

@@ -0,0 +1,4 @@
---
# Managed with Ansible
{{ prometheus_options | ansible.builtin.to_nice_yaml(indent=2) }}

@@ -0,0 +1,49 @@
---
searxng_homedir: /opt/searxng
searxng_git_commit: c185d076894ebbdb5db921c448c240d04915847b
searxng_settings:
# SearXNG settings
use_default_settings: true
general:
debug: false
instance_name: "cuqmbr's SearXNG"
search:
safe_search: 2
autocomplete: 'duckduckgo'
server:
secret_key: "ultrasecretkey_change_me"
limiter: false
image_proxy: true
# public URL of the instance, to ensure correct inbound links.
# Is overwritten by ${SEARXNG_URL}.
base_url: http://example.com/location
# redis:
# URL to connect redis database. Is overwritten by ${SEARXNG_REDIS_URL}.
# url: unix:///usr/local/searxng-redis/run/redis.sock?db=0
ui:
static_use_hash: true
# preferences:
# lock:
# - autocomplete
# - method
enabled_plugins:
- 'Hash plugin'
- 'Self Information'
- 'Tracker URL remover'
- 'Ahmia blacklist'
# - 'Hostnames plugin' # see 'hostnames' configuration below
# - 'Open Access DOI rewrite'
# plugins:
# - only_show_green_results

@@ -0,0 +1,10 @@
---
- name: Reload systemd daemon.
ansible.builtin.systemd_service:
daemon_reload: true
- name: Restart searxng service.
ansible.builtin.systemd_service:
name: searxng
state: restarted

@@ -0,0 +1,10 @@
---
galaxy_info:
role_name: searxng
author: cuqmbr-homelab
description: Install SearXNG.
# issue_tracker_url: http://example.com/issue/tracker
license: MIT
min_ansible_version: "2.1"
galaxy_tags: []
dependencies: []

@@ -0,0 +1,14 @@
---
- name: Converge
hosts: all
gather_facts: false
pre_tasks:
- name: Update apt cache.
ansible.builtin.apt:
update_cache: true
cache_valid_time: 86400
roles:
- searxng

@@ -0,0 +1,12 @@
---
driver:
name: docker
platforms:
- name: cuqmbr-homelab.searxng_debian-12
image: docker.io/geerlingguy/docker-debian12-ansible:latest
pre_build_image: true
command: ${MOLECULE_DOCKER_COMMAND:-""}
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:rw
cgroupns_mode: host
privileged: true

@@ -0,0 +1,120 @@
---
- name: Install dependencies.
ansible.builtin.apt:
name:
- python3-dev
- python3-babel
- python3-venv
- uwsgi
- uwsgi-plugin-python3
- git
- build-essential
- libxslt-dev
- zlib1g-dev
- libffi-dev
- libssl-dev
state: present
- name: Create searxng user.
ansible.builtin.user:
state: present
name: searxng
password: "!"
system: true
create_home: true
home: "{{ searxng_homedir }}"
shell: /usr/sbin/nologin
- name: Set searxng_source and searxng_pyenv variable.
ansible.builtin.set_fact:
searxng_source: "{{ searxng_homedir }}/src"
searxng_pyenv: "{{ searxng_homedir }}/pyenv"
searxng_compiled: "{{ searxng_homedir }}/compiled"
- name: Clone searxng git repository.
ansible.builtin.git:
clone: true
repo: https://github.com/searxng/searxng.git
dest: "{{ searxng_source }}"
depth: 1
version: "{{ searxng_git_commit }}"
force: true
single_branch: true
- name: Install pip dependencies.
ansible.builtin.pip:
virtualenv: "{{ searxng_pyenv }}"
virtualenv_command: python3 -m venv
name:
- pip
- setuptools
- wheel
- pyyaml
state: present
- name: Compile searxng.
ansible.builtin.pip:
virtualenv: "{{ searxng_pyenv }}"
requirements: "{{ searxng_source }}/requirements.txt"
extra_args: "--use-pep517 --no-build-isolation \
-e {{ searxng_source }}"
state: present
- name: Remove old program files.
ansible.builtin.file:
path: "{{ searxng_compiled }}"
state: absent
changed_when: false
- name: Install new program files.
ansible.builtin.copy:
remote_src: true
src: "{{ searxng_source }}/"
dest: "{{ searxng_compiled }}"
owner: searxng
group: searxng
mode: "0775"
changed_when: false
- name: Create searxng settings directory.
ansible.builtin.file:
state: directory
owner: searxng
group: searxng
mode: "0775"
path: /etc/searxng
- name: Decrypt secrets in settings file.
  ansible.builtin.set_fact:
    # Re-templating the dict through combine() forces Ansible to resolve
    # any inline !vault values now, so the rendered settings.yml contains
    # plaintext secrets rather than vault blobs.
    searxng_settings: >-
      {{ searxng_settings | combine(searxng_settings, recursive=true) }}
no_log: true
- name: Install searxng settings file.
ansible.builtin.template:
src: settings.yml.j2
dest: /etc/searxng/settings.yml
owner: searxng
group: searxng
mode: "0600"
notify:
- Reload systemd daemon.
- Restart searxng service.
- name: Create systemd unit file.
ansible.builtin.template:
src: searxng.service.j2
dest: /etc/systemd/system/searxng.service
owner: root
group: root
mode: "0444"
notify:
- Reload systemd daemon.
- Restart searxng service.
- name: Enable and start searxng service.
ansible.builtin.service:
name: searxng
enabled: true
state: started
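Once the play converges, a quick smoke test confirms the webapp is answering. This sketch assumes SearXNG's default bind of 127.0.0.1:8888; adjust if `server.bind_address` or `server.port` are overridden in settings.yml:

import urllib.request

# Expect HTTP 200 from the local webapp once the service is up.
with urllib.request.urlopen("http://127.0.0.1:8888/", timeout=5) as resp:
    print(resp.status)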

View File

@ -0,0 +1,18 @@
[Unit]
Description=SearXNG
After=multi-user.target
[Service]
Type=simple
User=searxng
Group=searxng
WorkingDirectory={{ searxng_compiled }}
ExecStart={{ searxng_pyenv }}/bin/python {{ searxng_compiled }}/searx/webapp.py
Environment="SEARXNG_SETTINGS_PATH=/etc/searxng/settings.yml"
Restart=always
RestartSec=2
[Install]
WantedBy=multi-user.target
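With `Type=simple`, the webapp's stdout goes to the journal, so `journalctl -u searxng -f` tails its logs; `Restart=always` with `RestartSec=2` makes systemd respawn it two seconds after any exit.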

View File

@ -0,0 +1,4 @@
---
# Managed with Ansible
{{ searxng_settings | ansible.builtin.to_nice_yaml(indent=2, width=80) }}

5
terraform/.gitignore vendored Normal file
View File

@ -0,0 +1,5 @@
terraform.tfstate
terraform.tfstate.*
.terraform.tfstate.lock.info
.terraform
terraform.tfvars

9
terraform/.terraform.lock.hcl generated Normal file
View File

@ -0,0 +1,9 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
provider "local/bpg/proxmox" {
version = "0.78.2"
hashes = [
"h1:N/p0BJCms7y2MBJmYjoWXFtxocN55PKYz1ulwzPTO00=",
]
}
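The `local/` provider address and the single `h1:` checksum suggest the provider was installed from a filesystem mirror rather than the public registry; registry installs would also record `zh:` hashes.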

110
terraform/bastion.tf Normal file
View File

@ -0,0 +1,110 @@
resource "proxmox_virtual_environment_container" "bastion" {
node_name = "pve"
vm_id = 6000
tags = ["dev", "prod", "common", "bastion"]
unprivileged = true
cpu {
cores = 1
}
memory {
dedicated = 512
}
disk {
datastore_id = var.datastore_id
size = 4
}
network_interface {
bridge = var.external_network_bridge_name
name = "eth-ext"
firewall = true
enabled = true
}
network_interface {
bridge = var.development_network_bridge_name
name = "eth-dev"
firewall = true
enabled = true
}
initialization {
hostname = "bastion"
ip_config {
ipv4 {
address = "dhcp"
}
}
ip_config {
ipv4 {
address = "192.168.0.254/24"
# gateway = "192.168.0.1"
}
}
user_account {
keys = [var.ssh_public_key]
}
}
operating_system {
# TODO: make into a variable
template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
type = "debian"
}
started = true
startup {
order = 100
up_delay = 0
down_delay = 0
}
features {
nesting = true
}
}
resource "proxmox_virtual_environment_firewall_options" "bastion" {
depends_on = [proxmox_virtual_environment_container.bastion]
node_name = proxmox_virtual_environment_container.bastion.node_name
vm_id = proxmox_virtual_environment_container.bastion.vm_id
enabled = true
dhcp = true
input_policy = "DROP"
output_policy = "ACCEPT"
}
resource "proxmox_virtual_environment_firewall_rules" "bastion" {
depends_on = [proxmox_virtual_environment_container.bastion]
node_name = proxmox_virtual_environment_container.bastion.node_name
vm_id = proxmox_virtual_environment_container.bastion.vm_id
rule {
type = "in"
action = "ACCEPT"
dport = "22"
proto = "tcp"
}
rule {
type = "in"
action = "ACCEPT"
dport = "8"
proto = "icmp"
}
rule {
security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name
comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node."
}
}

View File

@ -0,0 +1,35 @@
resource "proxmox_virtual_environment_firewall_ipset" "loggers" {
name = "loggers"
comment = "Nodes that send logs to Monitoring Node."
cidr {
name = "192.168.0.254"
comment = "bastion"
}
cidr {
name = "192.168.0.253"
comment = "load-balancer"
}
cidr {
name = "192.168.0.252"
comment = "monitoring"
}
cidr {
name = "192.168.0.3"
comment = "postgresql"
}
cidr {
name = "192.168.0.10"
comment = "main-page"
}
cidr {
name = "192.168.0.15"
comment = "searxng"
}
}
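Firewall rules can then match the whole set at once by prefixing its name with a plus sign, the Proxmox firewall convention, e.g. `source = "+loggers"` in a rule block, rather than repeating each address.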

View File

@ -0,0 +1,51 @@
resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_node_exporter" {
name = "prom-node-exp"
comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node."
rule {
type = "in"
source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0]
proto = "tcp"
dport = "9100"
action = "ACCEPT"
}
}
resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_nginx_exporter" {
name = "prom-nginx-exp"
comment = "Allow Prometheus server to pull Prometheus nginx exporter from Monitoring Node."
rule {
type = "in"
source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0]
proto = "tcp"
dport = "9113"
action = "ACCEPT"
}
}
resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_server_exporter" {
name = "prom-srv-exp"
comment = "Allow Prometheus server to pull Prometheus default exporter from Monitoring Node."
rule {
type = "in"
source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0]
proto = "tcp"
dport = "9090"
action = "ACCEPT"
}
}
resource "proxmox_virtual_environment_cluster_firewall_security_group" "prometheus_alertmanager" {
name = "prom-alert"
comment = "Access Prometheus Alertmanager from Monitoring Node."
rule {
type = "in"
source = split("/", proxmox_virtual_environment_container.monitoring.initialization[0].ip_config[0].ipv4[0].address)[0]
proto = "tcp"
dport = "9093"
action = "ACCEPT"
}
}

View File

@ -0,0 +1,109 @@
resource "proxmox_virtual_environment_container" "forgejo" {
node_name = "pve"
vm_id = 1200
tags = ["dev"]
unprivileged = true
cpu {
cores = 1
}
memory {
dedicated = 1536
}
disk {
datastore_id = var.datastore_id
size = 32
}
network_interface {
bridge = var.development_network_bridge_name
name = "eth-dev"
firewall = true
enabled = true
}
initialization {
hostname = "forgejo"
ip_config {
ipv4 {
address = "192.168.0.12/24"
gateway = "192.168.0.1"
}
}
user_account {
keys = [var.ssh_public_key]
}
}
operating_system {
template_file_id = "local:vztmpl/debian-12-standard_12.7-1_amd64.tar.zst"
type = "debian"
}
started = true
startup {
order = 500
up_delay = 0
down_delay = 0
}
features {
nesting = true
}
}
resource "proxmox_virtual_environment_firewall_options" "forgejo" {
depends_on = [proxmox_virtual_environment_container.forgejo]
node_name = proxmox_virtual_environment_container.forgejo.node_name
vm_id = proxmox_virtual_environment_container.forgejo.vm_id
enabled = true
dhcp = true
input_policy = "DROP"
output_policy = "ACCEPT"
}
resource "proxmox_virtual_environment_firewall_rules" "forgejo" {
depends_on = [proxmox_virtual_environment_container.forgejo]
node_name = proxmox_virtual_environment_container.forgejo.node_name
vm_id = proxmox_virtual_environment_container.forgejo.vm_id
rule {
type = "in"
source = split("/", proxmox_virtual_environment_container.bastion.initialization[0].ip_config[1].ipv4[0].address)[0]
proto = "tcp"
dport = "22"
action = "ACCEPT"
comment = "SSH from Bastion."
}
rule {
type = "in"
proto = "icmp"
dport = "8"
action = "ACCEPT"
comment = "Ping."
}
rule {
type = "in"
source = split("/", proxmox_virtual_environment_container.load_balancer.initialization[0].ip_config[1].ipv4[0].address)[0]
proto = "tcp"
dport = "3000"
action = "ACCEPT"
comment = "Forgejo."
}
rule {
security_group = proxmox_virtual_environment_cluster_firewall_security_group.prometheus_node_exporter.name
comment = "Allow Prometheus server to pull Prometheus node exporter from Monitoring Node."
}
}

Some files were not shown because too many files have changed in this diff.