Compare commits

...

9 Commits

Author SHA1 Message Date
b5ce914073 Grindin 2025-07-12 23:41:13 -04:00
dd83d30d14 Added stuff 2025-07-11 23:07:32 -04:00
43f7bb16b2 Fix skill issues 2025-07-08 15:07:56 -04:00
4d6a28942d Deploy more shit 2025-07-07 01:49:01 -04:00
a3983ddfcb Start setting up caddy 2025-07-06 17:51:48 -04:00
172d49be25 Bangers only 2025-07-04 02:46:10 -04:00
df070e3c93 Shit is sweet out here 2025-07-04 01:49:35 -04:00
b79c1563d5 Start refactoring to new structure 2025-07-03 22:03:13 -04:00
fe940fe107 Updates 2025-07-02 23:16:50 -04:00
81 changed files with 1913 additions and 28 deletions

View File

@ -1,3 +1,4 @@
{
"ansible.python.interpreterPath": "/bin/python3"
"ansible.python.interpreterPath": "/bin/python3",
"ansible.validation.lint.enabled": false
}

View File

@ -1,3 +1,6 @@
[defaults]
inventory = hosts.yml
roles_path = ./roles
vault_password_file = ~/.homelab-ansible-vault-pass
vars_files="group_vars/secrets.yml"
lookup_plugins=./lookup_plugins

450
assets/Caddyfile Normal file
View File

@ -0,0 +1,450 @@
# DO NOT EDIT THIS FILE -- OPNsense auto-generated file
# caddy_user=root
# Global Options
{
log {
include http.log.access.1b833379-a450-474a-ad74-2aee6a5d836a
include http.log.access.46a2fd7c-cbe2-4e99-9d10-9c10a4aa2e92
include http.log.access.7293a3a7-ca31-4d7e-be90-780cb4240e6b
include http.log.access.5efd6136-26c6-4af7-9404-75201f69b046
output net unixgram//var/run/caddy/log.sock {
}
format json {
time_format rfc3339
}
}
servers {
protocols h1 h2
}
dynamic_dns {
provider cloudflare 0zKl_ezOn2j4HHjS6ew3k3KTqH4rLIYHiV-TDYxe
domains {
thatshit.live *
thatshit.live draw
thatshit.live checkif
thatshit.live is
thatshit.live paste
thatshit.live crop
thatshit.live blaze
thatshit.live do
thatshit.live upload
blinker.club *
blinker.club wiki
blinker.club kuma
blinker.club pass
blinker.club pdf
blinker.club tables
blinker.club linkwarden
blinker.club auth
blinker.club readeck
blinker.club watch
blinker.club mediarequest
blinker.club wizarr
blinker.club memos
blinker.club vaultwarden
blinker.club fileshare
blinker.club @
club blinker
thegrind.dev *
thegrind.dev gist
thegrind.dev tools
thegrind.dev wiki
thegrind.dev auth
thegrind.dev blog
thegrind.dev tunnel
thegrind.dev plane
thegrind.dev tasks
thegrind.dev tianji
usefor.dev *
}
}
grace_period 10s
import /usr/local/etc/caddy/caddy.d/*.global
}
# Reverse Proxy Configuration
# Reverse Proxy Domain: "1b833379-a450-474a-ad74-2aee6a5d836a"
*.thatshit.live {
log 1b833379-a450-474a-ad74-2aee6a5d836a
tls {
issuer acme {
dns cloudflare 0zKl_ezOn2j4HHjS6ew3k3KTqH4rLIYHiV-TDYxe
}
}
@60825f6e-1b8f-4d29-9af3-19572e830eb2 {
host draw.thatshit.live
}
handle @60825f6e-1b8f-4d29-9af3-19572e830eb2 {
handle {
reverse_proxy 10.89.0.101:5001 {
transport http {
}
}
}
}
@8d17c8c4-d282-4922-acc7-3635d24b2eba {
host checkif.thatshit.live
}
handle @8d17c8c4-d282-4922-acc7-3635d24b2eba {
}
@7c9ccb9d-c8f6-4392-a032-d7a1fcf16bca {
host is.thatshit.live
}
handle @7c9ccb9d-c8f6-4392-a032-d7a1fcf16bca {
handle {
reverse_proxy 10.89.0.100 {
transport http {
}
}
}
}
@ceb5e51a-9b6d-4931-ae38-249fdfbab0dc {
host paste.thatshit.live
}
handle @ceb5e51a-9b6d-4931-ae38-249fdfbab0dc {
handle {
reverse_proxy 10.89.0.101:5009 {
transport http {
}
}
}
}
@36f95298-290c-4ed9-bac4-e657e7f12bfa {
host crop.thatshit.live
}
handle @36f95298-290c-4ed9-bac4-e657e7f12bfa {
handle {
reverse_proxy 10.89.0.101:6354 {
transport http {
}
}
}
}
@55a3bfea-48b4-44cf-ad4c-e4457fa04a1c {
host blaze.thatshit.live
}
handle @55a3bfea-48b4-44cf-ad4c-e4457fa04a1c {
}
@54e6acf9-1a0e-41f2-b31f-1e99ac35eab1 {
host do.thatshit.live
}
handle @54e6acf9-1a0e-41f2-b31f-1e99ac35eab1 {
handle {
reverse_proxy 10.89.0.108:7076 {
}
}
}
@815a9cab-b1f2-4256-9cd0-9569b23c3f77 {
host upload.thatshit.live
}
handle @815a9cab-b1f2-4256-9cd0-9569b23c3f77 {
handle {
reverse_proxy 10.89.0.108:7077 {
}
}
}
@85d4c638-68d0-4f44-84fb-a51e71695d2e_thatshitlive {
client_ip 10.0.0.0/8
}
handle @85d4c638-68d0-4f44-84fb-a51e71695d2e_thatshitlive {
abort
}
}
# Reverse Proxy Domain: "46a2fd7c-cbe2-4e99-9d10-9c10a4aa2e92"
*.blinker.club {
log 46a2fd7c-cbe2-4e99-9d10-9c10a4aa2e92
tls {
issuer acme {
dns cloudflare 0zKl_ezOn2j4HHjS6ew3k3KTqH4rLIYHiV-TDYxe
}
}
@de74e403-15ae-4c45-ac05-c9785dd31ab6 {
host wiki.blinker.club
}
handle @de74e403-15ae-4c45-ac05-c9785dd31ab6 {
handle {
reverse_proxy 10.89.0.100 {
transport http {
}
}
}
}
@6f0c960c-a8b7-4fa8-9168-cf0a5551be56 {
host kuma.blinker.club
}
handle @6f0c960c-a8b7-4fa8-9168-cf0a5551be56 {
handle {
reverse_proxy 10.89.0.100 {
transport http {
}
}
}
}
@b714662c-6abf-4b15-9b33-7c6387d18506 {
host pass.blinker.club
}
handle @b714662c-6abf-4b15-9b33-7c6387d18506 {
handle {
reverse_proxy 10.89.0.101:5004 {
transport http {
}
}
}
}
@b36e8ae9-b645-4e9f-b927-ee2bb7dfe40e {
host pdf.blinker.club
}
handle @b36e8ae9-b645-4e9f-b927-ee2bb7dfe40e {
handle {
reverse_proxy /outpost.goauthentik.io/* http://10.89.0.101:4501 {
}
forward_auth http://10.89.0.101:4501 {
uri /outpost.goauthentik.io/auth/caddy
copy_headers X-Authentik-Username
copy_headers X-Authentik-Groups
copy_headers X-Authentik-Email
copy_headers X-Authentik-Name
copy_headers X-Authentik-Uid
copy_headers X-Authentik-Jwt
copy_headers X-Authentik-Meta-Jwks
copy_headers X-Authentik-Meta-Outpost
copy_headers X-Authentik-Meta-Provider
copy_headers X-Authentik-Meta-App
copy_headers X-Authentik-Meta-Version
}
reverse_proxy 10.89.0.108:7075 {
}
}
}
@91587ab9-67e9-4678-9cb8-e8dc8ed89efd {
host tables.blinker.club
}
handle @91587ab9-67e9-4678-9cb8-e8dc8ed89efd {
handle {
reverse_proxy 10.89.0.101:5005 {
transport http {
}
}
}
}
@adea5e03-ec48-4fe5-ad9b-80e35c7de2f9 {
host linkwarden.blinker.club
}
handle @adea5e03-ec48-4fe5-ad9b-80e35c7de2f9 {
handle {
reverse_proxy 10.89.0.101:5010 {
}
}
}
@d7ffda69-ace3-4dcd-b766-ec3655de2e63 {
host auth.blinker.club
}
handle @d7ffda69-ace3-4dcd-b766-ec3655de2e63 {
handle {
reverse_proxy 10.89.0.101:4501 {
}
}
}
@3e2f0689-8e96-426b-bfc1-d50adbca5290 {
host readeck.blinker.club
}
handle @3e2f0689-8e96-426b-bfc1-d50adbca5290 {
handle {
reverse_proxy 10.89.0.103:5001 {
}
}
}
@db876ae0-c7d6-401f-bdda-85531d1d30d2 {
host watch.blinker.club
}
handle @db876ae0-c7d6-401f-bdda-85531d1d30d2 {
handle {
reverse_proxy 10.89.0.106:5001 {
}
}
}
@23bc0bb3-7e8b-4b05-b7f2-8e139c38b23d {
host mediarequest.blinker.club
}
handle @23bc0bb3-7e8b-4b05-b7f2-8e139c38b23d {
handle {
reverse_proxy 10.89.0.106:5002 {
}
}
}
@27847df4-83a6-4695-a87b-2a51e187225a {
host wizarr.blinker.club
}
handle @27847df4-83a6-4695-a87b-2a51e187225a {
handle {
reverse_proxy 10.89.0.106:5003 {
}
}
}
@4387e47a-3cd5-4209-a351-afb5d683c688 {
host memos.blinker.club
}
handle @4387e47a-3cd5-4209-a351-afb5d683c688 {
handle {
reverse_proxy 10.89.0.108:7071 {
}
}
}
@80736838-c5db-4c49-a7eb-439ef8a4835e {
host vaultwarden.blinker.club
}
handle @80736838-c5db-4c49-a7eb-439ef8a4835e {
handle {
reverse_proxy 10.89.0.108:7072 {
}
}
}
@075fb390-8759-48df-a196-c2b41794bba3 {
host fileshare.blinker.club
}
handle @075fb390-8759-48df-a196-c2b41794bba3 {
handle {
reverse_proxy 10.89.0.108:7073 {
}
}
}
}
# Reverse Proxy Domain: "7293a3a7-ca31-4d7e-be90-780cb4240e6b"
blinker.club {
log 7293a3a7-ca31-4d7e-be90-780cb4240e6b
tls {
issuer acme {
dns cloudflare 0zKl_ezOn2j4HHjS6ew3k3KTqH4rLIYHiV-TDYxe
}
}
@a9fe8c37-91be-4c0d-a363-ee49dd020790 {
host blinker.club
}
handle @a9fe8c37-91be-4c0d-a363-ee49dd020790 {
handle {
reverse_proxy 10.89.0.101:7575 {
transport http {
}
}
}
}
}
# Reverse Proxy Domain: "5efd6136-26c6-4af7-9404-75201f69b046"
*.thegrind.dev {
log 5efd6136-26c6-4af7-9404-75201f69b046
tls {
issuer acme {
dns cloudflare 0zKl_ezOn2j4HHjS6ew3k3KTqH4rLIYHiV-TDYxe
}
}
@42e9f10e-4e8f-428b-8609-15a4ae8eed2e {
host gist.thegrind.dev
}
handle @42e9f10e-4e8f-428b-8609-15a4ae8eed2e {
handle {
reverse_proxy 10.89.0.101:5006 {
}
}
}
@470fb753-2bbc-4560-b448-a8dbb6d9a8b2 {
host tools.thegrind.dev
}
handle @470fb753-2bbc-4560-b448-a8dbb6d9a8b2 {
handle {
reverse_proxy 10.89.0.101:8989 {
}
}
}
@c549d42a-99c8-4995-912d-4c45814da111 {
host wiki.thegrind.dev
}
handle @c549d42a-99c8-4995-912d-4c45814da111 {
handle {
reverse_proxy 10.89.0.101:5002 {
}
}
}
@9d44d816-4e06-4592-a595-3060d3e128b5 {
host auth.thegrind.dev
}
handle @9d44d816-4e06-4592-a595-3060d3e128b5 {
handle {
reverse_proxy 10.89.0.101:4501 {
}
}
}
@c73d8643-fb52-43a0-ad06-ea800f6e90f8 {
host blog.thegrind.dev
}
handle @c73d8643-fb52-43a0-ad06-ea800f6e90f8 {
handle {
reverse_proxy 10.89.0.101:5007 {
transport http {
}
}
}
}
@69e141fe-1031-4dfd-a9dd-e7013f518f65 {
host tunnel.thegrind.dev
}
handle @69e141fe-1031-4dfd-a9dd-e7013f518f65 {
}
@70467ce5-1d6a-45fc-a81b-42b7aa40f7ae {
host plane.thegrind.dev
}
handle @70467ce5-1d6a-45fc-a81b-42b7aa40f7ae {
handle {
reverse_proxy 10.89.0.104:80 {
}
}
}
@183b97ca-18ac-4478-89aa-d7e79f82969a {
host tasks.thegrind.dev
}
handle @183b97ca-18ac-4478-89aa-d7e79f82969a {
handle {
reverse_proxy 10.89.0.108:7070 {
}
}
}
@c58cfb1f-66ef-4f74-87f5-58186668dcd6 {
host tianji.thegrind.dev
}
handle @c58cfb1f-66ef-4f74-87f5-58186668dcd6 {
handle {
reverse_proxy 10.89.0.108:7074 {
}
}
}
}
# Reverse Proxy Domain: "ec02b95f-dda1-44dd-966d-1636595ab192"
*.usefor.dev {
tls {
issuer acme {
dns cloudflare 0zKl_ezOn2j4HHjS6ew3k3KTqH4rLIYHiV-TDYxe
}
}
handle {
reverse_proxy 10.89.0.101:5008 {
transport http {
}
}
}
}
import /usr/local/etc/caddy/caddy.d/*.conf

View File

@ -1,2 +1,3 @@
sudo apt install python3-psycopg2
sudo apt install sshpass
ansible-galaxy collection install prometheus.prometheus

View File

@ -8,6 +8,7 @@ assets: "{{ project_root }}/assets"
# Remote paths
remote_stacks: "/home/javi/docker"
remote_app_mounts: "/home/docker"
container_data_base_path: "/home/docker/container-data"
# Postgres
pg_host: 10.89.0.102

7
group_vars/vms.yml Normal file
View File

@ -0,0 +1,7 @@
$ANSIBLE_VAULT;1.1;AES256
66666232393236366535336639396266366131643930323662376334333138363266633431656263
6266363861373237346231343334623565386138393737390a623932363063306437383466303564
35616132383361353036663839643763363762623534653732323864636462346635363366623533
6536376161333663300a643939303465326133366463383234356535626465623162303836373664
65373562363134653633363335326536353062373734373066393862363139376435303833393836
3664646663646534323938393762373535666332386164353631

View File

@ -1,4 +1,8 @@
---
# Homelab IP addressing scheme/convention
# 10.89.0.1x-2x for low level hosts (proxmox servers, NAS, etc)
# 10.89.0.3x for proxies (caddy-proxy-external, caddy-proxy-internal, etc)
# 10.89.0.1xx for VMs running the actual apps I host
all:
children:
servers:
@ -9,6 +13,12 @@ all:
ansible_host: 10.89.0.12
nas:
ansible_host: 10.89.0.15
proxies:
hosts:
caddy_internal:
ansible_host: 10.89.0.30
caddy_external:
ansible_host: 10.89.0.31
vms:
hosts:
portainer_main:
@ -17,7 +27,13 @@ all:
ansible_host: 10.89.0.102
dev_services:
ansible_host: 10.89.0.105
streaming_services:
streaming_services: # The one running jellyfin
ansible_host: 10.89.0.106
streaming:
streaming: # The one running the arr stack
ansible_host: 10.89.0.107
apps:
ansible_host: 10.89.0.108
utility:
hosts:
observability_hub:
ansible_host: 10.89.0.120

Binary file not shown.

23
lookup_plugins/hostip.py Normal file
View File

@ -0,0 +1,23 @@
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if not variables or 'hostvars' not in variables:
raise AnsibleError("hostvars is not available in this context")
results = []
for term in terms:
if term not in variables['hostvars']:
raise AnsibleError(f"Host '{term}' not found in hostvars")
host = variables['hostvars'][term]
ip = host.get('ansible_host') or host.get('ansible_default_ipv4', {}).get('address')
if not ip:
raise AnsibleError(f"No IP found for host '{term}'")
results.append(ip)
return results
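The lookup resolves an inventory hostname to the IP in its ansible_host (falling back to the discovered default IPv4 address), which is how the proxy playbooks further down avoid hard-coding addresses. A minimal usage sketch, assuming the inventory alias portainer_main from hosts.yml above; the debug task itself is illustrative only:

- name: Show the IP behind an inventory alias
  ansible.builtin.debug:
    msg: "portainer_main resolves to {{ lookup('hostip', 'portainer_main') }}"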

90
playbooks/nodes/apps.yml Normal file
View File

@ -0,0 +1,90 @@
---
- name: Deploy apps to apps-1 node
hosts: apps
become: true
roles:
- role: apps/kan
vars:
port: 7070
- role: apps/memos
vars:
port: 7071
- role: apps/vaultwarden
vars:
port: 7072
- role: apps/erugo
vars:
port: 7073
- role: apps/tianji
vars:
port: 7074
- role: apps/stirling-pdf
vars:
port: 7075
- role: apps/dumbware-todo
vars:
port: 7076
pin: 8989
- role: apps/filebrowser
vars:
port: 7078
directory: "fb-file-sharing"
container_name: "filebrowser-sharing"
- role: apps/reubah
vars:
port: 7079
- role: apps/komga
vars:
port: 7080
- role: apps/outline-wiki
vars:
port: 7083
url: "https://docs.thegrind.dev"
container_name: "the-grind-outline-wiki"
outline_db_name: "the_grind_docs"
data_dir_name: "the-grind-docs"
secret_key: "42f5ab8c01771f40f1e1bae554b82b883f25ab13a79767ffc57e8c31ab172c43"
utils_secret_key: "f8410c6a12da5e92ac4200ef923fe516bed3cef2abacfb1877e32f2f1c63a325"
oidc_client_id: "XfaDJwpZfdHXiK6kulZiAK9nCliUieOdW0Ah1jJ1"
oidc_client_secret: "FtouI96uTg4YJc3ViSfcQ9SdubviXiPULne5tKyIXKbHxDczdlZoWkpwroobEGcgxrJaMFmdX6vBLJACZJVojFW1DJLyqt7gTAo6SmfuL7cJVzAnvDagxWJtamqhkKGR"
oidc_auth_uri: "https://auth.thegrind.dev/application/o/authorize/"
oidc_token_uri: "https://auth.thegrind.dev/application/o/token/"
oidc_userinfo_uri: "https://auth.thegrind.dev/application/o/userinfo/"
oidc_logout_uri: "https://auth.thegrind.dev/application/o/the-grind-docs/end-session/"
odic_button_text: "The Grind Auth"
# - role: apps/postiz
# vars:
# port: 7084
# url: "https://postiz.blinker.club"
# postiz_instance_name: "postiz-main"
# postiz_db_name: "postiz_main"
# jwt_secret: "42cd08e857d0178075a305d7511c778336a501951ae0e4f05bf5ad862f611e72"
- role: apps/planka
vars:
port: 7085
url: "https://tasks.thegrind.dev"
tasks:
- name: Personal DW drop
ansible.builtin.include_role:
name: apps/dumbware-drop
vars:
port: 7077
pin: "8989"
- name: Komga DW drop
ansible.builtin.include_role:
name: apps/dumbware-drop
vars:
container_name: dw-drop-komga-books
page_title: "Contribute to the book library"
port: 7081
pin: "1337"
directory: "komga/data/books"
- name: Javier Feliz Blog
ansible.builtin.include_role:
name: apps/ghost
vars:
ghost_instance_name: javierfeliz-blog
database_name: ghostcms_javierfelizblog
blog_url: "https://javierfeliz.com"
port: 7082

View File

@ -0,0 +1,19 @@
---
- name: Set up the observability hub
hosts: observability_hub
become: true
roles:
# - docker/install
# - docker/portainer
# - observability/grafana
# - observability/prometheus
- observability/beszel-hub
tasks:
# - name: Deploy promlens
# community.docker.docker_container:
# name: promlens
# image: prom/promlens
# state: started
# restart_policy: unless-stopped
# ports:
# - '8080:8080'

View File

@ -0,0 +1,8 @@
---
- name: Set up the prod services node
hosts: prod_services
become: true
roles:
- role: services/valkey
vars:
container_name: "prod-valkey"

View File

@ -0,0 +1,62 @@
---
- name: Install Tianji Reporter
# hosts: vms:!apps # Tianji is already installed on apps node
# hosts: "{{ (groups['vms'] + groups['proxies']) | unique }}"
hosts: caddy_external
become: true
vars:
server_url: "http://10.89.0.108:7074"
workspace_id: "clnzoxcy10001vy2ohi4obbi0"
tasks:
- name: Ensure curl and wget are installed (Debian)
ansible.builtin.apt:
name:
- curl
- wget
update_cache: true
- name: Create install directory
ansible.builtin.file:
path: /usr/local/tianji/reporter
state: directory
mode: '0755'
- name: Check if Tianji reporter binary already exists
stat:
path: /usr/local/tianji/reporter/tianji-reporter
register: tianji_binary
- name: Download Tianji reporter binary
ansible.builtin.get_url:
url: "https://github.com/msgbyte/tianji/releases/latest/download/tianji-reporter-linux-amd64"
dest: /usr/local/tianji/reporter/tianji-reporter
mode: '0755'
when: not tianji_binary.stat.exists
- name: Create systemd service
ansible.builtin.copy:
dest: /usr/lib/systemd/system/tianji-reporter.service
content: |
[Unit]
Description=Tianji-Reporter
Documentation=https://github.com/msgbyte/tianji
After=network.target
[Service]
ExecStart=/usr/local/tianji/reporter/tianji-reporter --url "{{ server_url }}" --workspace "{{ workspace_id }}"
ExecReload=/bin/kill -HUP $MAINPID
Restart=on-failure
[Install]
WantedBy=multi-user.target
mode: '0644'
- name: Reload systemd
ansible.builtin.systemd:
daemon_reload: true
- name: Enable and start Tianji reporter
ansible.builtin.systemd:
name: tianji-reporter
enabled: true
state: reloaded

View File

@ -0,0 +1,103 @@
---
- name: Set up the reverse proxy for external only services
hosts: caddy_external
become: true
roles:
# - role: caddy/install # Only needed the first setup
- role: caddy/proxy
vars:
domains:
- name: "thatshit.live"
dynamic_dns: true
sites:
- name: "draw"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5001
- name: "paste"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5009
- name: "do"
host: "{{ lookup('hostip', 'apps') }}"
port: 7076
- name: "drop"
host: "{{ lookup('hostip', 'apps') }}"
port: 7077
- name: "share"
host: "{{ lookup('hostip', 'apps') }}"
port: 7078
- name: "convert"
host: "{{ lookup('hostip', 'apps') }}"
port: 7079
- name: "blinker.club"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 7575
dynamic_dns: true
sites:
- name: "pass"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5004
- name: "tables"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5005
- name: "watch"
host: "{{ lookup('hostip', 'streaming_services') }}"
port: 5001
- name: "memos"
host: "{{ lookup('hostip', 'apps') }}"
port: 7071
- name: "auth"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 4501
- name: "pdf"
host: "{{ lookup('hostip', 'apps') }}"
port: 7075
- name: "linkwarden"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5010
- name: "vaultwarden"
host: "{{ lookup('hostip', 'apps') }}"
port: 7072
- name: "mediarequest"
host: "{{ lookup('hostip', 'streaming_services') }}"
port: 5002
- name: "fileshare"
host: "{{ lookup('hostip', 'apps') }}"
port: 7073
- name: "books"
host: "{{ lookup('hostip', 'apps') }}"
port: 7080
- name: "bookupload"
host: "{{ lookup('hostip', 'apps') }}"
port: 7081
- name: "postiz"
host: "{{ lookup('hostip', 'apps') }}"
port: 7084
- name: "javierfeliz.com"
host: "{{ lookup('hostip', 'apps') }}"
port: 7082
dynamic_dns: true
sites: []
- name: "thegrind.dev"
dynamic_dns: true
sites:
- name: "blog"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5007
- name: "tools"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 8989
- name: "auth"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 4501
- name: "gist"
host: "{{ lookup('hostip', 'portainer_main') }}"
port: 5006
- name: "tianji"
host: "{{ lookup('hostip', 'apps') }}"
port: 7074
- name: "tasks"
host: "{{ lookup('hostip', 'apps') }}"
port: 7085
- name: "docs"
host: "{{ lookup('hostip', 'apps') }}"
port: 7083

View File

@ -0,0 +1,72 @@
---
- name: Set up the reverse proxy for internal only services
hosts: caddy_internal
become: true
roles:
# - role: caddy/install # Only needed for first setup
- role: caddy/proxy
vars:
domains:
- name: "lan.thegrind.dev"
sites:
- name: "whale"
host: 10.89.0.101
port: 9443
https: true
transport_opts:
- tls_insecure_skip_verify
- name: "router"
host: 10.89.0.1
port: 8989
- name: "adguard"
host: 10.89.0.1
port: 3000
- name: "nas"
host: 10.89.0.15
port: 80
- name: "streaming"
host: 10.89.0.106
port: 10000
https: true
transport_opts:
- tls_insecure_skip_verify
- name: "stash"
host: 10.89.0.106
port: 6969
- name: "node1"
host: 10.89.0.13
port: 8006
transport_opts:
- tls_insecure_skip_verify
- name: "node3"
host: 10.89.0.12
port: 8006
transport_opts:
- tls_insecure_skip_verify
- name: "sonarr"
host: "{{ lookup('hostip', 'streaming') }}"
port: 5002
- name: "radarr"
host: "{{ lookup('hostip', 'streaming') }}"
port: 5003
- name: "prowlarr"
host: "{{ lookup('hostip', 'streaming') }}"
port: 5001
- name: "sab"
host: "{{ lookup('hostip', 'streaming') }}"
port: 5013
- name: "qbit"
host: "{{ lookup('hostip', 'streaming') }}"
port: 5007
- name: "dash"
host: "{{ lookup('hostip', 'observability_hub') }}"
port: 3000
- name: "prometheus"
host: "{{ lookup('hostip', 'observability_hub') }}"
port: 9090
- name: "promlens"
host: "{{ lookup('hostip', 'observability_hub') }}"
port: 8080
- name: "bez"
host: "{{ lookup('hostip', 'observability_hub') }}"
port: 8090

View File

@ -1,9 +1,10 @@
---
- name: Set up a new virtual machine
- name: set up a new virtual machine
hosts: vms
become: true
roles:
- role: docker/install
- role: docker/portainer
- role: server/setup/sshkey
- role: server/setup/webmin
# - role: observability/prometheus-node-exporter
- role: observability/beszel-agent
# - role: docker/install
# - role: docker/portainer
# - role: server/setup/sshkey

View File

@ -0,0 +1,15 @@
---
- name: Regenerate prometheus config and restart container
hosts: observability_hub
become: true
tasks:
- name: Generate Prometheus config from template
ansible.builtin.template:
src: templates/prometheus.yml.j2
dest: "{{ container_data_base_path }}/prometheus/prometheus.yml"
- name: Restart prometheus container
community.docker.docker_container:
name: prometheus
state: started
restart: true

View File

@ -0,0 +1,15 @@
#jinja2: trim_blocks: True, lstrip_blocks: True
global:
scrape_interval: "15s"
scrape_configs:
- job_name: node
metrics_path: /metrics
scheme: http
static_configs:
{% for host in groups['vms'] %}
- targets: ['{{ hostvars[host].ansible_host }}:9100']
labels:
instance_name: '{{ host }}'
{% endfor %}
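For reference, a sketch of what this template renders for one member of the vms group, assuming the apps host (ansible_host 10.89.0.108 in the inventory above); the real output repeats the target block once per host in groups['vms']:

global:
  scrape_interval: "15s"
scrape_configs:
  - job_name: node
    metrics_path: /metrics
    scheme: http
    static_configs:
      - targets: ['10.89.0.108:9100']
        labels:
          instance_name: 'apps'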

View File

View File

View File

@ -0,0 +1,3 @@
directory: "dw-drop"
container_name: "dw-drop"
page_title: "Share a file with Javi"

View File

@ -0,0 +1,22 @@
- name: Create the data dir on network drive
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "{{ directory }}"
- name: Create dumbware-drop container
community.docker.docker_container:
name: "{{ container_name }}"
image: dumbwareio/dumbdrop:latest
pull: true
state: started
restart_policy: always
ports:
- "{{ port }}:3000"
volumes:
- "/home/docker/container-data/{{ directory }}:/app/uploads"
env:
DUMBDROP_TITLE: "{{ page_title }}"
DUMBDROP_PIN: "{{ pin }}"
MAX_FILE_SIZE: "10240" # 10GB
LOCAL_UPLOAD_DIR: /app/uploads

View File

@ -0,0 +1,20 @@
- name: Create data folder
ansible.builtin.file:
path: /home/docker/dw-todo
state: directory
mode: '0777'
- name: Create dw-todo container
community.docker.docker_container:
name: dw-todo
image: dumbwareio/dumbdo:latest
pull: true
state: started
restart_policy: unless-stopped
ports:
- "{{ port }}:3000"
volumes:
- /home/docker/dw-todo:/app/data
env:
DUMBDO_SITE_TITLE: "Javi's Daily TO-DO"
DUMBDO_PIN: "{{ pin }}"

View File

View File

@ -0,0 +1,17 @@
- name: Create the data dir on network drive
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "erugo-data"
- name: Create erugo container
community.docker.docker_container:
name: erugo
image: wardy784/erugo:latest
pull: true
state: started
restart_policy: always
ports:
- "{{ port }}:80"
volumes:
- /home/docker/container-data/erugo-data:/var/www/html/storage

View File

View File

@ -0,0 +1,37 @@
- name: Mount the container data folder
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "{{ directory }}"
- name: Create files folder
ansible.builtin.file:
dest: "/home/docker/container-data/{{ directory }}/files"
state: directory
mode: '0777'
- name: Create config folder
ansible.builtin.file:
dest: "/home/docker/container-data/{{ directory }}/config"
state: directory
mode: '0777'
- name: Create database folder
ansible.builtin.file:
dest: "/home/docker/container-data/{{ directory }}/database"
state: directory
mode: '0777'
- name: Deploy filebrowser container
community.docker.docker_container:
name: "{{ container_name }}"
pull: true
state: started
restart_policy: unless-stopped
image: filebrowser/filebrowser
ports:
- '{{ port }}:80'
volumes:
- '/home/docker/container-data/{{ directory }}/config:/config'
- '/home/docker/container-data/{{ directory }}/database:/database'
- '/home/docker/container-data/{{ directory }}/files:/srv'

View File

View File

@ -0,0 +1,27 @@
# TODO: Once I set up the mysql role set it up here
- name: Container data
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "{{ ghost_instance_name }}-data"
- name: Deploy Ghost CMS
community.docker.docker_container:
name: "{{ ghost_instance_name }}"
image: ghost:5-alpine
restart_policy: always
ports:
- "{{ port }}:2368"
env:
# see https://ghost.org/docs/config/#configuration-options
database__client: mysql
database__connection__host: 10.89.0.102
database__connection__user: root
database__connection__password: rootpassword
database__connection__database: "{{ database_name }}"
# this url value is just an example, and is likely wrong for your environment!
url: "{{ blog_url }}"
# contrary to the default mentioned in the linked documentation, this image defaults to NODE_ENV=production (so development mode needs to be explicitly specified if desired)
#NODE_ENV: development
volumes:
- "{{ container_data_base_path }}/{{ ghost_instance_name }}-data:/var/lib/ghost/content"

View File

View File

@ -0,0 +1,20 @@
- name: Create app DB
ansible.builtin.include_role:
name: app/database
vars:
app_name: kan
- name: Create kan-web container
community.docker.docker_container:
name: kan-web
image: ghcr.io/kanbn/kan:latest
pull: true
state: started
restart_policy: unless-stopped
ports:
- "{{ port }}:3000"
env:
NEXT_PUBLIC_BASE_URL: "https://tasks.thegrind.dev"
BETTER_AUTH_SECRET: "your_auth_secret"
POSTGRES_URL: "postgresql://kan:password@10.89.0.102:5432/kan"
NEXT_PUBLIC_ALLOW_CREDENTIALS: "true"

View File

View File

@ -0,0 +1,35 @@
# - name: Create DB
# ansible.builtin.include_role:
# name: app/database
# vars:
# app_name: "booklore"
- name: Create data folder
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "komga"
- name: Create necessary subfolders
ansible.builtin.file:
dest: "{{ container_data_base_path }}/komga/{{ item }}"
state: directory
mode: '0777'
loop:
- data
- "data/books"
- config
- name: Deploy container
community.docker.docker_container:
name: komga-ebook-library
pull: true
state: started
image: gotson/komga
volumes:
- "{{ container_data_base_path }}/komga/config:/config"
- "{{ container_data_base_path }}/komga/data:/data"
- /etc/timezone:/etc/timezone
ports:
- "{{ port }}:25600"
user: "1000:1000"
restart_policy: unless-stopped

View File

View File

@ -0,0 +1,18 @@
- name: Create app DB
ansible.builtin.include_role:
name: app/database
vars:
app_name: memos
- name: Deploy memos container
community.docker.docker_container:
name: memos
image: neosmemo/memos:stable
pull: true
state: started
restart_policy: unless-stopped
ports:
- "{{ port }}:5230"
env:
MEMOS_DRIVER: "postgres"
MEMOS_DSN: "user=memos password=password dbname=memos host=10.89.0.102 sslmode=disable"

View File

@ -0,0 +1,27 @@
- name: Create data folder
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "{{ data_dir_name }}"
- name: Create release notes folder
ansible.builtin.file:
dest: "{{ container_data_base_path }}/{{ data_dir_name }}/release-notes"
state: directory
mode: '0777'
- name: Create config file
ansible.builtin.template:
src: config.j2
dest: "{{ container_data_base_path }}/{{ data_dir_name }}/openchangelog.yml"
mode: '0777'
- name: Deploy container
community.docker.docker_container:
name: "{{ container_name }}"
image: "ghcr.io/jonashiltl/openchangelog:0.6.2"
ports:
- "{{ port }}:6001"
volumes:
- "{{ container_data_base_path }}/{{ data_dir_name }}/release-notes:/release-notes"
- "{{ container_data_base_path }}/{{ data_dir_name }}/openchangelog.yml:/etc/openchangelog.yml"

View File

@ -0,0 +1,11 @@
addr: 0.0.0.0:6001
local:
filesPath: /release-notes
page:
title: {{ site_title }}
subtitle: {{ site_subtitle }}
colorScheme: dark
hidePoweredBy: false
logo:
src: https://openchangelog.com/logo-full.webp
link: {{ site_url }}

View File

@ -0,0 +1,7 @@
container_name: "outline-wiki"
storage_mode: "local"
smtp_service: ""
smtp_username: ""
smtp_password: ""
smtp_from_email: ""
pg_ssl_mode: "disable"

View File

@ -0,0 +1,80 @@
- name: Create database
ansible.builtin.include_role:
name: app/database
vars:
app_name: "{{ outline_db_name }}"
- name: Create data volume
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "{{ data_dir_name }}"
- name: Deploy Outline Wiki Container
community.docker.docker_container:
name: "{{ container_name }}"
image: docker.getoutline.com/outlinewiki/outline:latest
ports:
- "{{ port }}:3000"
volumes:
- "{{ container_data_base_path }}/{{ data_dir_name }}:/var/lib/outline/data"
env:
URL: "{{ url }}"
# Generate a hex-encoded 32-byte random key. Use `openssl rand -hex 32` in your
# terminal to generate a random value.
SECRET_KEY: "{{ secret_key }}"
# Generate a unique random key. The format is not important but you could still use
# `openssl rand -hex 32` in your terminal to generate a random value.
UTILS_SECRET: "{{ utils_secret_key }}"
DEFAULT_LANGUAGE: en_US
DATABASE_URL: "postgres://{{ outline_db_name }}:password@{{ pg_host }}:5432/{{ outline_db_name }}"
# Uncomment this line if you will not use SSL for connecting to Postgres. This is acceptable
# if the database and the application are on the same machine.
PGSSLMODE: "{{ pg_ssl_mode }}"
REDIS_URL: "redis://{{ lookup('hostip', 'prod_services') }}:6379"
FILE_STORAGE: "{{ storage_mode }}"
FILE_STORAGE_UPLOAD_MAX_SIZE: "262144000"
# Auto-redirect to https in production. The default is true but you may set to
# false if you can be sure that SSL is terminated at an external loadbalancer.
FORCE_HTTPS: "true"
# Generic OIDC provider
# DOCS: https://docs.getoutline.com/s/hosting/doc/oidc-8CPBm6uC0I
OIDC_CLIENT_ID: "{{ oidc_client_id }}"
OIDC_CLIENT_SECRET: "{{ oidc_client_secret }}"
OIDC_AUTH_URI: "{{ oidc_auth_uri }}"
OIDC_TOKEN_URI: "{{ oidc_token_uri }}"
OIDC_USERINFO_URI: "{{ oidc_userinfo_uri }}"
OIDC_LOGOUT_URI: "{{ oidc_logout_uri }}"
# Specify which claims to derive user information from
# Supports any valid JSON path with the JWT payload
OIDC_USERNAME_CLAIM: preferred_username
# Display name for OIDC authentication
OIDC_DISPLAY_NAME: "{{ odic_button_text }}"
OIDC_SCOPES: openid profile email
SMTP_SERVICE: "{{ smtp_service }}"
SMTP_USERNAME: "{{ smtp_username }}"
SMTP_PASSWORD: "{{ smtp_password }}"
SMTP_FROM_EMAIL: "{{ smtp_from_email }}"
# Whether the rate limiter is enabled or not
RATE_LIMITER_ENABLED: "true"
# Individual endpoints have hardcoded rate limits that are enabled
# with the above setting, however this is a global rate limiter
# across all requests
RATE_LIMITER_REQUESTS: "1000"
RATE_LIMITER_DURATION_WINDOW: "60"
# Have the installation check for updates by sending anonymized statistics to
# the maintainers
ENABLE_UPDATES: "true"
# Debugging categories to enable you can remove the default "http" value if
# your proxy already logs incoming http requests and this ends up being duplicative
DEBUG: "http"
# Configure lowest severity level for server logs. Should be one of
# error, warn, info, http, verbose, debug, or silly
LOG_LEVEL: "info"

View File

View File

@ -0,0 +1,90 @@
- name: Create Planka DB on postgres
ansible.builtin.include_role:
name: app/database
vars:
app_name: "planka"
- name: Create container data folder on NAS
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "planka"
- name: Set facts
ansible.builtin.set_fact:
data_path: "{{ container_data_base_path }}/planka"
- name: Create needed subdirectories
ansible.builtin.file:
dest: "{{ data_path }}/{{item}}"
state: directory
mode: '0777'
loop:
- favicons
- user-avatars
- background-images
- attachments
- name: Deploy planka container
community.docker.docker_container:
name: "planka"
image: ghcr.io/plankanban/planka:2.0.0-rc.3
restart_policy: on-failure
volumes:
- "{{ data_path }}/favicons:/app/public/favicons"
- "{{ data_path }}/user-avatars:/app/public/user-avatars"
- "{{ data_path }}/background-images:/app/public/background-images"
- "{{ data_path }}/attachments:/app/private/attachments"
ports:
- "{{ port }}:1337"
env:
BASE_URL: "{{ url }}"
DATABASE_URL: "postgresql://planka:password@{{ pg_host }}:5432/planka"
SECRET_KEY: "27736f8948e37890474af876715b73b5c99ef65e36e5c9ccf6f7e0295ce462c4"
LOG_LEVEL: "warn"
TRUST_PROXY: "true"
TOKEN_EXPIRES_IN: "365" # In days
# related: https://github.com/knex/knex/issues/2354
# As knex does not pass query parameters from the connection string,
# we have to use environment variables in order to pass the desired values, e.g.
PGSSLMODE: "disable"
# Used for per-board notifications
DEFAULT_LANGUAGE: "en-US"
# Do not comment out DEFAULT_ADMIN_EMAIL if you want to prevent this user from being edited/deleted
# DEFAULT_ADMIN_EMAIL: "me@javierfeliz.com"
# DEFAULT_ADMIN_PASSWORD: "password"
# - OIDC_ISSUER=
# - OIDC_CLIENT_ID=
# - OIDC_CLIENT_SECRET=
# Optionally store in secrets - then OIDC_CLIENT_SECRET should not be set
# - OIDC_CLIENT_SECRET__FILE=/run/secrets/oidc_client_secret
# - OIDC_ID_TOKEN_SIGNED_RESPONSE_ALG=
# - OIDC_USERINFO_SIGNED_RESPONSE_ALG=
# - OIDC_SCOPES=openid email profile
# - OIDC_RESPONSE_MODE=fragment
# - OIDC_USE_DEFAULT_RESPONSE_MODE=true
# - OIDC_ADMIN_ROLES=admin
# - OIDC_PROJECT_OWNER_ROLES=project_owner
# - OIDC_BOARD_USER_ROLES=board_user
# - OIDC_CLAIMS_SOURCE=userinfo
# - OIDC_EMAIL_ATTRIBUTE=email
# - OIDC_NAME_ATTRIBUTE=name
# - OIDC_USERNAME_ATTRIBUTE=preferred_username
# - OIDC_ROLES_ATTRIBUTE=groups
# - OIDC_IGNORE_USERNAME=true
# - OIDC_IGNORE_ROLES=true
# - OIDC_ENFORCED=true
# TODO: When I set up mxroute or something
# Email Notifications (https://nodemailer.com/smtp/)
# - SMTP_HOST=
# - SMTP_PORT=587
# - SMTP_NAME=
# - SMTP_SECURE=true
# - SMTP_USER=
# - SMTP_PASSWORD=
# Optionally store in secrets - then SMTP_PASSWORD should not be set
# - SMTP_PASSWORD__FILE=/run/secrets/smtp_password
# - SMTP_FROM="Demo Demo" <demo@demo.demo>
# - SMTP_TLS_REJECT_UNAUTHORIZED=false

View File

View File

@ -0,0 +1,48 @@
- name: Create postiz db
ansible.builtin.include_role:
name: app/database
vars:
app_name: "{{ postiz_db_name }}"
- name: Container data on NAS
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "{{ postiz_instance_name }}"
- name: Create needed subdirectories
ansible.builtin.file:
dest: "{{ container_data_base_path }}/{{postiz_instance_name}}/{{ item }}"
state: directory
mode: '0777'
loop:
- config
- uploads
- name: Deploy postiz container
community.docker.docker_container:
name: "{{ postiz_instance_name }}"
image: ghcr.io/gitroomhq/postiz-app:latest
restart_policy: always
env:
MAIN_URL: "{{ url }}"
FRONTEND_URL: "{{ url }}"
NEXT_PUBLIC_BACKEND_URL: "{{ url }}/api"
JWT_SECRET: "{{ jwt_secret }}"
# These defaults are probably fine, but if you change your user/password, update it in the
# postiz-postgres or postiz-redis services below.
DATABASE_URL: "postgresql://{{ postiz_db_name }}:password@{{ pg_host }}:5432/{{ postiz_db_name }}"
REDIS_URL: "redis://{{ lookup('hostip', 'prod_services') }}:6379"
BACKEND_INTERNAL_URL: "http://localhost:3000"
IS_GENERAL: "true" # Required for self-hosting.
DISABLE_REGISTRATION: "false" # Only allow single registration, then disable signup
# The container images are pre-configured to use /uploads for file storage.
# You probably should not change this unless you have a really good reason!
STORAGE_PROVIDER: "local"
UPLOAD_DIRECTORY: "/uploads"
NEXT_PUBLIC_UPLOAD_DIRECTORY: "/uploads"
volumes:
- "{{ container_data_base_path }}/config:/config/"
- "{{ container_data_base_path }}/uploads:/uploads/"
ports:
- "{{ port }}:5000"

View File

View File

@ -0,0 +1,19 @@
- name: Make mount folder
ansible.builtin.file:
dest: "/home/docker/reubah"
mode: '0777'
state: directory
- name: Put up reubah container
community.docker.docker_container:
name: reubah
image: 'ghcr.io/dendianugerah/reubah:latest'
pull: true
state: started
restart_policy: unless-stopped
env:
PORT: "8081"
volumes:
- '/home/docker/reubah:/tmp'
ports:
- '{{ port }}:8081'

View File

@ -0,0 +1,21 @@
- name: Create the data dir on network drive
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "stirlingpdf-data"
- name: Make stirlingPDF container
community.docker.docker_container:
name: stirling-pdf
image: docker.stirlingpdf.com/stirlingtools/stirling-pdf:latest
ports:
- "{{ port }}:8080"
volumes:
- /home/docker/container-data/stirlingpdf-data/trainingData:/usr/share/tessdata # Required for extra OCR languages
- /home/docker/container-data/stirlingpdf-data/extraConfigs:/configs
- /home/docker/container-data/stirlingpdf-data/customFiles:/customFiles/
- /home/docker/container-data/stirlingpdf-data/logs:/logs/
- /home/docker/container-data/stirlingpdf-data/pipeline:/pipeline/
env:
DOCKER_ENABLE_SECURITY: "false"
LANGS: "en_GB"

View File

View File

@ -0,0 +1,20 @@
- name: Create app DB
ansible.builtin.include_role:
name: app/database
vars:
app_name: tianji
- name: Create tianji container
community.docker.docker_container:
name: tianji
image: moonrailgun/tianji
pull: true
state: started
restart_policy: always
ports:
- "{{ port }}:12345"
env:
DATABASE_URL: postgresql://tianji:password@10.89.0.102:5432/tianji
JWT_SECRET: 7RpgjEwcb3lQOjT25sRXYg==
ALLOW_REGISTER: "false"
ALLOW_OPENAPI: "true"

View File

View File

@ -0,0 +1,19 @@
- name: Create the data dir on network drive
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "vaultwarden-data"
- name: Create vaultwarden container
community.docker.docker_container:
name: vaultwarden
image: vaultwarden/server:latest
pull: true
state: started
restart_policy: always
ports:
- "{{ port }}:80"
env:
DOMAIN: "https://vaultwarden.blinker.club"
ADMIN_TOKEN: "{{ vaultwarden_admin_token }}"
volumes:
- /home/docker/container-data/vaultwarden-data/:/data/

View File

@ -0,0 +1 @@

View File

@ -0,0 +1,101 @@
- name: Install dependencies
apt:
name:
- apt-transport-https
- golang
state: present
update_cache: true
- name: Make config directory
ansible.builtin.file:
dest: "/etc/caddy"
state: directory
mode: '0777'
- name: Download and install XCaddy GPG key
ansible.builtin.shell:
cmd: >
curl -1sLf 'https://dl.cloudsmith.io/public/caddy/xcaddy/gpg.key' |
gpg --dearmor -o /usr/share/keyrings/caddy-xcaddy-archive-keyring.gpg
args:
creates: /usr/share/keyrings/caddy-xcaddy-archive-keyring.gpg
become: true
- name: Add XCaddy repository list
ansible.builtin.get_url:
url: https://dl.cloudsmith.io/public/caddy/xcaddy/debian.deb.txt
dest: /etc/apt/sources.list.d/caddy-xcaddy.list
mode: '0644'
force: true
- name: Update apt cache
ansible.builtin.apt:
update_cache: true
- name: Install xcaddy
ansible.builtin.apt:
name: xcaddy
state: present
- name: Build Caddy with Cloudflare DNS plugin
ansible.builtin.shell: |
xcaddy build \
--with github.com/caddy-dns/cloudflare \
--with github.com/mholt/caddy-dynamicdns \
--output /usr/local/bin/caddy
args:
creates: /usr/local/bin/caddy
- name: Create systemd service for custom Caddy binary
ansible.builtin.copy:
dest: /etc/systemd/system/caddy.service
owner: root
group: root
mode: '0644'
content: |
[Unit]
Description=Caddy
[Service]
User=root
Group=root
ExecStart=/usr/local/bin/caddy run --environ --config /etc/caddy/Caddyfile
ExecReload=/usr/local/bin/caddy reload --config /etc/caddy/Caddyfile
TimeoutStopSec=5s
LimitNOFILE=1048576
Restart=on-failure
[Install]
WantedBy=multi-user.target
- name: Unmask Caddy service
ansible.builtin.systemd:
name: caddy
masked: false
- name: Allow Caddy to bind to ports <1024
command: setcap 'cap_net_bind_service=+ep' /usr/local/bin/caddy
- name: Reload systemd to pick up caddy.service changes
ansible.builtin.systemd:
daemon_reload: true
- name: Set resolv.conf DNS
ansible.builtin.copy:
dest: /etc/resolv.conf
content: |
nameserver 1.1.1.1
nameserver 8.8.8.8
force: true
- name: Restart systemd-resolved
systemd:
name: systemd-resolved
state: restarted
enabled: true
- name: Enable and start Caddy service
ansible.builtin.systemd:
name: caddy
enabled: true
state: started

View File

@ -0,0 +1,8 @@
cloudflare_api_key: !vault |
$ANSIBLE_VAULT;1.1;AES256
34356665643335356136633736363963383562366164613637363437636435343835303966356331
3463343766623264393037333638386534306164393430610a396533613235313030623834646466
31313662653435656663663361646261626130376632323163626232616331663239623236366264
6632626166623131380a333935646530396362363833383164386234653834323462386563373132
66643138663062346237646236333934306434633837643738343137623934626132376230316138
3237633130343862376332633565373631313736366431316232

View File

@ -0,0 +1,16 @@
- name: Generate Caddyfile from template
template:
src: Caddyfile.j2
dest: /etc/caddy/Caddyfile
owner: root
group: root
mode: '0644'
- name: Format config in the server
ansible.builtin.command:
caddy fmt --overwrite --config /etc/caddy/Caddyfile
- name: Reload Caddy
ansible.builtin.systemd:
name: caddy
state: reloaded

View File

@ -0,0 +1,78 @@
{
servers {
protocols h1 h2
}
dynamic_dns {
provider cloudflare {{ cloudflare_api_key }}
domains {
{% for domain in domains %}
{% set base_domain = domain.name.lstrip('*.') %}
{% if (domain.dynamic_dns | default(false)) %}
{{ base_domain }} *
{{ base_domain }} @
{% for site in domain.sites %}
{{ base_domain }} {{ site.name }}
{% endfor %}
{% endif %}
{% endfor %}
}
}
grace_period 10s
}
{% for domain in domains %}
{% set base_domain = domain.name.lstrip('*.') %}
{% set domain_var_name = domain.name.replace('.', '') %}
# Subdomain domain proxy for {{ base_domain }}
*.{{ base_domain }} {
tls {
issuer acme {
dns cloudflare {{ cloudflare_api_key }}
}
}
{% for site in domain.sites %}
@{{ site.name }} host {{ site.name }}.{{ base_domain }}
handle @{{ site.name }} {
reverse_proxy {{ site.host }}:{{ site.port }} {
transport http {
{% for opt in (site.transport_opts | default([])) %}
{{ opt }}
{% endfor %}
}
}
}
{% endfor %}
}
# Base domain proxy for {{ base_domain }}
{% if domain.host is defined and domain.port is defined %}
{{base_domain}} {
tls {
issuer acme {
dns cloudflare {{ cloudflare_api_key }}
}
}
@{{ domain_var_name }} {
host {{ base_domain}}
}
handle @{{ domain_var_name }} {
handle {
reverse_proxy {{ domain.host }}:{{ domain.port }} {
transport http {
{% for opt in (domain.transport_opts | default([])) %}
{{ opt }}
{% endfor %}
}
}
}
}
}
{% endif %}
{% endfor %}

View File

@ -0,0 +1,20 @@
- name: Create the data directory
ansible.builtin.file:
path: "{{ container_data_base_path }}"
state: directory
mode: '0777'
become: true
- name: Mount the data share to the VM
ansible.builtin.include_tasks:
file: ../tasks/mount_nfs.yml
vars:
mount_path: "{{ container_data_base_path }}"
mount_source: "10.89.0.15:/mnt/main/container-data"
- name: Create the data directory
ansible.builtin.file:
path: "{{ container_data_base_path }}/{{ dir_name }}"
state: directory
mode: '0777'
become: true

View File

@ -0,0 +1,45 @@
---
- name: Update apt cache
ansible.builtin.apt:
update_cache: yes
- name: Install prerequisite packages
ansible.builtin.apt:
name:
- ca-certificates
- curl
state: present
- name: Create apt keyrings directory
ansible.builtin.file:
path: /etc/apt/keyrings
state: directory
mode: '0755'
- name: Download Docker GPG key
ansible.builtin.get_url:
url: "https://download.docker.com/linux/ubuntu/gpg"
dest: /etc/apt/keyrings/docker.asc
mode: '0644'
- name: Add Docker apt repository
ansible.builtin.apt_repository:
repo: "deb [arch={{ docker_arch }} signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
filename: docker
state: present
vars:
docker_arch: "{{ ansible_architecture | regex_replace('x86_64', 'amd64') }}"
- name: Update apt cache after adding Docker repository
ansible.builtin.apt:
update_cache: true
- name: Install Docker packages
ansible.builtin.apt:
name:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
state: present

View File

@ -0,0 +1,22 @@
- name: Pull Portainer Agent image
become: true
community.docker.docker_image:
name: portainer/agent
tag: latest
source: pull
- name: Deploy Portainer Agent container
become: true
community.docker.docker_container:
name: portainer_agent
image: portainer/agent
pull: false # we already pulled above
state: started
restart_policy: always
ports:
- "9001:9001"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
- /:/host
timeout: 120 # wait up to 2m for it to come up

View File

@ -0,0 +1,12 @@
- name: Deploy beszel agent
community.docker.docker_container:
name: beszel-agent
state: started
image: henrygd/beszel-agent:latest
restart_policy: unless-stopped
network_mode: host
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
env:
LISTEN: "45876"
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMCUPTi9HAx/CV1RmSnO2p365gKUPSPzTLN9fbmINO4d'

View File

@ -0,0 +1,23 @@
- name: Deploy beszel hub
community.docker.docker_container:
name: beszel
image: henrygd/beszel:latest
restart_policy: unless-stopped
ports:
- 8090:8090
volumes:
- ./beszel_data:/beszel_data
- ./beszel_socket:/beszel_socket
- name: Deploy local agent
community.docker.docker_container:
name: beszel-agent
image: henrygd/beszel-agent:latest
restart_policy: unless-stopped
network_mode: host
volumes:
- ./beszel_socket:/beszel_socket
- /var/run/docker.sock:/var/run/docker.sock:ro
env:
LISTEN: /beszel_socket/beszel.sock
KEY: 'ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMCUPTi9HAx/CV1RmSnO2p365gKUPSPzTLN9fbmINO4d'

View File

@ -0,0 +1,22 @@
- name: Create data folder on NAS
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "grafana"
- name: Set facts
ansible.builtin.set_fact:
data_dir: "{{ container_data_base_path }}/grafana"
- name: Deploy grafana container
community.docker.docker_container:
name: grafana
image: grafana/grafana-oss
state: started
restart_policy: unless-stopped
env:
GF_SERVER_ROOT_URL: "https://dash.lan.thegrind.dev"
ports:
- '3000:3000'
volumes:
- "{{ data_dir }}:/var/lib/grafana"

View File

@ -0,0 +1,11 @@
- name: Deploy node exporter container
community.docker.docker_container:
name: prometheus-node-exporter
image: quay.io/prometheus/node-exporter:latest
command: "--path.rootfs=/host"
state: started
network_mode: host
pid_mode: host
restart_policy: unless-stopped
volumes:
- '/:/host:ro,rslave'

View File

@ -0,0 +1,29 @@
- name: Create data folder on NAS
ansible.builtin.include_role:
name: docker/container-data
vars:
dir_name: "prometheus"
- name: Set facts
ansible.builtin.set_fact:
data_dir: "{{ container_data_base_path }}/prometheus"
- name: Create config file
ansible.builtin.file:
dest: "{{ data_dir }}/prometheus.yml"
state: touch
mode: '0777'
- name: Deploy prometheus container
community.docker.docker_container:
name: prometheus
image: prom/prometheus
state: started
restart_policy: unless-stopped
# env:
# GF_SERVER_ROOT_URL: "https://dash.lan.thegrind.dev"
ports:
- '9090:9090'
volumes:
- "{{ data_dir }}:/prometheus"
- "{{ data_dir }}/prometheus.yml:/etc/prometheus/prometheus.yml"

View File

@ -1,4 +1,18 @@
---
- name: Grant database-level privileges on "{{ database }}"
delegate_to: localhost
community.postgresql.postgresql_privs:
db: "{{ database }}"
type: database
objs: "{{ database }}"
privs: "CREATE"
role: "{{ user }}"
state: present
login_host: "{{ pg_host }}"
login_port: "{{ pg_port }}"
login_user: "{{ pg_user }}"
login_password: "{{ pg_password }}"
- name: Give user full privileges on database
delegate_to: localhost
community.postgresql.postgresql_privs:

View File

@ -0,0 +1,8 @@
# roles/webmin/defaults/main.yml
webmin_repo_url: https://download.webmin.com
webmin_dist: stable
webmin_section: contrib
# Optional HTTP auth
webmin_auth_user: javi
webmin_auth_pass: password

View File

@ -1,40 +1,66 @@
---
- name: Install required dependencies
- name: Remove any old/malformed Webmin sources list
ansible.builtin.file:
path: /etc/apt/sources.list.d/webmin.list
state: absent
- name: Remove any old Webmin keyring
ansible.builtin.file:
path: /usr/share/keyrings/webmin-archive-keyring.gpg
state: absent
- name: Install Webmin GPG key into its own keyring
ansible.builtin.apt_key:
url: https://download.webmin.com/developers-key.asc
keyring: /usr/share/keyrings/webmin-archive-keyring.gpg
state: present
- name: Add Webmin APT repository
ansible.builtin.apt_repository:
filename: webmin
repo: >-
deb [signed-by=/usr/share/keyrings/webmin-archive-keyring.gpg]
https://download.webmin.com/download/repository
sarge contrib
state: present
- name: Ensure apt prerequisites are installed
ansible.builtin.apt:
name:
- wget
- curl
- gnupg
- apt-transport-https
- software-properties-common
- ca-certificates
state: present
update_cache: true
- name: Add Webmin repository
- name: Configure APT HTTP auth for Webmin
when: webmin_auth_user is defined and webmin_auth_pass is defined
ansible.builtin.copy:
dest: /etc/apt/sources.list.d/webmin.list
content: "deb http://download.webmin.com/download/repository sarge contrib"
owner: javi
group: javi
mode: '0777'
dest: /etc/apt/auth.conf.d/webmin.conf
mode: '0600'
content: |
machine {{ webmin_repo_url | regex_replace('^https?://','') }}
login {{ webmin_auth_user }}
password {{ webmin_auth_pass }}
- name: Add Webmin GPG key
ansible.builtin.apt_key:
url: https://www.webmin.com/jcameron-key.asc
state: present
- name: Remove old Webmin preference file
ansible.builtin.file:
path: /etc/apt/preferences.d/webmin-stable-package-priority
state: absent
- name: Update apt cache
- name: Refresh apt cache
ansible.builtin.apt:
update_cache: true
- name: Install Webmin with recommended packages
- name: Install Webmin
ansible.builtin.apt:
name:
- webmin
state: present
update_cache: true
install_recommends: true
- name: Ensure Webmin service is enabled and running
ansible.builtin.systemd:
- name: Ensure Webmin service is enabled & started
ansible.builtin.service:
name: webmin
state: started
enabled: true
state: started

View File

View File

View File

@ -0,0 +1,3 @@
port: "6379"
container_name: "valkey"
volume_path: "/home/docker/valkey"

View File

@ -0,0 +1,18 @@
- name: Create persistence volume
ansible.builtin.file:
dest: "{{ volume_path }}"
state: directory
mode: '0777'
- name: Deploy valkey container
community.docker.docker_container:
name: "{{ container_name }}"
image: bitnami/valkey:latest
pull: true
restart_policy: always
ports:
- "{{ port }}:6379"
env:
ALLOW_EMPTY_PASSWORD: "yes"
volumes:
- "{{ volume_path }}:/bitnami/valkey/data"