Compare commits
5d4a21bf55...fbd756c2e4 (9 commits)

| SHA1 |
|---|
| fbd756c2e4 |
| c352d29c86 |
| b534acb705 |
| daa4e986e4 |
| e816cd06cc |
| ff4c81f9f5 |
| 24bb0cc05b |
| c604dec983 |
| d20185463b |
_caddy/.env.example (new file, 7 lines)

```
SOCKET=/var/run/docker.sock
ENDPOINT=your_endpoint
APP_KEY=your_application_key
APP_SECRET=your_application_secret
CONSUMER_KEY=your_consumer_key
DOMAIN=example.com
INT_DOMAIN=hl.$DOMAIN # homelab
```
_caddy/compose.yml (new file, 32 lines)

```yaml
---
services:
  caddy:
    container_name: caddy
    image: git.morthimer.fr/morthimer/caddy-homelab
    ports:
      - 8080:80
      - 8443:443
    environment:
      - CADDY_INGRESS_NETWORKS=dmz
      # If you are not using Podman, comment the following line
      - CADDY_DOCKER_NO_SCOPE=true
    env_file: .env
    networks:
      - dmz
    volumes:
      - ${SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
      - ./appdata:/data
    restart: unless-stopped
    labels:
      # Caddy configuration (https://github.com/lucaslorentz/caddy-docker-proxy?tab=readme-ov-file#table-of-contents)
      # Global ACME DNS settings
      caddy.acme_dns: ovh
      caddy.acme_dns.endpoint: "{env.ENDPOINT}"
      caddy.acme_dns.application_key: "{env.APP_KEY}"
      caddy.acme_dns.application_secret: "{env.APP_SECRET}"
      caddy.acme_dns.consumer_key: "{env.CONSUMER_KEY}"
      ## Debug
      # caddy.log.level: DEBUG
networks:
  dmz:
    external: true
```
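Every stack below that should be reachable through this proxy joins the external `dmz` network and declares `caddy.*` labels, which caddy-docker-proxy turns into Caddyfile site blocks. As a minimal sketch of that convention (the `whoami` service name, image, and port are illustrative, not part of this repo):

```yaml
services:
  whoami:                     # hypothetical example service
    image: traefik/whoami     # any container exposing an HTTP port works
    networks:
      - dmz                   # must be one of CADDY_INGRESS_NETWORKS
    labels:
      caddy: "*.{$$INT_DOMAIN}"                         # wildcard site block
      caddy.1_@whoami: host whoami.{$$INT_DOMAIN}       # named host matcher
      caddy.1_handle: "@whoami"                         # route matching requests
      caddy.1_handle.reverse_proxy: "{{upstreams 80}}"  # to the container port

networks:
  dmz:
    external: true
```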
alertmanager/appconf/example.alertmanager.yml (new file, 122 lines)

```yaml
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: 'localhost:25'
  smtp_from: 'alertmanager@example.org'
  smtp_auth_username: 'alertmanager'
  smtp_auth_password: 'password'

# The directory from which notification templates are read.
templates:
  - '/etc/alertmanager/template/*.tmpl'

# The root route on which each incoming alert enters.
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  #
  # To aggregate by all possible labels use '...' as the sole label name.
  # This effectively disables aggregation entirely, passing through all
  # alerts as-is. This is unlikely to be what you want, unless you have
  # a very low alert volume or your upstream notification system performs
  # its own grouping. Example: group_by: [...]
  group_by: ['alertname', 'cluster', 'service']

  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' to send the initial notification.
  # This ensures that multiple alerts for the same group that start
  # firing shortly after one another are batched together in the first
  # notification.
  group_wait: 30s

  # When the first notification has been sent, wait 'group_interval' to send
  # a batch of new alerts that started firing for that group.
  group_interval: 5m

  # If an alert has successfully been sent, wait 'repeat_interval' to
  # resend it.
  repeat_interval: 3h

  # A default receiver
  receiver: team-X-mails

  # All the above attributes are inherited by all child routes and can be
  # overwritten on each.

  # The child route trees.
  routes:
    # This route performs a regular expression match on alert labels to
    # catch alerts that are related to a list of services.
    - matchers:
        - service=~"foo1|foo2|baz"
      receiver: team-X-mails
      # The service has a sub-route for critical alerts; any alerts
      # that do not match, i.e. severity != critical, fall back to the
      # parent node and are sent to 'team-X-mails'.
      routes:
        - matchers:
            - severity="critical"
          receiver: team-X-pager
    - matchers:
        - service="files"
      receiver: team-Y-mails

      routes:
        - matchers:
            - severity="critical"
          receiver: team-Y-pager

    # This route handles all alerts coming from a database service. If there's
    # no team to handle it, it defaults to the DB team.
    - matchers:
        - service="database"
      receiver: team-DB-pager
      # Also group alerts by affected database.
      group_by: [alertname, cluster, database]
      routes:
        - matchers:
            - owner="team-X"
          receiver: team-X-pager
          continue: true
        - matchers:
            - owner="team-Y"
          receiver: team-Y-pager


# Inhibition rules allow muting a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
  - source_matchers: [severity="critical"]
    target_matchers: [severity="warning"]
    # Apply inhibition if the alertname is the same.
    # CAUTION:
    #   If all label names listed in `equal` are missing
    #   from both the source and target alerts,
    #   the inhibition rule will apply!
    equal: [alertname, cluster, service]


receivers:
  - name: 'team-X-mails'
    email_configs:
      - to: 'team-X+alerts@example.org'

  - name: 'team-X-pager'
    email_configs:
      - to: 'team-X+alerts-critical@example.org'
    pagerduty_configs:
      - service_key: <team-X-key>

  - name: 'team-Y-mails'
    email_configs:
      - to: 'team-Y+alerts@example.org'

  - name: 'team-Y-pager'
    pagerduty_configs:
      - service_key: <team-Y-key>

  - name: 'team-DB-pager'
    pagerduty_configs:
      - service_key: <team-DB-key>
```
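One caveat before the compose file that follows: it binds `appconf/alertmanager.yml`, while the repo only ships the `example.alertmanager.yml` above. Presumably the example is copied and edited first; otherwise Docker creates an empty directory at the missing host path and Alertmanager fails to load its config. A sketch of the assumed setup:

```yaml
# Assumption: the shipped example is copied into place before first start:
#   cp appconf/example.alertmanager.yml appconf/alertmanager.yml
# The bind mount in alertmanager/compose.yml then resolves to a real file:
volumes:
  - ./appconf/alertmanager.yml:/etc/alertmanager/alertmanager.yml
```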
alertmanager/compose.yml (new file, 16 lines)

```yaml
services:
  alertmanager:
    container_name: alertmanager
    image: quay.io/prometheus/alertmanager
    restart: unless-stopped
    volumes:
      - ./appconf/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    networks:
      - monitoring
    labels:
      # Diun
      diun.enable: true

networks:
  monitoring:
    external: true
```
cadvisor/compose.yml (new file, 34 lines)

```yaml
services:
  cadvisor:
    container_name: cadvisor
    image: gcr.io/cadvisor/cadvisor:v0.49.1
    command:
      - --podman=unix:///var/run/podman/podman.sock
      - --housekeeping_interval=10s
      - --docker_only=true
    volumes:
      - /:/rootfs:ro
      - /sys:/sys:ro
      - /dev/disk:/dev/disk:ro
      - ${SOCKET:-/var/run/podman}:/var/run/podman:ro
    devices:
      - /dev/kmsg
    privileged: true
    restart: always
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@cadvisor: host cadvisor.{$$INT_DOMAIN}
      caddy.1_handle: "@cadvisor"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true
    networks:
      - monitoring
      - dmz

networks:
  monitoring:
    external: true
  dmz:
    external: true
```
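The cadvisor stack above is wired for Podman (the `--podman` flag and Podman socket mount). For a plain Docker host, a rough sketch based on the mounts cAdvisor's documentation usually recommends (an assumption, not something this repo ships):

```yaml
services:
  cadvisor:
    container_name: cadvisor
    image: gcr.io/cadvisor/cadvisor:v0.49.1
    command:
      # no --podman flag on a Docker host
      - --housekeeping_interval=10s
      - --docker_only=true
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker:/var/lib/docker:ro
      - /dev/disk:/dev/disk:ro
    devices:
      - /dev/kmsg
    privileged: true
    restart: always
```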
code-server/compose.yml (new file, 26 lines)

```yaml
services:
  code-server:
    container_name: code-server
    image: lscr.io/linuxserver/code-server
    restart: unless-stopped
    networks:
      - dmz
    volumes:
      - ./appconf:/config
    environment:
      # If you're not using Podman, set PUID and PGID to 1000
      - PUID=0
      - PGID=0
      - TZ=Europe/Paris
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@code: host code.{$$INT_DOMAIN}
      caddy.1_handle: "@code"
      caddy.1_handle.reverse_proxy: "{{upstreams 8443}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
```
diun/compose.yml (new file, 25 lines)

```yaml
services:
  diun:
    container_name: diun
    image: crazymax/diun:4
    command: serve
    hostname: $HOST # server hostname
    volumes:
      - ./appdata:/data
      - $SOCKET:/var/run/docker.sock
    restart: unless-stopped
    environment:
      - TZ=Europe/Paris
      - DIUN_WATCH_WORKERS=20
      - DIUN_WATCH_SCHEDULE=0 */6 * * *
      - DIUN_WATCH_JITTER=30s
      - DIUN_PROVIDERS_DOCKER=true
    networks:
      - dmz
    labels:
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
```
dockge/compose.yml (new file, 25 lines)

```yaml
services:
  dockge:
    container_name: dockge
    image: louislam/dockge:1
    restart: unless-stopped
    volumes:
      - ${SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
      - ./appdata:/app/data
      - /opt/stacks:/opt/stacks
    environment:
      - DOCKGE_STACKS_DIR=/opt/stacks
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@dockge: host dockge.{$$INT_DOMAIN}
      caddy.1_handle: "@dockge"
      caddy.1_handle.reverse_proxy: "{{upstreams 5001}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
```
dozzle/compose.yml (new file, 24 lines)

```yaml
services:
  dozzle:
    container_name: dozzle
    image: amir20/dozzle:latest
    volumes:
      - $SOCKET:/var/run/docker.sock
    restart: unless-stopped
    networks:
      - monitoring
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@dozzle: host dozzle.{$$INT_DOMAIN}
      caddy.1_handle: "@dozzle"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true

networks:
  monitoring:
    external: true
  dmz:
    external: true
```
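A final note on the pattern shared by every stack above: the `dmz` and `monitoring` networks are declared `external: true`, so compose only attaches to them and fails if they do not already exist. Presumably they are created once up front; a sketch of the shared declaration and the assumed one-time setup:

```yaml
# Assumption: created manually before the first `docker compose up`, e.g.
#   docker network create dmz
#   docker network create monitoring
# (podman network create ... on Podman hosts)
networks:
  dmz:
    external: true
  monitoring:
    external: true
```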