Compare commits: 5d4a21bf55...main (26 commits)

| SHA1 |
|---|
| aa2a56230d |
| 6494393821 |
| 616f2afdfa |
| 0706280a9d |
| 041ee577f4 |
| 595a9bceba |
| eb01a093b0 |
| 30fd28f54c |
| 4afa83f9bc |
| 365382e7df |
| a8a09e7cee |
| b9896fe850 |
| 22f4139a3b |
| 0238550f15 |
| 83cff7450a |
| 26bab08ea0 |
| fd7a550dc5 |
| fbd756c2e4 |
| c352d29c86 |
| b534acb705 |
| daa4e986e4 |
| e816cd06cc |
| ff4c81f9f5 |
| 24bb0cc05b |
| c604dec983 |
| d20185463b |

README.md (new file, 71 lines)
@@ -0,0 +1,71 @@
# Homelab

## Description
This repository lists all the services hosted on the machines of my homelab, using Podman.

## List of services (WIP)

Web services:
- Caddy
- Dockge
- Excalidraw
- It-tools
- Umami
- Hugo
- Whoogle
- Wikijs
- Stirling-pdf
- Redlib
- Homepage
- yarr
- Traefik
- Ntfy
- Pihole

Security and Authentication:
- Fail2ban
- Authelia
- Wireguard
- lldap

Development:
- Gitea
- Gitea Actions Runner
- code-server

Monitoring and Logging:
- Grafana
- Prometheus
- Prometheus Node Exporter
- Prometheus Blackbox Exporter
- cAdvisor
- victoria-metrics
- Alertmanager
- Vector
- Loki
- Diun
- Dozzle

File management and Backup:
- Filebrowser
- Syncthing
- Linkding
- shiori

Multimedia:
- jellyfin
- sonarr
- radarr
- lidarr
- bazarr
- jellyseerr
- jackett
- flaresolverr
- transmission
- joal
- pigallery2

# Roadmap

- Deploying containers with either Renovate or Ansible
- K8s, ArgoCD?

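Note (not part of this diff): every service directory below follows the same compose conventions — it joins one or more shared external networks (dmz, monitoring, ldap), registers a virtual host with caddy-docker-proxy through labels, and opts into Diun update notifications. A hedged minimal sketch of that pattern, using a hypothetical my-service name and port:

# Illustrative template only; service name, image and port are placeholders.
services:
  my-service:
    container_name: my-service
    image: example/my-service
    restart: unless-stopped
    networks:
      - dmz                     # shared external network, created once beforehand
    labels:
      # Caddy (caddy-docker-proxy): route my-service.<internal domain> to this container
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@my-service: host my-service.{$$INT_DOMAIN}
      caddy.1_handle: "@my-service"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun: watch this image for updates
      diun.enable: true

networks:
  dmz:
    external: true
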
_caddy/.env.example (new file, 7 lines)
@@ -0,0 +1,7 @@
SOCKET=/var/run/docker.sock
ENDPOINT=your_endpoint
APP_KEY=your_application_key
APP_SECRET=your_application_secret
CONSUMER_KEY=your_consumer_key
DOMAIN=example.com
INT_DOMAIN=hl.$DOMAIN #homelab

_caddy/compose.yml (new file, 33 lines)
@@ -0,0 +1,33 @@
---
services:
  caddy:
    container_name: caddy
    image: git.morthimer.fr/morthimer/caddy-homelab
    ports:
      - 8080:80
      - 8443:443
    environment:
      - CADDY_INGRESS_NETWORKS=dmz
      # If you are not using Podman, comment the following line
      - CADDY_DOCKER_NO_SCOPE=true
    env_file: .env
    networks:
      - dmz
    volumes:
      - ${SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
      - ./appdata:/data
    restart: unless-stopped
    labels:
      # Caddy configuration (https://github.com/lucaslorentz/caddy-docker-proxy?tab=readme-ov-file#table-of-contents)
      # Global ACME DNS settings
      caddy.acme_dns: ovh
      caddy.acme_dns.endpoint: "{env.ENDPOINT}"
      caddy.acme_dns.application_key: "{env.APP_KEY}"
      caddy.acme_dns.application_secret: "{env.APP_SECRET}"
      caddy.acme_dns.consumer_key: "{env.CONSUMER_KEY}"
      ## Debug
      # caddy.log.level: DEBUG

networks:
  dmz:
    external: true

alertmanager/appconf/example.alertmanager.yml (new file, 122 lines)
@@ -0,0 +1,122 @@
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: 'localhost:25'
  smtp_from: 'alertmanager@example.org'
  smtp_auth_username: 'alertmanager'
  smtp_auth_password: 'password'

# The directory from which notification templates are read.
templates:
  - '/etc/alertmanager/template/*.tmpl'

# The root route on which each incoming alert enters.
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  #
  # To aggregate by all possible labels use '...' as the sole label name.
  # This effectively disables aggregation entirely, passing through all
  # alerts as-is. This is unlikely to be what you want, unless you have
  # a very low alert volume or your upstream notification system performs
  # its own grouping. Example: group_by: [...]
  group_by: ['alertname', 'cluster', 'service']

  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' to send the initial notification.
  # This way ensures that you get multiple alerts for the same group that start
  # firing shortly after another are batched together on the first
  # notification.
  group_wait: 30s

  # When the first notification was sent, wait 'group_interval' to send a batch
  # of new alerts that started firing for that group.
  group_interval: 5m

  # If an alert has successfully been sent, wait 'repeat_interval' to
  # resend them.
  repeat_interval: 3h

  # A default receiver
  receiver: team-X-mails

  # All the above attributes are inherited by all child routes and can
  # overwritten on each.

  # The child route trees.
  routes:
    # This routes performs a regular expression match on alert labels to
    # catch alerts that are related to a list of services.
    - matchers:
        - service=~"foo1|foo2|baz"
      receiver: team-X-mails
      # The service has a sub-route for critical alerts, any alerts
      # that do not match, i.e. severity != critical, fall-back to the
      # parent node and are sent to 'team-X-mails'
      routes:
        - matchers:
            - severity="critical"
          receiver: team-X-pager
    - matchers:
        - service="files"
      receiver: team-Y-mails

      routes:
        - matchers:
            - severity="critical"
          receiver: team-Y-pager

    # This route handles all alerts coming from a database service. If there's
    # no team to handle it, it defaults to the DB team.
    - matchers:
        - service="database"
      receiver: team-DB-pager
      # Also group alerts by affected database.
      group_by: [alertname, cluster, database]
      routes:
        - matchers:
            - owner="team-X"
          receiver: team-X-pager
          continue: true
        - matchers:
            - owner="team-Y"
          receiver: team-Y-pager


# Inhibition rules allow to mute a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
  - source_matchers: [severity="critical"]
    target_matchers: [severity="warning"]
    # Apply inhibition if the alertname is the same.
    # CAUTION:
    #   If all label names listed in `equal` are missing
    #   from both the source and target alerts,
    #   the inhibition rule will apply!
    equal: [alertname, cluster, service]


receivers:
  - name: 'team-X-mails'
    email_configs:
      - to: 'team-X+alerts@example.org'

  - name: 'team-X-pager'
    email_configs:
      - to: 'team-X+alerts-critical@example.org'
    pagerduty_configs:
      - service_key: <team-X-key>

  - name: 'team-Y-mails'
    email_configs:
      - to: 'team-Y+alerts@example.org'

  - name: 'team-Y-pager'
    pagerduty_configs:
      - service_key: <team-Y-key>

  - name: 'team-DB-pager'
    pagerduty_configs:
      - service_key: <team-DB-key>

alertmanager/compose.yml (new file, 16 lines)
@@ -0,0 +1,16 @@
services:
  alertmanager:
    container_name: alertmanager
    image: quay.io/prometheus/alertmanager
    restart: unless-stopped
    volumes:
      - ./appconf/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    networks:
      - monitoring
    labels:
      # Diun
      diun.enable: true

networks:
  monitoring:
    external: true

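Note (not part of this diff): the compose file above mounts ./appconf/alertmanager.yml, while the repository only ships example.alertmanager.yml, so a real configuration has to be written alongside the example. A minimal sketch, assuming a local SMTP relay and a single catch-all receiver (all values are placeholders):

# Minimal alertmanager.yml sketch; smarthost and addresses are assumptions.
global:
  smtp_smarthost: 'localhost:25'
  smtp_from: 'alertmanager@example.org'

route:
  receiver: default          # every alert ends up in the one receiver below
  group_by: ['alertname']
  group_wait: 30s
  repeat_interval: 3h

receivers:
  - name: default
    email_configs:
      - to: 'admin@example.org'
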
cadvisor/compose.yml (new file, 34 lines)
@@ -0,0 +1,34 @@
services:
  cadvisor:
    container_name: cadvisor
    image: gcr.io/cadvisor/cadvisor:v0.49.1
    command:
      - --podman=unix:///var/run/podman/podman.sock
      - --housekeeping_interval=10s
      - --docker_only=true
    volumes:
      - /:/rootfs:ro
      - /sys:/sys:ro
      - /dev/disk:/dev/disk:ro
      - ${SOCKET:-/var/run/podman}:/var/run/podman:ro
    devices:
      - /dev/kmesg
    privileged: true
    restart: always
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@cadvisor: host cadvisor.{$$INT_DOMAIN}
      caddy.1_handle: "@cadvisor"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true
    networks:
      - monitoring
      - dmz

networks:
  monitoring:
    external: true
  dmz:
    external: true

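Note (not part of this diff): the {$$INT_DOMAIN} labels used here and in the other services rely on Compose treating $$ as an escaped literal $, so the label stored on the container is {$INT_DOMAIN}, a Caddy environment placeholder that caddy-docker-proxy resolves from the variables loaded by the caddy container's .env. A small illustration, assuming INT_DOMAIN=hl.example.com:

# Illustration only: how one caddy label is transformed (the INT_DOMAIN value is assumed).
labels:
  caddy: "*.{$$INT_DOMAIN}"   # Compose stores the label as: *.{$INT_DOMAIN}
                              # Caddy then expands it to:    *.hl.example.com
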
code-server/compose.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
services:
  code-server:
    container_name: code-server
    image: lscr.io/linuxserver/code-server
    restart: unless-stopped
    networks:
      - dmz
    volumes:
      - ./appconf:/config
    environment:
      # If you're not using Podman, set PUID and PGID to 1000
      - PUID=0
      - PGID=0
      - TZ=Europe/Paris
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@code: host code.{$$INT_DOMAIN}
      caddy.1_handle: "@code"
      caddy.1_handle.reverse_proxy: "{{upstreams 8443}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

diun/compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
services:
  diun:
    container_name: diun
    image: crazymax/diun:4
    command: serve
    hostname: $HOST # server hostname
    volumes:
      - ./appdata:/data
      - $SOCKET:/var/run/docker.sock
    restart: unless-stopped
    environment:
      - TZ=Europe/Paris
      - DIUN_WATCH_WORKERS=20
      - DIUN_WATCH_SCHEDULE=0 */6 * * *
      - DIUN_WATCH_JITTER=30s
      - DIUN_PROVIDERS_DOCKER=true
    networks:
      - dmz
    labels:
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

dockge/compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
services:
  dockge:
    container_name: dockge
    image: louislam/dockge:1
    restart: unless-stopped
    volumes:
      - ${SOCKET:-/var/run/docker.sock}:/var/run/docker.sock
      - ./appdata:/app/data
      - /opt/stacks:/opt/stacks
    environment:
      - DOCKGE_STACKS_DIR=/opt/stacks
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@dockge: host dockge.{$$INT_DOMAIN}
      caddy.1_handle: "@dockge"
      caddy.1_handle.reverse_proxy: "{{upstreams 5001}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

dozzle/compose.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
services:
  dozzle:
    container_name: dozzle
    image: amir20/dozzle:latest
    volumes:
      - $SOCKET:/var/run/docker.sock
    restart: unless-stopped
    networks:
      - monitoring
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@dozzle: host dozzle.{$$INT_DOMAIN}
      caddy.1_handle: "@dozzle"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true

networks:
  monitoring:
    external: true
  dmz:
    external: true

excalidraw/compose.yml (new file, 21 lines)
@@ -0,0 +1,21 @@
services:
  excalidraw:
    container_name: excalidraw
    image: excalidraw/excalidraw
    restart: unless-stopped
    networks:
      - dmz
    healthcheck:
      disable: true
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@draw: host draw.{$$INT_DOMAIN}
      caddy.1_handle: "@draw"
      caddy.1_handle.reverse_proxy: "{{upstreams 80}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

gitea/.env.example (new file, 3 lines)
@@ -0,0 +1,3 @@
# https://docs.gitea.com/administration/config-cheat-sheet
# GITEA____APP_NAME=
# GITEA__SERVER__DOMAIN=

gitea/compose.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
services:
  gitea:
    container_name: gitea
    image: gitea/gitea:1
    networks:
      - dmz
      - ldap
    ports:
      - 2222:22
    volumes:
      - ./appdata:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    restart: unless-stopped
    env_file: .env
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@git: host git.{$$INT_DOMAIN}
      caddy.1_handle: "@git"
      caddy.1_handle.reverse_proxy: "{{upstreams 3000}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
  ldap:
    external: true

grafana/.env.example (new file, 1 line)
@@ -0,0 +1 @@
# https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#override-configuration-with-environment-variables

grafana/compose.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
services:
  grafana:
    container_name: grafana
    image: grafana/grafana
    user: 0:0 # Rootless Podman
    env_file: .env
    restart: unless-stopped
    volumes:
      - ./appdata:/var/lib/grafana
    networks:
      - dmz
      - monitoring
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@grafana: host grafana.{$$INT_DOMAIN}
      caddy.1_handle: "@grafana"
      caddy.1_handle.reverse_proxy: "{{upstreams 3000}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
  monitoring:
    external: true

homepage/.env.example (new file, 1 line)
@@ -0,0 +1 @@
# https://gethomepage.dev/latest/installation/docker/#using-environment-secrets

homepage/compose.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
services:
  homepage:
    image: ghcr.io/gethomepage/homepage
    container_name: homepage
    volumes:
      - ./appconf:/app/config
      - $SOCKET:/var/run/docker.sock
    env_file: .env
    restart: unless-stopped
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@homepage: host homepage.{$$INT_DOMAIN}
      caddy.1_handle: "@homepage"
      caddy.1_handle.reverse_proxy: "{{upstreams 3000}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

it-tools/compose.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
services:
  it-tools:
    container_name: it-tools
    image: corentinth/it-tools
    restart: unless-stopped
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@it-tools: "host it-tools.{$$INT_DOMAIN}"
      caddy.1_handle: "@it-tools"
      caddy.1_handle.reverse_proxy: "{{upstreams 80}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

lldap/.env.example (new file, 6 lines)
@@ -0,0 +1,6 @@
UID=####
GID=####
TZ=####/####
LLDAP_JWT_SECRET=REPLACE_WITH_RANDOM
LLDAP_KEY_SEED=REPLACE_WITH_RANDOM
LLDAP_LDAP_BASE_DN=dc=example,dc=com

lldap/compose.yml (new file, 32 lines)
@@ -0,0 +1,32 @@
services:
  lldap:
    container_name: lldap
    image: lldap/lldap:stable
    ports:
      # For LDAP
      #- "3890:3890"
      # For LDAPS (LDAP Over SSL), enable port if LLDAP_LDAPS_OPTIONS__ENABLED set true, look env below
      #- "6360:6360"
      # For the web front-end
      #- "17170:17170"
    volumes:
      - ./appdata:/data
    env_file: .env
    restart: unless-stopped
    networks:
      - dmz
      - ldap
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@lldap: "host lldap.{$$INT_DOMAIN}"
      caddy.1_handle: "@lldap"
      caddy.1_handle.reverse_proxy: "{{upstreams 17170}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
  ldap:
    external: true

ntfy/compose.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
services:
  ntfy:
    container_name: ntfy
    image: binwiederhier/ntfy
    command: serve
    environment:
      - TZ=Europe/Paris
    volumes:
      - /var/cache/ntfy:/var/cache/ntfy
      - ./appconf:/etc/ntfy
    restart: unless-stopped
    networks:
      - dmz
      - monitoring
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@ntfy: "host ntfy.{$$INT_DOMAIN}"
      caddy.1_handle: "@ntfy"
      caddy.1_handle.reverse_proxy: "{{upstreams 80}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
  monitoring:
    external: true

prometheus/appconf/example.prometheus.yml (new file, 442 lines)
@@ -0,0 +1,442 @@
# my global config
global:
  scrape_interval: 15s
  evaluation_interval: 30s
  body_size_limit: 15MB
  sample_limit: 1500
  target_limit: 30
  label_limit: 30
  label_name_length_limit: 200
  label_value_length_limit: 200
  # scrape_timeout is set to the global default (10s).

  external_labels:
    monitor: codelab
    foo: bar

rule_files:
  - "first.rules"
  - "my/*.rules"

remote_write:
  - url: http://remote1/push
    name: drop_expensive
    write_relabel_configs:
      - source_labels: [__name__]
        regex: expensive.*
        action: drop
    oauth2:
      client_id: "123"
      client_secret: "456"
      token_url: "http://remote1/auth"
      tls_config:
        cert_file: valid_cert_file
        key_file: valid_key_file

  - url: http://remote2/push
    name: rw_tls
    tls_config:
      cert_file: valid_cert_file
      key_file: valid_key_file
    headers:
      name: value

remote_read:
  - url: http://remote1/read
    read_recent: true
    name: default
    enable_http2: false
  - url: http://remote3/read
    read_recent: false
    name: read_special
    required_matchers:
      job: special
    tls_config:
      cert_file: valid_cert_file
      key_file: valid_key_file

scrape_configs:
  - job_name: prometheus

    honor_labels: true
    # scrape_interval is defined by the configured global (15s).
    # scrape_timeout is defined by the global default (10s).

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    file_sd_configs:
      - files:
          - foo/*.slow.json
          - foo/*.slow.yml
          - single/file.yml
        refresh_interval: 10m
      - files:
          - bar/*.yaml

    static_configs:
      - targets: ["localhost:9090", "localhost:9191"]
        labels:
          my: label
          your: label

    relabel_configs:
      - source_labels: [job, __meta_dns_name]
        regex: (.*)some-[regex]
        target_label: job
        replacement: foo-${1}
        # action defaults to 'replace'
      - source_labels: [abc]
        target_label: cde
      - replacement: static
        target_label: abc
      - regex:
        replacement: static
        target_label: abc
      - source_labels: [foo]
        target_label: abc
        action: keepequal
      - source_labels: [foo]
        target_label: abc
        action: dropequal

    authorization:
      credentials_file: valid_token_file

    tls_config:
      min_version: TLS10

  - job_name: service-x

    basic_auth:
      username: admin_name
      password: "multiline\nmysecret\ntest"

    scrape_interval: 50s
    scrape_timeout: 5s
    scrape_protocols: ["PrometheusText0.0.4"]

    body_size_limit: 10MB
    sample_limit: 1000
    target_limit: 35
    label_limit: 35
    label_name_length_limit: 210
    label_value_length_limit: 210

    metrics_path: /my_path
    scheme: https

    dns_sd_configs:
      - refresh_interval: 15s
        names:
          - first.dns.address.domain.com
          - second.dns.address.domain.com
      - names:
          - first.dns.address.domain.com

    relabel_configs:
      - source_labels: [job]
        regex: (.*)some-[regex]
        action: drop
      - source_labels: [__address__]
        modulus: 8
        target_label: __tmp_hash
        action: hashmod
      - source_labels: [__tmp_hash]
        regex: 1
        action: keep
      - action: labelmap
        regex: 1
      - action: labeldrop
        regex: d
      - action: labelkeep
        regex: k

    metric_relabel_configs:
      - source_labels: [__name__]
        regex: expensive_metric.*
        action: drop

  - job_name: service-y

    consul_sd_configs:
      - server: "localhost:1234"
        token: mysecret
        path_prefix: /consul
        services: ["nginx", "cache", "mysql"]
        tags: ["canary", "v1"]
        node_meta:
          rack: "123"
        allow_stale: true
        scheme: https
        tls_config:
          ca_file: valid_ca_file
          cert_file: valid_cert_file
          key_file: valid_key_file
          insecure_skip_verify: false

    relabel_configs:
      - source_labels: [__meta_sd_consul_tags]
        separator: ","
        regex: label:([^=]+)=([^,]+)
        target_label: ${1}
        replacement: ${2}

  - job_name: service-z

    tls_config:
      cert_file: valid_cert_file
      key_file: valid_key_file

    authorization:
      credentials: mysecret

  - job_name: service-kubernetes

    kubernetes_sd_configs:
      - role: endpoints
        api_server: "https://localhost:1234"
        tls_config:
          cert_file: valid_cert_file
          key_file: valid_key_file

        basic_auth:
          username: "myusername"
          password: "mysecret"

  - job_name: service-kubernetes-namespaces

    kubernetes_sd_configs:
      - role: endpoints
        api_server: "https://localhost:1234"
        namespaces:
          names:
            - default

    basic_auth:
      username: "myusername"
      password_file: valid_password_file

  - job_name: service-kuma

    kuma_sd_configs:
      - server: http://kuma-control-plane.kuma-system.svc:5676
        client_id: main-prometheus

  - job_name: service-marathon
    marathon_sd_configs:
      - servers:
          - "https://marathon.example.com:443"

        auth_token: "mysecret"
        tls_config:
          cert_file: valid_cert_file
          key_file: valid_key_file

  - job_name: service-nomad
    nomad_sd_configs:
      - server: 'http://localhost:4646'

  - job_name: service-ec2
    ec2_sd_configs:
      - region: us-east-1
        access_key: access
        secret_key: mysecret
        profile: profile
        filters:
          - name: tag:environment
            values:
              - prod

          - name: tag:service
            values:
              - web
              - db

  - job_name: service-lightsail
    lightsail_sd_configs:
      - region: us-east-1
        access_key: access
        secret_key: mysecret
        profile: profile

  - job_name: service-azure
    azure_sd_configs:
      - environment: AzurePublicCloud
        authentication_method: OAuth
        subscription_id: 11AAAA11-A11A-111A-A111-1111A1111A11
        resource_group: my-resource-group
        tenant_id: BBBB222B-B2B2-2B22-B222-2BB2222BB2B2
        client_id: 333333CC-3C33-3333-CCC3-33C3CCCCC33C
        client_secret: mysecret
        port: 9100

  - job_name: service-nerve
    nerve_sd_configs:
      - servers:
          - localhost
        paths:
          - /monitoring

  - job_name: 0123service-xxx
    metrics_path: /metrics
    static_configs:
      - targets:
          - localhost:9090

  - job_name: badfederation
    honor_timestamps: false
    metrics_path: /federate
    static_configs:
      - targets:
          - localhost:9090

  - job_name: 測試
    metrics_path: /metrics
    static_configs:
      - targets:
          - localhost:9090

  - job_name: httpsd
    http_sd_configs:
      - url: "http://example.com/prometheus"

  - job_name: service-triton
    triton_sd_configs:
      - account: "testAccount"
        dns_suffix: "triton.example.com"
        endpoint: "triton.example.com"
        port: 9163
        refresh_interval: 1m
        version: 1
        tls_config:
          cert_file: valid_cert_file
          key_file: valid_key_file

  - job_name: digitalocean-droplets
    digitalocean_sd_configs:
      - authorization:
          credentials: abcdef

  - job_name: docker
    docker_sd_configs:
      - host: unix:///var/run/docker.sock

  - job_name: dockerswarm
    dockerswarm_sd_configs:
      - host: http://127.0.0.1:2375
        role: nodes

  - job_name: service-openstack
    openstack_sd_configs:
      - role: instance
        region: RegionOne
        port: 80
        refresh_interval: 1m
        tls_config:
          ca_file: valid_ca_file
          cert_file: valid_cert_file
          key_file: valid_key_file

  - job_name: service-puppetdb
    puppetdb_sd_configs:
      - url: https://puppetserver/
        query: 'resources { type = "Package" and title = "httpd" }'
        include_parameters: true
        port: 80
        refresh_interval: 1m
        tls_config:
          ca_file: valid_ca_file
          cert_file: valid_cert_file
          key_file: valid_key_file

  - job_name: hetzner
    relabel_configs:
      - action: uppercase
        source_labels: [instance]
        target_label: instance
    hetzner_sd_configs:
      - role: hcloud
        authorization:
          credentials: abcdef
      - role: robot
        basic_auth:
          username: abcdef
          password: abcdef

  - job_name: service-eureka
    eureka_sd_configs:
      - server: "http://eureka.example.com:8761/eureka"

  - job_name: ovhcloud
    ovhcloud_sd_configs:
      - service: vps
        endpoint: ovh-eu
        application_key: testAppKey
        application_secret: testAppSecret
        consumer_key: testConsumerKey
        refresh_interval: 1m
      - service: dedicated_server
        endpoint: ovh-eu
        application_key: testAppKey
        application_secret: testAppSecret
        consumer_key: testConsumerKey
        refresh_interval: 1m

  - job_name: scaleway
    scaleway_sd_configs:
      - role: instance
        project_id: 11111111-1111-1111-1111-111111111112
        access_key: SCWXXXXXXXXXXXXXXXXX
        secret_key: 11111111-1111-1111-1111-111111111111
      - role: baremetal
        project_id: 11111111-1111-1111-1111-111111111112
        access_key: SCWXXXXXXXXXXXXXXXXX
        secret_key: 11111111-1111-1111-1111-111111111111

  - job_name: linode-instances
    linode_sd_configs:
      - authorization:
          credentials: abcdef

  - job_name: uyuni
    uyuni_sd_configs:
      - server: https://localhost:1234
        username: gopher
        password: hole

  - job_name: ionos
    ionos_sd_configs:
      - datacenter_id: 8feda53f-15f0-447f-badf-ebe32dad2fc0
        authorization:
          credentials: abcdef

  - job_name: vultr
    vultr_sd_configs:
      - authorization:
          credentials: abcdef

alerting:
  alertmanagers:
    - scheme: https
      static_configs:
        - targets:
            - "1.2.3.4:9093"
            - "1.2.3.5:9093"
            - "1.2.3.6:9093"

storage:
  tsdb:
    out_of_order_time_window: 30m

tracing:
  endpoint: "localhost:4317"
  client_type: "grpc"
  headers:
    foo: "bar"
  timeout: 5s
  compression: "gzip"
  tls_config:
    cert_file: valid_cert_file
    key_file: valid_key_file
    insecure_skip_verify: true

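Note (not part of this diff): the file above is the upstream Prometheus example configuration and demonstrates far more service-discovery mechanisms than this homelab needs; the compose file below only mounts ./appconf, so the prometheus.yml that actually runs would be much smaller. A minimal sketch aimed at the exporters listed in the README (job names and the node-exporter target are assumptions):

# Minimal homelab-oriented prometheus.yml sketch; targets and job names are assumptions.
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: prometheus
    static_configs:
      - targets: ["localhost:9090"]      # Prometheus scraping itself

  - job_name: cadvisor
    static_configs:
      - targets: ["cadvisor:8080"]       # container metrics (same port as the compose labels above)

  - job_name: node
    static_configs:
      - targets: ["node-exporter:9100"]  # assumed node exporter instance (default port)

alerting:
  alertmanagers:
    - static_configs:
        - targets: ["alertmanager:9093"] # Alertmanager from this repo (default port)
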
prometheus/compose.yml (new file, 30 lines)
@@ -0,0 +1,30 @@
services:
  prometheus:
    container_name: prometheus
    image: prom/prometheus
    volumes:
      - ./appconf:/etc/prometheus
    restart: unless-stopped
    command:
      - --config.file=/etc/prometheus/prometheus.yml
      - --storage.tsdb.path=/prometheus
      - --web.console.libraries=/usr/share/prometheus/console_libraries
      - --web.console.templates=/usr/share/prometheus/consoles
      - --web.enable-lifecycle
    networks:
      - dmz
      - monitoring
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@prometheus: host prometheus.{$$INT_DOMAIN}
      caddy.1_handle: "@prometheus"
      caddy.1_handle.reverse_proxy: "{{upstreams 9090}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true
  monitoring:
    external: true

redlib/compose.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
services:
  redlib:
    container_name: redlib
    image: quay.io/redlib/redlib
    restart: unless-stopped
    networks:
      - dmz
    environment:
      - REDLIB_ROBOTS_DISABLE_INDEXING=on
      - REDLIB_DEFAULT_THEME=black
      - REDLIB_DEFAULT_LAYOUT=clean
      - REDLIB_DEFAULT_WIDE=on
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@redlib: host redlib.{$$INT_DOMAIN}
      caddy.1_handle: "@redlib"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

shiori/compose.yml (new file, 26 lines)
@@ -0,0 +1,26 @@
services:
  shiori:
    container_name: shiori
    image: ghcr.io/go-shiori/shiori
    restart: unless-stopped
    user: 0:0 # Rootless Podman
    environment:
      SHIORI_DIR: /data
      SHIORI_HTTP_SECRET_KEY: $HTTP_SECRET_KEY
      TZ: Europe/Paris
    volumes:
      - ./appdata:/data
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@shiori: "host shiori.{$$INT_DOMAIN}"
      caddy.1_handle: "@shiori"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

stiling-pdf/compose.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
services:
  s-pdf:
    container_name: s-pdf
    image: frooodle/s-pdf
    restart: unless-stopped
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@spdf: "host spdf.{$$INT_DOMAIN}"
      caddy.1_handle: "@spdf"
      caddy.1_handle.reverse_proxy: "{{upstreams 8080}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

umami/.env.example (new file, 4 lines)
@@ -0,0 +1,4 @@
SECRET=replace-me-with-a-random-string
DB=umami
USER=user
PASS=password

umami/compose.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
services:
  umami:
    container_name: umami
    image: ghcr.io/umami-software/umami:postgresql-latest
    environment:
      DATABASE_URL: postgresql://$USER:$PASS@umami-db:5432/$DB
      DATABASE_TYPE: postgresql
      APP_SECRET: $SECRET
    depends_on:
      - umami-db
    restart: unless-stopped
    networks:
      - umami
      - proxy
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@umami: host umami.{$$INT_DOMAIN}
      caddy.1_handle: "@umami"
      caddy.1_handle.reverse_proxy: "{{upstreams 3000}}"
      # Get real client public IP if behind another proxy
      # caddy.1_handle.reverse_proxy.header_up: X-Forwarded-For {header.X-Real-IP}
      # Diun
      diun.enable: true

  umami-db:
    container_name: umami-db
    image: postgres:15-alpine
    environment:
      POSTGRES_DB: $DB
      POSTGRES_USER: $USER
      POSTGRES_PASSWORD: $PASS
    volumes:
      - ./appdb:/var/lib/postgresql/data
    restart: unless-stopped
    networks:
      - umami
networks:
  umami:
    external: true
  proxy:
    external: true

victoria-metrics/compose.yml (new file, 19 lines)
@@ -0,0 +1,19 @@
services:
  victoria-metrics-single:
    container_name: victoria-metrics-single
    image: victoriametrics/victoria-metrics:stable
    volumes:
      - ./appdata:/victoria-metrics-data
    command:
      - -retentionPeriod=12
      - -maxLabelsPerTimeseries=50
    restart: unless-stopped
    networks:
      - monitoring
    labels:
      # Diun
      diun.enable: true

networks:
  monitoring:
    external: true

whoogle/compose.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
services:
  whoogle:
    container_name: whoogle
    image: benbusby/whoogle-search
    env_file: .env
    restart: unless-stopped
    networks:
      - dmz
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@search: host search.{$$INT_DOMAIN}
      caddy.1_handle: "@search"
      caddy.1_handle.reverse_proxy: "{{upstreams 5000}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true

yarr/compose.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
services:
  yarr:
    container_name: yarr
    image: ghcr.io/wakeful-cloud/yarr
    volumes:
      - ./appdata:/data
    environment:
      # Rootless Podman
      - UID=0
      - GID=0
    networks:
      - dmz
    restart: unless-stopped
    labels:
      # Caddy
      caddy: "*.{$$INT_DOMAIN}"
      caddy.1_@rss: host rss.{$$INT_DOMAIN}
      caddy.1_handle: "@rss"
      caddy.1_handle.reverse_proxy: "{{upstreams 7070}}"
      # Diun
      diun.enable: true

networks:
  dmz:
    external: true