"Feature: Add environment variables example and monitoring script"

This commit is contained in:
Till Tomczak 2025-05-23 07:27:14 +02:00
parent 9f6219832c
commit 6751e4a54b
7 changed files with 803 additions and 37 deletions

View File

@@ -54,15 +54,7 @@ def create_app(config_name=None):
# Initialize network configuration
network_config = NetworkConfig(app)
# Register blueprint
app.register_blueprint(frontend_v2, url_prefix='/frontend_v2')
# Configure static files for frontend v2
@app.route('/frontend_v2/static/<path:filename>')
def frontend_v2_static(filename):
return send_from_directory(os.path.join(app.root_path, 'frontend_v2/static'), filename)
# Global variables
# Set global variables
app.config['PRINTERS'] = json.loads(app.config.get('PRINTERS', '{}'))
# Register database functions
@@ -80,6 +72,12 @@ def create_app(config_name=None):
# Register error handlers
register_error_handlers(app)
# Register blueprints
register_blueprints(app)
# Register middleware
register_middleware(app)
# Register background tasks
register_background_tasks(app)
@@ -1813,17 +1811,44 @@ def register_auth_functions(app):
def register_api_routes(app):
"""Registers all API routes."""
# API routes are already defined globally
# API routes are already defined globally as functions
# They are registered automatically when decorated with @app.route
pass
def register_web_routes(app):
"""Registers all web UI routes."""
# Web routes are already defined globally
# Web routes are already defined globally as functions
# They are registered automatically when decorated with @app.route
pass
def register_error_handlers(app):
"""Registers error handlers."""
# Error handlers are already defined globally
# They are registered automatically when decorated with @app.errorhandler
pass
def register_blueprints(app):
"""Registers all Flask blueprints."""
# Frontend V2 blueprint
app.register_blueprint(frontend_v2, url_prefix='/frontend_v2')
# Monitoring blueprint
from monitoring import monitoring_bp
app.register_blueprint(monitoring_bp)
# Configure static files for frontend v2
@app.route('/frontend_v2/static/<path:filename>')
def frontend_v2_static(filename):
return send_from_directory(os.path.join(app.root_path, 'frontend_v2/static'), filename)
def register_middleware(app):
"""Registers middleware components."""
# Monitoring middleware
if app.config.get('FLASK_ENV') != 'testing':
from monitoring import request_metrics
request_metrics.init_app(app)
# The security middleware is already registered in the configuration
pass
def register_background_tasks(app):
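A quick way to verify that the refactored registration still wires everything up is a smoke test against the app factory. This is only a sketch and assumes the layout shown in this diff (create_app in app.py taking a config name, the monitoring blueprint mounted under /monitoring); anything beyond that is illustrative:

from app import create_app

def test_monitoring_blueprint_registered():
    # The testing config skips the request-metrics middleware, but the
    # blueprint itself is registered unconditionally in register_blueprints().
    app = create_app('testing')
    client = app.test_client()
    response = client.get('/monitoring/health/simple')
    assert response.status_code == 200
    assert response.get_json()['status'] == 'ok'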

View File

@@ -33,6 +33,18 @@ class Config:
# Printer configuration
PRINTERS = os.environ.get('PRINTERS', '{}')
# API configuration
API_KEY = os.environ.get('API_KEY')
# Rate limiting
RATE_LIMIT_ENABLED = True
MAX_REQUESTS_PER_MINUTE = int(os.environ.get('MAX_REQUESTS_PER_MINUTE', '100'))
RATE_LIMIT_WINDOW_MINUTES = int(os.environ.get('RATE_LIMIT_WINDOW_MINUTES', '15'))
# Security
SECURITY_ENABLED = True
MAX_CONTENT_LENGTH = 16 * 1024 * 1024 # 16MB
@staticmethod
def init_app(app):
"""Initializes the application with this configuration."""
@@ -50,6 +62,10 @@ class DevelopmentConfig(Config):
# Shorter job check interval for faster development
JOB_CHECK_INTERVAL = int(os.environ.get('JOB_CHECK_INTERVAL', '30'))
# Less strict security in development
SECURITY_ENABLED = False
RATE_LIMIT_ENABLED = False
@staticmethod
def init_app(app):
Config.init_app(app)
@@ -76,6 +92,14 @@ class ProductionConfig(Config):
# Longer job check interval for better performance
JOB_CHECK_INTERVAL = int(os.environ.get('JOB_CHECK_INTERVAL', '60'))
# Production security
SECURITY_ENABLED = True
RATE_LIMIT_ENABLED = True
MAX_REQUESTS_PER_MINUTE = int(os.environ.get('MAX_REQUESTS_PER_MINUTE', '60'))
# HTTPS enforcement (when available)
FORCE_HTTPS = os.environ.get('FORCE_HTTPS', 'False').lower() == 'true'
@staticmethod
def init_app(app):
Config.init_app(app)
@@ -111,9 +135,30 @@ class ProductionConfig(Config):
error_handler.setLevel(logging.ERROR)
app.logger.addHandler(error_handler)
# Security logging
security_handler = RotatingFileHandler(
'logs/security.log',
maxBytes=Config.LOG_MAX_BYTES,
backupCount=Config.LOG_BACKUP_COUNT
)
security_handler.setFormatter(logging.Formatter(
'%(asctime)s SECURITY %(levelname)s: %(message)s [%(name)s]'
))
security_handler.setLevel(logging.WARNING)
# Security logger
security_logger = logging.getLogger('security')
security_logger.addHandler(security_handler)
security_logger.setLevel(logging.WARNING)
app.logger.setLevel(logging.INFO)
app.logger.info('MYP Backend starting in production mode')
# Register security middleware
if app.config.get('SECURITY_ENABLED', True):
from security import security_middleware
security_middleware.init_app(app)
class TestingConfig(Config):
"""Configuration for the test environment."""
@@ -132,6 +177,10 @@ class TestingConfig(Config):
# Shorter job check interval for tests
JOB_CHECK_INTERVAL = 5
# Disable security for tests
SECURITY_ENABLED = False
RATE_LIMIT_ENABLED = False
@staticmethod
def init_app(app):
Config.init_app(app)
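Since this commit also adds a (placeholder) backend/env.example, the variables the configuration above actually reads can be collected in one place. The following is only a sketch derived from config.py and docker-compose.yml, not the real contents of env.example; the printed values are the defaults the code falls back to:

import os

# Environment variables referenced by config.py / docker-compose.yml, with
# the fallback defaults used in the code. Purely illustrative.
env_defaults = {
    'FLASK_ENV': 'production',
    'SECRET_KEY': '',                      # no safe default; must be set
    'API_KEY': '',                         # optional; the API-key check is skipped if unset
    'PRINTERS': '{}',                      # JSON mapping of printer names to IPs
    'MAX_REQUESTS_PER_MINUTE': '100',      # 60 in ProductionConfig
    'RATE_LIMIT_WINDOW_MINUTES': '15',
    'JOB_CHECK_INTERVAL': '60',            # 30 in development, 5 in tests
    'FORCE_HTTPS': 'False',
}

for name, default in env_defaults.items():
    print(f"{name}={os.environ.get(name, default)}")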

1 backend/env.example Normal file
View File

@@ -0,0 +1 @@

330 backend/monitoring.py Normal file
View File

@@ -0,0 +1,330 @@
"""
Monitoring and health check module for the MYP Flask application.
Provides endpoints for system monitoring and performance metrics.
"""
from flask import Blueprint, jsonify, current_app
import psutil
import os
import sqlite3
import datetime
import threading
import time
from collections import defaultdict
# Blueprint for monitoring endpoints
monitoring_bp = Blueprint('monitoring', __name__, url_prefix='/monitoring')
# In-process metrics store
metrics = {
'requests_total': defaultdict(int),
'request_duration': defaultdict(list),
'database_queries': 0,
'active_jobs': 0,
'error_count': defaultdict(int),
'startup_time': datetime.datetime.now()
}
class HealthCheck:
"""System health checks."""
@staticmethod
def check_database():
"""
Überprüft die Datenbankverbindung.
Returns:
dict: Status und Details der Datenbankverbindung
"""
try:
db_path = current_app.config.get('DATABASE', 'instance/myp.db')
# Bei In-Memory-DB für Tests
if db_path == ':memory:':
return {'status': 'healthy', 'message': 'In-Memory-Datenbank aktiv'}
# Datei-basierte Datenbank prüfen
if not os.path.exists(db_path):
return {'status': 'unhealthy', 'message': 'Datenbankdatei nicht gefunden'}
# Verbindung testen
conn = sqlite3.connect(db_path, timeout=5)
cursor = conn.cursor()
cursor.execute('SELECT 1')
conn.close()
# Dateigröße ermitteln
db_size = os.path.getsize(db_path)
return {
'status': 'healthy',
'message': 'Datenbankverbindung erfolgreich',
'database_path': db_path,
'database_size_bytes': db_size
}
except Exception as e:
return {
'status': 'unhealthy',
'message': f'Datenbankfehler: {str(e)}'
}
@staticmethod
def check_disk_space():
"""
Überprüft den verfügbaren Festplattenspeicher.
Returns:
dict: Status und Details des Festplattenspeichers
"""
try:
disk_usage = psutil.disk_usage('.')
free_gb = disk_usage.free / (1024**3)
total_gb = disk_usage.total / (1024**3)
used_percent = (disk_usage.used / disk_usage.total) * 100
status = 'healthy'
if used_percent > 90:
status = 'critical'
elif used_percent > 80:
status = 'warning'
return {
'status': status,
'free_gb': round(free_gb, 2),
'total_gb': round(total_gb, 2),
'used_percent': round(used_percent, 2)
}
except Exception as e:
return {
'status': 'unhealthy',
'message': f'Festplattenfehler: {str(e)}'
}
@staticmethod
def check_memory():
"""
Überprüft die Speichernutzung.
Returns:
dict: Status und Details der Speichernutzung
"""
try:
memory = psutil.virtual_memory()
status = 'healthy'
if memory.percent > 90:
status = 'critical'
elif memory.percent > 80:
status = 'warning'
return {
'status': status,
'total_gb': round(memory.total / (1024**3), 2),
'available_gb': round(memory.available / (1024**3), 2),
'used_percent': round(memory.percent, 2)
}
except Exception as e:
return {
'status': 'unhealthy',
'message': f'Speicherfehler: {str(e)}'
}
@staticmethod
def check_background_threads():
"""
Überprüft die Hintergrund-Threads.
Returns:
dict: Status der Hintergrund-Threads
"""
try:
active_threads = [t.name for t in threading.enumerate() if t.is_alive()]
job_checker_running = any('job_checker' in name for name in active_threads)
return {
'status': 'healthy' if job_checker_running else 'warning',
'job_checker_running': job_checker_running,
'active_threads': active_threads,
'thread_count': len(active_threads)
}
except Exception as e:
return {
'status': 'unhealthy',
'message': f'Thread-Fehler: {str(e)}'
}
@monitoring_bp.route('/health')
def health_check():
"""
Umfassender Health Check aller Systemkomponenten.
Returns:
JSON: Status aller Systemkomponenten
"""
checks = {
'database': HealthCheck.check_database(),
'disk_space': HealthCheck.check_disk_space(),
'memory': HealthCheck.check_memory(),
'background_threads': HealthCheck.check_background_threads()
}
# Gesamtstatus bestimmen
overall_status = 'healthy'
for check in checks.values():
if check['status'] == 'unhealthy':
overall_status = 'unhealthy'
break
elif check['status'] in ['warning', 'critical']:
overall_status = 'degraded'
response = {
'status': overall_status,
'timestamp': datetime.datetime.now().isoformat(),
'checks': checks
}
status_code = 200 if overall_status == 'healthy' else 503
return jsonify(response), status_code
@monitoring_bp.route('/health/simple')
def simple_health_check():
"""
Einfacher Health Check für Load Balancer.
Returns:
JSON: Einfacher Status
"""
return jsonify({'status': 'ok', 'timestamp': datetime.datetime.now().isoformat()})
@monitoring_bp.route('/metrics')
def get_metrics():
"""
Sammelt und gibt Performance-Metriken zurück.
Returns:
JSON: System- und Anwendungsmetriken
"""
try:
# System-Metriken
cpu_percent = psutil.cpu_percent(interval=1)
memory = psutil.virtual_memory()
disk = psutil.disk_usage('.')
# Uptime berechnen
uptime = datetime.datetime.now() - metrics['startup_time']
# Anwendungsmetriken
app_metrics = {
'system': {
'cpu_percent': cpu_percent,
'memory_percent': memory.percent,
'disk_percent': (disk.used / disk.total) * 100,
'uptime_seconds': uptime.total_seconds()
},
'application': {
'requests_total': dict(metrics['requests_total']),
'database_queries_total': metrics['database_queries'],
'active_jobs': metrics['active_jobs'],
'error_count': dict(metrics['error_count']),
'startup_time': metrics['startup_time'].isoformat()
}
}
return jsonify(app_metrics)
except Exception as e:
current_app.logger.error(f"Fehler beim Sammeln der Metriken: {e}")
return jsonify({'error': 'Metriken nicht verfügbar'}), 500
@monitoring_bp.route('/info')
def get_info():
"""
Gibt allgemeine Informationen über die Anwendung zurück.
Returns:
JSON: Anwendungsinformationen
"""
return jsonify({
'application': 'MYP Backend',
'version': '2.0.0',
'flask_env': current_app.config.get('FLASK_ENV', 'unknown'),
'debug': current_app.debug,
'startup_time': metrics['startup_time'].isoformat(),
'python_version': os.sys.version,
'config': {
'database': current_app.config.get('DATABASE'),
'job_check_interval': current_app.config.get('JOB_CHECK_INTERVAL'),
'security_enabled': current_app.config.get('SECURITY_ENABLED', False),
'rate_limit_enabled': current_app.config.get('RATE_LIMIT_ENABLED', False)
}
})
def record_request_metric(endpoint, method, status_code, duration):
"""
Zeichnet Request-Metriken auf.
Args:
endpoint: API-Endpunkt
method: HTTP-Methode
status_code: HTTP-Status-Code
duration: Request-Dauer in Sekunden
"""
key = f"{method}_{endpoint}"
metrics['requests_total'][key] += 1
metrics['request_duration'][key].append(duration)
if status_code >= 400:
metrics['error_count'][str(status_code)] += 1
def record_database_query():
"""Records a database query."""
metrics['database_queries'] += 1
def update_active_jobs(count):
"""
Updates the number of active jobs.
Args:
count: number of active jobs
"""
metrics['active_jobs'] = count
class RequestMetricsMiddleware:
"""Middleware for automatic request tracking."""
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Initializes the middleware with the Flask app."""
app.before_request(self.before_request)
app.after_request(self.after_request)
def before_request(self):
"""Starts timing the request."""
from flask import g
g.start_time = time.time()
def after_request(self, response):
"""Records metrics after the request."""
from flask import g, request
if hasattr(g, 'start_time'):
duration = time.time() - g.start_time
record_request_metric(
request.endpoint or 'unknown',
request.method,
response.status_code,
duration
)
return response
# Global middleware instance
request_metrics = RequestMetricsMiddleware()
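As a usage illustration (not part of the commit), the blueprint can be mounted on a bare Flask app and queried directly; in the real application this wiring happens in register_blueprints() in app.py:

from flask import Flask
from monitoring import monitoring_bp

app = Flask(__name__)
app.register_blueprint(monitoring_bp)

with app.test_client() as client:
    health = client.get('/monitoring/health')
    # 200 when every check reports healthy, 503 as soon as one is unhealthy
    print(health.status_code)
    print(sorted(health.get_json()['checks']))  # ['background_threads', 'database', 'disk_space', 'memory']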

View File

@@ -23,6 +23,7 @@ requests==2.31.0
# Monitoring and logging
flask-healthcheck==0.1.0
prometheus-flask-exporter==0.23.0
psutil==5.9.6
# Development and testing (optional)
pytest==7.4.3

View File

@@ -1 +1,220 @@
"""
Security modules and middleware for the MYP Flask application.
Implements CSRF protection, a Content Security Policy and further security measures.
"""
from flask import request, jsonify, current_app
from flask_talisman import Talisman
from functools import wraps
import time
import hashlib
import hmac
from collections import defaultdict, deque
from datetime import datetime, timedelta
class SecurityMiddleware:
"""Central security middleware for the application."""
def __init__(self, app=None):
self.app = app
self.rate_limits = defaultdict(lambda: deque())
self.failed_attempts = defaultdict(int)
self.blocked_ips = set()
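# Note: rate-limit counters, failed-attempt counts and the block list live in
# process memory, so they reset on restart and are not shared between workers.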
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Initialisiert die Sicherheits-Middleware mit der Flask-App."""
self.app = app
# Talisman für Content Security Policy und HTTPS-Enforcement
if not app.debug:
Talisman(
app,
force_https=False, # In Produktion auf True setzen, wenn HTTPS verfügbar
strict_transport_security=True,
content_security_policy={
'default-src': "'self'",
'script-src': "'self' 'unsafe-inline'",
'style-src': "'self' 'unsafe-inline'",
'img-src': "'self' data:",
'font-src': "'self'",
'connect-src': "'self'",
'form-action': "'self'"
}
)
# Request-Hooks registrieren
app.before_request(self.before_request_security_check)
app.after_request(self.after_request_security_headers)
def before_request_security_check(self):
"""Sicherheitsüberprüfungen vor jeder Anfrage."""
client_ip = self.get_client_ip()
# Blocked IPs prüfen
if client_ip in self.blocked_ips:
current_app.logger.warning(f"Blockierte IP-Adresse versucht Zugriff: {client_ip}")
return jsonify({'message': 'Zugriff verweigert'}), 403
# Rate Limiting
if self.is_rate_limited(client_ip):
current_app.logger.warning(f"Rate Limit überschritten für IP: {client_ip}")
return jsonify({'message': 'Zu viele Anfragen'}), 429
# Content-Length prüfen (Schutz vor großen Payloads)
if request.content_length and request.content_length > 10 * 1024 * 1024: # 10MB
current_app.logger.warning(f"Payload zu groß von IP: {client_ip}")
return jsonify({'message': 'Payload zu groß'}), 413
def after_request_security_headers(self, response):
"""Fügt Sicherheits-Header zu jeder Antwort hinzu."""
response.headers['X-Content-Type-Options'] = 'nosniff'
response.headers['X-Frame-Options'] = 'DENY'
response.headers['X-XSS-Protection'] = '1; mode=block'
response.headers['Referrer-Policy'] = 'strict-origin-when-cross-origin'
# Cache-Control für statische Ressourcen
if request.endpoint and 'static' in request.endpoint:
response.headers['Cache-Control'] = 'public, max-age=3600'
else:
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '0'
return response
def get_client_ip(self):
"""Ermittelt die Client-IP-Adresse."""
if request.headers.get('X-Forwarded-For'):
return request.headers.get('X-Forwarded-For').split(',')[0].strip()
elif request.headers.get('X-Real-IP'):
return request.headers.get('X-Real-IP')
else:
return request.remote_addr
def is_rate_limited(self, ip, max_requests=100, window_minutes=15):
"""
Überprüft Rate Limiting für eine IP-Adresse.
Args:
ip: Client-IP-Adresse
max_requests: Maximale Anzahl Requests pro Zeitfenster
window_minutes: Zeitfenster in Minuten
Returns:
bool: True wenn Rate Limit überschritten
"""
now = datetime.now()
window_start = now - timedelta(minutes=window_minutes)
# Alte Einträge entfernen
while self.rate_limits[ip] and self.rate_limits[ip][0] < window_start:
self.rate_limits[ip].popleft()
# Neue Anfrage hinzufügen
self.rate_limits[ip].append(now)
# Rate Limit prüfen
if len(self.rate_limits[ip]) > max_requests:
return True
return False
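# Example: with the defaults (max_requests=100, window_minutes=15) the 101st
# request from the same IP within any 15-minute window is rejected.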
def record_failed_login(self, ip):
"""
Zeichnet fehlgeschlagene Login-Versuche auf.
Args:
ip: Client-IP-Adresse
"""
self.failed_attempts[ip] += 1
# Nach 5 fehlgeschlagenen Versuchen temporär blockieren
if self.failed_attempts[ip] >= 5:
self.blocked_ips.add(ip)
current_app.logger.warning(f"IP-Adresse blockiert nach zu vielen fehlgeschlagenen Login-Versuchen: {ip}")
# Automatisches Entsperren nach 1 Stunde
def unblock_ip():
time.sleep(3600) # 1 Stunde
if ip in self.blocked_ips:
self.blocked_ips.remove(ip)
self.failed_attempts[ip] = 0
current_app.logger.info(f"IP-Adresse automatisch entsperrt: {ip}")
import threading
threading.Thread(target=unblock_ip, daemon=True).start()
def clear_failed_attempts(self, ip):
"""
Löscht fehlgeschlagene Login-Versuche für eine IP.
Args:
ip: Client-IP-Adresse
"""
if ip in self.failed_attempts:
self.failed_attempts[ip] = 0
def require_api_key(f):
"""
Decorator für API-Endpunkte, die einen API-Key erfordern.
Args:
f: Zu schützende Funktion
Returns:
Geschützte Funktion
"""
@wraps(f)
def decorated(*args, **kwargs):
api_key = request.headers.get('X-API-Key')
expected_key = current_app.config.get('API_KEY')
if not expected_key:
# Kein API-Key konfiguriert, Zugriff erlauben
return f(*args, **kwargs)
if not api_key:
return jsonify({'message': 'API-Key erforderlich'}), 401
# Sichere Vergleichsfunktion verwenden
if not hmac.compare_digest(api_key, expected_key):
current_app.logger.warning(f"Ungültiger API-Key von IP: {request.remote_addr}")
return jsonify({'message': 'Ungültiger API-Key'}), 401
return f(*args, **kwargs)
return decorated
def validate_csrf_token():
"""
Validiert CSRF-Token für POST/PUT/DELETE-Requests.
Returns:
bool: True wenn Token gültig ist
"""
if request.method in ['GET', 'HEAD', 'OPTIONS']:
return True
token = request.headers.get('X-CSRF-Token') or request.form.get('csrf_token')
session_token = request.cookies.get('csrf_token')
if not token or not session_token:
return False
return hmac.compare_digest(token, session_token)
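# Double-submit pattern: the client echoes the csrf_token cookie back in the
# X-CSRF-Token header (or a csrf_token form field) and both values must match.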
def generate_csrf_token():
"""
Generiert ein neues CSRF-Token.
Returns:
str: CSRF-Token
"""
import secrets
return secrets.token_hex(32)
# Global security middleware instance
security_middleware = SecurityMiddleware()
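A minimal sketch of how the API key decorator is meant to be used; the route, key value and test calls are illustrative and assume API_KEY is configured as in config.py:

from flask import Flask, jsonify
from security import require_api_key

app = Flask(__name__)
app.config['API_KEY'] = 'example-key'  # normally supplied via the API_KEY environment variable

@app.route('/api/protected')
@require_api_key
def protected():
    return jsonify({'message': 'ok'})

with app.test_client() as client:
    assert client.get('/api/protected').status_code == 401   # missing X-API-Key header
    assert client.get('/api/protected',
                      headers={'X-API-Key': 'example-key'}).status_code == 200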

View File

@@ -1,86 +1,205 @@
version: '3'
# 🏭 MYP - Manage your Printer (production environment)
# Main configuration for container orchestration
version: '3.8'
services:
# Backend
# === BACKEND SERVICE ===
backend:
build:
context: ./backend
dockerfile: Dockerfile
args:
- BUILDKIT_INLINE_CACHE=1
image: myp/backend:latest
container_name: myp-backend
restart: always
restart: unless-stopped
environment:
- SECRET_KEY=${SECRET_KEY:-7445630171969DFAC92C53CEC92E67A9CB2E00B3CB2F}
# Flask configuration
- FLASK_APP=app.py
- FLASK_ENV=${FLASK_ENV:-production}
- PYTHONUNBUFFERED=1
# Database
- DATABASE_PATH=${DATABASE_PATH:-instance/myp.db}
# Security
- SECRET_KEY=${SECRET_KEY:-7445630171969DFAC92C53CEC92E67A9CB2E00B3CB2F}
- JWT_SECRET=${JWT_SECRET:-secure-jwt-secret}
# Printer configuration
- "PRINTERS=${PRINTERS:-{\"Drucker 1\": {\"ip\": \"192.168.0.100\"}, \"Drucker 2\": {\"ip\": \"192.168.0.101\"}, \"Drucker 3\": {\"ip\": \"192.168.0.102\"}, \"Drucker 4\": {\"ip\": \"192.168.0.103\"}, \"Drucker 5\": {\"ip\": \"192.168.0.104\"}, \"Drucker 6\": {\"ip\": \"192.168.0.106\"}}}"
# TAPO Smart Plug
- TAPO_USERNAME=${TAPO_USERNAME:-till.tomczak@mercedes-benz.com}
- TAPO_PASSWORD=${TAPO_PASSWORD:-744563017196A}
- "PRINTERS=${PRINTERS:-{\"Printer 1\": {\"ip\": \"192.168.0.100\"}, \"Printer 2\": {\"ip\": \"192.168.0.101\"}, \"Printer 3\": {\"ip\": \"192.168.0.102\"}, \"Printer 4\": {\"ip\": \"192.168.0.103\"}, \"Printer 5\": {\"ip\": \"192.168.0.104\"}, \"Printer 6\": {\"ip\": \"192.168.0.106\"}}}"
- FLASK_APP=app.py
- PYTHONUNBUFFERED=1
# Network
- HOST=0.0.0.0
- PORT=5000
# Logging
- LOG_LEVEL=${LOG_LEVEL:-INFO}
volumes:
- ./backend/logs:/app/logs
- ./backend/instance:/app/instance
- backend_instance:/app/instance
- backend_logs:/app/logs
- backend_migrations:/app/migrations
networks:
myp-network:
ipv4_address: 192.168.0.5
expose:
- "5000"
ports:
- "5000:5000"
healthcheck:
test: ["CMD", "wget", "--spider", "http://localhost:5000/health"]
test: ["CMD", "curl", "-f", "http://localhost:5000/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
# Next.js Frontend
labels:
- "traefik.enable=true"
- "traefik.http.routers.backend.rule=PathPrefix(`/api`)"
- "traefik.http.services.backend.loadbalancer.server.port=5000"
# === FRONTEND SERVICE ===
frontend:
build:
context: ./frontend
container_name: myp-rp
dockerfile: Dockerfile
args:
- BUILDKIT_INLINE_CACHE=1
- NODE_ENV=${NODE_ENV:-production}
image: myp/frontend:latest
container_name: myp-frontend
restart: unless-stopped
environment:
- NODE_ENV=production
- NEXT_PUBLIC_API_URL=/api
- NODE_ENV=${NODE_ENV:-production}
- NEXT_TELEMETRY_DISABLED=1
- NEXT_PUBLIC_API_URL=${API_BASE_URL:-/api}
- PORT=3000
volumes:
- frontend_data:/app/.next
- frontend_db:/app/db
networks:
- myp-network
expose:
- "3000"
healthcheck:
test: ["CMD", "wget", "--spider", "http://localhost:3000/api/health"]
test: ["CMD", "curl", "-f", "http://localhost:3000/api/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
depends_on:
- backend
# Caddy Proxy
depends_on:
backend:
condition: service_healthy
labels:
- "traefik.enable=true"
- "traefik.http.routers.frontend.rule=PathPrefix(`/`)"
- "traefik.http.services.frontend.loadbalancer.server.port=3000"
# === REVERSE PROXY SERVICE ===
caddy:
image: caddy:2.7-alpine
container_name: myp-caddy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- "2019:2019" # Admin API
volumes:
- ./frontend/docker/caddy/Caddyfile:/etc/caddy/Caddyfile
- ./proxy/Caddyfile:/etc/caddy/Caddyfile:ro
- caddy_data:/data
- caddy_config:/config
- caddy_logs:/var/log/caddy
networks:
- myp-network
extra_hosts:
- "host.docker.internal:host-gateway"
environment:
- CADDY_HOST=53.37.211.254
- CADDY_DOMAIN=m040tbaraspi001.de040.corpintra.net
- CADDY_HOST=${CADDY_HOST:-53.37.211.254}
- CADDY_DOMAIN=${CADDY_DOMAIN:-m040tbaraspi001.de040.corpintra.net}
cap_add:
- NET_ADMIN
depends_on:
- frontend
- backend
healthcheck:
test: ["CMD", "caddy", "validate", "--config", "/etc/caddy/Caddyfile"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s
labels:
- "traefik.enable=false"
# === PERSISTENT VOLUMES ===
volumes:
# Backend volumes
backend_instance:
driver: local
driver_opts:
type: none
o: bind
device: ./backend/instance
backend_logs:
driver: local
driver_opts:
type: none
o: bind
device: ./logs
backend_migrations:
driver: local
driver_opts:
type: none
o: bind
device: ./backend/migrations
# Frontend volumes
frontend_data:
driver: local
frontend_db:
driver: local
driver_opts:
type: none
o: bind
device: ./frontend/db
# Proxy volumes
caddy_data:
driver: local
caddy_config:
driver: local
caddy_logs:
driver: local
# === NETWORK CONFIGURATION ===
networks:
myp-network:
driver: bridge
@@ -89,7 +208,29 @@ networks:
config:
- subnet: 192.168.0.0/24
gateway: 192.168.0.1
driver_opts:
com.docker.network.enable_ipv6: "false"
com.docker.network.bridge.enable_ip_masquerade: "true"
com.docker.network.bridge.enable_icc: "true"
com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
labels:
- "description=MYP Anwendungs-Netzwerk"
- "project=myp"
- "environment=${NODE_ENV:-production}"
volumes:
caddy_data:
caddy_config:
# === CONFIGURATION EXTENSIONS ===
x-logging: &default-logging
driver: "json-file"
options:
max-size: "10m"
max-file: "3"
labels: "service,environment"
x-restart-policy: &default-restart-policy
unless-stopped
x-healthcheck-defaults: &default-healthcheck
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
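# The extension fields above can be shared across services via YAML anchors,
# for example (illustrative):
#   logging: *default-logging
#   restart: *default-restart-policy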