📚 Improved backend structure & documentation, added new features, and refactored scripts. 🚀🔧📝💻🖥️
backend/app.py
@@ -2180,6 +2180,67 @@ def test_all_printers_tapo_connection():
# ===== ADMIN FORM ENDPOINTS =====

@app.route("/admin/users/add", methods=["GET"])
@login_required
@admin_required
def admin_add_user_page():
    """Displays the page for adding new users."""
    try:
        app_logger.info(f"Admin-Benutzer-Hinzufügen-Seite aufgerufen von User {current_user.id}")
        return render_template("admin_add_user.html")
    except Exception as e:
        app_logger.error(f"Fehler beim Laden der Benutzer-Hinzufügen-Seite: {str(e)}")
        flash("Fehler beim Laden der Benutzer-Hinzufügen-Seite.", "error")
        return redirect(url_for("admin_page", tab="users"))

@app.route("/admin/printers/add", methods=["GET"])
@login_required
@admin_required
def admin_add_printer_page():
    """Displays the page for adding new printers."""
    try:
        app_logger.info(f"Admin-Drucker-Hinzufügen-Seite aufgerufen von User {current_user.id}")
        return render_template("admin_add_printer.html")
    except Exception as e:
        app_logger.error(f"Fehler beim Laden der Drucker-Hinzufügen-Seite: {str(e)}")
        flash("Fehler beim Laden der Drucker-Hinzufügen-Seite.", "error")
        return redirect(url_for("admin_page", tab="printers"))

@app.route("/admin/printers/<int:printer_id>/edit", methods=["GET"])
@login_required
@admin_required
def admin_edit_printer_page(printer_id):
    """Displays the printer edit page."""
    try:
        db_session = get_db_session()
        printer = db_session.get(Printer, printer_id)

        if not printer:
            db_session.close()
            flash("Drucker nicht gefunden.", "error")
            return redirect(url_for("admin_page", tab="printers"))

        printer_data = {
            "id": printer.id,
            "name": printer.name,
            "model": printer.model or 'Unbekanntes Modell',
            "location": printer.location or 'Unbekannter Standort',
            "mac_address": printer.mac_address,
            "plug_ip": printer.plug_ip,
            "status": printer.status or "offline",
            "active": printer.active if hasattr(printer, 'active') else True,
            "created_at": printer.created_at.isoformat() if printer.created_at else datetime.now().isoformat()
        }

        db_session.close()
        app_logger.info(f"Admin-Drucker-Bearbeiten-Seite aufgerufen für Drucker {printer_id} von User {current_user.id}")
        return render_template("admin_edit_printer.html", printer=printer_data)

    except Exception as e:
        app_logger.error(f"Fehler beim Laden der Drucker-Bearbeitungsseite: {str(e)}")
        flash("Fehler beim Laden der Drucker-Daten.", "error")
        return redirect(url_for("admin_page", tab="printers"))

@app.route("/admin/users/create", methods=["POST"])
@login_required
def admin_create_user_form():
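The admin_required decorator used by these views is not part of this diff. A minimal sketch of how such a guard is commonly built on top of Flask-Login (assuming current_user carries an is_admin flag, as the /api/stats endpoint below suggests) might look like this:

from functools import wraps

from flask import abort
from flask_login import current_user

def admin_required(f):
    """Abort with 403 before the view runs unless the user is an admin."""
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # is_admin is an app-specific flag; this sketch assumes it exists
        if not current_user.is_authenticated or not getattr(current_user, "is_admin", False):
            abort(403)
        return f(*args, **kwargs)
    return decorated_function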
@@ -5304,6 +5365,426 @@ def api_logs():
            'error': f'Fehler beim Abrufen der Log-Daten: {str(e)}'
        }), 500

# ===== MISSING ADMIN API ENDPOINTS =====

@app.route("/api/admin/database/status", methods=['GET'])
|
||||
@login_required
|
||||
@admin_required
|
||||
def api_admin_database_status():
|
||||
"""
|
||||
API-Endpunkt für Datenbank-Status-Informationen
|
||||
|
||||
Liefert detaillierte Informationen über den Zustand der SQLite-Datenbank
|
||||
"""
|
||||
try:
|
||||
from models import get_db_session, create_optimized_engine
|
||||
from sqlalchemy import text
|
||||
import os
|
||||
|
||||
db_session = get_db_session()
|
||||
engine = create_optimized_engine()
|
||||
|
||||
# Basis-Datenbankpfad
|
||||
db_path = os.path.join(os.path.dirname(__file__), 'database', 'printer_system.db')
|
||||
|
||||
# Datenbank-Datei-Informationen
|
||||
db_info = {
|
||||
'file_path': db_path,
|
||||
'file_exists': os.path.exists(db_path),
|
||||
'file_size_mb': 0,
|
||||
'last_modified': None
|
||||
}
|
||||
|
||||
if os.path.exists(db_path):
|
||||
stat_info = os.stat(db_path)
|
||||
db_info['file_size_mb'] = round(stat_info.st_size / (1024 * 1024), 2)
|
||||
db_info['last_modified'] = datetime.fromtimestamp(stat_info.st_mtime).isoformat()
|
||||
|
||||
# SQLite-spezifische Informationen
|
||||
with engine.connect() as conn:
|
||||
# Datenbankschema-Version und Pragma-Informationen
|
||||
pragma_info = {}
|
||||
|
||||
# Wichtige PRAGMA-Werte abrufen
|
||||
pragma_queries = {
|
||||
'user_version': 'PRAGMA user_version',
|
||||
'schema_version': 'PRAGMA schema_version',
|
||||
'journal_mode': 'PRAGMA journal_mode',
|
||||
'synchronous': 'PRAGMA synchronous',
|
||||
'cache_size': 'PRAGMA cache_size',
|
||||
'page_size': 'PRAGMA page_size',
|
||||
'page_count': 'PRAGMA page_count',
|
||||
'freelist_count': 'PRAGMA freelist_count',
|
||||
'integrity_check': 'PRAGMA quick_check'
|
||||
}
|
||||
|
||||
for key, query in pragma_queries.items():
|
||||
try:
|
||||
result = conn.execute(text(query)).fetchone()
|
||||
pragma_info[key] = result[0] if result else None
|
||||
except Exception as e:
|
||||
pragma_info[key] = f"Error: {str(e)}"
|
||||
|
||||
# Tabellen-Informationen
|
||||
tables_result = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table'")).fetchall()
|
||||
tables = [row[0] for row in tables_result]
|
||||
|
||||
# Tabellen-Statistiken
|
||||
table_stats = {}
|
||||
for table in tables:
|
||||
try:
|
||||
count_result = conn.execute(text(f"SELECT COUNT(*) FROM {table}")).fetchone()
|
||||
table_stats[table] = count_result[0] if count_result else 0
|
||||
except Exception as e:
|
||||
table_stats[table] = f"Error: {str(e)}"
|
||||
|
||||
# Connection-Pool-Status
|
||||
pool_status = {
|
||||
'pool_size': engine.pool.size(),
|
||||
'checked_in': engine.pool.checkedin(),
|
||||
'checked_out': engine.pool.checkedout(),
|
||||
'overflow': engine.pool.overflow(),
|
||||
'invalid': engine.pool.invalid()
|
||||
}
|
||||
|
||||
db_session.close()
|
||||
|
||||
# Status bewerten
|
||||
status = 'healthy'
|
||||
issues = []
|
||||
|
||||
if pragma_info.get('integrity_check') != 'ok':
|
||||
status = 'warning'
|
||||
issues.append('Datenbank-Integritätsprüfung fehlgeschlagen')
|
||||
|
||||
if db_info['file_size_mb'] > 100: # Warnung bei >100MB
|
||||
issues.append(f"Große Datenbankdatei: {db_info['file_size_mb']}MB")
|
||||
|
||||
if pragma_info.get('freelist_count', 0) > 1000:
|
||||
issues.append('Hohe Anzahl freier Seiten - VACUUM empfohlen')
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'status': status,
|
||||
'issues': issues,
|
||||
'database_info': db_info,
|
||||
'pragma_info': pragma_info,
|
||||
'tables': tables,
|
||||
'table_stats': table_stats,
|
||||
'connection_pool': pool_status,
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
app_logger.error(f"Fehler beim Abrufen des Datenbank-Status: {str(e)}")
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Fehler beim Abrufen des Datenbank-Status: {str(e)}',
|
||||
'status': 'error'
|
||||
}), 500
|
||||
|
||||
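For reference, the same PRAGMA values can be checked outside the app with the sqlite3 standard library; the path below assumes the repository layout implied by db_path above:

import sqlite3

db_path = "backend/database/printer_system.db"  # assumed relative checkout path
con = sqlite3.connect(db_path)
try:
    for pragma in ("journal_mode", "page_size", "page_count", "freelist_count", "quick_check"):
        row = con.execute(f"PRAGMA {pragma}").fetchone()
        print(pragma, "=", row[0] if row else None)
finally:
    con.close()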
@app.route("/api/admin/system/status", methods=['GET'])
|
||||
@login_required
|
||||
@admin_required
|
||||
def api_admin_system_status():
|
||||
"""
|
||||
API-Endpunkt für System-Status-Informationen
|
||||
|
||||
Liefert detaillierte Informationen über den Zustand des Systems
|
||||
"""
|
||||
try:
|
||||
import psutil
|
||||
import platform
|
||||
import subprocess
|
||||
|
||||
# System-Informationen
|
||||
system_info = {
|
||||
'platform': platform.system(),
|
||||
'platform_release': platform.release(),
|
||||
'platform_version': platform.version(),
|
||||
'architecture': platform.machine(),
|
||||
'processor': platform.processor(),
|
||||
'python_version': platform.python_version(),
|
||||
'hostname': platform.node()
|
||||
}
|
||||
|
||||
# CPU-Informationen
|
||||
cpu_info = {
|
||||
'physical_cores': psutil.cpu_count(logical=False),
|
||||
'total_cores': psutil.cpu_count(logical=True),
|
||||
'max_frequency': psutil.cpu_freq().max if psutil.cpu_freq() else None,
|
||||
'current_frequency': psutil.cpu_freq().current if psutil.cpu_freq() else None,
|
||||
'cpu_usage_percent': psutil.cpu_percent(interval=1),
|
||||
'load_average': psutil.getloadavg() if hasattr(psutil, 'getloadavg') else None
|
||||
}
|
||||
|
||||
# Memory-Informationen
|
||||
memory = psutil.virtual_memory()
|
||||
memory_info = {
|
||||
'total_gb': round(memory.total / (1024**3), 2),
|
||||
'available_gb': round(memory.available / (1024**3), 2),
|
||||
'used_gb': round(memory.used / (1024**3), 2),
|
||||
'percentage': memory.percent,
|
||||
'free_gb': round(memory.free / (1024**3), 2)
|
||||
}
|
||||
|
||||
# Disk-Informationen
|
||||
disk = psutil.disk_usage('/' if os.name != 'nt' else 'C:\\')
|
||||
disk_info = {
|
||||
'total_gb': round(disk.total / (1024**3), 2),
|
||||
'used_gb': round(disk.used / (1024**3), 2),
|
||||
'free_gb': round(disk.free / (1024**3), 2),
|
||||
'percentage': round((disk.used / disk.total) * 100, 1)
|
||||
}
|
||||
|
||||
# Netzwerk-Informationen
|
||||
try:
|
||||
network = psutil.net_io_counters()
|
||||
network_info = {
|
||||
'bytes_sent_mb': round(network.bytes_sent / (1024**2), 2),
|
||||
'bytes_recv_mb': round(network.bytes_recv / (1024**2), 2),
|
||||
'packets_sent': network.packets_sent,
|
||||
'packets_recv': network.packets_recv
|
||||
}
|
||||
except Exception:
|
||||
network_info = {'error': 'Netzwerk-Informationen nicht verfügbar'}
|
||||
|
||||
# Prozess-Informationen
|
||||
try:
|
||||
current_process = psutil.Process()
|
||||
process_info = {
|
||||
'pid': current_process.pid,
|
||||
'memory_mb': round(current_process.memory_info().rss / (1024**2), 2),
|
||||
'cpu_percent': current_process.cpu_percent(),
|
||||
'num_threads': current_process.num_threads(),
|
||||
'create_time': datetime.fromtimestamp(current_process.create_time()).isoformat(),
|
||||
'status': current_process.status()
|
||||
}
|
||||
except Exception as e:
|
||||
process_info = {'error': f'Prozess-Informationen nicht verfügbar: {str(e)}'}
|
||||
|
||||
# Uptime
|
||||
try:
|
||||
boot_time = psutil.boot_time()
|
||||
uptime_seconds = int(time.time() - boot_time)
|
||||
uptime_info = {
|
||||
'boot_time': datetime.fromtimestamp(boot_time).isoformat(),
|
||||
'uptime_seconds': uptime_seconds,
|
||||
'uptime_formatted': str(timedelta(seconds=uptime_seconds))
|
||||
}
|
||||
except Exception:
|
||||
uptime_info = {'error': 'Uptime-Informationen nicht verfügbar'}
|
||||
|
||||
# Service-Status (Windows/Linux kompatibel)
|
||||
services_status = {}
|
||||
try:
|
||||
if os.name == 'nt': # Windows
|
||||
# Windows-Services prüfen
|
||||
services_to_check = ['Schedule', 'Themes', 'Spooler']
|
||||
for service in services_to_check:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['sc', 'query', service],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5
|
||||
)
|
||||
services_status[service] = 'running' if 'RUNNING' in result.stdout else 'stopped'
|
||||
except Exception:
|
||||
services_status[service] = 'unknown'
|
||||
else: # Linux
|
||||
# Linux-Services prüfen
|
||||
services_to_check = ['systemd', 'cron', 'cups']
|
||||
for service in services_to_check:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
['systemctl', 'is-active', service],
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5
|
||||
)
|
||||
services_status[service] = result.stdout.strip()
|
||||
except Exception:
|
||||
services_status[service] = 'unknown'
|
||||
except Exception as e:
|
||||
services_status = {'error': f'Service-Status nicht verfügbar: {str(e)}'}
|
||||
|
||||
# System-Gesundheit bewerten
|
||||
health_status = 'healthy'
|
||||
issues = []
|
||||
|
||||
if cpu_info['cpu_usage_percent'] > 80:
|
||||
health_status = 'warning'
|
||||
issues.append(f'Hohe CPU-Auslastung: {cpu_info["cpu_usage_percent"]}%')
|
||||
|
||||
if memory_info['percentage'] > 85:
|
||||
health_status = 'warning'
|
||||
issues.append(f'Hohe Memory-Auslastung: {memory_info["percentage"]}%')
|
||||
|
||||
if disk_info['percentage'] > 90:
|
||||
health_status = 'critical'
|
||||
issues.append(f'Kritisch wenig Speicherplatz: {disk_info["percentage"]}%')
|
||||
|
||||
if process_info.get('memory_mb', 0) > 500: # 500MB+ für Flask-App
|
||||
issues.append(f'Hoher Memory-Verbrauch der Anwendung: {process_info.get("memory_mb", 0)}MB')
|
||||
|
||||
return jsonify({
|
||||
'success': True,
|
||||
'health_status': health_status,
|
||||
'issues': issues,
|
||||
'system_info': system_info,
|
||||
'cpu_info': cpu_info,
|
||||
'memory_info': memory_info,
|
||||
'disk_info': disk_info,
|
||||
'network_info': network_info,
|
||||
'process_info': process_info,
|
||||
'uptime_info': uptime_info,
|
||||
'services_status': services_status,
|
||||
'timestamp': datetime.now().isoformat()
|
||||
})
|
||||
|
||||
except Exception as e:
|
||||
app_logger.error(f"Fehler beim Abrufen des System-Status: {str(e)}")
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Fehler beim Abrufen des System-Status: {str(e)}',
|
||||
'health_status': 'error'
|
||||
}), 500
|
||||
|
||||
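One caveat in the endpoint above: psutil.cpu_percent(interval=1) blocks the request handler for a full second. If that latency matters, a non-blocking sketch samples usage since the previous call (psutil returns 0.0 on the very first call, so it should be primed once at startup):

import psutil

# Prime psutil's internal sample once at startup; the first call returns 0.0.
psutil.cpu_percent(interval=None)

def cpu_usage_nonblocking() -> float:
    """CPU usage since the previous call, without sleeping in the handler."""
    return psutil.cpu_percent(interval=None)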
@app.route("/api/stats", methods=['GET'])
|
||||
@login_required
|
||||
def api_stats():
|
||||
"""
|
||||
API-Endpunkt für allgemeine Statistiken
|
||||
|
||||
Liefert zusammengefasste Statistiken für normale Benutzer und Admins
|
||||
"""
|
||||
try:
|
||||
db_session = get_db_session()
|
||||
|
||||
# Basis-Statistiken die alle Benutzer sehen können
|
||||
user_stats = {}
|
||||
|
||||
if current_user.is_authenticated:
|
||||
# Benutzer-spezifische Statistiken
|
||||
user_jobs = db_session.query(Job).filter(Job.user_id == current_user.id)
|
||||
|
||||
user_stats = {
|
||||
'my_jobs': {
|
||||
'total': user_jobs.count(),
|
||||
'completed': user_jobs.filter(Job.status == 'completed').count(),
|
||||
'failed': user_jobs.filter(Job.status == 'failed').count(),
|
||||
'running': user_jobs.filter(Job.status == 'running').count(),
|
||||
'queued': user_jobs.filter(Job.status == 'queued').count()
|
||||
},
|
||||
'my_activity': {
|
||||
'jobs_today': user_jobs.filter(
|
||||
Job.created_at >= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
).count() if hasattr(Job, 'created_at') else 0,
|
||||
'jobs_this_week': user_jobs.filter(
|
||||
Job.created_at >= datetime.now() - timedelta(days=7)
|
||||
).count() if hasattr(Job, 'created_at') else 0
|
||||
}
|
||||
}
|
||||
|
||||
# System-weite Statistiken (für alle Benutzer)
|
||||
general_stats = {
|
||||
'system': {
|
||||
'total_printers': db_session.query(Printer).count(),
|
||||
'online_printers': db_session.query(Printer).filter(Printer.status == 'online').count(),
|
||||
'total_users': db_session.query(User).count(),
|
||||
'jobs_today': db_session.query(Job).filter(
|
||||
Job.created_at >= datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
).count() if hasattr(Job, 'created_at') else 0
|
||||
}
|
||||
}
|
||||
|
||||
# Admin-spezifische erweiterte Statistiken
|
||||
admin_stats = {}
|
||||
if current_user.is_admin:
|
||||
try:
|
||||
# Erweiterte Statistiken für Admins
|
||||
total_jobs = db_session.query(Job).count()
|
||||
completed_jobs = db_session.query(Job).filter(Job.status == 'completed').count()
|
||||
failed_jobs = db_session.query(Job).filter(Job.status == 'failed').count()
|
||||
|
||||
# Erfolgsrate berechnen
|
||||
success_rate = 0
|
||||
if completed_jobs + failed_jobs > 0:
|
||||
success_rate = round((completed_jobs / (completed_jobs + failed_jobs)) * 100, 1)
|
||||
|
||||
admin_stats = {
|
||||
'detailed_jobs': {
|
||||
'total': total_jobs,
|
||||
'completed': completed_jobs,
|
||||
'failed': failed_jobs,
|
||||
'success_rate': success_rate,
|
||||
'running': db_session.query(Job).filter(Job.status == 'running').count(),
|
||||
'queued': db_session.query(Job).filter(Job.status == 'queued').count()
|
||||
},
|
||||
'printers': {
|
||||
'total': db_session.query(Printer).count(),
|
||||
'online': db_session.query(Printer).filter(Printer.status == 'online').count(),
|
||||
'offline': db_session.query(Printer).filter(Printer.status == 'offline').count(),
|
||||
'maintenance': db_session.query(Printer).filter(Printer.status == 'maintenance').count()
|
||||
},
|
||||
'users': {
|
||||
'total': db_session.query(User).count(),
|
||||
'active_today': db_session.query(User).filter(
|
||||
User.last_login >= datetime.now() - timedelta(days=1)
|
||||
).count() if hasattr(User, 'last_login') else 0,
|
||||
'admins': db_session.query(User).filter(User.role == 'admin').count()
|
||||
}
|
||||
}
|
||||
|
||||
# Zeitbasierte Trends (letzte 7 Tage)
|
||||
daily_stats = []
|
||||
for i in range(7):
|
||||
day = datetime.now() - timedelta(days=i)
|
||||
day_start = day.replace(hour=0, minute=0, second=0, microsecond=0)
|
||||
day_end = day_start + timedelta(days=1)
|
||||
|
||||
jobs_count = db_session.query(Job).filter(
|
||||
Job.created_at >= day_start,
|
||||
Job.created_at < day_end
|
||||
).count() if hasattr(Job, 'created_at') else 0
|
||||
|
||||
daily_stats.append({
|
||||
'date': day.strftime('%Y-%m-%d'),
|
||||
'jobs': jobs_count
|
||||
})
|
||||
|
||||
admin_stats['trends'] = {
|
||||
'daily_jobs': list(reversed(daily_stats)) # Älteste zuerst
|
||||
}
|
||||
|
||||
except Exception as admin_error:
|
||||
app_logger.warning(f"Fehler bei Admin-Statistiken: {str(admin_error)}")
|
||||
admin_stats = {'error': 'Admin-Statistiken nicht verfügbar'}
|
||||
|
||||
db_session.close()
|
||||
|
||||
# Response zusammenstellen
|
||||
response_data = {
|
||||
'success': True,
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'user_stats': user_stats,
|
||||
'general_stats': general_stats
|
||||
}
|
||||
|
||||
# Admin-Statistiken nur für Admins hinzufügen
|
||||
if current_user.is_admin:
|
||||
response_data['admin_stats'] = admin_stats
|
||||
|
||||
return jsonify(response_data)
|
||||
|
||||
except Exception as e:
|
||||
app_logger.error(f"Fehler beim Abrufen der Statistiken: {str(e)}")
|
||||
return jsonify({
|
||||
'success': False,
|
||||
'error': f'Fehler beim Abrufen der Statistiken: {str(e)}'
|
||||
}), 500
|
||||
|
||||
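A quick way to exercise /api/stats during development is a requests session that logs in first; host, port, login route, and credentials here are placeholders, since none of them appear in this diff:

import requests

BASE = "http://localhost:5000"  # assumed development address

with requests.Session() as s:
    # Hypothetical login endpoint and credentials; adjust to the real form/route.
    s.post(f"{BASE}/auth/login", data={"email": "admin@example.com", "password": "secret"})
    stats = s.get(f"{BASE}/api/stats").json()
    print(stats["general_stats"]["system"])
    if "admin_stats" in stats:
        print(stats["admin_stats"]["detailed_jobs"])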
# ===== LIVE ADMIN STATISTICS API =====

@app.route("/api/admin/stats/live", methods=['GET'])
@@ -5395,7 +5876,7 @@ def api_admin_stats_live():
 
         # Queue status (if the queue manager is running)
         try:
-            from queue_manager import get_queue_status
+            from utils.queue_manager import get_queue_status
             queue_status = get_queue_status()
             stats['queue'] = queue_status
         except Exception as queue_error:
@@ -5411,7 +5892,7 @@ def api_admin_stats_live():
         stats['recent_activity'] = [
             {
                 'id': job.id,
-                'filename': job.filename,
+                'filename': getattr(job, 'filename', 'Unbekannt'),
                 'status': job.status,
                 'user': job.user.username if job.user else 'Unbekannt',
                 'created_at': job.created_at.isoformat() if hasattr(job, 'created_at') and job.created_at else None
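The switch from job.filename to getattr(job, 'filename', 'Unbekannt') makes the serializer tolerant of Job objects that lack the attribute entirely, since getattr with a default never raises AttributeError:

class LegacyJob:
    status = "queued"  # deliberately has no filename attribute

job = LegacyJob()
print(job.status)                             # -> "queued"
print(getattr(job, "filename", "Unbekannt"))  # -> "Unbekannt" instead of AttributeError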
@@ -5473,43 +5954,51 @@ if __name__ == "__main__":
         except Exception as e:
             app_logger.error(f"Fehler beim Stoppen des Schedulers: {str(e)}")
 
-        # ===== CLOSE DATABASE CONNECTIONS PROPERLY =====
-        app_logger.info("💾 Führe Datenbank-Cleanup durch...")
+        # ===== ROBUST DATABASE CLEANUP WITH NEW LOGIC =====
+        app_logger.info("💾 Führe robustes Datenbank-Cleanup durch...")
         try:
-            from models import get_db_session, create_optimized_engine
-            from sqlalchemy import text
+            # Import and use the new DatabaseCleanupManager
+            from utils.database_cleanup import safe_database_cleanup
 
-            # Run a WAL checkpoint to clean up the .shm and .wal files
-            engine = create_optimized_engine()
+            # Perform a comprehensive, safe cleanup
+            cleanup_result = safe_database_cleanup(force_mode_switch=True)
 
-            with engine.connect() as conn:
-                # Full WAL checkpoint (TRUNCATE mode)
-                app_logger.info("📝 Führe WAL-Checkpoint durch...")
-                result = conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)")).fetchone()
+            if cleanup_result["success"]:
+                app_logger.info(f"✅ Datenbank-Cleanup erfolgreich: {', '.join(cleanup_result['operations'])}")
+                if cleanup_result.get("wal_files_removed", False):
+                    app_logger.info("✅ WAL- und SHM-Dateien erfolgreich entfernt")
+            else:
+                app_logger.warning(f"⚠️ Datenbank-Cleanup mit Problemen: {', '.join(cleanup_result['errors'])}")
+                # Continue anyway - at least a WAL checkpoint was attempted
 
-                if result:
-                    app_logger.info(f"WAL-Checkpoint abgeschlossen: {result[1]} Seiten übertragen, {result[2]} Seiten zurückgesetzt")
+        except ImportError:
+            # Fall back to the old method if the cleanup manager is unavailable
+            app_logger.warning("Fallback: Verwende Legacy-Datenbank-Cleanup...")
+            try:
+                from models import create_optimized_engine
+                from sqlalchemy import text
 
-                # Commit all pending transactions
-                conn.commit()
+                engine = create_optimized_engine()
 
-                # Switch the journal mode to DELETE (removes the .wal/.shm files)
-                app_logger.info("📁 Schalte Journal-Mode um...")
-                conn.execute(text("PRAGMA journal_mode=DELETE"))
+                with engine.connect() as conn:
+                    # WAL checkpoint only - no risky mode switch
+                    app_logger.info("📝 Führe WAL-Checkpoint durch...")
+                    result = conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)")).fetchone()
 
+                    if result:
+                        app_logger.info(f"WAL-Checkpoint abgeschlossen: {result[1]} Seiten übertragen, {result[2]} Seiten zurückgesetzt")
 
+                    conn.commit()
 
-                # Optimize and vacuum for a clean state
-                conn.execute(text("PRAGMA optimize"))
-                conn.execute(text("VACUUM"))
+                # Close the engine connection pool
+                engine.dispose()
+                app_logger.info("✅ Legacy-Datenbank-Cleanup abgeschlossen")
 
-                conn.commit()
-
-            # Close the engine connection pool
-            engine.dispose()
-
-            app_logger.info("✅ Datenbank-Cleanup abgeschlossen - WAL-Dateien sollten verschwunden sein")
-
-        except Exception as db_error:
-            app_logger.error(f"❌ Fehler beim Datenbank-Cleanup: {str(db_error)}")
+            except Exception as db_error:
+                app_logger.error(f"❌ Fehler beim Legacy-Datenbank-Cleanup: {str(db_error)}")
+
+        except Exception as cleanup_error:
+            app_logger.error(f"❌ Fehler beim robusten Datenbank-Cleanup: {str(cleanup_error)}")
 
         app_logger.info("✅ Shutdown abgeschlossen")
         sys.exit(0)
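utils/database_cleanup.py itself is not shown in this commit. Judging only from the call sites, safe_database_cleanup(force_mode_switch=True) returns a dict with success, operations, errors, and wal_files_removed keys; a hypothetical sketch of that contract, not the actual module, could be:

# Hypothetical sketch of the contract used above; the real
# utils/database_cleanup.py may be implemented differently.
from models import create_optimized_engine
from sqlalchemy import text

def safe_database_cleanup(force_mode_switch: bool = False) -> dict:
    """Checkpoint the WAL and optionally switch the journal mode to DELETE."""
    result = {"success": True, "operations": [], "errors": [], "wal_files_removed": False}
    engine = create_optimized_engine()
    try:
        with engine.connect() as conn:
            conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)"))
            result["operations"].append("wal_checkpoint")
            if force_mode_switch:
                # DELETE mode removes the -wal/-shm sidecar files on success
                conn.execute(text("PRAGMA journal_mode=DELETE"))
                result["operations"].append("journal_mode=DELETE")
                result["wal_files_removed"] = True
    except Exception as exc:
        result["success"] = False
        result["errors"].append(str(exc))
    finally:
        engine.dispose()
    return result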
@@ -5570,32 +6059,48 @@ if __name__ == "__main__":
 
     atexit.register(cleanup_queue_manager)
 
-    # ===== DATABASE CLEANUP AT PROGRAM EXIT =====
+    # ===== ROBUST DATABASE CLEANUP AT PROGRAM EXIT =====
     def cleanup_database():
-        """Runs database cleanup on normal program exit."""
+        """Runs robust database cleanup on normal program exit."""
         try:
-            app_logger.info("💾 Führe finales Datenbank-Cleanup durch...")
-            from models import create_optimized_engine
-            from sqlalchemy import text
+            app_logger.info("💾 Führe finales robustes Datenbank-Cleanup durch...")
 
-            engine = create_optimized_engine()
-
-            with engine.connect() as conn:
-                # WAL checkpoint for a clean shutdown
-                result = conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)")).fetchone()
-                if result and result[1] > 0:
-                    app_logger.info(f"Final WAL-Checkpoint: {result[1]} Seiten übertragen")
+            # Use the new DatabaseCleanupManager
+            try:
+                from utils.database_cleanup import safe_database_cleanup
 
-                # Switch the journal mode to remove the .wal/.shm files
-                conn.execute(text("PRAGMA journal_mode=DELETE"))
-                conn.commit()
-
-            # Close the connection pool properly
-            engine.dispose()
-            app_logger.info("✅ Finales Datenbank-Cleanup abgeschlossen")
+                # Perform a comprehensive, safe cleanup
+                cleanup_result = safe_database_cleanup(force_mode_switch=True)
+
+                if cleanup_result["success"]:
+                    app_logger.info(f"✅ Finales Datenbank-Cleanup erfolgreich: {', '.join(cleanup_result['operations'])}")
+                    if cleanup_result.get("wal_files_removed", False):
+                        app_logger.info("✅ WAL- und SHM-Dateien erfolgreich entfernt")
+                else:
+                    app_logger.warning(f"⚠️ Finales Datenbank-Cleanup mit Problemen: {', '.join(cleanup_result['errors'])}")
+
+            except ImportError:
+                # Fall back to the old method if the cleanup manager is unavailable
+                app_logger.warning("Fallback: Verwende Legacy-finales-Datenbank-Cleanup...")
+                from models import create_optimized_engine
+                from sqlalchemy import text
+
+                engine = create_optimized_engine()
+
+                with engine.connect() as conn:
+                    # WAL checkpoint only - no risky mode switch
+                    result = conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)")).fetchone()
+                    if result and result[1] > 0:
+                        app_logger.info(f"Final WAL-Checkpoint: {result[1]} Seiten übertragen")
 
+                    conn.commit()
+
+                # Close the connection pool properly
+                engine.dispose()
+                app_logger.info("✅ Legacy-finales Datenbank-Cleanup abgeschlossen")
 
         except Exception as e:
-            app_logger.error(f"❌ Fehler beim finalen Datenbank-Cleanup: {str(e)}")
+            app_logger.error(f"❌ Fehler beim finalen robusten Datenbank-Cleanup: {str(e)}")
 
     atexit.register(cleanup_database)
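Worth noting about the two hooks registered here: atexit runs handlers in last-in, first-out order, so cleanup_database (registered after cleanup_queue_manager) executes first on a normal exit. A minimal demonstration:

import atexit

atexit.register(lambda: print("queue manager cleanup"))  # registered first, runs last
atexit.register(lambda: print("database cleanup"))       # registered last, runs first

# On interpreter exit this prints:
#   database cleanup
#   queue manager cleanup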