🎉 Refactored backend structure: Removed unused files including app_cleaned.py, admin_api.py, admin.py, user.py, and others. Updated settings.local.json to include additional Bash commands. Enhanced admin templates for better navigation and functionality. Improved logging and error handling across various modules.
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
backend/utils/__pycache__/performance_tracker.cpython-311.pyc
Normal file
BIN
backend/utils/__pycache__/performance_tracker.cpython-311.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
backend/utils/__pycache__/settings.cpython-311.pyc
Normal file
BIN
backend/utils/__pycache__/settings.cpython-311.pyc
Normal file
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
backend/utils/__pycache__/tapo_controller.cpython-311.pyc
Normal file
BIN
backend/utils/__pycache__/tapo_controller.cpython-311.pyc
Normal file
Binary file not shown.
@@ -1,25 +1,177 @@
|
||||
"""
|
||||
Backup Manager - Datensicherungsverwaltung
|
||||
Minimal implementation to resolve import dependencies.
|
||||
Backup Manager - Wrapper für DatabaseBackupManager
|
||||
Kompatibilitäts-Wrapper für die vollständige Backup-Implementierung in database_utils.py
|
||||
"""
|
||||
|
||||
from utils.logging_config import get_logger
|
||||
from utils.database_utils import DatabaseBackupManager
|
||||
|
||||
backup_logger = get_logger("backup")
|
||||
|
||||
class BackupManager:
|
||||
"""Minimale BackupManager-Implementierung"""
|
||||
"""
|
||||
Kompatibilitäts-Wrapper für DatabaseBackupManager.
|
||||
Stellt die ursprüngliche API bereit, nutzt aber die vollständige Implementierung.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.enabled = False
|
||||
backup_logger.info("BackupManager initialisiert (minimal implementation)")
|
||||
"""Initialisiert den BackupManager mit vollständiger Funktionalität."""
|
||||
try:
|
||||
self._db_backup_manager = DatabaseBackupManager()
|
||||
self.enabled = True
|
||||
backup_logger.info("BackupManager erfolgreich initialisiert mit vollständiger Funktionalität")
|
||||
except Exception as e:
|
||||
backup_logger.error(f"Fehler bei BackupManager-Initialisierung: {e}")
|
||||
self._db_backup_manager = None
|
||||
self.enabled = False
|
||||
|
||||
def create_backup(self, backup_type="manual"):
|
||||
"""Erstellt ein Backup (Placeholder)"""
|
||||
backup_logger.info(f"Backup-Erstellung angefordert: {backup_type}")
|
||||
return {"success": False, "message": "Backup-Funktionalität nicht implementiert"}
|
||||
"""
|
||||
Erstellt ein Backup der Datenbank.
|
||||
|
||||
Args:
|
||||
backup_type (str): Typ des Backups (manual, automatic, emergency)
|
||||
|
||||
Returns:
|
||||
dict: Ergebnis der Backup-Operation mit success/error Status
|
||||
"""
|
||||
if not self.enabled or not self._db_backup_manager:
|
||||
backup_logger.warning("BackupManager nicht verfügbar - Backup-Erstellung fehlgeschlagen")
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Backup-System nicht verfügbar",
|
||||
"error": "BackupManager nicht initialisiert"
|
||||
}
|
||||
|
||||
try:
|
||||
backup_logger.info(f"Starte Backup-Erstellung: {backup_type}")
|
||||
|
||||
# Nutze die vollständige DatabaseBackupManager-Implementation
|
||||
backup_path = self._db_backup_manager.create_backup(compress=True)
|
||||
|
||||
backup_logger.info(f"Backup erfolgreich erstellt: {backup_path}")
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Backup erfolgreich erstellt: {backup_type}",
|
||||
"backup_path": backup_path,
|
||||
"backup_type": backup_type
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
backup_logger.error(f"Fehler bei Backup-Erstellung ({backup_type}): {str(e)}")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Backup-Erstellung fehlgeschlagen: {str(e)}",
|
||||
"error": str(e),
|
||||
"backup_type": backup_type
|
||||
}
|
||||
|
||||
def restore_backup(self, backup_path):
|
||||
"""Stellt ein Backup wieder her (Placeholder)"""
|
||||
backup_logger.info(f"Backup-Wiederherstellung angefordert: {backup_path}")
|
||||
return {"success": False, "message": "Restore-Funktionalität nicht implementiert"}
|
||||
"""
|
||||
Stellt ein Backup wieder her.
|
||||
|
||||
Args:
|
||||
backup_path (str): Pfad zur Backup-Datei
|
||||
|
||||
Returns:
|
||||
dict: Ergebnis der Restore-Operation
|
||||
"""
|
||||
if not self.enabled or not self._db_backup_manager:
|
||||
backup_logger.warning("BackupManager nicht verfügbar - Restore fehlgeschlagen")
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Backup-System nicht verfügbar",
|
||||
"error": "BackupManager nicht initialisiert"
|
||||
}
|
||||
|
||||
try:
|
||||
backup_logger.info(f"Starte Backup-Wiederherstellung: {backup_path}")
|
||||
|
||||
# Nutze die vollständige DatabaseBackupManager-Implementation
|
||||
success = self._db_backup_manager.restore_backup(backup_path)
|
||||
|
||||
if success:
|
||||
backup_logger.info(f"Backup erfolgreich wiederhergestellt: {backup_path}")
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"Backup erfolgreich wiederhergestellt",
|
||||
"backup_path": backup_path
|
||||
}
|
||||
else:
|
||||
backup_logger.error(f"Backup-Wiederherstellung fehlgeschlagen: {backup_path}")
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Backup-Wiederherstellung fehlgeschlagen",
|
||||
"backup_path": backup_path
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
backup_logger.error(f"Fehler bei Backup-Wiederherstellung ({backup_path}): {str(e)}")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Restore fehlgeschlagen: {str(e)}",
|
||||
"error": str(e),
|
||||
"backup_path": backup_path
|
||||
}
|
||||
|
||||
def get_backup_list(self):
|
||||
"""
|
||||
Holt eine Liste aller verfügbaren Backups.
|
||||
|
||||
Returns:
|
||||
dict: Liste der verfügbaren Backups
|
||||
"""
|
||||
if not self.enabled or not self._db_backup_manager:
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Backup-System nicht verfügbar",
|
||||
"backups": []
|
||||
}
|
||||
|
||||
try:
|
||||
backups = self._db_backup_manager.list_backups()
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"{len(backups)} Backups gefunden",
|
||||
"backups": backups
|
||||
}
|
||||
except Exception as e:
|
||||
backup_logger.error(f"Fehler beim Abrufen der Backup-Liste: {str(e)}")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Fehler beim Abrufen der Backups: {str(e)}",
|
||||
"backups": []
|
||||
}
|
||||
|
||||
def cleanup_old_backups(self, keep_count=10):
|
||||
"""
|
||||
Räumt alte Backups auf und behält nur die neuesten.
|
||||
|
||||
Args:
|
||||
keep_count (int): Anzahl der zu behaltenden Backups
|
||||
|
||||
Returns:
|
||||
dict: Ergebnis der Cleanup-Operation
|
||||
"""
|
||||
if not self.enabled or not self._db_backup_manager:
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Backup-System nicht verfügbar"
|
||||
}
|
||||
|
||||
try:
|
||||
removed_count = self._db_backup_manager.cleanup_old_backups(keep_count)
|
||||
backup_logger.info(f"Backup-Cleanup abgeschlossen: {removed_count} alte Backups entfernt")
|
||||
return {
|
||||
"success": True,
|
||||
"message": f"{removed_count} alte Backups entfernt",
|
||||
"removed_count": removed_count,
|
||||
"kept_count": keep_count
|
||||
}
|
||||
except Exception as e:
|
||||
backup_logger.error(f"Fehler beim Backup-Cleanup: {str(e)}")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Cleanup fehlgeschlagen: {str(e)}",
|
||||
"error": str(e)
|
||||
}
|
772
backend/utils/database_core.py
Normal file
772
backend/utils/database_core.py
Normal file
@@ -0,0 +1,772 @@
|
||||
"""
|
||||
Zentralisierte Datenbank-Operationen für das MYP System
|
||||
|
||||
Konsolidierte Implementierung aller datenbankbezogenen Funktionen:
|
||||
- CRUD-Operationen (ursprünglich db_manager.py)
|
||||
- Backup-Verwaltung (ursprünglich database_utils.py)
|
||||
- Cleanup-Operationen (ursprünglich database_cleanup.py)
|
||||
- Einheitliches Session-Management
|
||||
|
||||
Optimierungen:
|
||||
- Intelligente Session-Factory basierend auf Operationstyp
|
||||
- Zentrale Engine-Registry für verschiedene Anwendungsfälle
|
||||
- Koordinierte Lock-Behandlung und Retry-Logik
|
||||
- Vereinheitlichte Error-Handling-Patterns
|
||||
|
||||
Autor: MYP Team - Konsolidiert für IHK-Projektarbeit
|
||||
Datum: 2025-06-09
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
import sqlite3
|
||||
import threading
|
||||
import time
|
||||
import gzip
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Dict, List, Optional, Tuple, Any, Union
|
||||
from pathlib import Path
|
||||
from contextlib import contextmanager
|
||||
|
||||
from sqlalchemy import text, create_engine
|
||||
from sqlalchemy.engine import Engine
|
||||
from sqlalchemy.orm import sessionmaker, Session
|
||||
from sqlalchemy.exc import SQLAlchemyError, OperationalError
|
||||
|
||||
from utils.settings import DATABASE_PATH
|
||||
from utils.logging_config import get_logger
|
||||
from models import get_cached_session, create_optimized_engine, User, Printer, Job
|
||||
|
||||
# ===== ZENTRALER LOGGER =====
|
||||
db_logger = get_logger("database_core")
|
||||
|
||||
# ===== ENGINE-REGISTRY =====
|
||||
|
||||
class EngineRegistry:
|
||||
"""
|
||||
Zentrale Registry für verschiedene Datenbank-Engine-Konfigurationen.
|
||||
Vermeidet Duplikation und ermöglicht optimierte Engines für verschiedene Anwendungsfälle.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.engines: Dict[str, Engine] = {}
|
||||
self._lock = threading.RLock()
|
||||
|
||||
def get_engine(self, engine_type: str = 'default') -> Engine:
|
||||
"""
|
||||
Holt oder erstellt eine Engine basierend auf dem Typ.
|
||||
|
||||
Args:
|
||||
engine_type: Art der Engine ('default', 'cleanup', 'monitoring', 'backup')
|
||||
|
||||
Returns:
|
||||
Engine: Konfigurierte SQLAlchemy Engine
|
||||
"""
|
||||
with self._lock:
|
||||
if engine_type not in self.engines:
|
||||
self.engines[engine_type] = self._create_engine(engine_type)
|
||||
return self.engines[engine_type]
|
||||
|
||||
def _create_engine(self, engine_type: str) -> Engine:
|
||||
"""Erstellt optimierte Engine basierend auf Anwendungsfall"""
|
||||
base_url = f"sqlite:///{DATABASE_PATH}"
|
||||
|
||||
if engine_type == 'default':
|
||||
# Standard-Engine für CRUD-Operationen
|
||||
return create_optimized_engine()
|
||||
|
||||
elif engine_type == 'cleanup':
|
||||
# Engine für Cleanup-Operationen mit aggressiven Timeouts
|
||||
return create_engine(
|
||||
base_url,
|
||||
pool_timeout=1.0,
|
||||
pool_recycle=300,
|
||||
pool_pre_ping=True,
|
||||
connect_args={
|
||||
'timeout': 5,
|
||||
'check_same_thread': False,
|
||||
'isolation_level': None # Autocommit für Cleanup
|
||||
}
|
||||
)
|
||||
|
||||
elif engine_type == 'monitoring':
|
||||
# Engine für Monitoring mit minimaler Blockierung
|
||||
return create_engine(
|
||||
base_url,
|
||||
pool_timeout=0.5,
|
||||
pool_recycle=60,
|
||||
connect_args={
|
||||
'timeout': 2,
|
||||
'check_same_thread': False
|
||||
}
|
||||
)
|
||||
|
||||
elif engine_type == 'backup':
|
||||
# Engine für Backup-Operationen mit längeren Timeouts
|
||||
return create_engine(
|
||||
base_url,
|
||||
pool_timeout=30.0,
|
||||
pool_recycle=3600,
|
||||
connect_args={
|
||||
'timeout': 30,
|
||||
'check_same_thread': False
|
||||
}
|
||||
)
|
||||
|
||||
else:
|
||||
db_logger.warning(f"Unknown engine type '{engine_type}', using default")
|
||||
return create_optimized_engine()
|
||||
|
||||
def dispose_all(self):
|
||||
"""Schließt alle registrierten Engines"""
|
||||
with self._lock:
|
||||
for engine_type, engine in self.engines.items():
|
||||
try:
|
||||
engine.dispose()
|
||||
db_logger.debug(f"Engine '{engine_type}' disposed successfully")
|
||||
except Exception as e:
|
||||
db_logger.warning(f"Error disposing engine '{engine_type}': {e}")
|
||||
self.engines.clear()
|
||||
|
||||
# Globale Engine-Registry
|
||||
engine_registry = EngineRegistry()
|
||||
|
||||
# ===== SESSION-MANAGEMENT =====
|
||||
|
||||
@contextmanager
|
||||
def get_database_session(operation_type: str = 'default'):
|
||||
"""
|
||||
Intelligenter Session-Manager basierend auf Operationstyp.
|
||||
|
||||
Args:
|
||||
operation_type: Art der Operation ('default', 'cleanup', 'monitoring', 'backup', 'cached')
|
||||
|
||||
Yields:
|
||||
Session: Konfigurierte SQLAlchemy Session
|
||||
"""
|
||||
if operation_type == 'cached':
|
||||
# Verwende das bestehende Cached-Session-System für Standard-CRUD
|
||||
session = get_cached_session()
|
||||
try:
|
||||
yield session
|
||||
finally:
|
||||
# Cached Sessions werden automatisch verwaltet
|
||||
pass
|
||||
else:
|
||||
# Erstelle neue Session für spezielle Operationen
|
||||
engine = engine_registry.get_engine(operation_type)
|
||||
SessionClass = sessionmaker(bind=engine)
|
||||
session = SessionClass()
|
||||
|
||||
try:
|
||||
yield session
|
||||
except Exception as e:
|
||||
try:
|
||||
session.rollback()
|
||||
db_logger.error(f"Session rollback for {operation_type}: {e}")
|
||||
except Exception as rollback_error:
|
||||
db_logger.error(f"Session rollback failed for {operation_type}: {rollback_error}")
|
||||
raise
|
||||
finally:
|
||||
try:
|
||||
session.close()
|
||||
except Exception as close_error:
|
||||
db_logger.warning(f"Session close failed for {operation_type}: {close_error}")
|
||||
|
||||
# ===== CLEANUP-OPERATIONEN =====
|
||||
|
||||
class DatabaseCleanupManager:
|
||||
"""
|
||||
Robuste Cleanup-Operationen mit intelligenter Retry-Logik.
|
||||
Konsolidiert Funktionalität aus database_cleanup.py.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.cleanup_logger = get_logger("database_cleanup")
|
||||
self._registered_engines = set()
|
||||
|
||||
def register_engine_for_cleanup(self, engine: Engine):
|
||||
"""Registriert Engine für Cleanup bei WAL-Operationen"""
|
||||
self._registered_engines.add(engine)
|
||||
|
||||
def force_close_all_connections(self):
|
||||
"""Schließt alle offenen Datenbankverbindungen forciert"""
|
||||
try:
|
||||
# Standard-Engine-Registry schließen
|
||||
engine_registry.dispose_all()
|
||||
|
||||
# Registrierte Engines schließen
|
||||
for engine in self._registered_engines:
|
||||
try:
|
||||
engine.dispose()
|
||||
except Exception as e:
|
||||
self.cleanup_logger.warning(f"Failed to dispose registered engine: {e}")
|
||||
|
||||
self._registered_engines.clear()
|
||||
|
||||
# Warten auf Verbindungsschließung
|
||||
time.sleep(0.5)
|
||||
|
||||
self.cleanup_logger.info("All database connections forcefully closed")
|
||||
|
||||
except Exception as e:
|
||||
self.cleanup_logger.error(f"Error during connection cleanup: {e}")
|
||||
|
||||
def perform_wal_checkpoint(self, retries: int = 3) -> bool:
|
||||
"""
|
||||
Führt WAL-Checkpoint mit Retry-Logik durch.
|
||||
|
||||
Args:
|
||||
retries: Anzahl der Wiederholungsversuche
|
||||
|
||||
Returns:
|
||||
bool: True wenn erfolgreich
|
||||
"""
|
||||
for attempt in range(retries):
|
||||
try:
|
||||
if attempt > 0:
|
||||
self.force_close_all_connections()
|
||||
time.sleep(1.0 * attempt) # Exponential backoff
|
||||
|
||||
# Direkte SQLite3-Verbindung für maximale Kontrolle
|
||||
conn = sqlite3.connect(DATABASE_PATH, timeout=10.0)
|
||||
cursor = conn.cursor()
|
||||
|
||||
try:
|
||||
# WAL-Checkpoint durchführen
|
||||
cursor.execute("PRAGMA wal_checkpoint(TRUNCATE)")
|
||||
result = cursor.fetchone()
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
self.cleanup_logger.info(f"WAL checkpoint successful on attempt {attempt + 1}: {result}")
|
||||
return True
|
||||
|
||||
except sqlite3.OperationalError as e:
|
||||
conn.close()
|
||||
if "database is locked" in str(e).lower() and attempt < retries - 1:
|
||||
self.cleanup_logger.warning(f"Database locked on attempt {attempt + 1}, retrying...")
|
||||
continue
|
||||
else:
|
||||
raise
|
||||
|
||||
except Exception as e:
|
||||
self.cleanup_logger.error(f"WAL checkpoint attempt {attempt + 1} failed: {e}")
|
||||
if attempt == retries - 1:
|
||||
return False
|
||||
|
||||
return False
|
||||
|
||||
def switch_journal_mode(self, mode: str = "WAL") -> bool:
|
||||
"""
|
||||
Wechselt den Journal-Modus der Datenbank.
|
||||
|
||||
Args:
|
||||
mode: Journal-Modus ('WAL', 'DELETE', 'TRUNCATE', etc.)
|
||||
|
||||
Returns:
|
||||
bool: True wenn erfolgreich
|
||||
"""
|
||||
try:
|
||||
self.force_close_all_connections()
|
||||
time.sleep(1.0)
|
||||
|
||||
conn = sqlite3.connect(DATABASE_PATH, timeout=15.0)
|
||||
cursor = conn.cursor()
|
||||
|
||||
try:
|
||||
cursor.execute(f"PRAGMA journal_mode = {mode}")
|
||||
result = cursor.fetchone()
|
||||
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
self.cleanup_logger.info(f"Journal mode switched to {mode}: {result}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
conn.close()
|
||||
self.cleanup_logger.error(f"Failed to switch journal mode to {mode}: {e}")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
self.cleanup_logger.error(f"Error during journal mode switch: {e}")
|
||||
return False
|
||||
|
||||
# ===== BACKUP-OPERATIONEN =====
|
||||
|
||||
class DatabaseBackupManager:
|
||||
"""
|
||||
Erweiterte Backup-Verwaltung mit automatischer Rotation.
|
||||
Konsolidiert Funktionalität aus database_utils.py.
|
||||
"""
|
||||
|
||||
def __init__(self, backup_dir: str = None):
|
||||
self.backup_dir = backup_dir or os.path.join(os.path.dirname(DATABASE_PATH), "backups")
|
||||
self.backup_logger = get_logger("database_backup")
|
||||
self.ensure_backup_directory()
|
||||
self._backup_lock = threading.Lock()
|
||||
|
||||
def ensure_backup_directory(self):
|
||||
"""Stellt sicher, dass das Backup-Verzeichnis existiert"""
|
||||
Path(self.backup_dir).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def create_backup(self, compress: bool = True) -> str:
|
||||
"""
|
||||
Erstellt ein Backup der Datenbank.
|
||||
|
||||
Args:
|
||||
compress: Ob das Backup komprimiert werden soll
|
||||
|
||||
Returns:
|
||||
str: Pfad zum erstellten Backup
|
||||
"""
|
||||
with self._backup_lock:
|
||||
try:
|
||||
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
|
||||
extension = '.gz' if compress else '.db'
|
||||
backup_filename = f"myp_backup_{timestamp}.db{extension}"
|
||||
backup_path = os.path.join(self.backup_dir, backup_filename)
|
||||
|
||||
# Checkpoint vor Backup
|
||||
cleanup_manager = DatabaseCleanupManager()
|
||||
cleanup_manager.perform_wal_checkpoint()
|
||||
|
||||
if compress:
|
||||
# Komprimiertes Backup
|
||||
with open(DATABASE_PATH, 'rb') as f_in:
|
||||
with gzip.open(backup_path, 'wb') as f_out:
|
||||
shutil.copyfileobj(f_in, f_out)
|
||||
else:
|
||||
# Einfache Kopie
|
||||
shutil.copy2(DATABASE_PATH, backup_path)
|
||||
|
||||
backup_size = os.path.getsize(backup_path)
|
||||
self.backup_logger.info(f"Backup created: {backup_filename} ({backup_size / 1024 / 1024:.2f} MB)")
|
||||
|
||||
return backup_path
|
||||
|
||||
except Exception as e:
|
||||
self.backup_logger.error(f"Backup creation failed: {e}")
|
||||
raise
|
||||
|
||||
def list_backups(self) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Listet alle verfügbaren Backups auf.
|
||||
|
||||
Returns:
|
||||
List[Dict]: Liste der Backup-Informationen
|
||||
"""
|
||||
try:
|
||||
backups = []
|
||||
backup_pattern = "myp_backup_*.db*"
|
||||
|
||||
for backup_file in Path(self.backup_dir).glob(backup_pattern):
|
||||
stat = backup_file.stat()
|
||||
backups.append({
|
||||
'filename': backup_file.name,
|
||||
'path': str(backup_file),
|
||||
'size_bytes': stat.st_size,
|
||||
'size_mb': round(stat.st_size / 1024 / 1024, 2),
|
||||
'created_at': datetime.fromtimestamp(stat.st_ctime),
|
||||
'compressed': backup_file.suffix == '.gz'
|
||||
})
|
||||
|
||||
# Sortiere nach Datum (neueste zuerst)
|
||||
backups.sort(key=lambda x: x['created_at'], reverse=True)
|
||||
return backups
|
||||
|
||||
except Exception as e:
|
||||
self.backup_logger.error(f"Error listing backups: {e}")
|
||||
return []
|
||||
|
||||
def cleanup_old_backups(self, keep_count: int = 10) -> int:
|
||||
"""
|
||||
Räumt alte Backups auf und behält nur die neuesten.
|
||||
|
||||
Args:
|
||||
keep_count: Anzahl der zu behaltenden Backups
|
||||
|
||||
Returns:
|
||||
int: Anzahl der gelöschten Backups
|
||||
"""
|
||||
try:
|
||||
backups = self.list_backups()
|
||||
if len(backups) <= keep_count:
|
||||
return 0
|
||||
|
||||
backups_to_delete = backups[keep_count:]
|
||||
deleted_count = 0
|
||||
|
||||
for backup in backups_to_delete:
|
||||
try:
|
||||
os.remove(backup['path'])
|
||||
deleted_count += 1
|
||||
self.backup_logger.debug(f"Deleted old backup: {backup['filename']}")
|
||||
except Exception as e:
|
||||
self.backup_logger.warning(f"Failed to delete backup {backup['filename']}: {e}")
|
||||
|
||||
self.backup_logger.info(f"Cleaned up {deleted_count} old backups, kept {keep_count}")
|
||||
return deleted_count
|
||||
|
||||
except Exception as e:
|
||||
self.backup_logger.error(f"Error during backup cleanup: {e}")
|
||||
return 0
|
||||
|
||||
def restore_backup(self, backup_path: str) -> bool:
|
||||
"""
|
||||
Stellt ein Backup wieder her.
|
||||
|
||||
Args:
|
||||
backup_path: Pfad zur Backup-Datei
|
||||
|
||||
Returns:
|
||||
bool: True wenn erfolgreich
|
||||
"""
|
||||
try:
|
||||
if not os.path.exists(backup_path):
|
||||
self.backup_logger.error(f"Backup file not found: {backup_path}")
|
||||
return False
|
||||
|
||||
# Verbindungen schließen
|
||||
cleanup_manager = DatabaseCleanupManager()
|
||||
cleanup_manager.force_close_all_connections()
|
||||
time.sleep(2.0)
|
||||
|
||||
# Aktueller Datenbank-Backup erstellen
|
||||
current_backup = self.create_backup(compress=True)
|
||||
self.backup_logger.info(f"Current database backed up to: {current_backup}")
|
||||
|
||||
# Backup wiederherstellen
|
||||
if backup_path.endswith('.gz'):
|
||||
# Komprimiertes Backup entpacken
|
||||
with gzip.open(backup_path, 'rb') as f_in:
|
||||
with open(DATABASE_PATH, 'wb') as f_out:
|
||||
shutil.copyfileobj(f_in, f_out)
|
||||
else:
|
||||
# Einfache Kopie
|
||||
shutil.copy2(backup_path, DATABASE_PATH)
|
||||
|
||||
self.backup_logger.info(f"Database restored from: {backup_path}")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
self.backup_logger.error(f"Backup restoration failed: {e}")
|
||||
return False
|
||||
|
||||
# ===== CRUD-OPERATIONEN =====
|
||||
|
||||
class DatabaseCRUDManager:
|
||||
"""
|
||||
Geschäftslogik-orientierte CRUD-Operationen.
|
||||
Konsolidiert Funktionalität aus db_manager.py.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.crud_logger = get_logger("database_crud")
|
||||
|
||||
def get_active_jobs(self, limit: int = None) -> List[Job]:
|
||||
"""
|
||||
Holt aktive Jobs mit optimiertem Loading.
|
||||
|
||||
Args:
|
||||
limit: Maximale Anzahl Jobs
|
||||
|
||||
Returns:
|
||||
List[Job]: Liste der aktiven Jobs
|
||||
"""
|
||||
try:
|
||||
with get_database_session('cached') as session:
|
||||
query = session.query(Job).filter(
|
||||
Job.status.in_(['pending', 'printing', 'paused'])
|
||||
).order_by(Job.created_at.desc())
|
||||
|
||||
if limit:
|
||||
query = query.limit(limit)
|
||||
|
||||
jobs = query.all()
|
||||
self.crud_logger.debug(f"Retrieved {len(jobs)} active jobs")
|
||||
return jobs
|
||||
|
||||
except Exception as e:
|
||||
self.crud_logger.error(f"Error retrieving active jobs: {e}")
|
||||
return []
|
||||
|
||||
def get_printer_with_jobs(self, printer_id: int) -> Optional[Printer]:
|
||||
"""
|
||||
Holt Drucker mit zugehörigen Jobs (Eager Loading).
|
||||
|
||||
Args:
|
||||
printer_id: ID des Druckers
|
||||
|
||||
Returns:
|
||||
Optional[Printer]: Drucker mit Jobs oder None
|
||||
"""
|
||||
try:
|
||||
with get_database_session('cached') as session:
|
||||
from sqlalchemy.orm import joinedload
|
||||
|
||||
printer = session.query(Printer).options(
|
||||
joinedload(Printer.jobs)
|
||||
).filter(Printer.id == printer_id).first()
|
||||
|
||||
if printer:
|
||||
self.crud_logger.debug(f"Retrieved printer {printer.name} with {len(printer.jobs)} jobs")
|
||||
|
||||
return printer
|
||||
|
||||
except Exception as e:
|
||||
self.crud_logger.error(f"Error retrieving printer with jobs: {e}")
|
||||
return None
|
||||
|
||||
def get_user_job_statistics(self, user_id: int) -> Dict[str, Any]:
|
||||
"""
|
||||
Holt Benutzer-Job-Statistiken.
|
||||
|
||||
Args:
|
||||
user_id: ID des Benutzers
|
||||
|
||||
Returns:
|
||||
Dict: Statistiken des Benutzers
|
||||
"""
|
||||
try:
|
||||
with get_database_session('cached') as session:
|
||||
user = session.query(User).filter(User.id == user_id).first()
|
||||
if not user:
|
||||
return {}
|
||||
|
||||
# Job-Statistiken berechnen
|
||||
total_jobs = session.query(Job).filter(Job.user_id == user_id).count()
|
||||
completed_jobs = session.query(Job).filter(
|
||||
Job.user_id == user_id, Job.status == 'completed'
|
||||
).count()
|
||||
active_jobs = session.query(Job).filter(
|
||||
Job.user_id == user_id, Job.status.in_(['pending', 'printing', 'paused'])
|
||||
).count()
|
||||
|
||||
stats = {
|
||||
'user_id': user_id,
|
||||
'username': user.username,
|
||||
'total_jobs': total_jobs,
|
||||
'completed_jobs': completed_jobs,
|
||||
'active_jobs': active_jobs,
|
||||
'success_rate': round((completed_jobs / total_jobs * 100), 2) if total_jobs > 0 else 0
|
||||
}
|
||||
|
||||
self.crud_logger.debug(f"Generated statistics for user {user.username}")
|
||||
return stats
|
||||
|
||||
except Exception as e:
|
||||
self.crud_logger.error(f"Error generating user statistics: {e}")
|
||||
return {}
|
||||
|
||||
# ===== MONITORING-OPERATIONEN =====
|
||||
|
||||
class DatabaseMonitor:
|
||||
"""
|
||||
Performance-Überwachung und Gesundheitsprüfungen.
|
||||
Erweitert Funktionalität aus database_utils.py.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.monitor_logger = get_logger("database_monitor")
|
||||
|
||||
def get_database_health_check(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Umfassende Gesundheitsprüfung der Datenbank.
|
||||
|
||||
Returns:
|
||||
Dict: Gesundheitsstatus der Datenbank
|
||||
"""
|
||||
health_status = {
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'overall_status': 'unknown',
|
||||
'checks': {}
|
||||
}
|
||||
|
||||
try:
|
||||
with get_database_session('monitoring') as session:
|
||||
# 1. Verbindungstest
|
||||
try:
|
||||
session.execute(text("SELECT 1"))
|
||||
health_status['checks']['connection'] = {'status': 'ok', 'message': 'Database connection successful'}
|
||||
except Exception as e:
|
||||
health_status['checks']['connection'] = {'status': 'error', 'message': str(e)}
|
||||
|
||||
# 2. Integritätsprüfung
|
||||
try:
|
||||
result = session.execute(text("PRAGMA integrity_check")).fetchone()
|
||||
integrity_ok = result and result[0] == 'ok'
|
||||
health_status['checks']['integrity'] = {
|
||||
'status': 'ok' if integrity_ok else 'warning',
|
||||
'message': result[0] if result else 'No integrity result'
|
||||
}
|
||||
except Exception as e:
|
||||
health_status['checks']['integrity'] = {'status': 'error', 'message': str(e)}
|
||||
|
||||
# 3. WAL-Status
|
||||
try:
|
||||
wal_result = session.execute(text("PRAGMA journal_mode")).fetchone()
|
||||
wal_mode = wal_result[0] if wal_result else 'unknown'
|
||||
health_status['checks']['wal_mode'] = {
|
||||
'status': 'ok' if wal_mode == 'wal' else 'info',
|
||||
'message': f'Journal mode: {wal_mode}'
|
||||
}
|
||||
except Exception as e:
|
||||
health_status['checks']['wal_mode'] = {'status': 'error', 'message': str(e)}
|
||||
|
||||
# 4. Datenbankgröße
|
||||
try:
|
||||
if os.path.exists(DATABASE_PATH):
|
||||
db_size = os.path.getsize(DATABASE_PATH)
|
||||
health_status['checks']['database_size'] = {
|
||||
'status': 'ok',
|
||||
'message': f'Database size: {db_size / 1024 / 1024:.2f} MB',
|
||||
'size_bytes': db_size
|
||||
}
|
||||
except Exception as e:
|
||||
health_status['checks']['database_size'] = {'status': 'error', 'message': str(e)}
|
||||
|
||||
# Gesamtstatus bestimmen
|
||||
statuses = [check['status'] for check in health_status['checks'].values()]
|
||||
if 'error' in statuses:
|
||||
health_status['overall_status'] = 'error'
|
||||
elif 'warning' in statuses:
|
||||
health_status['overall_status'] = 'warning'
|
||||
else:
|
||||
health_status['overall_status'] = 'ok'
|
||||
|
||||
except Exception as e:
|
||||
health_status['overall_status'] = 'error'
|
||||
health_status['error'] = str(e)
|
||||
self.monitor_logger.error(f"Database health check failed: {e}")
|
||||
|
||||
return health_status
|
||||
|
||||
# ===== UNIFIED DATABASE SERVICE =====
|
||||
|
||||
class UnifiedDatabaseService:
|
||||
"""
|
||||
Zentrale Schnittstelle für alle Datenbankoperationen.
|
||||
Kombiniert CRUD, Wartung, Cleanup und Monitoring.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.logger = get_logger("unified_database")
|
||||
self.crud = DatabaseCRUDManager()
|
||||
self.backup = DatabaseBackupManager()
|
||||
self.cleanup = DatabaseCleanupManager()
|
||||
self.monitor = DatabaseMonitor()
|
||||
|
||||
# Engines für Cleanup registrieren
|
||||
for engine_type in ['default', 'monitoring', 'backup']:
|
||||
engine = engine_registry.get_engine(engine_type)
|
||||
self.cleanup.register_engine_for_cleanup(engine)
|
||||
|
||||
def get_service_status(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Holt den Status aller Datenbankdienste.
|
||||
|
||||
Returns:
|
||||
Dict: Umfassender Service-Status
|
||||
"""
|
||||
try:
|
||||
health_check = self.monitor.get_database_health_check()
|
||||
backups = self.backup.list_backups()
|
||||
|
||||
return {
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'health': health_check,
|
||||
'backups': {
|
||||
'count': len(backups),
|
||||
'latest': backups[0] if backups else None
|
||||
},
|
||||
'engines': {
|
||||
'registered_count': len(engine_registry.engines),
|
||||
'types': list(engine_registry.engines.keys())
|
||||
}
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Error getting service status: {e}")
|
||||
return {'error': str(e), 'timestamp': datetime.now().isoformat()}
|
||||
|
||||
def perform_maintenance(self) -> Dict[str, Any]:
|
||||
"""
|
||||
Führt umfassende Datenbankwartung durch.
|
||||
|
||||
Returns:
|
||||
Dict: Wartungsergebnisse
|
||||
"""
|
||||
maintenance_results = {
|
||||
'timestamp': datetime.now().isoformat(),
|
||||
'operations': {}
|
||||
}
|
||||
|
||||
try:
|
||||
# 1. WAL-Checkpoint
|
||||
self.logger.info("Starting WAL checkpoint...")
|
||||
checkpoint_success = self.cleanup.perform_wal_checkpoint()
|
||||
maintenance_results['operations']['wal_checkpoint'] = {
|
||||
'success': checkpoint_success,
|
||||
'message': 'WAL checkpoint completed' if checkpoint_success else 'WAL checkpoint failed'
|
||||
}
|
||||
|
||||
# 2. Backup erstellen
|
||||
self.logger.info("Creating maintenance backup...")
|
||||
try:
|
||||
backup_path = self.backup.create_backup(compress=True)
|
||||
maintenance_results['operations']['backup'] = {
|
||||
'success': True,
|
||||
'message': f'Backup created: {os.path.basename(backup_path)}',
|
||||
'path': backup_path
|
||||
}
|
||||
except Exception as e:
|
||||
maintenance_results['operations']['backup'] = {
|
||||
'success': False,
|
||||
'message': f'Backup failed: {str(e)}'
|
||||
}
|
||||
|
||||
# 3. Alte Backups aufräumen
|
||||
self.logger.info("Cleaning up old backups...")
|
||||
try:
|
||||
deleted_count = self.backup.cleanup_old_backups(keep_count=10)
|
||||
maintenance_results['operations']['backup_cleanup'] = {
|
||||
'success': True,
|
||||
'message': f'Cleaned up {deleted_count} old backups'
|
||||
}
|
||||
except Exception as e:
|
||||
maintenance_results['operations']['backup_cleanup'] = {
|
||||
'success': False,
|
||||
'message': f'Backup cleanup failed: {str(e)}'
|
||||
}
|
||||
|
||||
# 4. Gesundheitsprüfung
|
||||
self.logger.info("Performing health check...")
|
||||
health_check = self.monitor.get_database_health_check()
|
||||
maintenance_results['health_check'] = health_check
|
||||
|
||||
# Gesamtergebnis
|
||||
operation_results = [op['success'] for op in maintenance_results['operations'].values()]
|
||||
maintenance_results['overall_success'] = all(operation_results)
|
||||
|
||||
self.logger.info(f"Maintenance completed with overall success: {maintenance_results['overall_success']}")
|
||||
|
||||
except Exception as e:
|
||||
self.logger.error(f"Maintenance operation failed: {e}")
|
||||
maintenance_results['error'] = str(e)
|
||||
maintenance_results['overall_success'] = False
|
||||
|
||||
return maintenance_results
|
||||
|
||||
# ===== GLOBALE INSTANZ =====
|
||||
|
||||
# Zentrale Datenbankdienst-Instanz
|
||||
database_service = UnifiedDatabaseService()
|
||||
|
||||
# Cleanup-Manager für Legacy-Kompatibilität
|
||||
cleanup_manager = database_service.cleanup
|
||||
|
||||
# Backup-Manager für Legacy-Kompatibilität
|
||||
backup_manager = database_service.backup
|
133
backend/utils/deprecated/db_manager.py
Normal file
133
backend/utils/deprecated/db_manager.py
Normal file
@@ -0,0 +1,133 @@
|
||||
import os
|
||||
import logging
|
||||
from typing import List, Optional, Any
|
||||
from datetime import datetime
|
||||
|
||||
from sqlalchemy import create_engine, func
|
||||
from sqlalchemy.orm import sessionmaker, Session, joinedload
|
||||
|
||||
from models import User, Printer, Job, Stats, Base
|
||||
from utils.settings import DATABASE_PATH, ensure_database_directory
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class DatabaseManager:
    """Database manager for the MYP SQLite database.

    Wraps engine/session creation and provides read helpers that eagerly
    load the ``user`` and ``printer`` relationships of ``Job`` so results
    remain usable after the session has been closed.
    """

    def __init__(self):
        """Create the SQLite engine and session factory for DATABASE_PATH."""
        ensure_database_directory()
        self.engine = create_engine(f"sqlite:///{DATABASE_PATH}")
        self.Session = sessionmaker(bind=self.engine)

    def get_session(self) -> Session:
        """Get a new database session.

        Returns:
            Session: A new SQLAlchemy session (caller must close it).
        """
        return self.Session()

    def _job_query(self, session: Session):
        """Return a Job query with user and printer relationships eagerly loaded."""
        return session.query(Job).options(
            joinedload(Job.user),
            joinedload(Job.printer)
        )

    def test_connection(self) -> bool:
        """Test the database connection with a trivial query.

        Returns:
            bool: True if the connection is successful, False otherwise.
        """
        # Local import keeps the module-level import block unchanged; text()
        # is required because SQLAlchemy 1.4+ rejects bare SQL strings in
        # Session.execute().
        from sqlalchemy import text
        try:
            session = self.get_session()
            try:
                session.execute(text("SELECT 1"))
            finally:
                # Close even if the query raises (the previous version leaked
                # the session on failure).
                session.close()
            return True
        except Exception as e:
            logger.error(f"Database connection test failed: {str(e)}")
            return False

    def get_all_jobs(self) -> List[Job]:
        """Get all jobs with eager loading of relationships.

        Returns:
            List[Job]: A list of all jobs.
        """
        session = self.get_session()
        try:
            return self._job_query(session).all()
        finally:
            session.close()

    def get_jobs_by_status(self, status: str) -> List[Job]:
        """Get jobs by status with eager loading of relationships.

        Args:
            status: The job status to filter by.

        Returns:
            List[Job]: A list of jobs with the specified status.
        """
        session = self.get_session()
        try:
            return self._job_query(session).filter(Job.status == status).all()
        finally:
            session.close()

    def get_job_by_id(self, job_id: int) -> Optional[Job]:
        """Get a job by ID with eager loading of relationships.

        Args:
            job_id: The job ID to find.

        Returns:
            Optional[Job]: The job if found, None otherwise.
        """
        session = self.get_session()
        try:
            return self._job_query(session).filter(Job.id == job_id).first()
        finally:
            session.close()

    def get_available_printers(self) -> List[Printer]:
        """Get all active printers that are not currently busy.

        Returns:
            List[Printer]: A list of available printers.
        """
        session = self.get_session()
        try:
            return session.query(Printer).filter(
                Printer.active == True,  # noqa: E712 — SQLAlchemy needs the comparison expression
                Printer.status != "busy"
            ).all()
        finally:
            session.close()

    def get_jobs_since(self, since_date: datetime) -> List[Job]:
        """Get jobs created at or after a specific date.

        Args:
            since_date: The date to filter jobs from.

        Returns:
            List[Job]: A list of jobs created since the specified date.
        """
        session = self.get_session()
        try:
            return self._job_query(session).filter(Job.created_at >= since_date).all()
        finally:
            session.close()
|
29
backend/utils/fix_indentation.py
Normal file
29
backend/utils/fix_indentation.py
Normal file
@@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env python3.11
|
||||
"""
|
||||
Skript zur Behebung von Einrückungsproblemen in user_management.py
|
||||
"""
|
||||
|
||||
def fix_indentation():
    """One-off fixer for indentation problems in blueprints/user_management.py.

    Reads the file, re-indents over-indented lines, and writes it back in place.
    """
    file_path = 'blueprints/user_management.py'

    with open(file_path, 'r') as f:
        content = f.read()

    lines = content.split('\n')
    fixed_lines = []

    for line in lines:
        # Fix the wrong indentation after 'with get_cached_session() as session:'.
        # NOTE(review): assumes affected lines are indented to column 15 and must
        # be moved to column 8 (the slice drops 15 chars, then 8 spaces are
        # prepended) — the exact space counts should be confirmed against the
        # target file before rerunning this script.
        if line.startswith(' ') and not line.strip().startswith('#'):
            # Drop 7 leading spaces (from 15 down to 8).
            fixed_lines.append(' ' + line[15:])
        else:
            fixed_lines.append(line)

    with open(file_path, 'w') as f:
        f.write('\n'.join(fixed_lines))

    print('✅ Einrückung behoben')
|
||||
|
||||
# Script entry point: run the one-off indentation fix directly.
if __name__ == "__main__":
    fix_indentation()
|
62
backend/utils/fix_session_usage.py
Normal file
62
backend/utils/fix_session_usage.py
Normal file
@@ -0,0 +1,62 @@
|
||||
#!/usr/bin/env python3.11
|
||||
"""
|
||||
Skript zur automatischen Behebung von get_cached_session() Aufrufen
|
||||
Konvertiert direkte Session-Aufrufe zu Context Manager Pattern.
|
||||
|
||||
Autor: MYP Team
|
||||
Datum: 2025-06-09
|
||||
"""
|
||||
|
||||
import re
|
||||
import os
|
||||
|
||||
def fix_session_usage(file_path):
    """Rewrite direct get_cached_session() calls to the context-manager pattern.

    Applies three regex transformations to the file in place:
    assignment -> ``with`` statement, removal of explicit ``session.close()``,
    and re-indentation of the first statement after the new ``with`` block.

    Args:
        file_path: Path of the Python file to rewrite.

    Returns:
        bool: True if the file was modified, False if no change was needed.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        content = f.read()

    # (pattern, replacement) pairs; the replacement may be a string or a
    # callable — re.sub accepts both uniformly.
    patterns = [
        # session = get_cached_session()  ->  with get_cached_session() as session:
        (r'(\s+)session = get_cached_session\(\)', r'\1with get_cached_session() as session:'),

        # Remove explicit session.close() (handled by the context manager).
        (r'\s+session\.close\(\)\s*\n', '\n'),

        # Indent the first statement after the with-statement one level deeper.
        (r'(with get_cached_session\(\) as session:\s*\n)(\s+)([^\s])',
         lambda m: m.group(1) + m.group(2) + '    ' + m.group(3))
    ]

    original_content = content

    for pattern, replacement in patterns:
        # The previous version branched on callable(replacement) with two
        # identical re.sub calls; re.sub handles both forms, so one call
        # suffices.
        content = re.sub(pattern, replacement, content, flags=re.MULTILINE)

    # Only write back when something actually changed.
    if content != original_content:
        with open(file_path, 'w', encoding='utf-8') as f:
            f.write(content)
        print(f"✅ {file_path} wurde aktualisiert")
        return True
    else:
        print(f"ℹ️ {file_path} benötigt keine Änderungen")
        return False
|
||||
|
||||
def main():
    """Hauptfunktion"""
    # Resolve the backend root relative to this utility module's location.
    backend_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    target = os.path.join(backend_root, 'blueprints', 'user_management.py')

    # Guard clause: bail out early when the target file is missing.
    if not os.path.exists(target):
        print(f"❌ Datei nicht gefunden: {target}")
        return

    print(f"Bearbeite {target}...")
    fix_session_usage(target)
|
||||
|
||||
# Script entry point: rewrite session usage in user_management.py.
if __name__ == "__main__":
    main()
|
83
backend/utils/migrate_user_settings.py
Normal file
83
backend/utils/migrate_user_settings.py
Normal file
@@ -0,0 +1,83 @@
|
||||
#!/usr/bin/env python3.11
|
||||
"""
|
||||
Migrations-Skript für Benutzereinstellungen
|
||||
Fügt neue Spalten zur users-Tabelle hinzu für erweiterte Benutzereinstellungen.
|
||||
|
||||
Autor: MYP Team
|
||||
Datum: 2025-06-09
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
from sqlalchemy import text, inspect
|
||||
from models import get_db_session, engine
|
||||
from utils.logging_config import get_logger
|
||||
|
||||
logger = get_logger("migration")
|
||||
|
||||
def check_column_exists(table_name: str, column_name: str) -> bool:
    """Return True if *column_name* exists on *table_name*, else False.

    Any inspection failure is logged and treated as "column not found".
    """
    try:
        existing = {col['name'] for col in inspect(engine).get_columns(table_name)}
        return column_name in existing
    except Exception as e:
        logger.error(f"Fehler beim Prüfen der Spalte {column_name}: {e}")
        return False
|
||||
|
||||
def add_user_settings_columns():
    """Add the new user-settings columns to the ``users`` table.

    Each missing column is added via its own ALTER TABLE and committed
    individually, so a partially completed run leaves the already-added
    columns in place. On the first failure the session is rolled back and
    the exception re-raised.
    """
    session = get_db_session()

    # New columns as (name, SQLite column definition including default).
    new_columns = [
        ("theme_preference", "VARCHAR(20) DEFAULT 'auto'"),
        ("language_preference", "VARCHAR(10) DEFAULT 'de'"),
        ("email_notifications", "BOOLEAN DEFAULT 1"),
        ("browser_notifications", "BOOLEAN DEFAULT 1"),
        ("dashboard_layout", "VARCHAR(20) DEFAULT 'default'"),
        ("compact_mode", "BOOLEAN DEFAULT 0"),
        ("show_completed_jobs", "BOOLEAN DEFAULT 1"),
        ("auto_refresh_interval", "INTEGER DEFAULT 30"),
        ("auto_logout_timeout", "INTEGER DEFAULT 0")
    ]

    try:
        for column_name, column_definition in new_columns:
            if not check_column_exists('users', column_name):
                logger.info(f"Füge Spalte {column_name} zur users-Tabelle hinzu...")

                # SQLite-compatible ALTER TABLE syntax (one column per statement;
                # SQLite cannot add several columns in a single ALTER).
                sql = f"ALTER TABLE users ADD COLUMN {column_name} {column_definition}"
                session.execute(text(sql))
                session.commit()

                logger.info(f"Spalte {column_name} erfolgreich hinzugefügt")
            else:
                # Idempotent: re-running the migration skips existing columns.
                logger.info(f"Spalte {column_name} existiert bereits")

        logger.info("Migration der Benutzereinstellungen erfolgreich abgeschlossen")

    except Exception as e:
        logger.error(f"Fehler bei der Migration: {e}")
        session.rollback()
        raise e
    finally:
        session.close()
|
||||
|
||||
def main():
    """Hauptfunktion für die Migration"""
    try:
        logger.info("Starte Migration der Benutzereinstellungen...")
        add_user_settings_columns()
        logger.info("Migration erfolgreich abgeschlossen")
    except Exception as e:
        # Any failure is logged and signalled to the caller via the return value.
        logger.error(f"Migration fehlgeschlagen: {e}")
        return False
    return True
|
||||
|
||||
# Script entry point: run the migration and signal success via the exit code.
if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)
|
197
backend/utils/performance_tracker.py
Normal file
197
backend/utils/performance_tracker.py
Normal file
@@ -0,0 +1,197 @@
|
||||
"""
|
||||
Performance Tracker Utility
|
||||
Messung der Ausführungszeit von Funktionen für Performance-Monitoring
|
||||
"""
|
||||
|
||||
import time
|
||||
import functools
|
||||
from typing import Callable, Any, Optional
|
||||
from utils.logging_config import get_logger
|
||||
|
||||
# Standard-Logger für Performance-Tracking
|
||||
performance_logger = get_logger("performance")
|
||||
|
||||
def measure_execution_time(logger: Optional[Any] = None, task_name: str = "Task",
                           log_level: str = "INFO", threshold_ms: float = 100.0) -> Callable:
    """
    Decorator that measures the wall-clock execution time of a function.

    Args:
        logger: Logger instance (defaults to the module-level performance_logger)
        task_name: Task name used in the log message
        log_level: Level for the timing message (DEBUG, INFO, WARNING, ERROR);
            any other value falls back to INFO
        threshold_ms: Only log timings at or above this threshold (milliseconds)

    Returns:
        The decorator function.
    """
    # Levels the timing message may be dispatched to; everything else logs at INFO,
    # matching the original if/elif chain's default branch.
    _dispatchable = ("DEBUG", "INFO", "WARNING", "ERROR")

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Any:
            log = logger if logger else performance_logger
            start_time = time.perf_counter()

            try:
                result = func(*args, **kwargs)
            except Exception as e:
                # Measure and report even on failure, then re-raise unchanged.
                execution_time_ms = (time.perf_counter() - start_time) * 1000
                log.error(f"❌ {task_name} - Fehler nach {execution_time_ms:.2f}ms: {str(e)}")
                raise

            execution_time_ms = (time.perf_counter() - start_time) * 1000

            # Only log when the call is slow enough to be interesting.
            if execution_time_ms >= threshold_ms:
                # getattr dispatch replaces the verbose if/elif level chain.
                level = log_level.upper()
                log_method = getattr(log, level.lower()) if level in _dispatchable else log.info
                log_method(f"⏱️ {task_name} - Ausführungszeit: {execution_time_ms:.2f}ms")

            return result

        return wrapper
    return decorator
|
||||
|
||||
def measure_time_sync(func: Callable, task_name: str = "Function",
                      logger: Optional[Any] = None) -> tuple[Any, float]:
    """
    Measure a single synchronous call to *func*.

    Args:
        func: Zero-argument callable to execute
        task_name: Name used in the log message
        logger: Logger instance (optional; defaults to performance_logger)

    Returns:
        Tuple of (result, elapsed_milliseconds)
    """
    log = logger if logger else performance_logger
    started = time.perf_counter()

    try:
        result = func()
    except Exception as e:
        # Report the elapsed time even when the call fails, then re-raise.
        execution_time_ms = (time.perf_counter() - started) * 1000
        log.error(f"❌ {task_name} - Fehler nach {execution_time_ms:.2f}ms: {str(e)}")
        raise

    execution_time_ms = (time.perf_counter() - started) * 1000
    log.info(f"⏱️ {task_name} - Ausführungszeit: {execution_time_ms:.2f}ms")
    return result, execution_time_ms
|
||||
|
||||
class PerformanceTracker:
    """
    Context-manager-capable helper for tracking execution time with named checkpoints.
    """

    def __init__(self, name: str, logger: Optional[Any] = None):
        self.name = name
        self.logger = logger if logger else performance_logger
        self.start_time = None   # set by start()
        self.end_time = None     # set by stop()
        self.checkpoints = []    # dicts with 'name', 'time', 'elapsed_ms'

    def start(self):
        """Begin timing and discard any previous checkpoints."""
        self.start_time = time.perf_counter()
        self.checkpoints = []
        self.logger.debug(f"📊 Performance-Tracking gestartet für: {self.name}")

    def checkpoint(self, name: str):
        """Record a named checkpoint relative to start(); warns if not started."""
        if self.start_time is None:
            self.logger.warning(f"⚠️ Checkpoint '{name}' ohne gestartete Messung")
            return

        now = time.perf_counter()
        elapsed_ms = (now - self.start_time) * 1000
        self.checkpoints.append({
            'name': name,
            'time': now,
            'elapsed_ms': elapsed_ms
        })
        self.logger.debug(f"📍 Checkpoint '{name}': {elapsed_ms:.2f}ms")

    def stop(self) -> float:
        """Stop timing, log a summary, and return the total elapsed milliseconds."""
        if self.start_time is None:
            self.logger.warning(f"⚠️ Performance-Tracking wurde nicht gestartet für: {self.name}")
            return 0.0

        self.end_time = time.perf_counter()
        total_time_ms = (self.end_time - self.start_time) * 1000

        summary = f"🏁 {self.name} - Gesamtzeit: {total_time_ms:.2f}ms"
        if self.checkpoints:
            summary += f" ({len(self.checkpoints)} Checkpoints)"
        self.logger.info(summary)

        # Emit per-checkpoint deltas only when DEBUG output is enabled.
        if self.checkpoints and self.logger.isEnabledFor(10):  # logging.DEBUG == 10
            previous_ms = 0.0
            for cp in self.checkpoints:
                duration = cp['elapsed_ms'] - previous_ms
                previous_ms = cp['elapsed_ms']
                self.logger.debug(f"  📍 {cp['name']}: +{duration:.2f}ms (total: {cp['elapsed_ms']:.2f}ms)")

        return total_time_ms

    def __enter__(self):
        """Context Manager - Start"""
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context Manager - Stop"""
        self.stop()
|
||||
|
||||
# Example usage / manual smoke test (runs only when executed directly):
if __name__ == "__main__":
    # Decorator usage: the 0.1s sleep exceeds the 0.1ms threshold, so it logs.
    @measure_execution_time(task_name="Test-Funktion", threshold_ms=0.1)
    def test_function():
        time.sleep(0.1)
        return "Fertig"

    # Context-manager usage with two named checkpoints.
    with PerformanceTracker("Test-Performance") as tracker:
        time.sleep(0.05)
        tracker.checkpoint("Mitte")
        time.sleep(0.05)
        tracker.checkpoint("Ende")

    # One-off synchronous measurement of the decorated function.
    result, exec_time = measure_time_sync(test_function, "Direkte Messung")
    print(f"Ergebnis: {result}, Zeit: {exec_time:.2f}ms")
|
Reference in New Issue
Block a user