feat: Implement frontend production deployment and enhance admin dashboard functionality
@@ -36,7 +36,7 @@ def add_hardcoded_printers():
         new_printer = Printer(
             name=printer_name,
             model="P115",  # default model
-            location="Labor",  # default location
+            location="Werk 040 - Berlin - TBA",  # updated location
             ip_address=config["ip"],
             mac_address=f"98:25:4A:E1:{printer_name[-1]}0:0{printer_name[-1]}",  # dummy MAC
             plug_ip=config["ip"],
@@ -40,7 +40,7 @@ def clean_and_add_printers():
         new_printer = Printer(
             name=printer_name,
             model="P115",  # default model
-            location="Labor",  # default location
+            location="Werk 040 - Berlin - TBA",  # updated location
             ip_address=config["ip"],
             mac_address=f"98:25:4A:E1:{printer_name[-1]}0:0{printer_name[-1]}",  # dummy MAC
             plug_ip=config["ip"],
backend/app/utils/database_migration.py (new file, 252 lines)
@@ -0,0 +1,252 @@
#!/usr/bin/env python3
"""
Database migration utility for the MYP platform.
Checks and updates the database schema automatically.
"""

import sqlite3
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime

from config.settings import DATABASE_PATH
from models import init_db

logger = logging.getLogger(__name__)

def get_table_columns(table_name: str) -> List[Dict[str, Any]]:
    """
    Returns the columns of a table.

    Args:
        table_name: Name of the table

    Returns:
        List[Dict]: List of columns with their properties
    """
    try:
        conn = sqlite3.connect(DATABASE_PATH)
        cursor = conn.cursor()
        cursor.execute(f'PRAGMA table_info({table_name})')
        columns = cursor.fetchall()
        conn.close()

        return [
            {
                'name': col[1],
                'type': col[2],
                'not_null': bool(col[3]),
                'default': col[4],
                'primary_key': bool(col[5])
            }
            for col in columns
        ]
    except Exception as e:
        logger.error(f"Error fetching columns for table {table_name}: {e}")
        return []

def table_exists(table_name: str) -> bool:
    """
    Checks whether a table exists.

    Args:
        table_name: Name of the table

    Returns:
        bool: True if the table exists
    """
    try:
        conn = sqlite3.connect(DATABASE_PATH)
        cursor = conn.cursor()
        cursor.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name=?
        """, (table_name,))
        result = cursor.fetchone()
        conn.close()
        return result is not None
    except Exception as e:
        logger.error(f"Error checking table {table_name}: {e}")
        return False

def column_exists(table_name: str, column_name: str) -> bool:
    """
    Checks whether a column exists in a table.

    Args:
        table_name: Name of the table
        column_name: Name of the column

    Returns:
        bool: True if the column exists
    """
    columns = get_table_columns(table_name)
    return any(col['name'] == column_name for col in columns)

def add_column_if_missing(table_name: str, column_name: str, column_type: str, default_value: Optional[str] = None) -> bool:
    """
    Adds a column if it does not exist yet.

    Args:
        table_name: Name of the table
        column_name: Name of the column
        column_type: Data type of the column
        default_value: Optional default value

    Returns:
        bool: True on success
    """
    if column_exists(table_name, column_name):
        logger.info(f"Column {column_name} already exists in table {table_name}")
        return True

    try:
        conn = sqlite3.connect(DATABASE_PATH)
        cursor = conn.cursor()

        sql = f"ALTER TABLE {table_name} ADD COLUMN {column_name} {column_type}"
        if default_value is not None:
            sql += f" DEFAULT {default_value}"

        cursor.execute(sql)
        conn.commit()
        conn.close()

        logger.info(f"Column {column_name} successfully added to table {table_name}")
        return True
    except Exception as e:
        logger.error(f"Error adding column {column_name} to table {table_name}: {e}")
        return False

def migrate_database() -> bool:
    """
    Runs all required database migrations.

    Returns:
        bool: True on success
    """
    logger.info("Starting database migration...")

    try:
        # Check that the basic tables exist
        required_tables = ['users', 'printers', 'jobs', 'stats']
        missing_tables = [table for table in required_tables if not table_exists(table)]

        if missing_tables:
            logger.warning(f"Missing tables found: {missing_tables}")
            logger.info("Recreating all tables...")
            init_db()
            logger.info("Tables created successfully")
            return True

        # Check specific columns that may be missing
        migrations = [
            # printers table
            ('printers', 'last_checked', 'DATETIME', 'NULL'),
            ('printers', 'active', 'BOOLEAN', '1'),
            ('printers', 'created_at', 'DATETIME', 'CURRENT_TIMESTAMP'),

            # jobs table
            ('jobs', 'duration_minutes', 'INTEGER', '60'),
            ('jobs', 'actual_end_time', 'DATETIME', 'NULL'),
            ('jobs', 'owner_id', 'INTEGER', 'NULL'),
            ('jobs', 'file_path', 'VARCHAR(500)', 'NULL'),

            # users table
            ('users', 'username', 'VARCHAR(100)', 'NULL'),
            ('users', 'active', 'BOOLEAN', '1'),
            ('users', 'created_at', 'DATETIME', 'CURRENT_TIMESTAMP'),
        ]

        success = True
        for table_name, column_name, column_type, default_value in migrations:
            if not add_column_if_missing(table_name, column_name, column_type, default_value):
                success = False

        if success:
            logger.info("Database migration completed successfully")
        else:
            logger.warning("Database migration completed with errors")

        return success

    except Exception as e:
        logger.error(f"Error during database migration: {e}")
        return False

def check_database_integrity() -> bool:
    """
    Checks the integrity of the database.

    Returns:
        bool: True if the database is intact
    """
    try:
        conn = sqlite3.connect(DATABASE_PATH)
        cursor = conn.cursor()
        cursor.execute('PRAGMA integrity_check')
        result = cursor.fetchone()
        conn.close()

        if result and result[0] == 'ok':
            logger.info("Database integrity: OK")
            return True
        else:
            logger.error(f"Database integrity: ERROR - {result}")
            return False

    except Exception as e:
        logger.error(f"Error during integrity check: {e}")
        return False

def backup_database(backup_path: Optional[str] = None) -> bool:
    """
    Creates a backup of the database.

    Args:
        backup_path: Optional path for the backup

    Returns:
        bool: True on success
    """
    if not backup_path:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        backup_path = f"database/myp_backup_{timestamp}.db"

    try:
        import shutil
        shutil.copy2(DATABASE_PATH, backup_path)
        logger.info(f"Database backup created: {backup_path}")
        return True
    except Exception as e:
        logger.error(f"Error creating backup: {e}")
        return False

if __name__ == "__main__":
    # Configure logging
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    print("=== MYP Platform - Database Migration ===")

    # Create a backup
    if backup_database():
        print("✅ Backup created")
    else:
        print("⚠️ Backup creation failed")

    # Check integrity
    if check_database_integrity():
        print("✅ Database integrity OK")
    else:
        print("❌ Database integrity ERROR")

    # Run migration
    if migrate_database():
        print("✅ Migration successful")
    else:
        print("❌ Migration failed")

    print("\nMigration complete!")
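Besides the CLI entry point above, a minimal sketch of how this utility could be hooked into application startup; the `run_startup_migrations` wrapper and its call site are assumptions for illustration, not part of this commit:

from utils.database_migration import backup_database, migrate_database

def run_startup_migrations() -> None:
    # Back up first so a failed migration can still be recovered by hand.
    if not backup_database():
        raise RuntimeError("Database backup failed; aborting migration")
    if not migrate_database():
        raise RuntimeError("Database migration failed")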
backend/app/utils/database_utils.py (new file, 425 lines)
@@ -0,0 +1,425 @@
"""
Extended database utilities for backup, monitoring, and maintenance.
"""

import os
import shutil
import sqlite3
import threading
import time
import gzip
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from pathlib import Path

from sqlalchemy import text
from sqlalchemy.engine import Engine

from config.settings import DATABASE_PATH
from utils.logging_config import get_logger
from models import get_cached_session, create_optimized_engine

logger = get_logger("database")

# ===== BACKUP SYSTEM =====

class DatabaseBackupManager:
    """
    Manages automatic database backups with rotation.
    """

    def __init__(self, backup_dir: Optional[str] = None):
        self.backup_dir = backup_dir or os.path.join(os.path.dirname(DATABASE_PATH), "backups")
        self.ensure_backup_directory()
        # Re-entrant lock: restore_backup calls create_backup while holding it
        self._backup_lock = threading.RLock()

    def ensure_backup_directory(self):
        """Makes sure the backup directory exists."""
        Path(self.backup_dir).mkdir(parents=True, exist_ok=True)

    def create_backup(self, compress: bool = True) -> str:
        """
        Creates a backup of the database.

        Args:
            compress: Whether the backup should be compressed

        Returns:
            str: Path to the created backup
        """
        with self._backup_lock:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            backup_filename = f"myp_backup_{timestamp}.db"

            if compress:
                backup_filename += ".gz"

            backup_path = os.path.join(self.backup_dir, backup_filename)

            try:
                if compress:
                    # Create a compressed backup
                    with open(DATABASE_PATH, 'rb') as f_in:
                        with gzip.open(backup_path, 'wb') as f_out:
                            shutil.copyfileobj(f_in, f_out)
                else:
                    # Plain copy
                    shutil.copy2(DATABASE_PATH, backup_path)

                logger.info(f"Database backup created: {backup_path}")
                return backup_path

            except Exception as e:
                logger.error(f"Error creating backup: {str(e)}")
                raise

    def restore_backup(self, backup_path: str) -> bool:
        """
        Restores a backup.

        Args:
            backup_path: Path to the backup

        Returns:
            bool: True on success
        """
        with self._backup_lock:
            try:
                # First create a safety backup of the current database
                current_backup = self.create_backup()
                logger.info(f"Safety backup created: {current_backup}")

                if backup_path.endswith('.gz'):
                    # Restore a compressed backup
                    with gzip.open(backup_path, 'rb') as f_in:
                        with open(DATABASE_PATH, 'wb') as f_out:
                            shutil.copyfileobj(f_in, f_out)
                else:
                    # Plain copy
                    shutil.copy2(backup_path, DATABASE_PATH)

                logger.info(f"Database restored from backup: {backup_path}")
                return True

            except Exception as e:
                logger.error(f"Error restoring backup: {str(e)}")
                return False

    def cleanup_old_backups(self, keep_days: int = 30):
        """
        Deletes old backups.

        Args:
            keep_days: Number of days backups should be kept
        """
        cutoff_date = datetime.now() - timedelta(days=keep_days)
        deleted_count = 0

        try:
            for filename in os.listdir(self.backup_dir):
                if filename.startswith("myp_backup_"):
                    file_path = os.path.join(self.backup_dir, filename)
                    file_time = datetime.fromtimestamp(os.path.getctime(file_path))

                    if file_time < cutoff_date:
                        os.remove(file_path)
                        deleted_count += 1
                        logger.info(f"Old backup deleted: {filename}")

            if deleted_count > 0:
                logger.info(f"{deleted_count} old backups deleted")

        except Exception as e:
            logger.error(f"Error cleaning up old backups: {str(e)}")

    def get_backup_list(self) -> List[Dict]:
        """
        Returns a list of all available backups.

        Returns:
            List[Dict]: List with backup information
        """
        backups = []

        try:
            for filename in os.listdir(self.backup_dir):
                if filename.startswith("myp_backup_"):
                    file_path = os.path.join(self.backup_dir, filename)
                    file_stat = os.stat(file_path)

                    backups.append({
                        "filename": filename,
                        "path": file_path,
                        "size": file_stat.st_size,
                        "created": datetime.fromtimestamp(file_stat.st_ctime),
                        "compressed": filename.endswith('.gz')
                    })

            # Sort by creation date (newest first)
            backups.sort(key=lambda x: x['created'], reverse=True)

        except Exception as e:
            logger.error(f"Error fetching the backup list: {str(e)}")

        return backups


# ===== DATABASE MONITORING =====

class DatabaseMonitor:
    """
    Monitors database performance and health.
    """

    def __init__(self):
        self.engine = create_optimized_engine()

    def get_database_stats(self) -> Dict:
        """
        Collects database statistics.

        Returns:
            Dict: Database statistics
        """
        stats = {}

        try:
            with self.engine.connect() as conn:
                # Database size
                result = conn.execute(text("SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()"))
                db_size = result.fetchone()[0]
                stats['database_size_bytes'] = db_size
                stats['database_size_mb'] = round(db_size / (1024 * 1024), 2)

                # WAL file size
                wal_path = DATABASE_PATH + "-wal"
                if os.path.exists(wal_path):
                    wal_size = os.path.getsize(wal_path)
                    stats['wal_size_bytes'] = wal_size
                    stats['wal_size_mb'] = round(wal_size / (1024 * 1024), 2)
                else:
                    stats['wal_size_bytes'] = 0
                    stats['wal_size_mb'] = 0

                # Journal mode
                result = conn.execute(text("PRAGMA journal_mode"))
                stats['journal_mode'] = result.fetchone()[0]

                # Cache statistics
                result = conn.execute(text("PRAGMA cache_size"))
                stats['cache_size'] = result.fetchone()[0]

                # Synchronous mode
                result = conn.execute(text("PRAGMA synchronous"))
                stats['synchronous_mode'] = result.fetchone()[0]

                # Per-table row counts
                result = conn.execute(text("SELECT name FROM sqlite_master WHERE type='table'"))

                table_stats = {}
                for (table_name,) in result.fetchall():
                    if not table_name.startswith('sqlite_'):
                        count_result = conn.execute(text(f"SELECT COUNT(*) FROM {table_name}"))
                        table_stats[table_name] = count_result.fetchone()[0]

                stats['table_counts'] = table_stats

                # Last maintenance
                stats['last_analyze'] = self._get_last_analyze_time()
                stats['last_vacuum'] = self._get_last_vacuum_time()

        except Exception as e:
            logger.error(f"Error collecting database statistics: {str(e)}")
            stats['error'] = str(e)

        return stats

    def _get_last_analyze_time(self) -> Optional[str]:
        """Determines when ANALYZE was last run."""
        try:
            # SQLite stores no explicit timestamps for ANALYZE,
            # so we use the modification time of the statistics file.
            stat_path = DATABASE_PATH + "-stat"
            if os.path.exists(stat_path):
                return datetime.fromtimestamp(os.path.getmtime(stat_path)).isoformat()
        except Exception:
            pass
        return None

    def _get_last_vacuum_time(self) -> Optional[str]:
        """Determines when VACUUM was last run."""
        try:
            # Approximation via the database file's modification time
            return datetime.fromtimestamp(os.path.getmtime(DATABASE_PATH)).isoformat()
        except Exception:
            pass
        return None

    def check_database_health(self) -> Dict:
        """
        Performs a database health check.

        Returns:
            Dict: Health status
        """
        health = {
            "status": "healthy",
            "issues": [],
            "recommendations": []
        }

        try:
            with self.engine.connect() as conn:
                # Integrity check
                result = conn.execute(text("PRAGMA integrity_check"))
                integrity_result = result.fetchone()[0]

                if integrity_result != "ok":
                    health["status"] = "critical"
                    health["issues"].append(f"Integrity check failed: {integrity_result}")

                # Check the WAL file size
                wal_path = DATABASE_PATH + "-wal"
                if os.path.exists(wal_path):
                    wal_size_mb = os.path.getsize(wal_path) / (1024 * 1024)
                    if wal_size_mb > 100:  # over 100 MB
                        health["issues"].append(f"WAL file very large: {wal_size_mb:.1f}MB")
                        health["recommendations"].append("Run a WAL checkpoint")

                # Check free disk space
                db_dir = os.path.dirname(DATABASE_PATH)
                free_space = shutil.disk_usage(db_dir).free / (1024 * 1024 * 1024)  # GB

                if free_space < 1:  # less than 1 GB
                    health["status"] = "warning" if health["status"] == "healthy" else health["status"]
                    health["issues"].append(f"Low free disk space: {free_space:.1f}GB")
                    health["recommendations"].append("Free up disk space or delete old backups")

                # Connection pool status (if available)
                # Further checks could be added here

        except Exception as e:
            health["status"] = "error"
            health["issues"].append(f"Error during health check: {str(e)}")
            logger.error(f"Error during database health check: {str(e)}")

        return health

    def optimize_database(self) -> Dict:
        """
        Runs optimization operations on the database.

        Returns:
            Dict: Result of the optimization
        """
        result = {
            "operations": [],
            "success": True,
            "errors": []
        }

        try:
            with self.engine.connect() as conn:
                # ANALYZE for better query planning
                conn.execute(text("ANALYZE"))
                result["operations"].append("ANALYZE executed")

                # WAL checkpoint
                checkpoint_result = conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)"))
                checkpoint_info = checkpoint_result.fetchone()
                result["operations"].append(f"WAL checkpoint: {checkpoint_info}")

                # Incremental vacuum
                conn.execute(text("PRAGMA incremental_vacuum"))
                result["operations"].append("Incremental vacuum executed")

                # PRAGMA optimize
                conn.execute(text("PRAGMA optimize"))
                result["operations"].append("PRAGMA optimize executed")

                conn.commit()

        except Exception as e:
            result["success"] = False
            result["errors"].append(str(e))
            logger.error(f"Error during database optimization: {str(e)}")

        return result


# ===== AUTOMATIC MAINTENANCE =====

class DatabaseMaintenanceScheduler:
    """
    Schedules and runs automatic maintenance tasks.
    """

    def __init__(self):
        self.backup_manager = DatabaseBackupManager()
        self.monitor = DatabaseMonitor()
        self._running = False
        self._thread = None

    def start_maintenance_scheduler(self):
        """Starts the maintenance scheduler."""
        if self._running:
            return

        self._running = True
        self._thread = threading.Thread(target=self._maintenance_loop, daemon=True)
        self._thread.start()
        logger.info("Database maintenance scheduler started")

    def stop_maintenance_scheduler(self):
        """Stops the maintenance scheduler."""
        self._running = False
        if self._thread:
            self._thread.join(timeout=5)
        logger.info("Database maintenance scheduler stopped")

    def _maintenance_loop(self):
        """Main loop for maintenance tasks."""
        last_backup = datetime.now()
        last_cleanup = datetime.now()
        last_optimization = datetime.now()

        while self._running:
            try:
                now = datetime.now()

                # Daily backup (every 24 hours)
                if (now - last_backup).total_seconds() > 86400:  # 24 hours
                    self.backup_manager.create_backup()
                    last_backup = now

                # Weekly cleanup of old backups (every 7 days)
                if (now - last_cleanup).total_seconds() > 604800:  # 7 days
                    self.backup_manager.cleanup_old_backups()
                    last_cleanup = now

                # Daily optimization (every 24 hours)
                if (now - last_optimization).total_seconds() > 86400:  # 24 hours
                    self.monitor.optimize_database()
                    last_optimization = now

                # Wait one hour until the next check
                time.sleep(3600)

            except Exception as e:
                logger.error(f"Error in maintenance scheduler: {str(e)}")
                time.sleep(300)  # wait 5 minutes after errors


# ===== GLOBAL INSTANCES =====

# Global instances for easy access
backup_manager = DatabaseBackupManager()
database_monitor = DatabaseMonitor()
maintenance_scheduler = DatabaseMaintenanceScheduler()

# Start automatically on import
maintenance_scheduler.start_maintenance_scheduler()
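Since the commit also touches the admin dashboard, a minimal sketch of how the global `database_monitor` and `backup_manager` instances might be surfaced there, assuming the app is Flask-based; the blueprint name, route paths, and the absence of an admin-auth guard are assumptions for illustration, not part of this commit:

from flask import Blueprint, jsonify

from utils.database_utils import backup_manager, database_monitor

# Hypothetical admin blueprint (illustration only)
admin_db = Blueprint("admin_db", __name__, url_prefix="/admin/database")

@admin_db.route("/stats")
def database_stats():
    # Surfaces size, WAL state, journal mode, and per-table row counts
    return jsonify(database_monitor.get_database_stats())

@admin_db.route("/backups")
def list_backups():
    backups = backup_manager.get_backup_list()
    # datetime objects must be serialized before returning JSON
    for b in backups:
        b["created"] = b["created"].isoformat()
    return jsonify(backups)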
@@ -41,7 +41,7 @@ def setup_drucker():
         new_printer = Printer(
             name=printer_name,
             model="P115",  # default model
-            location="Labor",  # default location
+            location="Werk 040 - Berlin - TBA",  # updated location
             ip_address=config["ip"],
             mac_address=f"98:25:4A:E1:{printer_name[-1]}0:0{printer_name[-1]}",  # dummy MAC
             plug_ip=config["ip"],
backend/app/utils/update_printer_locations.py (new file, 60 lines)
@@ -0,0 +1,60 @@
#!/usr/bin/env python3.11
"""
Script to update the printer locations in the database.
Sets every printer's location (previously "Labor") to "Werk 040 - Berlin - TBA".
"""

import sys
import os
sys.path.append('.')

from database.db_manager import DatabaseManager
from models import Printer
from datetime import datetime

def update_printer_locations():
    """Updates all printer locations to 'Werk 040 - Berlin - TBA'."""

    print("=== Updating printer locations ===")

    try:
        db = DatabaseManager()
        session = db.get_session()

        # Fetch all printers
        all_printers = session.query(Printer).all()
        print(f"Printers found: {len(all_printers)}")

        if not all_printers:
            print("No printers found in the database.")
            session.close()
            return

        # New location label
        new_location = "Werk 040 - Berlin - TBA"
        updated_count = 0

        # Walk over all printers and update the location
        for printer in all_printers:
            old_location = printer.location
            printer.location = new_location

            print(f"✅ {printer.name}: '{old_location}' → '{new_location}'")
            updated_count += 1

        # Persist the changes
        session.commit()
        session.close()

        print(f"\n✅ {updated_count} printer locations updated successfully")
        print(f"New location: {new_location}")
        print("Location update complete!")

    except Exception as e:
        print(f"❌ Error during location update: {e}")
        if 'session' in locals():
            session.rollback()
            session.close()

if __name__ == "__main__":
    update_printer_locations()
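A quick way to verify the result after running the script, using the same `DatabaseManager` session API as above (a reviewer sketch, not part of the commit):

from database.db_manager import DatabaseManager
from models import Printer

session = DatabaseManager().get_session()
# Every printer should now carry the new site label
stale = session.query(Printer).filter(Printer.location != "Werk 040 - Berlin - TBA").count()
print(f"Printers still on an old location: {stale}")
session.close()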