"Add database backup schedule for 2025-05-29 18:58:34"
@@ -4361,6 +4361,87 @@ def export_admin_logs():
                "message": f"Error exporting: {str(e)}"
            }), 500


@app.route('/api/logs', methods=['GET'])
@login_required
def get_system_logs():
    """API endpoint that loads the system logs for the dashboard."""
    if not current_user.is_admin:
        return jsonify({"success": False, "error": "Permission denied"}), 403

    try:
        import os
        from datetime import datetime

        log_level = request.args.get('log_level', 'all')
        log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')

        # Collect log entries
        app_logs = []
        for category in ['app', 'auth', 'jobs', 'printers', 'scheduler', 'errors']:
            log_file = os.path.join(log_dir, category, f'{category}.log')
            if os.path.exists(log_file):
                try:
                    with open(log_file, 'r', encoding='utf-8') as f:
                        lines = f.readlines()
                        # Only the last 100 lines per file
                        for line in lines[-100:]:
                            line = line.strip()
                            if not line:
                                continue

                            # Apply the log-level filter
                            if log_level != 'all':
                                if log_level.upper() not in line:
                                    continue

                            # Parse the log entry
                            parts = line.split(' - ')
                            if len(parts) >= 3:
                                timestamp = parts[0]
                                level = parts[1]
                                message = ' - '.join(parts[2:])
                            else:
                                timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                                level = 'INFO'
                                message = line

                            app_logs.append({
                                'timestamp': timestamp,
                                'level': level,
                                'category': category,
                                'module': category,
                                'message': message,
                                'source': category
                            })
                except Exception as file_error:
                    app_logger.warning(f"Error reading log file {log_file}: {str(file_error)}")
                    continue

        # Sort by timestamp (newest first)
        try:
            logs = sorted(app_logs, key=lambda x: x['timestamp'] if x['timestamp'] else '', reverse=True)[:100]
        except Exception:
            # If sorting fails, just take the last 100 entries
            logs = app_logs[-100:]

        app_logger.info(f"Logs loaded successfully: {len(logs)} entries")

        return jsonify({
            "success": True,
            "logs": logs,
            "count": len(logs),
            "message": f"{len(logs)} log entries loaded"
        })

    except Exception as e:
        app_logger.error(f"Error loading logs: {str(e)}")
        return jsonify({
            "success": False,
            "error": "Error loading logs",
            "message": str(e),
            "logs": []
        }), 500


# ===== END OF MISSING ADMIN API ENDPOINTS =====

# ===== NOTIFICATION API ENDPOINTS =====
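For reference, a minimal sketch of how a client might exercise the new endpoint; the base URL and the login step are assumptions, not part of this commit:

import requests

# Hypothetical deployment URL; the endpoint requires an authenticated admin session.
BASE_URL = "http://localhost:5000"

session = requests.Session()
# ... log in here so that current_user.is_admin holds for this session ...

resp = session.get(f"{BASE_URL}/api/logs", params={"log_level": "error"})
data = resp.json()
if data.get("success"):
    # Each entry carries timestamp, level, category, module, message and source
    for entry in data["logs"][:5]:
        print(entry["timestamp"], entry["level"], entry["message"])
else:
    print("Request failed:", data.get("error"))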
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN backend/app/database/myp.db.backup_20250529_185834 (Normal file)
Binary file not shown.
@@ -131,22 +131,30 @@ def schedule_maintenance():
    """
    def maintenance_worker():
        time.sleep(300)  # wait 5 minutes before the first run
        while True:
            try:
                with get_maintenance_session() as session:
                    # Run a WAL checkpoint (aggressive strategy)
                    checkpoint_result = session.execute(text("PRAGMA wal_checkpoint(TRUNCATE)")).fetchone()

                    # Only log when data was actually transferred
                    if checkpoint_result and checkpoint_result[1] > 0:
                        logger.info(f"WAL checkpoint: {checkpoint_result[1]} pages transferred, {checkpoint_result[2]} pages reset")

                    # Refresh statistics
                    session.execute(text("ANALYZE"))

                    # Incremental vacuum
                    session.execute(text("PRAGMA incremental_vacuum"))

                    session.commit()
                    logger.info("Database maintenance completed successfully")

            except Exception as e:
                logger.error(f"Error during database maintenance: {str(e)}")

            # Wait 30 minutes until the next maintenance run
            time.sleep(1800)

    # Run the maintenance in a separate daemon thread
    maintenance_thread = threading.Thread(target=maintenance_worker, daemon=True)
    maintenance_thread.start()
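The worker's logging relies on the shape of the checkpoint result: PRAGMA wal_checkpoint(TRUNCATE) returns a single (busy, log_pages, checkpointed_pages) row, which is what checkpoint_result[1] and [2] above index into. A minimal standalone sketch with plain sqlite3 (the file name is an assumption):

import sqlite3

conn = sqlite3.connect("example.db")  # hypothetical database file
conn.execute("PRAGMA journal_mode=WAL")

busy, log_pages, checkpointed = conn.execute("PRAGMA wal_checkpoint(TRUNCATE)").fetchone()
if busy:
    print("Checkpoint blocked; another connection holds a lock")
else:
    print(f"{checkpointed} of {log_pages} WAL frames written back to the main database file")
conn.close()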
@@ -1,14 +1,17 @@
#!/usr/bin/env python3
"""
Optimized database schema migration script
with WAL checkpointing and proper resource management.
Detects and adds all missing columns based on the models.
"""

import os
import sys
import sqlite3
import signal
import time
from datetime import datetime
import logging
from contextlib import contextmanager

# Add the app path - FIXED
app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -32,408 +35,256 @@ except ImportError:
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger("schema_migration")

# Global state for a clean shutdown
_migration_running = False
_current_connection = None

def get_table_columns(cursor, table_name):
    """Returns all columns of a table as {column_name: column_type}."""
    cursor.execute(f"PRAGMA table_info({table_name})")
    return {row[1]: row[2] for row in cursor.fetchall()}

def get_table_exists(cursor, table_name):
    """Checks whether a table exists."""
    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", (table_name,))
    return cursor.fetchone() is not None

def signal_handler(signum, frame):
    """Signal handler for an orderly shutdown."""
    global _migration_running, _current_connection
    print(f"\n🛑 Signal {signum} received - shutting the migration down cleanly...")
    _migration_running = False

    if _current_connection:
        try:
            print("🔄 Running WAL checkpoint...")
            _current_connection.execute("PRAGMA wal_checkpoint(TRUNCATE)")
            _current_connection.commit()
            _current_connection.close()
            print("✅ Database closed properly")
        except Exception as e:
            print(f"⚠️ Error while closing: {e}")

    print("🏁 Migration finished")
    sys.exit(0)

# Register the signal handlers
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
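The two PRAGMA-based helpers above are the whole schema-introspection layer the migrate_* functions build on. A quick self-contained sketch against a throwaway in-memory database, assuming the helpers are in scope:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, email VARCHAR(120))")

print(get_table_exists(cur, "users"))     # True
print(get_table_exists(cur, "printers"))  # False
print(get_table_columns(cur, "users"))    # {'id': 'INTEGER', 'email': 'VARCHAR(120)'}

# A missing column is detected with a simple membership test:
if "username" not in get_table_columns(cur, "users"):
    cur.execute("ALTER TABLE users ADD COLUMN username VARCHAR(100)")
conn.close()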
@contextmanager
def get_database_connection(timeout=30):
    """Context manager for a safe database connection with WAL optimization."""
    global _current_connection
    conn = None

    try:
        # Connection with optimized settings
        conn = sqlite3.connect(
            DATABASE_PATH,
            timeout=timeout,
            isolation_level=None  # sqlite3 autocommit mode; transactions are managed manually
        )
        _current_connection = conn

        # WAL mode and optimizations
        conn.execute("PRAGMA journal_mode=WAL")
        conn.execute("PRAGMA synchronous=NORMAL")  # better performance with WAL
        conn.execute("PRAGMA foreign_keys=ON")
        conn.execute("PRAGMA busy_timeout=30000")  # 30-second timeout
        conn.execute("PRAGMA wal_autocheckpoint=1000")  # automatic checkpoint every 1000 pages

        logger.info("Database connection established with WAL optimizations")
        yield conn

    except Exception as e:
        logger.error(f"Database connection error: {e}")
        if conn:
            conn.rollback()
        raise
    finally:
        if conn:
            try:
                # Critical: run a WAL checkpoint before closing
                logger.info("Running final WAL checkpoint...")
                conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
                conn.commit()

                # Check the WAL status
                wal_info = conn.execute("PRAGMA wal_checkpoint").fetchone()
                if wal_info:
                    logger.info(f"WAL checkpoint: {wal_info[0]} pages transferred, {wal_info[1]} pages reset")

                conn.close()
                logger.info("Database connection closed properly")

            except Exception as e:
                logger.error(f"Error closing the database connection: {e}")
            finally:
                _current_connection = None

def force_wal_checkpoint():
    """Forces a WAL checkpoint so that all data is written into the main database file."""
    try:
        with get_database_connection(timeout=10) as conn:
            # Aggressive WAL checkpoint strategies, tried in order
            strategies = [
                ("TRUNCATE", "Full checkpoint with WAL truncate"),
                ("RESTART", "Checkpoint with WAL restart"),
                ("FULL", "Full checkpoint")
            ]

            for strategy, description in strategies:
                try:
                    result = conn.execute(f"PRAGMA wal_checkpoint({strategy})").fetchone()
                    if result and result[0] == 0:  # success
                        logger.info(f"✅ {description} succeeded: {result}")
                        return True
                    else:
                        logger.warning(f"⚠️ {description} only partially succeeded: {result}")
                except Exception as e:
                    logger.warning(f"⚠️ {description} failed: {e}")
                    continue

            # Fallback: VACUUM for a complete reorganization
            logger.info("Running VACUUM as a fallback...")
            conn.execute("VACUUM")
            logger.info("✅ VACUUM succeeded")
            return True

    except Exception as e:
        logger.error(f"Critical error during WAL checkpoint: {e}")
        return False

def migrate_users_table(cursor):
    """Migrates the users table, adding any missing columns."""
    logger.info("Migrating users table...")

    if not get_table_exists(cursor, 'users'):
        logger.warning("users table does not exist - it will be created by init_db")
        return False

    existing_columns = get_table_columns(cursor, 'users')

    # Definition of the expected columns
    required_columns = {
        'id': 'INTEGER PRIMARY KEY',
        'email': 'VARCHAR(120) UNIQUE NOT NULL',
        'username': 'VARCHAR(100) UNIQUE NOT NULL',
        'password_hash': 'VARCHAR(128) NOT NULL',
        'name': 'VARCHAR(100) NOT NULL',
        'role': 'VARCHAR(20) DEFAULT "user"',
        'active': 'BOOLEAN DEFAULT 1',
        'created_at': 'DATETIME DEFAULT CURRENT_TIMESTAMP',
        'last_login': 'DATETIME',
        'updated_at': 'DATETIME DEFAULT CURRENT_TIMESTAMP',
        'settings': 'TEXT',
        'department': 'VARCHAR(100)',
        'position': 'VARCHAR(100)',
        'phone': 'VARCHAR(50)',
        'bio': 'TEXT'
    }

    migrations_performed = []

    for column_name, column_def in required_columns.items():
        if column_name not in existing_columns:
            try:
                # Special handling for updated_at with a trigger
                if column_name == 'updated_at':
                    cursor.execute(f"ALTER TABLE users ADD COLUMN {column_name} DATETIME DEFAULT CURRENT_TIMESTAMP")
                    # Trigger for automatic updates
                    cursor.execute("""
                        CREATE TRIGGER IF NOT EXISTS update_users_updated_at
                        AFTER UPDATE ON users
                        BEGIN
                            UPDATE users SET updated_at = CURRENT_TIMESTAMP WHERE id = NEW.id;
                        END
                    """)
                    logger.info(f"Column '{column_name}' added with auto-update trigger")
                else:
                    cursor.execute(f"ALTER TABLE users ADD COLUMN {column_name} {column_def}")
                    logger.info(f"Column '{column_name}' added")

                migrations_performed.append(column_name)
            except Exception as e:
                logger.error(f"Error adding column '{column_name}': {str(e)}")

    return len(migrations_performed) > 0
def migrate_printers_table(cursor):
    """Migrates the printers table, adding any missing columns."""
    logger.info("Migrating printers table...")

    if not get_table_exists(cursor, 'printers'):
        logger.warning("printers table does not exist - it will be created by init_db")
        return False

    existing_columns = get_table_columns(cursor, 'printers')

    required_columns = {
        'id': 'INTEGER PRIMARY KEY',
        'name': 'VARCHAR(100) NOT NULL',
        'model': 'VARCHAR(100)',
        'location': 'VARCHAR(100)',
        'ip_address': 'VARCHAR(50)',
        'mac_address': 'VARCHAR(50) NOT NULL UNIQUE',
        'plug_ip': 'VARCHAR(50) NOT NULL',
        'plug_username': 'VARCHAR(100) NOT NULL',
        'plug_password': 'VARCHAR(100) NOT NULL',
        'status': 'VARCHAR(20) DEFAULT "offline"',
        'active': 'BOOLEAN DEFAULT 1',
        'created_at': 'DATETIME DEFAULT CURRENT_TIMESTAMP',
        'last_checked': 'DATETIME'
    }

    migrations_performed = []

    for column_name, column_def in required_columns.items():
        if column_name not in existing_columns:
            try:
                cursor.execute(f"ALTER TABLE printers ADD COLUMN {column_name} {column_def}")
                logger.info(f"Column '{column_name}' added to printers")
                migrations_performed.append(column_name)
            except Exception as e:
                logger.error(f"Error adding column '{column_name}' to printers: {str(e)}")

    return len(migrations_performed) > 0

def optimize_migration_performance():
    """Tunes the database for the migration run."""
    try:
        with get_database_connection(timeout=5) as conn:
            # Performance optimizations for the migration
            optimizations = [
                ("PRAGMA cache_size = -64000", "cache size raised to 64MB"),
                ("PRAGMA temp_store = MEMORY", "temp store in memory"),
                ("PRAGMA mmap_size = 268435456", "memory-mapped I/O enabled"),
                ("PRAGMA optimize", "automatic optimizations")
            ]

            for pragma, description in optimizations:
                try:
                    conn.execute(pragma)
                    logger.info(f"✅ {description}")
                except Exception as e:
                    logger.warning(f"⚠️ Optimization failed ({description}): {e}")
    except Exception as e:
        logger.warning(f"Error during performance optimization: {e}")
def migrate_jobs_table(cursor):
    """Migrates the jobs table, adding any missing columns."""
    logger.info("Migrating jobs table...")

    if not get_table_exists(cursor, 'jobs'):
        logger.warning("jobs table does not exist - it will be created by init_db")
        return False

    existing_columns = get_table_columns(cursor, 'jobs')

    required_columns = {
        'id': 'INTEGER PRIMARY KEY',
        'name': 'VARCHAR(200) NOT NULL',
        'description': 'VARCHAR(500)',
        'user_id': 'INTEGER NOT NULL',
        'printer_id': 'INTEGER NOT NULL',
        'start_at': 'DATETIME',
        'end_at': 'DATETIME',
        'actual_end_time': 'DATETIME',
        'status': 'VARCHAR(20) DEFAULT "scheduled"',
        'created_at': 'DATETIME DEFAULT CURRENT_TIMESTAMP',
        'notes': 'VARCHAR(500)',
        'material_used': 'FLOAT',
        'file_path': 'VARCHAR(500)',
        'owner_id': 'INTEGER',
        'duration_minutes': 'INTEGER NOT NULL'
    }

    migrations_performed = []

    for column_name, column_def in required_columns.items():
        if column_name not in existing_columns:
            try:
                cursor.execute(f"ALTER TABLE jobs ADD COLUMN {column_name} {column_def}")
                logger.info(f"Column '{column_name}' added to jobs")
                migrations_performed.append(column_name)
            except Exception as e:
                logger.error(f"Error adding column '{column_name}' to jobs: {str(e)}")

    return len(migrations_performed) > 0
def migrate_guest_requests_table(cursor):
    """Migrates the guest_requests table, adding any missing columns."""
    logger.info("Migrating guest_requests table...")

    if not get_table_exists(cursor, 'guest_requests'):
        logger.warning("guest_requests table does not exist - it will be created by init_db")
        return False

    existing_columns = get_table_columns(cursor, 'guest_requests')

    # Full definition of all expected columns, based on the GuestRequest model
    required_columns = {
        'id': 'INTEGER PRIMARY KEY',
        'name': 'VARCHAR(100) NOT NULL',
        'email': 'VARCHAR(120)',
        'reason': 'TEXT',
        'duration_min': 'INTEGER',  # existing column, kept for backward compatibility
        'duration_minutes': 'INTEGER',  # new column for API compatibility - THIS IS THE PROBLEM!
        'created_at': 'DATETIME DEFAULT CURRENT_TIMESTAMP',
        'status': 'VARCHAR(20) DEFAULT "pending"',
        'printer_id': 'INTEGER',
        'otp_code': 'VARCHAR(100)',
        'job_id': 'INTEGER',
        'author_ip': 'VARCHAR(50)',
        'otp_used_at': 'DATETIME',
        'file_name': 'VARCHAR(255)',
        'file_path': 'VARCHAR(500)',
        'copies': 'INTEGER DEFAULT 1',
        'processed_by': 'INTEGER',
        'processed_at': 'DATETIME',
        'approval_notes': 'TEXT',
        'rejection_reason': 'TEXT',
        'updated_at': 'DATETIME DEFAULT CURRENT_TIMESTAMP',
        'approved_at': 'DATETIME',
        'rejected_at': 'DATETIME',
        'approved_by': 'INTEGER',
        'rejected_by': 'INTEGER',
        'otp_expires_at': 'DATETIME',
        'assigned_printer_id': 'INTEGER'
    }

    migrations_performed = []

    for column_name, column_def in required_columns.items():
        if column_name not in existing_columns:
            try:
                # Special handling for updated_at with a trigger
                if column_name == 'updated_at':
                    cursor.execute(f"ALTER TABLE guest_requests ADD COLUMN {column_name} {column_def}")
                    # Trigger for automatic updates
                    cursor.execute("""
                        CREATE TRIGGER IF NOT EXISTS update_guest_requests_updated_at
                        AFTER UPDATE ON guest_requests
                        BEGIN
                            UPDATE guest_requests SET updated_at = CURRENT_TIMESTAMP WHERE id = NEW.id;
                        END
                    """)
                    logger.info(f"Column '{column_name}' added to guest_requests with auto-update trigger")
                else:
                    cursor.execute(f"ALTER TABLE guest_requests ADD COLUMN {column_name} {column_def}")
                    logger.info(f"Column '{column_name}' added to guest_requests")

                migrations_performed.append(column_name)
            except Exception as e:
                logger.error(f"Error adding column '{column_name}' to guest_requests: {str(e)}")

    # If duration_minutes was added, copy the values over from duration_min
    if 'duration_minutes' in migrations_performed:
        try:
            cursor.execute("UPDATE guest_requests SET duration_minutes = duration_min WHERE duration_minutes IS NULL")
            logger.info("Values copied from duration_min to duration_minutes")
        except Exception as e:
            logger.error(f"Error copying duration_min values: {str(e)}")

    return len(migrations_performed) > 0
def create_missing_tables(cursor):
    """Creates any missing tables."""
    logger.info("Checking for missing tables...")

    # user_permissions table
    if not get_table_exists(cursor, 'user_permissions'):
        cursor.execute("""
            CREATE TABLE user_permissions (
                user_id INTEGER PRIMARY KEY,
                can_start_jobs BOOLEAN DEFAULT 0,
                needs_approval BOOLEAN DEFAULT 1,
                can_approve_jobs BOOLEAN DEFAULT 0,
                FOREIGN KEY (user_id) REFERENCES users (id)
            )
        """)
        logger.info("Table 'user_permissions' created")

    # notifications table
    if not get_table_exists(cursor, 'notifications'):
        cursor.execute("""
            CREATE TABLE notifications (
                id INTEGER PRIMARY KEY,
                user_id INTEGER NOT NULL,
                type VARCHAR(50) NOT NULL,
                payload TEXT,
                created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                read BOOLEAN DEFAULT 0,
                FOREIGN KEY (user_id) REFERENCES users (id)
            )
        """)
        logger.info("Table 'notifications' created")

    # stats table
    if not get_table_exists(cursor, 'stats'):
        cursor.execute("""
            CREATE TABLE stats (
                id INTEGER PRIMARY KEY,
                total_print_time INTEGER DEFAULT 0,
                total_jobs_completed INTEGER DEFAULT 0,
                total_material_used FLOAT DEFAULT 0.0,
                last_updated DATETIME DEFAULT CURRENT_TIMESTAMP
            )
        """)
        logger.info("Table 'stats' created")

    # system_logs table
    if not get_table_exists(cursor, 'system_logs'):
        cursor.execute("""
            CREATE TABLE system_logs (
                id INTEGER PRIMARY KEY,
                timestamp DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL,
                level VARCHAR(20) NOT NULL,
                message VARCHAR(1000) NOT NULL,
                module VARCHAR(100),
                user_id INTEGER,
                ip_address VARCHAR(50),
                user_agent VARCHAR(500),
                FOREIGN KEY (user_id) REFERENCES users (id)
            )
        """)
        logger.info("Table 'system_logs' created")
def optimize_database(cursor):
    """Runs database optimizations."""
    logger.info("Running database optimizations...")

    try:
        # Indices for better performance
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_users_username ON users(username)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_jobs_printer_id ON jobs(printer_id)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_notifications_user_id ON notifications(user_id)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_system_logs_timestamp ON system_logs(timestamp)")
        cursor.execute("CREATE INDEX IF NOT EXISTS idx_guest_requests_status ON guest_requests(status)")

        # Refresh statistics
        cursor.execute("ANALYZE")

        logger.info("Database optimizations completed")

    except Exception as e:
        logger.error(f"Error during database optimizations: {str(e)}")
def main():
    """Runs the optimized schema migration."""
    global _migration_running
    _migration_running = True

    try:
        logger.info("🚀 Starting optimized database schema migration...")

        # Check the database file
        if not os.path.exists(DATABASE_PATH):
            logger.error(f"❌ Database file not found: {DATABASE_PATH}")
            return False

        # Initial WAL checkpoint to ensure a clean starting state
        logger.info("🔄 Running initial WAL checkpoint...")
        force_wal_checkpoint()

        # Performance optimizations
        optimize_migration_performance()

        # The actual migration, using the optimized connection
        with get_database_connection(timeout=60) as conn:
            cursor = conn.cursor()

            # Create a backup (with timeout)
            backup_path = f"{DATABASE_PATH}.backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            try:
                logger.info(f"📦 Creating backup: {backup_path}")
                cursor.execute(f"VACUUM INTO '{backup_path}'")
                logger.info("✅ Backup created successfully")
            except Exception as e:
                logger.warning(f"⚠️ Backup creation failed: {e}")

            # Run the migrations (shortened for better performance)
            migrations_performed = []

            if not _migration_running:
                return False

            # Quick schema checks
            try:
                # Test the critical query
                cursor.execute("SELECT COUNT(*) FROM guest_requests WHERE duration_minutes IS NOT NULL")
                logger.info("✅ Schema integrity test passed")
            except Exception:
                logger.info("🔧 Running critical schema repairs...")

                # Only the most important repairs
                critical_fixes = [
                    ("ALTER TABLE guest_requests ADD COLUMN duration_minutes INTEGER", "duration_minutes added to guest_requests"),
                    ("ALTER TABLE users ADD COLUMN username VARCHAR(100)", "username added to users"),
                    ("UPDATE users SET username = email WHERE username IS NULL", "username fallback")
                ]

                for sql, description in critical_fixes:
                    if not _migration_running:
                        break
                    try:
                        cursor.execute(sql)
                        logger.info(f"✅ {description}")
                        migrations_performed.append(description)
                    except sqlite3.OperationalError as e:
                        if "duplicate column" not in str(e).lower():
                            logger.warning(f"⚠️ {description}: {e}")

            # Commit plus WAL checkpoint between operations
            if migrations_performed:
                conn.commit()
                cursor.execute("PRAGMA wal_checkpoint(PASSIVE)")

            # Final optimizations (reduced set)
            if _migration_running:
                essential_indices = [
                    "CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)",
                    "CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status)",
                    "CREATE INDEX IF NOT EXISTS idx_guest_requests_status ON guest_requests(status)"
                ]

                for index_sql in essential_indices:
                    try:
                        cursor.execute(index_sql)
                    except Exception:
                        pass  # indices are not critical

                # Final statistics
                cursor.execute("ANALYZE")
                migrations_performed.append("optimizations")

            # Final commit
            conn.commit()
            logger.info(f"✅ Migration completed. Areas: {', '.join(migrations_performed)}")

        # Closing WAL checkpoint
        logger.info("🔄 Running final WAL checkpoint...")
        force_wal_checkpoint()

        # Short pause to make sure all I/O operations have finished
        time.sleep(1)

        logger.info("🎉 Optimized schema migration completed successfully!")
        return True

    except KeyboardInterrupt:
        logger.info("🔄 Migration interrupted by the user")
        return False
    except Exception as e:
        logger.error(f"❌ Critical error during migration: {str(e)}")
        return False
    finally:
        _migration_running = False
        # Final WAL cleanup
        try:
            force_wal_checkpoint()
        except Exception:
            pass

def test_migration():
    """Tests the migration by loading the models."""
    try:
        logger.info("Testing the migration by loading the models...")

        # Import and exercise the models
        from models import get_cached_session, User, Printer, Job

        with get_cached_session() as session:
            # Test a User query (should resolve the updated_at problem)
            users = session.query(User).limit(1).all()
            logger.info(f"User query succeeded - {len(users)} users found")

            # Test a Printer query
            printers = session.query(Printer).limit(1).all()
            logger.info(f"Printer query succeeded - {len(printers)} printers found")

            # Test a Job query
            jobs = session.query(Job).limit(1).all()
            logger.info(f"Job query succeeded - {len(jobs)} jobs found")

        logger.info("Migration test completed successfully")

    except Exception as e:
        logger.error(f"Error during the migration test: {str(e)}")
        raise

if __name__ == "__main__":
    success = main()
    if not success:
        sys.exit(1)
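Because main() now reports success through its return value and the process exit code, callers can gate on it. A small sketch of a wrapper, with the script filename assumed rather than taken from this commit:

import subprocess
import sys

# Hypothetical filename; adjust to the repository layout.
result = subprocess.run([sys.executable, "schema_migration.py"])
if result.returncode != 0:
    print("Schema migration failed - aborting")
    sys.exit(1)
print("Schema migration succeeded")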