diff --git a/backend/README.md b/backend/README.md new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/backend/README.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/app.py b/backend/app.py index 965aab55..3b795a9e 100644 --- a/backend/app.py +++ b/backend/app.py @@ -34,7 +34,7 @@ else: get_windows_thread_manager = None # Lokale Imports -from models import init_database, create_initial_admin, User, Printer, Job, Stats, SystemLog, get_db_session, GuestRequest, UserPermission, Notification +from models import init_database, create_initial_admin, User, Printer, Job, Stats, SystemLog, get_db_session, GuestRequest, UserPermission, Notification, JobOrder, Base, get_engine from utils.logging_config import setup_logging, get_logger, measure_execution_time, log_startup_info, debug_request, debug_response from utils.job_scheduler import JobScheduler, get_job_scheduler from utils.queue_manager import start_queue_manager, stop_queue_manager, get_queue_manager @@ -89,6 +89,41 @@ from utils.security import init_security, require_secure_headers, security_check from utils.permissions import init_permission_helpers, require_permission, Permission, check_permission from utils.analytics import analytics_engine, track_event, get_dashboard_stats +# Import der neuen System-Module +from utils.form_validation import ( + FormValidator, ValidationError, ValidationResult, + get_user_registration_validator, get_job_creation_validator, + get_printer_creation_validator, get_guest_request_validator, + validate_form, get_client_validation_js +) +from utils.report_generator import ( + ReportFactory, ReportConfig, JobReportBuilder, + UserReportBuilder, PrinterReportBuilder, generate_comprehensive_report +) +from utils.realtime_dashboard import ( + DashboardManager, EventType, DashboardEvent, + emit_job_event, emit_printer_event, emit_system_alert, + get_dashboard_client_js +) +from utils.drag_drop_system import ( + drag_drop_manager, DragDropConfig, 
# --- Continuation of the new system-module imports (statement opens on the
# --- preceding line with `from utils.drag_drop_system import (...`).
from utils.drag_drop_system import (
    drag_drop_manager, DragDropConfig, validate_file_upload,
    get_drag_drop_javascript, get_drag_drop_css
)
from utils.advanced_tables import (
    AdvancedTableQuery, TableDataProcessor, ColumnConfig,
    create_table_config, get_advanced_tables_js, get_advanced_tables_css
)
from utils.maintenance_system import (
    MaintenanceManager, MaintenanceType, MaintenanceStatus,
    create_maintenance_task, schedule_maintenance,
    get_maintenance_overview, update_maintenance_status
)
from utils.multi_location_system import (
    LocationManager, LocationType, AccessLevel,
    create_location, assign_user_to_location, get_user_locations,
    calculate_distance, find_nearest_location
)

# Module-level singletons backing the new dashboard / maintenance / location
# routes below. Created once at import time.
dashboard_manager = DashboardManager()
maintenance_manager = MaintenanceManager()
location_manager = LocationManager()

# SocketIO instance for the realtime dashboard.
# NOTE(review): cors_allowed_origins="*" allows any origin to open a websocket
# — confirm this is intended outside of development.
socketio = dashboard_manager.init_socketio(app, cors_allowed_origins="*")


@app.route("/admin")
@login_required
def admin():
    """Render the admin page; non-admin users are redirected to the index."""
    if not current_user.is_admin:
        flash("Nur Administratoren haben Zugriff auf diesen Bereich.", "error")
        return redirect(url_for("index"))

    return render_template("admin.html")

@app.route("/socket-test")
@login_required
@admin_required
def socket_test():
    """Smart-plug (socket) test page for trainers and administrators."""
    app_logger.info(f"Admin {current_user.name} hat die Steckdosen-Test-Seite aufgerufen")
    return render_template("socket_test.html")

@app.route("/stats")
@login_required
def stats_page():
    """Render the statistics page."""
    return render_template("stats.html", title="Statistiken")

@app.route("/admin-dashboard")
@login_required
def admin_page():
    """Extended admin dashboard page with live features (admins only)."""
    if not current_user.is_admin:
        return redirect(url_for("index"))
    return render_template("admin_dashboard.html", title="Admin Dashboard")

# ===== NEUE SYSTEM UI-ROUTEN =====

@app.route("/dashboard/realtime")
@login_required
def realtime_dashboard():
    """Realtime dashboard page with WebSocket-driven updates."""
    return render_template("realtime_dashboard.html", title="Echtzeit-Dashboard")

@app.route("/reports")
@login_required
def reports_page():
    """Report-generation page."""
    return render_template("reports.html", title="Reports")

@app.route("/maintenance")
@login_required
def maintenance_page():
    """Maintenance-management page."""
    return render_template("maintenance.html", title="Wartung")

@app.route("/locations")
@login_required
@admin_required
def locations_page():
    """Multi-site (location) management page — admins only."""
    return render_template("locations.html", title="Standorte")

@app.route("/validation-demo")
@login_required
def validation_demo():
    """Form-validation demo page."""
    return render_template("validation_demo.html", title="Formular-Validierung Demo")

@app.route("/tables-demo")
@login_required
def tables_demo():
    """Advanced-tables demo page."""
    return render_template("tables_demo.html", title="Erweiterte Tabellen Demo")

@app.route("/dragdrop-demo")
@login_required
def dragdrop_demo():
    """Drag & drop demo page."""
    return render_template("dragdrop_demo.html", title="Drag & Drop Demo")

# ===== ERROR MONITORING SYSTEM =====

# FIX(review): the original patch registered this exact route + view function
# a second time further down (under "INTEGRATION IN BESTEHENDE ROUTEN").
# Flask raises "AssertionError: View function mapping is overwriting an
# existing endpoint function: api_admin_system_health" on the duplicate
# registration, so the application could not even start. Only this single
# definition is kept.
@app.route("/api/admin/system-health", methods=['GET'])
@login_required
@admin_required
def api_admin_system_health():
    """System health-check API endpoint with dashboard integration.

    Returns a JSON payload with the overall health status plus (currently
    always empty) lists of critical errors and warnings, and emits a
    dashboard system-alert event for the check itself.
    """
    try:
        critical_errors = []
        warnings = []

        # Notify the realtime dashboard that a health check ran.
        emit_system_alert(
            "System-Gesundheitscheck durchgeführt",
            alert_type="info",
            priority="normal"
        )

        return jsonify({
            "success": True,
            "health_status": "healthy",
            "critical_errors": critical_errors,
            "warnings": warnings,
            "timestamp": datetime.now().isoformat()
        })

    except Exception as e:
        # FIX(review): the patch called the undefined name `logger` here,
        # which would raise NameError on the error path; the module's logger
        # is `app_logger` (consistent with every other handler in this file).
        app_logger.error(f"Fehler beim System-Gesundheitscheck: {str(e)}")
        return jsonify({
            "success": False,
            "error": str(e)
        }), 500
-@app.route("/api/jobs", methods=["GET"]) -@login_required -def get_jobs(): - db_session = get_db_session() - - try: - # Import joinedload for eager loading - from sqlalchemy.orm import joinedload - - # Admin sieht alle Jobs, User nur eigene - if current_user.is_admin: - # Eagerly load the user and printer relationships to avoid detached instance errors - jobs = db_session.query(Job).options(joinedload(Job.user), joinedload(Job.printer)).all() - else: - jobs = db_session.query(Job).options(joinedload(Job.user), joinedload(Job.printer)).filter(Job.user_id == int(current_user.id)).all() - - # Convert jobs to dictionaries before closing the session - job_dicts = [job.to_dict() for job in jobs] - - db_session.close() - - return jsonify({ - "jobs": job_dicts - }) - except Exception as e: - jobs_logger.error(f"Fehler beim Abrufen von Jobs: {str(e)}") - db_session.close() - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/jobs/", methods=["GET"]) -@login_required -@job_owner_required -def get_job(job_id): - db_session = get_db_session() - - try: - from sqlalchemy.orm import joinedload - # Eagerly load the user and printer relationships - job = db_session.query(Job).options(joinedload(Job.user), joinedload(Job.printer)).filter(Job.id == job_id).first() - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Convert to dict before closing session - job_dict = job.to_dict() - db_session.close() - - return jsonify(job_dict) - except Exception as e: - jobs_logger.error(f"Fehler beim Abrufen des Jobs {job_id}: {str(e)}") - db_session.close() - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route('/api/jobs/check-waiting', methods=['POST']) -@login_required -def check_waiting_jobs(): - """Überprüft wartende Jobs und startet sie, wenn Drucker online gehen.""" - try: - db_session = get_db_session() - - # Alle wartenden Jobs finden - waiting_jobs = db_session.query(Job).filter( - Job.status == 
"waiting_for_printer" - ).all() - - if not waiting_jobs: - db_session.close() - return jsonify({ - "message": "Keine wartenden Jobs gefunden", - "updated_jobs": [] - }) - - updated_jobs = [] - - for job in waiting_jobs: - # Drucker-Status prüfen - printer = db_session.get(Printer, job.printer_id) - if printer and printer.plug_ip: - status, active = check_printer_status(printer.plug_ip) - - if status == "online" and active: - # Drucker ist jetzt online - Job kann geplant werden - job.status = "scheduled" - updated_jobs.append({ - "id": job.id, - "name": job.name, - "printer_name": printer.name, - "status": "scheduled" - }) - - jobs_logger.info(f"Job {job.id} von 'waiting_for_printer' zu 'scheduled' geändert - Drucker {printer.name} ist online") - - if updated_jobs: - db_session.commit() - - db_session.close() - - return jsonify({ - "message": f"{len(updated_jobs)} Jobs aktualisiert", - "updated_jobs": updated_jobs - }) - - except Exception as e: - jobs_logger.error(f"Fehler beim Überprüfen wartender Jobs: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route('/api/jobs/active', methods=['GET']) -@login_required -def get_active_jobs(): - """ - Gibt alle aktiven Jobs zurück. 
- """ - try: - db_session = get_db_session() - from sqlalchemy.orm import joinedload - - active_jobs = db_session.query(Job).options( - joinedload(Job.user), - joinedload(Job.printer) - ).filter( - Job.status.in_(["scheduled", "running"]) - ).all() - - result = [] - for job in active_jobs: - job_dict = job.to_dict() - # Aktuelle Restzeit berechnen - if job.status == "running" and job.end_at: - remaining_time = job.end_at - datetime.now() - if remaining_time.total_seconds() > 0: - job_dict["remaining_minutes"] = int(remaining_time.total_seconds() / 60) - else: - job_dict["remaining_minutes"] = 0 - - result.append(job_dict) - - db_session.close() - return jsonify({"jobs": result}) - except Exception as e: - jobs_logger.error(f"Fehler beim Abrufen aktiver Jobs: {str(e)}") - return jsonify({"error": "Interner Serverfehler", "details": str(e)}), 500 - -@app.route('/api/jobs', methods=['POST']) -@login_required -@measure_execution_time(logger=jobs_logger, task_name="API-Job-Erstellung") -def create_job(): - """ - Erstellt einen neuen Job mit intelligentem Power Management. - Jobs die sofort starten sollen, werden automatisch verarbeitet. 
- - Body: { - "printer_id": int, - "start_iso": str, # ISO-Datum-String - "duration_minutes": int - } - """ - try: - data = request.json - - # Pflichtfelder prüfen - required_fields = ["printer_id", "start_iso", "duration_minutes"] - for field in required_fields: - if field not in data: - return jsonify({"error": f"Feld '{field}' fehlt"}), 400 - - # Daten extrahieren und validieren - printer_id = int(data["printer_id"]) - start_iso = data["start_iso"] - duration_minutes = int(data["duration_minutes"]) - - # Optional: Jobtitel und Dateipfad - name = data.get("name", f"Druckjob vom {datetime.now().strftime('%d.%m.%Y')}") - file_path = data.get("file_path") - - # Start-Zeit parsen - try: - start_at = datetime.fromisoformat(start_iso) - except ValueError: - return jsonify({"error": "Ungültiges Startdatum"}), 400 - - # Dauer validieren - if duration_minutes <= 0: - return jsonify({"error": "Dauer muss größer als 0 sein"}), 400 - - # End-Zeit berechnen - end_at = start_at + timedelta(minutes=duration_minutes) - now = datetime.now() - - db_session = get_db_session() - - # Prüfen, ob der Drucker existiert - printer = db_session.get(Printer, printer_id) - if not printer: - db_session.close() - return jsonify({"error": "Drucker nicht gefunden"}), 404 - - # Intelligente Status-Bestimmung - is_immediate_job = start_at <= now # Job soll sofort oder in der Vergangenheit starten - - if is_immediate_job: - # Sofort-Job: Status auf "waiting_for_printer" setzen für automatische Verarbeitung - job_status = "waiting_for_printer" - jobs_logger.info(f"📦 Erstelle Sofort-Job für Drucker {printer.name} (Start: {start_at})") - else: - # Geplanter Job: Status auf "scheduled" setzen - job_status = "scheduled" - time_until_start = (start_at - now).total_seconds() / 60 - jobs_logger.info(f"⏰ Erstelle geplanten Job für Drucker {printer.name} (Start in {time_until_start:.1f} Min)") - - # Neuen Job erstellen - new_job = Job( - name=name, - printer_id=printer_id, - user_id=current_user.id, - 
owner_id=current_user.id, - start_at=start_at, - end_at=end_at, - status=job_status, - file_path=file_path, - duration_minutes=duration_minutes - ) - - db_session.add(new_job) - db_session.commit() - - # Job-ID für weitere Verarbeitung speichern - job_id = new_job.id - job_dict = new_job.to_dict() - db_session.close() - - jobs_logger.info(f"✅ Job {job_id} erstellt für Drucker {printer_id}, Start: {start_at}, Dauer: {duration_minutes} Minuten, Status: {job_status}") - - # Intelligentes Power Management: Sofort-Jobs automatisch verarbeiten - if is_immediate_job: - try: - from utils.job_scheduler import get_job_scheduler - scheduler = get_job_scheduler() - - # Versuche den Job sofort zu starten (schaltet Drucker automatisch ein) - if scheduler.handle_immediate_job(job_id): - jobs_logger.info(f"⚡ Sofort-Job {job_id} erfolgreich gestartet - Drucker automatisch eingeschaltet") - # Status in der Antwort aktualisieren - job_dict["status"] = "running" - job_dict["message"] = "Job wurde sofort gestartet - Drucker automatisch eingeschaltet" - else: - jobs_logger.warning(f"⚠️ Sofort-Job {job_id} konnte nicht gestartet werden - bleibt im Status 'waiting_for_printer'") - job_dict["message"] = "Job erstellt - wartet auf Drucker-Verfügbarkeit" - - except Exception as e: - jobs_logger.error(f"❌ Fehler beim automatischen Starten von Sofort-Job {job_id}: {str(e)}") - job_dict["message"] = "Job erstellt - automatischer Start fehlgeschlagen" - else: - # Geplanter Job: Power Management für zukünftige Optimierung - try: - from utils.job_scheduler import get_job_scheduler - scheduler = get_job_scheduler() - - # Prüfe und manage Power für diesen Drucker (für optimale Vorbereitung) - scheduler.check_and_manage_printer_power(printer_id) - - time_until_start = (start_at - now).total_seconds() / 60 - job_dict["message"] = f"Job geplant - startet automatisch in {time_until_start:.1f} Minuten" - - except Exception as e: - jobs_logger.warning(f"⚠️ Power-Management-Fehler für geplanten Job 
{job_id}: {str(e)}") - job_dict["message"] = "Job geplant - startet automatisch zur geplanten Zeit" - - return jsonify({ - "job": job_dict, - "success": True, - "immediate_start": is_immediate_job - }), 201 - - except Exception as e: - jobs_logger.error(f"❌ Fehler beim Erstellen eines Jobs: {str(e)}") - return jsonify({"error": "Interner Serverfehler", "details": str(e)}), 500 - -@app.route('/api/jobs//extend', methods=['POST']) -@login_required -@job_owner_required -def extend_job(job_id): - """ - Verlängert die Endzeit eines Jobs. - - Body: { - "extra_minutes": int - } - """ - try: - data = request.json - - # Prüfen, ob die erforderlichen Daten vorhanden sind - if "extra_minutes" not in data: - return jsonify({"error": "Feld 'extra_minutes' fehlt"}), 400 - - extra_minutes = int(data["extra_minutes"]) - - # Validieren - if extra_minutes <= 0: - return jsonify({"error": "Zusätzliche Minuten müssen größer als 0 sein"}), 400 - - db_session = get_db_session() - job = db_session.get(Job, job_id) - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Prüfen, ob der Job verlängert werden kann - if job.status not in ["scheduled", "running"]: - db_session.close() - return jsonify({"error": f"Job kann im Status '{job.status}' nicht verlängert werden"}), 400 - - # Endzeit aktualisieren - job.end_at = job.end_at + timedelta(minutes=extra_minutes) - job.duration_minutes += extra_minutes - - db_session.commit() - - # Job-Objekt für die Antwort serialisieren - job_dict = job.to_dict() - db_session.close() - - jobs_logger.info(f"Job {job_id} um {extra_minutes} Minuten verlängert, neue Endzeit: {job.end_at}") - return jsonify({"job": job_dict}) - - except Exception as e: - jobs_logger.error(f"Fehler beim Verlängern von Job {job_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler", "details": str(e)}), 500 - -@app.route('/api/jobs//finish', methods=['POST']) -@login_required -def finish_job(job_id): - """ - Beendet einen Job 
manuell und schaltet die Steckdose aus. - Nur für Administratoren erlaubt. - """ - try: - # Prüfen, ob der Benutzer Administrator ist - if not current_user.is_admin: - return jsonify({"error": "Nur Administratoren können Jobs manuell beenden"}), 403 - - db_session = get_db_session() - job = db_session.query(Job).options(joinedload(Job.printer)).filter(Job.id == job_id).first() - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Prüfen, ob der Job beendet werden kann - if job.status not in ["scheduled", "running"]: - db_session.close() - return jsonify({"error": f"Job kann im Status '{job.status}' nicht beendet werden"}), 400 - - # Steckdose ausschalten - from utils.job_scheduler import toggle_plug - if not toggle_plug(job.printer_id, False): - # Trotzdem weitermachen, aber Warnung loggen - jobs_logger.warning(f"Steckdose für Job {job_id} konnte nicht ausgeschaltet werden") - - # Job als beendet markieren - job.status = "finished" - job.actual_end_time = datetime.now() - - db_session.commit() - - # Job-Objekt für die Antwort serialisieren - job_dict = job.to_dict() - db_session.close() - - jobs_logger.info(f"Job {job_id} manuell beendet durch Admin {current_user.id}") - return jsonify({"job": job_dict}) - - except Exception as e: - jobs_logger.error(f"Fehler beim manuellen Beenden von Job {job_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler", "details": str(e)}), 500 - -@app.route('/api/jobs//cancel', methods=['POST']) -@login_required -@job_owner_required -def cancel_job(job_id): - """Bricht einen Job ab.""" - try: - db_session = get_db_session() - job = db_session.get(Job, job_id) - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Prüfen, ob der Job abgebrochen werden kann - if job.status not in ["scheduled", "running"]: - db_session.close() - return jsonify({"error": f"Job kann im Status '{job.status}' nicht abgebrochen werden"}), 400 - - # Job als 
abgebrochen markieren - job.status = "cancelled" - job.actual_end_time = datetime.now() - - # Wenn der Job läuft, Steckdose ausschalten - if job.status == "running": - from utils.job_scheduler import toggle_plug - toggle_plug(job.printer_id, False) - - db_session.commit() - - job_dict = job.to_dict() - db_session.close() - - jobs_logger.info(f"Job {job_id} abgebrochen von Benutzer {current_user.id}") - return jsonify({"job": job_dict}) - - except Exception as e: - jobs_logger.error(f"Fehler beim Abbrechen des Jobs {job_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/jobs//start", methods=["POST"]) -@login_required -@job_owner_required -def start_job(job_id): - """Startet einen Job manuell.""" - try: - db_session = get_db_session() - job = db_session.query(Job).options(joinedload(Job.printer)).get(job_id) - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Prüfen, ob der Job gestartet werden kann - if job.status not in ["scheduled", "queued", "waiting_for_printer"]: - db_session.close() - return jsonify({"error": f"Job kann im Status '{job.status}' nicht gestartet werden"}), 400 - - # Drucker einschalten falls verfügbar - try: - from utils.job_scheduler import toggle_plug - if job.printer and job.printer.plug_ip: - if toggle_plug(job.printer_id, True): - jobs_logger.info(f"Drucker {job.printer.name} für Job {job_id} eingeschaltet") - else: - jobs_logger.warning(f"Konnte Drucker {job.printer.name} für Job {job_id} nicht einschalten") - except Exception as e: - jobs_logger.warning(f"Fehler beim Einschalten des Druckers für Job {job_id}: {str(e)}") - - # Job als laufend markieren - job.status = "running" - job.start_at = datetime.now() - if job.duration_minutes: - job.end_at = job.start_at + timedelta(minutes=job.duration_minutes) - - db_session.commit() - - job_dict = job.to_dict() - db_session.close() - - jobs_logger.info(f"Job {job_id} manuell gestartet von Benutzer 
{current_user.id}") return jsonify({ "success": True, - "message": "Job erfolgreich gestartet", - "job": job_dict - }) - - except Exception as e: - jobs_logger.error(f"Fehler beim Starten des Jobs {job_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/jobs//pause", methods=["POST"]) -@login_required -@job_owner_required -def pause_job(job_id): - """Pausiert einen laufenden Job.""" - try: - db_session = get_db_session() - job = db_session.query(Job).options(joinedload(Job.printer)).get(job_id) - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Prüfen, ob der Job pausiert werden kann - if job.status != "running": - db_session.close() - return jsonify({"error": f"Job kann im Status '{job.status}' nicht pausiert werden"}), 400 - - # Drucker ausschalten - try: - from utils.job_scheduler import toggle_plug - if job.printer and job.printer.plug_ip: - if toggle_plug(job.printer_id, False): - jobs_logger.info(f"Drucker {job.printer.name} für Job {job_id} ausgeschaltet (Pause)") - else: - jobs_logger.warning(f"Konnte Drucker {job.printer.name} für Job {job_id} nicht ausschalten") - except Exception as e: - jobs_logger.warning(f"Fehler beim Ausschalten des Druckers für Job {job_id}: {str(e)}") - - # Job als pausiert markieren - job.status = "paused" - job.paused_at = datetime.now() - - db_session.commit() - - job_dict = job.to_dict() - db_session.close() - - jobs_logger.info(f"Job {job_id} pausiert von Benutzer {current_user.id}") - return jsonify({ - "success": True, - "message": "Job erfolgreich pausiert", - "job": job_dict - }) - - except Exception as e: - jobs_logger.error(f"Fehler beim Pausieren des Jobs {job_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/jobs//resume", methods=["POST"]) -@login_required -@job_owner_required -def resume_job(job_id): - """Setzt einen pausierten Job fort.""" - try: - db_session = get_db_session() - job = 
db_session.query(Job).options(joinedload(Job.printer)).get(job_id) - - if not job: - db_session.close() - return jsonify({"error": "Job nicht gefunden"}), 404 - - # Prüfen, ob der Job fortgesetzt werden kann - if job.status != "paused": - db_session.close() - return jsonify({"error": f"Job kann im Status '{job.status}' nicht fortgesetzt werden"}), 400 - - # Drucker einschalten - try: - from utils.job_scheduler import toggle_plug - if job.printer and job.printer.plug_ip: - if toggle_plug(job.printer_id, True): - jobs_logger.info(f"Drucker {job.printer.name} für Job {job_id} eingeschaltet (Resume)") - else: - jobs_logger.warning(f"Konnte Drucker {job.printer.name} für Job {job_id} nicht einschalten") - except Exception as e: - jobs_logger.warning(f"Fehler beim Einschalten des Druckers für Job {job_id}: {str(e)}") - - # Job als laufend markieren - job.status = "running" - job.resumed_at = datetime.now() - - # Endzeit anpassen falls notwendig - if job.paused_at and job.end_at: - pause_duration = job.resumed_at - job.paused_at - job.end_at += pause_duration - - db_session.commit() - - job_dict = job.to_dict() - db_session.close() - - jobs_logger.info(f"Job {job_id} fortgesetzt von Benutzer {current_user.id}") - return jsonify({ - "success": True, - "message": "Job erfolgreich fortgesetzt", - "job": job_dict - }) - - except Exception as e: - jobs_logger.error(f"Fehler beim Fortsetzen des Jobs {job_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/stats", methods=["GET"]) -@login_required -def get_stats(): - """Gibt Statistiken zurück.""" - try: - db_session = get_db_session() - - # Grundlegende Statistiken - total_users = db_session.query(User).count() - total_printers = db_session.query(Printer).count() - total_jobs = db_session.query(Job).count() - - # Jobs nach Status - completed_jobs = db_session.query(Job).filter(Job.status == "completed").count() - failed_jobs = db_session.query(Job).filter(Job.status == "failed").count() 
- cancelled_jobs = db_session.query(Job).filter(Job.status == "cancelled").count() - active_jobs = db_session.query(Job).filter(Job.status.in_(["scheduled", "running"])).count() - - # Online-Drucker - online_printers = db_session.query(Printer).filter(Printer.status == "available").count() - - # Erfolgsrate - finished_jobs = completed_jobs + failed_jobs + cancelled_jobs - success_rate = (completed_jobs / finished_jobs * 100) if finished_jobs > 0 else 0 - - # Benutzer-spezifische Statistiken (falls nicht Admin) - user_stats = {} - if not current_user.is_admin: - user_jobs = db_session.query(Job).filter(Job.user_id == int(current_user.id)).count() - user_completed = db_session.query(Job).filter( - Job.user_id == int(current_user.id), - Job.status == "completed" - ).count() - user_stats = { - "total_jobs": user_jobs, - "completed_jobs": user_completed, - "success_rate": (user_completed / user_jobs * 100) if user_jobs > 0 else 0 - } - - db_session.close() - - stats = { - "total_users": total_users, - "total_printers": total_printers, - "online_printers": online_printers, - "total_jobs": total_jobs, - "completed_jobs": completed_jobs, - "failed_jobs": failed_jobs, - "cancelled_jobs": cancelled_jobs, - "active_jobs": active_jobs, - "success_rate": round(success_rate, 1), - "user_stats": user_stats - } - - return jsonify(stats) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Statistiken: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/stats/charts/job-status", methods=["GET"]) -@login_required -def get_job_status_chart_data(): - """Gibt Diagrammdaten für Job-Status-Verteilung zurück.""" - try: - db_session = get_db_session() - - # Job-Status zählen - job_status_counts = { - 'completed': db_session.query(Job).filter(Job.status == 'completed').count(), - 'failed': db_session.query(Job).filter(Job.status == 'failed').count(), - 'cancelled': db_session.query(Job).filter(Job.status == 'cancelled').count(), - 
'running': db_session.query(Job).filter(Job.status == 'running').count(), - 'scheduled': db_session.query(Job).filter(Job.status == 'scheduled').count() - } - - db_session.close() - - chart_data = { - 'labels': ['Abgeschlossen', 'Fehlgeschlagen', 'Abgebrochen', 'Läuft', 'Geplant'], - 'datasets': [{ - 'label': 'Anzahl Jobs', - 'data': [ - job_status_counts['completed'], - job_status_counts['failed'], - job_status_counts['cancelled'], - job_status_counts['running'], - job_status_counts['scheduled'] - ], - 'backgroundColor': [ - '#10b981', # Grün für abgeschlossen - '#ef4444', # Rot für fehlgeschlagen - '#6b7280', # Grau für abgebrochen - '#3b82f6', # Blau für läuft - '#f59e0b' # Orange für geplant - ] - }] - } - - return jsonify(chart_data) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Job-Status-Diagrammdaten: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/stats/charts/printer-usage", methods=["GET"]) -@login_required -def get_printer_usage_chart_data(): - """Gibt Diagrammdaten für Drucker-Nutzung zurück.""" - try: - db_session = get_db_session() - - # Drucker mit Job-Anzahl - printer_usage = db_session.query( - Printer.name, - func.count(Job.id).label('job_count') - ).outerjoin(Job).group_by(Printer.id, Printer.name).all() - - db_session.close() - - chart_data = { - 'labels': [usage[0] for usage in printer_usage], - 'datasets': [{ - 'label': 'Anzahl Jobs', - 'data': [usage[1] for usage in printer_usage], - 'backgroundColor': '#3b82f6', - 'borderColor': '#1d4ed8', - 'borderWidth': 1 - }] - } - - return jsonify(chart_data) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Drucker-Nutzung-Diagrammdaten: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/stats/charts/jobs-timeline", methods=["GET"]) -@login_required -def get_jobs_timeline_chart_data(): - """Gibt Diagrammdaten für Jobs-Timeline der letzten 30 Tage zurück.""" - try: - db_session 
= get_db_session() - - # Letzte 30 Tage - end_date = datetime.now().date() - start_date = end_date - timedelta(days=30) - - # Jobs pro Tag der letzten 30 Tage - daily_jobs = db_session.query( - func.date(Job.created_at).label('date'), - func.count(Job.id).label('count') - ).filter( - func.date(Job.created_at) >= start_date, - func.date(Job.created_at) <= end_date - ).group_by(func.date(Job.created_at)).all() - - # Alle Tage füllen (auch ohne Jobs) - date_dict = {job_date: count for job_date, count in daily_jobs} - - labels = [] - data = [] - current_date = start_date - - while current_date <= end_date: - labels.append(current_date.strftime('%d.%m')) - data.append(date_dict.get(current_date, 0)) - current_date += timedelta(days=1) - - db_session.close() - - chart_data = { - 'labels': labels, - 'datasets': [{ - 'label': 'Jobs pro Tag', - 'data': data, - 'fill': True, - 'backgroundColor': 'rgba(59, 130, 246, 0.1)', - 'borderColor': '#3b82f6', - 'tension': 0.4 - }] - } - - return jsonify(chart_data) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Jobs-Timeline-Diagrammdaten: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/stats/charts/user-activity", methods=["GET"]) -@login_required -def get_user_activity_chart_data(): - """Gibt Diagrammdaten für Top-Benutzer-Aktivität zurück.""" - try: - db_session = get_db_session() - - # Top 10 Benutzer nach Job-Anzahl - top_users = db_session.query( - User.username, - func.count(Job.id).label('job_count') - ).join(Job).group_by( - User.id, User.username - ).order_by( - func.count(Job.id).desc() - ).limit(10).all() - - db_session.close() - - chart_data = { - 'labels': [user[0] for user in top_users], - 'datasets': [{ - 'label': 'Anzahl Jobs', - 'data': [user[1] for user in top_users], - 'backgroundColor': '#8b5cf6', - 'borderColor': '#7c3aed', - 'borderWidth': 1 - }] - } - - return jsonify(chart_data) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen 
der Benutzer-Aktivität-Diagrammdaten: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/stats/export", methods=["GET"]) -@login_required -def export_stats(): - """Exportiert Statistiken als CSV.""" - try: - db_session = get_db_session() - - # Basis-Statistiken sammeln - total_users = db_session.query(User).count() - total_printers = db_session.query(Printer).count() - total_jobs = db_session.query(Job).count() - completed_jobs = db_session.query(Job).filter(Job.status == "completed").count() - failed_jobs = db_session.query(Job).filter(Job.status == "failed").count() - - # CSV-Inhalt erstellen - import io - import csv - - output = io.StringIO() - writer = csv.writer(output) - - # Header - writer.writerow(['Metrik', 'Wert']) - - # Daten - writer.writerow(['Gesamte Benutzer', total_users]) - writer.writerow(['Gesamte Drucker', total_printers]) - writer.writerow(['Gesamte Jobs', total_jobs]) - writer.writerow(['Abgeschlossene Jobs', completed_jobs]) - writer.writerow(['Fehlgeschlagene Jobs', failed_jobs]) - writer.writerow(['Erfolgsrate (%)', round((completed_jobs / total_jobs * 100), 2) if total_jobs > 0 else 0]) - writer.writerow(['Exportiert am', datetime.now().strftime('%d.%m.%Y %H:%M:%S')]) - - db_session.close() - - # Response vorbereiten - output.seek(0) - - response = Response( - output.getvalue(), - mimetype='text/csv', - headers={ - 'Content-Disposition': f'attachment; filename=statistiken_{datetime.now().strftime("%Y%m%d_%H%M%S")}.csv' - } - ) - - return response - - except Exception as e: - app_logger.error(f"Fehler beim Exportieren der Statistiken: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/admin/users", methods=["GET"]) -@login_required -def get_users(): - """Gibt alle Benutzer zurück (nur für Admins).""" - if not current_user.is_admin: - return jsonify({"error": "Nur Administratoren können Benutzer anzeigen"}), 403 - - try: - db_session = get_db_session() - users = 
db_session.query(User).all() - - user_data = [] - for user in users: - user_data.append({ - "id": user.id, - "username": user.username, - "email": user.email, - "first_name": user.first_name, - "last_name": user.last_name, - "is_admin": user.is_admin, - "created_at": user.created_at.isoformat() if user.created_at else None, - "last_login": user.last_login.isoformat() if hasattr(user, 'last_login') and user.last_login else None - }) - - db_session.close() - return jsonify({"users": user_data}) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Benutzer: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route("/api/admin/users/", methods=["PUT"]) -@login_required -def update_user(user_id): - """Aktualisiert einen Benutzer (nur für Admins).""" - if not current_user.is_admin: - return jsonify({"error": "Nur Administratoren können Benutzer bearbeiten"}), 403 - - try: - data = request.json - db_session = get_db_session() - - user = db_session.get(User, user_id) - if not user: - db_session.close() - return jsonify({"error": "Benutzer nicht gefunden"}), 404 - - # Aktualisierbare Felder - updatable_fields = ["username", "email", "first_name", "last_name", "is_admin"] - for field in updatable_fields: - if field in data: - setattr(user, field, data[field]) - - # Passwort separat behandeln - if "password" in data and data["password"]: - user.set_password(data["password"]) - - db_session.commit() - - user_data = { - "id": user.id, - "username": user.username, - "email": user.email, - "first_name": user.first_name, - "last_name": user.last_name, - "is_admin": user.is_admin, - "created_at": user.created_at.isoformat() if user.created_at else None - } - - db_session.close() - - user_logger.info(f"Benutzer {user_id} aktualisiert von Admin {current_user.id}") - return jsonify({"user": user_data}) - - except Exception as e: - user_logger.error(f"Fehler beim Aktualisieren des Benutzers {user_id}: {str(e)}") - return jsonify({"error": 
"Interner Serverfehler"}), 500 - -@app.route("/api/admin/users/", methods=["DELETE"]) -@login_required -def delete_user(user_id): - """Löscht einen Benutzer (nur für Admins).""" - if not current_user.is_admin: - return jsonify({"error": "Nur Administratoren können Benutzer löschen"}), 403 - - # Verhindern, dass sich der Admin selbst löscht - if user_id == current_user.id: - return jsonify({"error": "Sie können sich nicht selbst löschen"}), 400 - - try: - db_session = get_db_session() - - user = db_session.get(User, user_id) - if not user: - db_session.close() - return jsonify({"error": "Benutzer nicht gefunden"}), 404 - - # Prüfen, ob noch aktive Jobs für diesen Benutzer existieren - active_jobs = db_session.query(Job).filter( - Job.user_id == user_id, - Job.status.in_(["scheduled", "running"]) - ).count() - - if active_jobs > 0: - db_session.close() - return jsonify({"error": f"Benutzer kann nicht gelöscht werden: {active_jobs} aktive Jobs vorhanden"}), 400 - - username = user.username - db_session.delete(user) - db_session.commit() - db_session.close() - - user_logger.info(f"Benutzer '{username}' (ID: {user_id}) gelöscht von Admin {current_user.id}") - return jsonify({"message": "Benutzer erfolgreich gelöscht"}) - - except Exception as e: - user_logger.error(f"Fehler beim Löschen des Benutzers {user_id}: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -# ===== FEHLERBEHANDLUNG ===== - -@app.errorhandler(404) -def not_found_error(error): - return render_template('errors/404.html'), 404 - -@app.errorhandler(500) -def internal_error(error): - return render_template('errors/500.html'), 500 - -@app.errorhandler(403) -def forbidden_error(error): - return render_template('errors/403.html'), 403 - -# ===== ADMIN - DATENBANK-VERWALTUNG ===== - -@app.route('/api/admin/database/stats', methods=['GET']) -@admin_required -def get_database_stats(): - """Gibt Datenbank-Statistiken zurück.""" - try: - if database_monitor is None: - return jsonify({ - 
"success": False, - "error": "Database Monitor nicht verfügbar" - }), 503 - - stats = database_monitor.get_database_stats() - return jsonify({ - "success": True, - "stats": stats - }) - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Datenbank-Statistiken: {str(e)}") - return jsonify({ - "success": False, - "error": str(e) - }), 500 - -@app.route('/api/admin/database/health', methods=['GET']) -@admin_required -def check_database_health(): - """Führt eine Datenbank-Gesundheitsprüfung durch.""" - try: - if database_monitor is None: - return jsonify({ - "success": False, - "error": "Database Monitor nicht verfügbar" - }), 503 - - health = database_monitor.check_database_health() - return jsonify({ - "success": True, - "health": health - }) - except Exception as e: - app_logger.error(f"Fehler bei Datenbank-Gesundheitsprüfung: {str(e)}") - return jsonify({ - "success": False, - "error": str(e) - }), 500 - -@app.route('/api/admin/database/optimize', methods=['POST']) -@admin_required -def optimize_database(): - """Führt Datenbank-Optimierung durch.""" - try: - if database_monitor is None: - return jsonify({ - "success": False, - "error": "Database Monitor nicht verfügbar" - }), 503 - - result = database_monitor.optimize_database() - return jsonify({ - "success": result["success"], - "result": result - }) - except Exception as e: - app_logger.error(f"Fehler bei Datenbank-Optimierung: {str(e)}") - return jsonify({ - "success": False, - "error": str(e) - }), 500 - -@app.route('/api/admin/database/backup', methods=['POST']) -@admin_required -def create_database_backup(): - """Erstellt ein manuelles Datenbank-Backup.""" - try: - if backup_manager is None: - return jsonify({ - "success": False, - "error": "Backup Manager nicht verfügbar" - }), 503 - - data = request.get_json() or {} - compress = data.get('compress', True) - - backup_path = backup_manager.create_backup(compress=compress) - - return jsonify({ - "success": True, - "backup_path": backup_path, - 
"message": "Backup erfolgreich erstellt" - }) - except Exception as e: - app_logger.error(f"Fehler beim Erstellen des Backups: {str(e)}") - return jsonify({ - "success": False, - "error": str(e) - }), 500 - -@app.route('/api/admin/database/backups', methods=['GET']) -@admin_required -def list_database_backups(): - """Listet alle verfügbaren Datenbank-Backups auf.""" - try: - if backup_manager is None: - return jsonify({ - "success": False, - "error": "Backup Manager nicht verfügbar" - }), 503 - - backups = backup_manager.get_backup_list() - - # Konvertiere datetime-Objekte zu Strings für JSON - for backup in backups: - backup['created'] = backup['created'].isoformat() - - return jsonify({ - "success": True, - "backups": backups - }) - except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Backup-Liste: {str(e)}") - return jsonify({ - "success": False, - "error": str(e) - }), 500 - -@app.route('/api/admin/database/backup/restore', methods=['POST']) -@admin_required -def restore_database_backup(): - """Stellt ein Datenbank-Backup wieder her.""" - try: - if backup_manager is None: - return jsonify({ - "success": False, - "error": "Backup Manager nicht verfügbar" - }), 503 - - data = request.get_json() - if not data or 'backup_path' not in data: - return jsonify({ - "success": False, - "error": "Backup-Pfad erforderlich" - }), 400 - - backup_path = data['backup_path'] - - # Sicherheitsprüfung: Nur Backups aus dem Backup-Verzeichnis erlauben - if not backup_path.startswith(backup_manager.backup_dir): - return jsonify({ - "success": False, - "error": "Ungültiger Backup-Pfad" - }), 400 - - success = backup_manager.restore_backup(backup_path) - - if success: - return jsonify({ - "success": True, - "message": "Backup erfolgreich wiederhergestellt" - }) - else: - return jsonify({ - "success": False, - "error": "Fehler beim Wiederherstellen des Backups" - }), 500 - - except Exception as e: - app_logger.error(f"Fehler beim Wiederherstellen des Backups: {str(e)}") 
- return jsonify({ - "success": False, - "error": str(e) - }), 500 - -@app.route('/api/admin/database/backup/cleanup', methods=['POST']) -@admin_required -def cleanup_old_backups(): - """Löscht alte Datenbank-Backups.""" - try: - backup_dir = os.path.join(os.path.dirname(__file__), 'database', 'backups') - if not os.path.exists(backup_dir): - return jsonify({"error": "Backup-Verzeichnis nicht gefunden"}), 404 - - # Backups älter als 30 Tage löschen - cutoff_date = datetime.now() - timedelta(days=30) - deleted_count = 0 - - for filename in os.listdir(backup_dir): - if filename.endswith('.sql'): - file_path = os.path.join(backup_dir, filename) - file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path)) - - if file_mtime < cutoff_date: - os.remove(file_path) - deleted_count += 1 - - return jsonify({ - "message": f"{deleted_count} alte Backups gelöscht", - "deleted_count": deleted_count - }) - - except Exception as e: - app_logger.error(f"Fehler beim Löschen alter Backups: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route('/api/admin/stats/live', methods=['GET']) -@admin_required -def get_admin_live_stats(): - """Liefert Live-Statistiken für das Admin-Dashboard.""" - try: - db_session = get_db_session() - - # Aktuelle Statistiken sammeln - total_users = db_session.query(User).count() - total_printers = db_session.query(Printer).count() - total_jobs = db_session.query(Job).count() - active_jobs = db_session.query(Job).filter(Job.status.in_(["scheduled", "running"])).count() - - # Printer-Status - available_printers = db_session.query(Printer).filter(Printer.status == "available").count() - offline_printers = db_session.query(Printer).filter(Printer.status == "offline").count() - maintenance_printers = db_session.query(Printer).filter(Printer.status == "maintenance").count() - - # Jobs heute - today = datetime.now().date() - jobs_today = db_session.query(Job).filter( - func.date(Job.created_at) == today - ).count() - - # 
Erfolgreiche Jobs heute - completed_jobs_today = db_session.query(Job).filter( - func.date(Job.created_at) == today, - Job.status == "completed" - ).count() - - db_session.close() - - stats = { - "users": { - "total": total_users - }, - "printers": { - "total": total_printers, - "available": available_printers, - "offline": offline_printers, - "maintenance": maintenance_printers - }, - "jobs": { - "total": total_jobs, - "active": active_jobs, - "today": jobs_today, - "completed_today": completed_jobs_today - }, + "health_status": "healthy", + "critical_errors": critical_errors, + "warnings": warnings, "timestamp": datetime.now().isoformat() - } - - return jsonify(stats) + }) except Exception as e: - app_logger.error(f"Fehler beim Abrufen der Live-Statistiken: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route('/api/admin/system/status', methods=['GET']) -@admin_required -def get_system_status(): - """Liefert System-Status-Informationen.""" - try: - import psutil - import platform - - # CPU und Memory - cpu_percent = psutil.cpu_percent(interval=1) - memory = psutil.virtual_memory() - disk = psutil.disk_usage('/') - - # Netzwerk (vereinfacht) - network = psutil.net_io_counters() - - system_info = { - "platform": platform.system(), - "platform_release": platform.release(), - "platform_version": platform.version(), - "machine": platform.machine(), - "processor": platform.processor(), - "cpu": { - "percent": cpu_percent, - "count": psutil.cpu_count() - }, - "memory": { - "total": memory.total, - "available": memory.available, - "percent": memory.percent, - "used": memory.used - }, - "disk": { - "total": disk.total, - "used": disk.used, - "free": disk.free, - "percent": (disk.used / disk.total) * 100 - }, - "network": { - "bytes_sent": network.bytes_sent, - "bytes_recv": network.bytes_recv - }, - "timestamp": datetime.now().isoformat() - } - - return jsonify(system_info) - - except ImportError: + logger.error(f"Fehler beim 
System-Gesundheitscheck: {str(e)}") return jsonify({ - "error": "psutil nicht installiert", - "message": "Systemstatus kann nicht abgerufen werden" - }), 500 - except Exception as e: - app_logger.error(f"Fehler beim Abrufen des Systemstatus: {str(e)}") - return jsonify({"error": "Interner Serverfehler"}), 500 - -@app.route('/api/admin/database/status', methods=['GET']) -@admin_required -def get_database_status(): - """Liefert Datenbank-Status-Informationen.""" - try: - db_session = get_db_session() - - # Tabellen-Informationen sammeln - table_stats = {} - - # User-Tabelle - user_count = db_session.query(User).count() - latest_user = db_session.query(User).order_by(User.created_at.desc()).first() - - # Printer-Tabelle - printer_count = db_session.query(Printer).count() - latest_printer = db_session.query(Printer).order_by(Printer.created_at.desc()).first() - - # Job-Tabelle - job_count = db_session.query(Job).count() - latest_job = db_session.query(Job).order_by(Job.created_at.desc()).first() - - table_stats = { - "users": { - "count": user_count, - "latest": latest_user.created_at.isoformat() if latest_user else None - }, - "printers": { - "count": printer_count, - "latest": latest_printer.created_at.isoformat() if latest_printer else None - }, - "jobs": { - "count": job_count, - "latest": latest_job.created_at.isoformat() if latest_job else None - } - } - - db_session.close() - - # Datenbank-Dateigröße (falls SQLite) - db_file_size = None - try: - db_path = os.path.join(os.path.dirname(__file__), 'database', 'app.db') - if os.path.exists(db_path): - db_file_size = os.path.getsize(db_path) - except: - pass - - status = { - "tables": table_stats, - "database_size": db_file_size, - "timestamp": datetime.now().isoformat(), - "connection_status": "connected" - } - - return jsonify(status) - - except Exception as e: - app_logger.error(f"Fehler beim Abrufen des Datenbankstatus: {str(e)}") - return jsonify({ - "error": "Datenbankfehler", - "connection_status": "error", - 
"timestamp": datetime.now().isoformat() + "success": False, + "error": str(e) }), 500 -# ===== WEITERE UI-ROUTEN ===== - -@app.route("/terms") -def terms(): - """Zeigt die Nutzungsbedingungen an.""" - return render_template("terms.html") - -@app.route("/privacy") -def privacy(): - """Zeigt die Datenschutzerklärung an.""" - return render_template("privacy.html") - -@app.route("/admin/users/add") -@login_required -def admin_add_user_page(): - """Zeigt die Seite zum Hinzufügen eines neuen Benutzers an.""" - if not current_user.is_admin: - flash("Sie haben keine Berechtigung für den Admin-Bereich.", "error") - return redirect(url_for("index")) - return render_template("admin_add_user.html") - -@app.route("/admin/printers/add") -@login_required -def admin_add_printer_page(): - """Zeigt die Seite zum Hinzufügen eines neuen Druckers an.""" - if not current_user.is_admin: - flash("Sie haben keine Berechtigung für den Admin-Bereich.", "error") - return redirect(url_for("index")) - return render_template("admin_add_printer.html") - -@app.route("/admin/printers//manage") -@login_required -def admin_manage_printer_page(printer_id): - """Zeigt die Drucker-Verwaltungsseite an.""" - if not current_user.is_admin: - flash("Sie haben keine Berechtigung für den Admin-Bereich.", "error") - return redirect(url_for("index")) - - db_session = get_db_session() - try: - printer = db_session.get(Printer, printer_id) - if not printer: - flash("Drucker nicht gefunden.", "error") - return redirect(url_for("admin_page")) - - printer_data = { - "id": printer.id, - "name": printer.name, - "model": printer.model or 'Unbekanntes Modell', - "location": printer.location or 'Unbekannter Standort', - "mac_address": printer.mac_address, - "plug_ip": printer.plug_ip, - "status": printer.status or "offline", - "active": printer.active if hasattr(printer, 'active') else True, - "created_at": printer.created_at.isoformat() if printer.created_at else datetime.now().isoformat() - } - - db_session.close() - 
return render_template("admin_manage_printer.html", printer=printer_data) - - except Exception as e: - db_session.close() - app_logger.error(f"Fehler beim Laden der Drucker-Verwaltung: {str(e)}") - flash("Fehler beim Laden der Drucker-Daten.", "error") - return redirect(url_for("admin_page")) - -@app.route("/admin/printers//settings") -@login_required def admin_printer_settings_page(printer_id): """Zeigt die Drucker-Einstellungsseite an.""" if not current_user.is_admin: @@ -5710,6 +4052,735 @@ def validate_optimization_settings(settings): # ===== GASTANTRÄGE API-ROUTEN ===== +# ===== NEUE SYSTEM API-ROUTEN ===== + +# ===== FORM VALIDATION API ===== +@app.route('/api/validation/client-js', methods=['GET']) +def get_validation_js(): + """Liefert Client-seitige Validierungs-JavaScript""" + try: + js_content = get_client_validation_js() + response = make_response(js_content) + response.headers['Content-Type'] = 'application/javascript' + response.headers['Cache-Control'] = 'public, max-age=3600' # 1 Stunde Cache + return response + except Exception as e: + logger.error(f"Fehler beim Laden des Validierungs-JS: {str(e)}") + return "console.error('Validierungs-JavaScript konnte nicht geladen werden');", 500 + +@app.route('/api/validation/validate-form', methods=['POST']) +def validate_form_api(): + """API-Endpunkt für Formular-Validierung""" + try: + data = request.get_json() or {} + form_type = data.get('form_type') + form_data = data.get('data', {}) + + # Validator basierend auf Form-Typ auswählen + if form_type == 'user_registration': + validator = get_user_registration_validator() + elif form_type == 'job_creation': + validator = get_job_creation_validator() + elif form_type == 'printer_creation': + validator = get_printer_creation_validator() + elif form_type == 'guest_request': + validator = get_guest_request_validator() + else: + return jsonify({'success': False, 'error': 'Unbekannter Formular-Typ'}), 400 + + # Validierung durchführen + result = 
validator.validate(form_data) + + return jsonify({ + 'success': result.is_valid, + 'errors': result.errors, + 'warnings': result.warnings, + 'cleaned_data': result.cleaned_data if result.is_valid else {} + }) + + except Exception as e: + logger.error(f"Fehler bei Formular-Validierung: {str(e)}") + return jsonify({'success': False, 'error': str(e)}), 500 + +# ===== REPORT GENERATOR API ===== +@app.route('/api/reports/generate', methods=['POST']) +@login_required +def generate_report(): + """Generiert Reports in verschiedenen Formaten""" + try: + data = request.get_json() or {} + report_type = data.get('type', 'comprehensive') + format_type = data.get('format', 'pdf') + filters = data.get('filters', {}) + + # Report-Konfiguration erstellen + config = ReportConfig( + title=f"MYP System Report - {report_type.title()}", + subtitle=f"Generiert am {datetime.now().strftime('%d.%m.%Y %H:%M')}", + author=current_user.name if current_user.is_authenticated else "System" + ) + + # Report-Daten basierend auf Typ sammeln + if report_type == 'jobs': + report_data = JobReportBuilder.build_jobs_report( + start_date=filters.get('start_date'), + end_date=filters.get('end_date'), + user_id=filters.get('user_id'), + printer_id=filters.get('printer_id') + ) + elif report_type == 'users': + report_data = UserReportBuilder.build_users_report( + include_inactive=filters.get('include_inactive', False) + ) + elif report_type == 'printers': + report_data = PrinterReportBuilder.build_printers_report( + include_inactive=filters.get('include_inactive', False) + ) + else: + # Umfassender Report + report_bytes = generate_comprehensive_report( + format_type=format_type, + start_date=filters.get('start_date'), + end_date=filters.get('end_date'), + user_id=current_user.id if not current_user.is_admin else None + ) + + response = make_response(report_bytes) + response.headers['Content-Type'] = f'application/{format_type}' + response.headers['Content-Disposition'] = f'attachment; 
filename="myp_report.{format_type}"' + return response + + # Generator erstellen und Report generieren + generator = ReportFactory.create_generator(format_type, config) + + # Daten zum Generator hinzufügen + for section_name, section_data in report_data.items(): + if isinstance(section_data, list): + generator.add_data_section(section_name, section_data) + + # Report in BytesIO generieren + import io + output = io.BytesIO() + if generator.generate(output): + output.seek(0) + response = make_response(output.read()) + response.headers['Content-Type'] = f'application/{format_type}' + response.headers['Content-Disposition'] = f'attachment; filename="myp_{report_type}_report.{format_type}"' + return response + else: + return jsonify({'error': 'Report-Generierung fehlgeschlagen'}), 500 + + except Exception as e: + logger.error(f"Fehler bei Report-Generierung: {str(e)}") + return jsonify({'error': str(e)}), 500 + +# ===== REALTIME DASHBOARD API ===== +@app.route('/api/dashboard/config', methods=['GET']) +@login_required +def get_dashboard_config(): + """Holt Dashboard-Konfiguration für aktuellen Benutzer""" + try: + config = dashboard_manager.get_dashboard_config(current_user.id) + return jsonify(config) + except Exception as e: + logger.error(f"Fehler beim Laden der Dashboard-Konfiguration: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dashboard/widgets//data', methods=['GET']) +@login_required +def get_widget_data(widget_id): + """Holt Daten für ein spezifisches Widget""" + try: + data = dashboard_manager._get_widget_data(widget_id) + return jsonify({ + 'widget_id': widget_id, + 'data': data, + 'timestamp': datetime.now().isoformat() + }) + except Exception as e: + logger.error(f"Fehler beim Laden der Widget-Daten für {widget_id}: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dashboard/emit-event', methods=['POST']) +@login_required +def emit_dashboard_event(): + """Sendet ein Dashboard-Ereignis""" + try: + data = 
request.get_json() or {} + event_type = EventType(data.get('event_type')) + event_data = data.get('data', {}) + priority = data.get('priority', 'normal') + + event = DashboardEvent( + event_type=event_type, + data=event_data, + timestamp=datetime.now(), + user_id=current_user.id, + priority=priority + ) + + dashboard_manager.emit_event(event) + return jsonify({'success': True}) + + except Exception as e: + logger.error(f"Fehler beim Senden des Dashboard-Ereignisses: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dashboard/client-js', methods=['GET']) +def get_dashboard_js(): + """Liefert Client-seitige Dashboard-JavaScript""" + try: + js_content = get_dashboard_client_js() + response = make_response(js_content) + response.headers['Content-Type'] = 'application/javascript' + response.headers['Cache-Control'] = 'public, max-age=1800' # 30 Minuten Cache + return response + except Exception as e: + logger.error(f"Fehler beim Laden des Dashboard-JS: {str(e)}") + return "console.error('Dashboard-JavaScript konnte nicht geladen werden');", 500 + +# ===== DRAG & DROP API ===== +@app.route('/api/dragdrop/update-job-order', methods=['POST']) +@login_required +def update_job_order(): + """Aktualisiert die Job-Reihenfolge per Drag & Drop""" + try: + data = request.get_json() or {} + printer_id = data.get('printer_id') + job_ids = data.get('job_ids', []) + + if not printer_id or not isinstance(job_ids, list): + return jsonify({'error': 'Ungültige Parameter'}), 400 + + success = drag_drop_manager.update_job_order(printer_id, job_ids) + + if success: + # Dashboard-Event senden + emit_system_alert( + f"Job-Reihenfolge für Drucker {printer_id} aktualisiert", + alert_type="info", + priority="normal" + ) + + return jsonify({ + 'success': True, + 'message': 'Job-Reihenfolge erfolgreich aktualisiert' + }) + else: + return jsonify({'error': 'Fehler beim Aktualisieren der Job-Reihenfolge'}), 500 + + except Exception as e: + logger.error(f"Fehler beim 
Aktualisieren der Job-Reihenfolge: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dragdrop/get-job-order/<int:printer_id>', methods=['GET']) +@login_required +def get_job_order_api(printer_id): + """Holt die aktuelle Job-Reihenfolge für einen Drucker""" + try: + job_ids = drag_drop_manager.get_job_order(printer_id) + ordered_jobs = drag_drop_manager.get_ordered_jobs_for_printer(printer_id) + + job_data = [] + for job in ordered_jobs: + job_data.append({ + 'id': job.id, + 'name': job.name, + 'duration_minutes': job.duration_minutes, + 'user_name': job.user.name if job.user else 'Unbekannt', + 'status': job.status, + 'created_at': job.created_at.isoformat() if job.created_at else None + }) + + return jsonify({ + 'printer_id': printer_id, + 'job_ids': job_ids, + 'jobs': job_data, + 'total_jobs': len(job_data) + }) + + except Exception as e: + logger.error(f"Fehler beim Abrufen der Job-Reihenfolge: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dragdrop/upload-session', methods=['POST']) +@login_required +def create_upload_session(): + """Erstellt eine neue Upload-Session""" + try: + import uuid + session_id = str(uuid.uuid4()) + drag_drop_manager.create_upload_session(session_id) + + return jsonify({ + 'session_id': session_id, + 'success': True + }) + + except Exception as e: + logger.error(f"Fehler beim Erstellen der Upload-Session: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dragdrop/upload-progress/<session_id>', methods=['GET']) +@login_required +def get_upload_progress(session_id): + """Holt Upload-Progress für eine Session""" + try: + progress = drag_drop_manager.get_session_progress(session_id) + return jsonify(progress) + except Exception as e: + logger.error(f"Fehler beim Abrufen des Upload-Progress: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/dragdrop/client-js', methods=['GET']) +def get_dragdrop_js(): + """Liefert Client-seitige Drag & Drop JavaScript""" + try: + js_content = 
get_drag_drop_javascript() + response = make_response(js_content) + response.headers['Content-Type'] = 'application/javascript' + response.headers['Cache-Control'] = 'public, max-age=3600' + return response + except Exception as e: + logger.error(f"Fehler beim Laden des Drag & Drop JS: {str(e)}") + return "console.error('Drag & Drop JavaScript konnte nicht geladen werden');", 500 + +@app.route('/api/dragdrop/client-css', methods=['GET']) +def get_dragdrop_css(): + """Liefert Client-seitige Drag & Drop CSS""" + try: + css_content = get_drag_drop_css() + response = make_response(css_content) + response.headers['Content-Type'] = 'text/css' + response.headers['Cache-Control'] = 'public, max-age=3600' + return response + except Exception as e: + logger.error(f"Fehler beim Laden des Drag & Drop CSS: {str(e)}") + return "/* Drag & Drop CSS konnte nicht geladen werden */", 500 + +# ===== ADVANCED TABLES API ===== +@app.route('/api/tables/query', methods=['POST']) +@login_required +def query_advanced_table(): + """Führt erweiterte Tabellen-Abfragen durch""" + try: + data = request.get_json() or {} + table_type = data.get('table_type') + query_params = data.get('query', {}) + + # Tabellen-Konfiguration erstellen + if table_type == 'jobs': + config = create_table_config( + 'jobs', + ['id', 'name', 'user_name', 'printer_name', 'status', 'created_at'], + base_query='Job' + ) + elif table_type == 'printers': + config = create_table_config( + 'printers', + ['id', 'name', 'model', 'location', 'status', 'ip_address'], + base_query='Printer' + ) + elif table_type == 'users': + config = create_table_config( + 'users', + ['id', 'name', 'email', 'role', 'active', 'last_login'], + base_query='User' + ) + else: + return jsonify({'error': 'Unbekannter Tabellen-Typ'}), 400 + + # Erweiterte Abfrage erstellen + query_builder = AdvancedTableQuery(config) + + # Filter anwenden + if 'filters' in query_params: + for filter_data in query_params['filters']: + query_builder.add_filter( + 
filter_data['column'], + filter_data['operator'], + filter_data['value'] + ) + + # Sortierung anwenden + if 'sort' in query_params: + query_builder.set_sorting( + query_params['sort']['column'], + query_params['sort']['direction'] + ) + + # Paginierung anwenden + if 'pagination' in query_params: + query_builder.set_pagination( + query_params['pagination']['page'], + query_params['pagination']['per_page'] + ) + + # Abfrage ausführen + result = query_builder.execute() + + return jsonify(result) + + except Exception as e: + logger.error(f"Fehler bei erweiterte Tabellen-Abfrage: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/tables/export', methods=['POST']) +@login_required +def export_table_data(): + """Exportiert Tabellen-Daten in verschiedenen Formaten""" + try: + data = request.get_json() or {} + table_type = data.get('table_type') + export_format = data.get('format', 'csv') + query_params = data.get('query', {}) + + # Hier würde die Export-Logik implementiert + # Für jetzt einfache CSV-Export-Simulation + + if export_format == 'csv': + import csv + import io + + output = io.StringIO() + writer = csv.writer(output) + + # Beispiel-Daten (würde durch echte Abfrage ersetzt) + writer.writerow(['ID', 'Name', 'Status', 'Erstellt']) + writer.writerow([1, 'Beispiel Job', 'Aktiv', '2025-01-07']) + + response = make_response(output.getvalue()) + response.headers['Content-Type'] = 'text/csv' + response.headers['Content-Disposition'] = f'attachment; filename="{table_type}_export.csv"' + return response + + return jsonify({'error': 'Export-Format nicht unterstützt'}), 400 + + except Exception as e: + logger.error(f"Fehler beim Tabellen-Export: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/tables/client-js', methods=['GET']) +def get_tables_js(): + """Liefert Client-seitige Advanced Tables JavaScript""" + try: + js_content = get_advanced_tables_js() + response = make_response(js_content) + response.headers['Content-Type'] = 
'application/javascript' + response.headers['Cache-Control'] = 'public, max-age=3600' + return response + except Exception as e: + logger.error(f"Fehler beim Laden des Tables-JS: {str(e)}") + return "console.error('Advanced Tables JavaScript konnte nicht geladen werden');", 500 + +@app.route('/api/tables/client-css', methods=['GET']) +def get_tables_css(): + """Liefert Client-seitige Advanced Tables CSS""" + try: + css_content = get_advanced_tables_css() + response = make_response(css_content) + response.headers['Content-Type'] = 'text/css' + response.headers['Cache-Control'] = 'public, max-age=3600' + return response + except Exception as e: + logger.error(f"Fehler beim Laden des Tables-CSS: {str(e)}") + return "/* Advanced Tables CSS konnte nicht geladen werden */", 500 + +# ===== MAINTENANCE SYSTEM API ===== +@app.route('/api/maintenance/tasks', methods=['GET', 'POST']) +@login_required +def maintenance_tasks(): + """Wartungsaufgaben abrufen oder erstellen""" + if request.method == 'GET': + try: + filters = { + 'printer_id': request.args.get('printer_id', type=int), + 'status': request.args.get('status'), + 'priority': request.args.get('priority'), + 'due_date_from': request.args.get('due_date_from'), + 'due_date_to': request.args.get('due_date_to') + } + + tasks = maintenance_manager.get_tasks(filters) + return jsonify({ + 'tasks': [task.to_dict() for task in tasks], + 'total': len(tasks) + }) + + except Exception as e: + logger.error(f"Fehler beim Abrufen der Wartungsaufgaben: {str(e)}") + return jsonify({'error': str(e)}), 500 + + elif request.method == 'POST': + try: + data = request.get_json() or {} + + task = create_maintenance_task( + printer_id=data.get('printer_id'), + task_type=MaintenanceType(data.get('task_type')), + title=data.get('title'), + description=data.get('description'), + priority=data.get('priority', 'normal'), + assigned_to=data.get('assigned_to'), + due_date=data.get('due_date') + ) + + if task: + # Dashboard-Event senden + 
emit_system_alert( + f"Neue Wartungsaufgabe erstellt: {task.title}", + alert_type="info", + priority=task.priority + ) + + return jsonify({ + 'success': True, + 'task': task.to_dict(), + 'message': 'Wartungsaufgabe erfolgreich erstellt' + }) + else: + return jsonify({'error': 'Fehler beim Erstellen der Wartungsaufgabe'}), 500 + + except Exception as e: + logger.error(f"Fehler beim Erstellen der Wartungsaufgabe: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/maintenance/tasks/<int:task_id>/status', methods=['PUT']) +@login_required +def update_maintenance_task_status(task_id): + """Aktualisiert den Status einer Wartungsaufgabe""" + try: + data = request.get_json() or {} + new_status = MaintenanceStatus(data.get('status')) + notes = data.get('notes', '') + + success = update_maintenance_status( + task_id=task_id, + new_status=new_status, + updated_by=current_user.id, + notes=notes + ) + + if success: + return jsonify({ + 'success': True, + 'message': 'Wartungsaufgaben-Status erfolgreich aktualisiert' + }) + else: + return jsonify({'error': 'Fehler beim Aktualisieren des Status'}), 500 + + except Exception as e: + logger.error(f"Fehler beim Aktualisieren des Wartungsaufgaben-Status: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/maintenance/overview', methods=['GET']) +@login_required +def get_maintenance_overview_api(): + """Holt Wartungs-Übersicht""" + try: + overview = get_maintenance_overview() + return jsonify(overview) + except Exception as e: + logger.error(f"Fehler beim Abrufen der Wartungs-Übersicht: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/maintenance/schedule', methods=['POST']) +@login_required +@admin_required +def schedule_maintenance_api(): + """Plant automatische Wartungen""" + try: + data = request.get_json() or {} + + schedule = schedule_maintenance( + printer_id=data.get('printer_id'), + maintenance_type=MaintenanceType(data.get('maintenance_type')), + 
interval_days=data.get('interval_days'), + start_date=data.get('start_date') + ) + + if schedule: + return jsonify({ + 'success': True, + 'schedule': schedule.to_dict(), + 'message': 'Wartungsplan erfolgreich erstellt' + }) + else: + return jsonify({'error': 'Fehler beim Erstellen des Wartungsplans'}), 500 + + except Exception as e: + logger.error(f"Fehler beim Planen der Wartung: {str(e)}") + return jsonify({'error': str(e)}), 500 + +# ===== MULTI-LOCATION SYSTEM API ===== +@app.route('/api/locations', methods=['GET', 'POST']) +@login_required +def locations(): + """Standorte abrufen oder erstellen""" + if request.method == 'GET': + try: + filters = { + 'location_type': request.args.get('type'), + 'active_only': request.args.get('active_only', 'true').lower() == 'true' + } + + locations = location_manager.get_locations(filters) + return jsonify({ + 'locations': [loc.to_dict() for loc in locations], + 'total': len(locations) + }) + + except Exception as e: + logger.error(f"Fehler beim Abrufen der Standorte: {str(e)}") + return jsonify({'error': str(e)}), 500 + + elif request.method == 'POST': + try: + data = request.get_json() or {} + + location = create_location( + name=data.get('name'), + location_type=LocationType(data.get('type')), + address=data.get('address'), + description=data.get('description'), + coordinates=data.get('coordinates'), + parent_location_id=data.get('parent_location_id') + ) + + if location: + return jsonify({ + 'success': True, + 'location': location.to_dict(), + 'message': 'Standort erfolgreich erstellt' + }) + else: + return jsonify({'error': 'Fehler beim Erstellen des Standorts'}), 500 + + except Exception as e: + logger.error(f"Fehler beim Erstellen des Standorts: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/locations/<int:location_id>/users', methods=['GET', 'POST']) +@login_required +@admin_required +def location_users(location_id): + """Benutzer-Zuweisungen für einen Standort verwalten""" + if request.method == 'GET': + try: 
+ users = location_manager.get_location_users(location_id) + return jsonify({ + 'location_id': location_id, + 'users': [user.to_dict() for user in users], + 'total': len(users) + }) + + except Exception as e: + logger.error(f"Fehler beim Abrufen der Standort-Benutzer: {str(e)}") + return jsonify({'error': str(e)}), 500 + + elif request.method == 'POST': + try: + data = request.get_json() or {} + + success = assign_user_to_location( + user_id=data.get('user_id'), + location_id=location_id, + access_level=AccessLevel(data.get('access_level', 'READ')), + valid_until=data.get('valid_until') + ) + + if success: + return jsonify({ + 'success': True, + 'message': 'Benutzer erfolgreich zu Standort zugewiesen' + }) + else: + return jsonify({'error': 'Fehler bei der Benutzer-Zuweisung'}), 500 + + except Exception as e: + logger.error(f"Fehler bei der Benutzer-Zuweisung: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/locations/user/<int:user_id>', methods=['GET']) +@login_required +def get_user_locations_api(user_id): + """Holt alle Standorte eines Benutzers""" + try: + # Berechtigung prüfen + if current_user.id != user_id and not current_user.is_admin: + return jsonify({'error': 'Keine Berechtigung'}), 403 + + locations = get_user_locations(user_id) + return jsonify({ + 'user_id': user_id, + 'locations': [loc.to_dict() for loc in locations], + 'total': len(locations) + }) + + except Exception as e: + logger.error(f"Fehler beim Abrufen der Benutzer-Standorte: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/locations/distance', methods=['POST']) +@login_required +def calculate_distance_api(): + """Berechnet Entfernung zwischen zwei Standorten""" + try: + data = request.get_json() or {} + coord1 = data.get('coordinates1') # [lat, lon] + coord2 = data.get('coordinates2') # [lat, lon] + + if not coord1 or not coord2: + return jsonify({'error': 'Koordinaten erforderlich'}), 400 + + distance = calculate_distance(coord1, coord2) + + return 
jsonify({ + 'distance_km': distance, + 'distance_m': distance * 1000 + }) + + except Exception as e: + logger.error(f"Fehler bei Entfernungsberechnung: {str(e)}") + return jsonify({'error': str(e)}), 500 + +@app.route('/api/locations/nearest', methods=['POST']) +@login_required +def find_nearest_location_api(): + """Findet den nächstgelegenen Standort""" + try: + data = request.get_json() or {} + coordinates = data.get('coordinates') # [lat, lon] + location_type = data.get('location_type') + max_distance = data.get('max_distance', 50) # km + + if not coordinates: + return jsonify({'error': 'Koordinaten erforderlich'}), 400 + + nearest = find_nearest_location( + coordinates=coordinates, + location_type=LocationType(location_type) if location_type else None, + max_distance_km=max_distance + ) + + if nearest: + location, distance = nearest + return jsonify({ + 'location': location.to_dict(), + 'distance_km': distance + }) + else: + return jsonify({ + 'location': None, + 'message': 'Kein Standort in der Nähe gefunden' + }) + + except Exception as e: + logger.error(f"Fehler bei der Suche nach nächstem Standort: {str(e)}") + return jsonify({'error': str(e)}), 500 + +# ===== GASTANTRÄGE API-ROUTEN ===== + # ===== STARTUP UND MAIN ===== if __name__ == "__main__": import sys @@ -5803,9 +4874,8 @@ if __name__ == "__main__": signal.signal(signal.SIGHUP, signal_handler) try: - # Datenbank initialisieren - init_database() - create_initial_admin() + # Datenbank initialisieren und Migrationen durchführen + setup_database_with_migrations() # Template-Hilfsfunktionen registrieren register_template_helpers(app) @@ -5937,4 +5007,101 @@ if __name__ == "__main__": stop_queue_manager() except: pass - sys.exit(1) \ No newline at end of file + sys.exit(1) + +def setup_database_with_migrations(): + """ + Datenbank initialisieren und alle erforderlichen Tabellen erstellen. + Führt Migrationen für neue Tabellen wie JobOrder durch. 
+ """ + try: + app_logger.info("🔄 Starte Datenbank-Setup und Migrationen...") + + # Standard-Datenbank-Initialisierung + init_database() + + # Explizite Migration für JobOrder-Tabelle + engine = get_engine() + + # Erstelle alle Tabellen (nur neue werden tatsächlich erstellt) + Base.metadata.create_all(engine) + + # Prüfe ob JobOrder-Tabelle existiert + from sqlalchemy import inspect + inspector = inspect(engine) + existing_tables = inspector.get_table_names() + + if 'job_orders' in existing_tables: + app_logger.info("✅ JobOrder-Tabelle bereits vorhanden") + else: + # Tabelle manuell erstellen + JobOrder.__table__.create(engine, checkfirst=True) + app_logger.info("✅ JobOrder-Tabelle erfolgreich erstellt") + + # Initial-Admin erstellen falls nicht vorhanden + create_initial_admin() + + app_logger.info("✅ Datenbank-Setup und Migrationen erfolgreich abgeschlossen") + + except Exception as e: + app_logger.error(f"❌ Fehler bei Datenbank-Setup: {str(e)}") + raise e + +@app.route("/admin/printers/<int:printer_id>/settings") +@login_required +def admin_printer_settings_page(printer_id): + """Zeigt die Drucker-Einstellungsseite an.""" + if not current_user.is_admin: + flash("Sie haben keine Berechtigung für den Admin-Bereich.", "error") + return redirect(url_for("index")) + + db_session = get_db_session() + try: + printer = db_session.get(Printer, printer_id) + if not printer: + flash("Drucker nicht gefunden.", "error") + return redirect(url_for("admin_page")) + + printer_data = { + "id": printer.id, + "name": printer.name, + "model": printer.model or 'Unbekanntes Modell', + "location": printer.location or 'Unbekannter Standort', + "mac_address": printer.mac_address, + "plug_ip": printer.plug_ip, + "status": printer.status or "offline", + "active": printer.active if hasattr(printer, 'active') else True, + "created_at": printer.created_at.isoformat() if printer.created_at else datetime.now().isoformat() + } + + db_session.close() + return render_template("admin_printer_settings.html", 
printer=printer_data) + + except Exception as e: + db_session.close() + app_logger.error(f"Fehler beim Laden der Drucker-Einstellungen: {str(e)}") + flash("Fehler beim Laden der Drucker-Daten.", "error") + return redirect(url_for("admin_page")) + # Erstelle alle Tabellen (nur neue werden tatsächlich erstellt) + Base.metadata.create_all(engine) + + # Prüfe ob JobOrder-Tabelle existiert + from sqlalchemy import inspect + inspector = inspect(engine) + existing_tables = inspector.get_table_names() + + if 'job_orders' in existing_tables: + app_logger.info("✅ JobOrder-Tabelle bereits vorhanden") + else: + # Tabelle manuell erstellen + JobOrder.__table__.create(engine, checkfirst=True) + app_logger.info("✅ JobOrder-Tabelle erfolgreich erstellt") + + # Initial-Admin erstellen falls nicht vorhanden + create_initial_admin() + + app_logger.info("✅ Datenbank-Setup und Migrationen erfolgreich abgeschlossen") + + except Exception as e: + app_logger.error(f"❌ Fehler bei Datenbank-Setup: {str(e)}") + raise e \ No newline at end of file diff --git a/backend/blueprints/guest.py b/backend/blueprints/guest.py index df7f81c9..867d5b98 100644 --- a/backend/blueprints/guest.py +++ b/backend/blueprints/guest.py @@ -96,6 +96,12 @@ def guest_request_form(): ) db_session.add(guest_request) + db_session.flush() # Um ID zu erhalten + + # OTP-Code sofort generieren für Status-Abfrage + otp_code = guest_request.generate_otp() + guest_request.otp_expires_at = datetime.now() + timedelta(hours=72) # 72h gültig + db_session.commit() # Benachrichtigung für Genehmiger erstellen @@ -109,10 +115,10 @@ def guest_request_form(): } ) - logger.info(f"Neue Gastanfrage erstellt: ID {guest_request.id}, Name: {name}") + logger.info(f"Neue Gastanfrage erstellt: ID {guest_request.id}, Name: {name}, OTP generiert") flash("Ihr Antrag wurde erfolgreich eingereicht!", "success") - # Weiterleitung zur Status-Seite + # Weiterleitung zur Status-Seite mit OTP-Code-Info return 
redirect(url_for('guest.guest_request_status', request_id=guest_request.id)) except Exception as e: @@ -295,6 +301,12 @@ def api_create_guest_request(): ) db_session.add(guest_request) + db_session.flush() # Um ID zu erhalten + + # OTP-Code sofort generieren für Status-Abfrage + otp_code = guest_request.generate_otp() + guest_request.otp_expires_at = datetime.now() + timedelta(hours=72) # 72h gültig + db_session.commit() # Benachrichtigung für Genehmiger erstellen @@ -308,12 +320,14 @@ def api_create_guest_request(): } ) - logger.info(f"Neue Gastanfrage erstellt: ID {guest_request.id}, Name: {name}") + logger.info(f"Neue Gastanfrage erstellt: ID {guest_request.id}, Name: {name}, OTP generiert") return jsonify({ "success": True, "request_id": guest_request.id, "status": guest_request.status, + "otp_code": otp_code, # Code wird nur bei Erstellung zurückgegeben + "status_check_url": url_for('guest.guest_status_check_page', _external=True), "redirect_url": url_for('guest.guest_request_status', request_id=guest_request.id) }) @@ -852,4 +866,126 @@ def api_deny_request(request_id): except Exception as e: logger.error(f"Fehler beim Ablehnen der Gastanfrage: {str(e)}") - return jsonify({"error": "Fehler beim Verarbeiten der Anfrage"}), 500 \ No newline at end of file + return jsonify({"error": "Fehler beim Verarbeiten der Anfrage"}), 500 + +@guest_blueprint.route('/api/guest/status', methods=['POST']) +def api_guest_status_by_otp(): + """ + Öffentliche Route für Gäste um ihren Auftragsstatus mit OTP-Code zu prüfen. + Keine Authentifizierung erforderlich. 
+ """ + try: + data = request.get_json() + if not data: + return jsonify({ + 'success': False, + 'message': 'Keine Daten empfangen' + }), 400 + + otp_code = data.get('otp_code', '').strip() + email = data.get('email', '').strip() # Optional für zusätzliche Verifikation + + if not otp_code: + return jsonify({ + 'success': False, + 'message': 'OTP-Code ist erforderlich' + }), 400 + + with get_cached_session() as db_session: + # Alle Gastaufträge mit OTP-Codes finden + guest_requests = db_session.query(GuestRequest).filter( + GuestRequest.otp_code.isnot(None) + ).all() + + found_request = None + for request_obj in guest_requests: + if request_obj.verify_otp(otp_code): + # Zusätzliche E-Mail-Verifikation falls angegeben + if email and request_obj.email and request_obj.email.lower() != email.lower(): + continue + found_request = request_obj + break + + if not found_request: + logger.warning(f"Ungültiger OTP-Code für Gast-Status-Abfrage: {otp_code[:4]}****") + return jsonify({ + 'success': False, + 'message': 'Ungültiger Code oder E-Mail-Adresse' + }), 404 + + # Status-Informationen für den Gast zusammenstellen + status_info = { + 'id': found_request.id, + 'name': found_request.name, + 'file_name': found_request.file_name, + 'status': found_request.status, + 'created_at': found_request.created_at.isoformat() if found_request.created_at else None, + 'updated_at': found_request.updated_at.isoformat() if found_request.updated_at else None, + 'duration_min': found_request.duration_min, + 'reason': found_request.reason + } + + # Status-spezifische Informationen hinzufügen + if found_request.status == 'approved': + status_info.update({ + 'approved_at': found_request.approved_at.isoformat() if found_request.approved_at else None, + 'approval_notes': found_request.approval_notes, + 'message': 'Ihr Auftrag wurde genehmigt! 
Sie können mit dem Drucken beginnen.', + 'can_start_job': found_request.otp_used_at is None # Noch nicht verwendet + }) + + # Job-Informationen hinzufügen falls vorhanden + if found_request.job_id: + job = db_session.query(Job).options(joinedload(Job.printer)).filter_by(id=found_request.job_id).first() + if job: + status_info['job'] = { + 'id': job.id, + 'name': job.name, + 'status': job.status, + 'start_at': job.start_at.isoformat() if job.start_at else None, + 'end_at': job.end_at.isoformat() if job.end_at else None, + 'printer_name': job.printer.name if job.printer else None + } + + elif found_request.status == 'rejected': + status_info.update({ + 'rejected_at': found_request.rejected_at.isoformat() if found_request.rejected_at else None, + 'rejection_reason': found_request.rejection_reason, + 'message': 'Ihr Auftrag wurde leider abgelehnt.' + }) + + elif found_request.status == 'pending': + # Berechne wie lange der Auftrag schon wartet + if found_request.created_at: + waiting_time = datetime.now() - found_request.created_at + hours_waiting = int(waiting_time.total_seconds() / 3600) + status_info.update({ + 'hours_waiting': hours_waiting, + 'message': f'Ihr Auftrag wird bearbeitet. Wartezeit: {hours_waiting} Stunden.' + }) + else: + status_info['message'] = 'Ihr Auftrag wird bearbeitet.' + + # OTP als verwendet markieren (da erfolgreich abgefragt) + db_session.commit() + + logger.info(f"Gast-Status-Abfrage erfolgreich für Request {found_request.id}") + + return jsonify({ + 'success': True, + 'request': status_info + }) + + except Exception as e: + logger.error(f"Fehler bei Gast-Status-Abfrage: {str(e)}") + return jsonify({ + 'success': False, + 'message': 'Fehler beim Abrufen des Status' + }), 500 + +@guest_blueprint.route('/status-check') +def guest_status_check_page(): + """ + Öffentliche Seite für Gäste um ihren Auftragsstatus zu prüfen. 
+ """ + return render_template('guest_status_check.html') \ No newline at end of file diff --git a/backend/blueprints/printers.py b/backend/blueprints/printers.py index 1ce48a84..2bdb7a3a 100644 --- a/backend/blueprints/printers.py +++ b/backend/blueprints/printers.py @@ -19,6 +19,7 @@ from models import Printer, User, Job, get_db_session from utils.logging_config import get_logger, measure_execution_time from utils.permissions import require_permission, Permission, check_permission from utils.printer_monitor import printer_monitor +from utils.drag_drop_system import drag_drop_manager # Logger initialisieren printers_logger = get_logger("printers") @@ -613,4 +614,353 @@ def test_all_sockets_status(): return jsonify({ "success": False, "error": f"Allgemeiner Fehler: {str(e)}" - }), 500 \ No newline at end of file + }), 500 + + +# ============================================================================= +# DRAG & DROP API - JOB-REIHENFOLGE-MANAGEMENT +# ============================================================================= + +@printers_blueprint.route("/<int:printer_id>/jobs/order", methods=["GET"]) +@login_required +@measure_execution_time(logger=printers_logger, task_name="API-Job-Reihenfolge-Abfrage") +def get_job_order(printer_id): + """ + Holt die aktuelle Job-Reihenfolge für einen Drucker. 
+ + Args: + printer_id: ID des Druckers + + Returns: + JSON mit Jobs in der korrekten Reihenfolge + """ + printers_logger.info(f"📋 Job-Reihenfolge-Abfrage für Drucker {printer_id} von Benutzer {current_user.name}") + + try: + # Drucker existiert prüfen + db_session = get_db_session() + printer = db_session.query(Printer).filter(Printer.id == printer_id).first() + + if not printer: + db_session.close() + return jsonify({ + "success": False, + "error": f"Drucker mit ID {printer_id} nicht gefunden" + }), 404 + + db_session.close() + + # Job-Reihenfolge und Details holen + ordered_jobs = drag_drop_manager.get_ordered_jobs_for_printer(printer_id) + job_order_ids = drag_drop_manager.get_job_order(printer_id) + + # Job-Details für Response aufbereiten + jobs_data = [] + for job in ordered_jobs: + jobs_data.append({ + "id": job.id, + "name": job.name, + "description": job.description, + "user_name": job.user.name if job.user else "Unbekannt", + "user_id": job.user_id, + "duration_minutes": job.duration_minutes, + "created_at": job.created_at.isoformat() if job.created_at else None, + "start_at": job.start_at.isoformat() if job.start_at else None, + "status": job.status, + "file_path": job.file_path + }) + + printers_logger.info(f"✅ Job-Reihenfolge erfolgreich abgerufen: {len(jobs_data)} Jobs für Drucker {printer.name}") + + return jsonify({ + "success": True, + "printer": { + "id": printer.id, + "name": printer.name, + "model": printer.model, + "location": printer.location + }, + "jobs": jobs_data, + "job_order": job_order_ids, + "total_jobs": len(jobs_data), + "total_duration_minutes": sum(job.duration_minutes for job in ordered_jobs), + "timestamp": datetime.now().isoformat() + }) + + except Exception as e: + printers_logger.error(f"❌ Fehler bei Job-Reihenfolge-Abfrage für Drucker {printer_id}: {str(e)}") + return jsonify({ + "success": False, + "error": f"Fehler beim Laden der Job-Reihenfolge: {str(e)}" + }), 500 + +@printers_blueprint.route("//jobs/order", 
methods=["POST"]) +@login_required +@require_permission(Permission.APPROVE_JOBS) # Nur Benutzer mit Job-Genehmigungsrechten können Reihenfolge ändern +@measure_execution_time(logger=printers_logger, task_name="API-Job-Reihenfolge-Update") +def update_job_order(printer_id): + """ + Aktualisiert die Job-Reihenfolge für einen Drucker per Drag & Drop. + + Args: + printer_id: ID des Druckers + + JSON-Parameter: + - job_ids: Liste der Job-IDs in der gewünschten Reihenfolge + + Returns: + JSON mit Bestätigung der Aktualisierung + """ + printers_logger.info(f"🔄 Job-Reihenfolge-Update für Drucker {printer_id} von Benutzer {current_user.name}") + + # Parameter validieren + data = request.get_json() + if not data or "job_ids" not in data: + return jsonify({ + "success": False, + "error": "Parameter 'job_ids' fehlt" + }), 400 + + job_ids = data["job_ids"] + if not isinstance(job_ids, list): + return jsonify({ + "success": False, + "error": "Parameter 'job_ids' muss eine Liste sein" + }), 400 + + if not all(isinstance(job_id, int) for job_id in job_ids): + return jsonify({ + "success": False, + "error": "Alle Job-IDs müssen Zahlen sein" + }), 400 + + try: + # Drucker existiert prüfen + db_session = get_db_session() + printer = db_session.query(Printer).filter(Printer.id == printer_id).first() + + if not printer: + db_session.close() + return jsonify({ + "success": False, + "error": f"Drucker mit ID {printer_id} nicht gefunden" + }), 404 + + # Validierung: Alle Jobs gehören zum Drucker und sind editierbar + valid_jobs = db_session.query(Job).filter( + Job.id.in_(job_ids), + Job.printer_id == printer_id, + Job.status.in_(['scheduled', 'paused']) + ).all() + + db_session.close() + + if len(valid_jobs) != len(job_ids): + invalid_ids = set(job_ids) - {job.id for job in valid_jobs} + return jsonify({ + "success": False, + "error": f"Ungültige oder nicht editierbare Job-IDs: {list(invalid_ids)}" + }), 400 + + # Berechtigung prüfen: Benutzer kann nur eigene Jobs oder als Admin alle 
verschieben
+        if not current_user.is_admin:
+            user_job_ids = {job.id for job in valid_jobs if job.user_id == current_user.id}
+            if user_job_ids != set(job_ids):
+                unauthorized_ids = set(job_ids) - user_job_ids
+                return jsonify({
+                    "success": False,
+                    "error": f"Keine Berechtigung für Jobs: {list(unauthorized_ids)}"
+                }), 403
+
+        # Job-Reihenfolge aktualisieren
+        success = drag_drop_manager.update_job_order(printer_id, job_ids)
+
+        if success:
+            # Neue Reihenfolge zur Bestätigung laden
+            updated_order = drag_drop_manager.get_job_order(printer_id)
+
+            printers_logger.info(f"✅ Job-Reihenfolge erfolgreich aktualisiert für Drucker {printer.name}")
+            printers_logger.info(f" Neue Reihenfolge: {job_ids}")
+            printers_logger.info(f" Benutzer: {current_user.name} (ID: {current_user.id})")
+
+            return jsonify({
+                "success": True,
+                "message": "Job-Reihenfolge erfolgreich aktualisiert",
+                "printer": {
+                    "id": printer.id,
+                    "name": printer.name
+                },
+                "old_order": job_ids,  # Eingabe des Benutzers
+                "new_order": updated_order,  # Bestätigung aus Datenbank
+                "total_jobs": len(job_ids),
+                "updated_by": {
+                    "id": current_user.id,
+                    "name": current_user.name
+                },
+                "timestamp": datetime.now().isoformat()
+            })
+        else:
+            return jsonify({
+                "success": False,
+                "error": "Fehler beim Speichern der Job-Reihenfolge"
+            }), 500
+
+    except Exception as e:
+        printers_logger.error(f"❌ Fehler bei Job-Reihenfolge-Update für Drucker {printer_id}: {str(e)}")
+        return jsonify({
+            "success": False,
+            "error": f"Unerwarteter Fehler: {str(e)}"
+        }), 500
+
+@printers_blueprint.route("/<int:printer_id>/jobs/summary", methods=["GET"])
+@login_required
+@measure_execution_time(logger=printers_logger, task_name="API-Drucker-Job-Zusammenfassung")
+def get_printer_job_summary(printer_id):
+    """
+    Erstellt eine detaillierte Zusammenfassung der Jobs für einen Drucker.
+ + Args: + printer_id: ID des Druckers + + Returns: + JSON mit Zusammenfassung, Statistiken und Zeitschätzungen + """ + printers_logger.info(f"📊 Drucker-Job-Zusammenfassung für Drucker {printer_id} von Benutzer {current_user.name}") + + try: + # Drucker existiert prüfen + db_session = get_db_session() + printer = db_session.query(Printer).filter(Printer.id == printer_id).first() + + if not printer: + db_session.close() + return jsonify({ + "success": False, + "error": f"Drucker mit ID {printer_id} nicht gefunden" + }), 404 + + db_session.close() + + # Zusammenfassung über Drag-Drop-Manager erstellen + summary = drag_drop_manager.get_printer_summary(printer_id) + + printers_logger.info(f"✅ Drucker-Job-Zusammenfassung erfolgreich erstellt für {printer.name}") + + return jsonify({ + "success": True, + "printer": { + "id": printer.id, + "name": printer.name, + "model": printer.model, + "location": printer.location, + "status": printer.status + }, + "summary": summary, + "timestamp": datetime.now().isoformat() + }) + + except Exception as e: + printers_logger.error(f"❌ Fehler bei Drucker-Job-Zusammenfassung für Drucker {printer_id}: {str(e)}") + return jsonify({ + "success": False, + "error": f"Fehler beim Erstellen der Zusammenfassung: {str(e)}" + }), 500 + +@printers_blueprint.route("/jobs/cleanup-orders", methods=["POST"]) +@login_required +@require_permission(Permission.ADMIN) +@measure_execution_time(logger=printers_logger, task_name="API-Job-Reihenfolgen-Bereinigung") +def cleanup_job_orders(): + """ + Bereinigt ungültige Job-Reihenfolgen (nur für Administratoren). + Entfernt Einträge für abgeschlossene oder gelöschte Jobs. 
+ + Returns: + JSON mit Bereinigungsergebnis + """ + printers_logger.info(f"🧹 Job-Reihenfolgen-Bereinigung von Admin {current_user.name}") + + try: + # Bereinigung durchführen + drag_drop_manager.cleanup_invalid_orders() + + printers_logger.info(f"✅ Job-Reihenfolgen-Bereinigung erfolgreich abgeschlossen") + + return jsonify({ + "success": True, + "message": "Job-Reihenfolgen erfolgreich bereinigt", + "admin": { + "id": current_user.id, + "name": current_user.name + }, + "timestamp": datetime.now().isoformat() + }) + + except Exception as e: + printers_logger.error(f"❌ Fehler bei Job-Reihenfolgen-Bereinigung: {str(e)}") + return jsonify({ + "success": False, + "error": f"Fehler bei der Bereinigung: {str(e)}" + }), 500 + +@printers_blueprint.route("/drag-drop/config", methods=["GET"]) +@login_required +def get_drag_drop_config(): + """ + Liefert die Konfiguration für das Drag & Drop System. + + Returns: + JSON mit Drag & Drop Konfiguration und JavaScript/CSS + """ + printers_logger.info(f"⚙️ Drag-Drop-Konfiguration abgerufen von Benutzer {current_user.name}") + + try: + from utils.drag_drop_system import get_drag_drop_javascript, get_drag_drop_css + + # Benutzerberechtigungen prüfen + can_reorder_jobs = check_permission(current_user, Permission.APPROVE_JOBS) + can_upload_files = check_permission(current_user, Permission.CREATE_JOB) + + config = { + "permissions": { + "can_reorder_jobs": can_reorder_jobs, + "can_upload_files": can_upload_files, + "is_admin": current_user.is_admin + }, + "settings": { + "max_file_size": 50 * 1024 * 1024, # 50MB + "accepted_file_types": ["gcode", "stl", "3mf", "obj"], + "auto_upload": False, + "show_preview": True, + "enable_progress_tracking": True + }, + "endpoints": { + "get_job_order": f"/api/printers/{{printer_id}}/jobs/order", + "update_job_order": f"/api/printers/{{printer_id}}/jobs/order", + "get_summary": f"/api/printers/{{printer_id}}/jobs/summary" + }, + "javascript": get_drag_drop_javascript(), + "css": get_drag_drop_css() + 
} + + return jsonify({ + "success": True, + "config": config, + "user": { + "id": current_user.id, + "name": current_user.name, + "role": current_user.role + }, + "timestamp": datetime.now().isoformat() + }) + + except Exception as e: + printers_logger.error(f"❌ Fehler bei Drag-Drop-Konfiguration: {str(e)}") + return jsonify({ + "success": False, + "error": f"Fehler beim Laden der Konfiguration: {str(e)}" + }), 500 + +# ============================================================================= +# ENDE DRAG & DROP API +# ============================================================================= \ No newline at end of file diff --git a/backend/docs/DRAG_DROP_IMPLEMENTATION.md b/backend/docs/DRAG_DROP_IMPLEMENTATION.md new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/backend/docs/DRAG_DROP_IMPLEMENTATION.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/docs/STECKDOSEN_TEST_DOKUMENTATION.md b/backend/docs/STECKDOSEN_TEST_DOKUMENTATION.md new file mode 100644 index 00000000..0519ecba --- /dev/null +++ b/backend/docs/STECKDOSEN_TEST_DOKUMENTATION.md @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/backend/models.py b/backend/models.py index ae124232..99b02a55 100644 --- a/backend/models.py +++ b/backend/models.py @@ -33,7 +33,7 @@ _cache_lock = threading.Lock() _cache_ttl = {} # Time-to-live für Cache-Einträge # Alle exportierten Modelle -__all__ = ['User', 'Printer', 'Job', 'Stats', 'SystemLog', 'Base', 'GuestRequest', 'UserPermission', 'Notification', 'init_db', 'init_database', 'create_initial_admin', 'get_db_session', 'get_cached_session', 'clear_cache', 'engine'] +__all__ = ['User', 'Printer', 'Job', 'Stats', 'SystemLog', 'Base', 'GuestRequest', 'UserPermission', 'Notification', 'JobOrder', 'init_db', 'init_database', 'create_initial_admin', 'get_db_session', 'get_cached_session', 'clear_cache', 'engine'] # ===== DATENBANK-KONFIGURATION MIT WAL UND OPTIMIERUNGEN ===== @@ -819,25 +819,29 @@ class GuestRequest(Base): 
rejected_by_user = relationship("User", foreign_keys=[rejected_by]) # Admin der abgelehnt hat def to_dict(self) -> dict: - return { + # Cache-Key für GuestRequest-Dict + cache_key = get_cache_key("GuestRequest", self.id, "dict") + cached_result = get_cache(cache_key) + + if cached_result is not None: + return cached_result + + result = { "id": self.id, "name": self.name, "email": self.email, "reason": self.reason, "duration_min": self.duration_min, - "duration_minutes": self.duration_minutes or self.duration_min, # Fallback auf duration_min - "file_name": self.file_name, - "file_path": self.file_path, - "copies": self.copies, + "duration_minutes": self.duration_minutes, "created_at": self.created_at.isoformat() if self.created_at else None, "status": self.status, "printer_id": self.printer_id, - "assigned_printer_id": self.assigned_printer_id, - "otp_code": self.otp_code, - "otp_expires_at": self.otp_expires_at.isoformat() if self.otp_expires_at else None, - "otp_used_at": self.otp_used_at.isoformat() if self.otp_used_at else None, "job_id": self.job_id, "author_ip": self.author_ip, + "otp_used_at": self.otp_used_at.isoformat() if self.otp_used_at else None, + "file_name": self.file_name, + "file_path": self.file_path, + "copies": self.copies, "processed_by": self.processed_by, "processed_at": self.processed_at.isoformat() if self.processed_at else None, "approval_notes": self.approval_notes, @@ -847,60 +851,304 @@ class GuestRequest(Base): "rejected_at": self.rejected_at.isoformat() if self.rejected_at else None, "approved_by": self.approved_by, "rejected_by": self.rejected_by, - "printer": self.printer.to_dict() if self.printer else None, - "assigned_printer": self.assigned_printer.to_dict() if self.assigned_printer else None, - "job": self.job.to_dict() if self.job else None, - "processed_by_user": self.processed_by_user.to_dict() if self.processed_by_user else None, - "approved_by_user": self.approved_by_user.to_dict() if self.approved_by_user else None, - 
"rejected_by_user": self.rejected_by_user.to_dict() if self.rejected_by_user else None + "otp_expires_at": self.otp_expires_at.isoformat() if self.otp_expires_at else None, + "assigned_printer_id": self.assigned_printer_id, } + + # Ergebnis cachen (5 Minuten) + set_cache(cache_key, result, 300) + return result def generate_otp(self) -> str: """ - Generiert einen einmaligen OTP-Code und speichert den Hash in der Datenbank. - - Returns: - str: Der generierte OTP-Code im Klartext + Generiert einen neuen OTP-Code und speichert den Hash. """ - # Generiere 6-stelligen Code (Großbuchstaben + Ziffern) - otp_plain = ''.join(secrets.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ') for _ in range(6)) + otp_plain = secrets.token_hex(8) # 16-stelliger hexadezimaler Code - # Hash für die Speicherung erstellen + # Hash des OTP-Codes speichern otp_bytes = otp_plain.encode('utf-8') salt = bcrypt.gensalt() - otp_hash = bcrypt.hashpw(otp_bytes, salt).decode('utf-8') + self.otp_code = bcrypt.hashpw(otp_bytes, salt).decode('utf-8') - # Hash in der Datenbank speichern - self.otp_code = otp_hash + logger.info(f"OTP generiert für Guest Request {self.id}") + + # Cache invalidieren + invalidate_model_cache("GuestRequest", self.id) return otp_plain def verify_otp(self, otp_plain: str) -> bool: """ - Verifiziert einen OTP-Code gegen den gespeicherten Hash. - - Args: - otp_plain: Der zu prüfende OTP-Code im Klartext - - Returns: - bool: True wenn der Code korrekt ist, False andernfalls + Verifiziert einen OTP-Code. 
""" if not self.otp_code or not otp_plain: return False try: - # Code normalisieren (Großbuchstaben) - otp_plain = otp_plain.upper().strip() - - # Hash verifizieren otp_bytes = otp_plain.encode('utf-8') - stored_hash = self.otp_code.encode('utf-8') + hash_bytes = self.otp_code.encode('utf-8') - return bcrypt.checkpw(otp_bytes, stored_hash) - except Exception: + is_valid = bcrypt.checkpw(otp_bytes, hash_bytes) + + if is_valid: + self.otp_used_at = datetime.now() + logger.info(f"OTP erfolgreich verifiziert für Guest Request {self.id}") + + # Cache invalidieren + invalidate_model_cache("GuestRequest", self.id) + else: + logger.warning(f"Ungültiger OTP-Code für Guest Request {self.id}") + + return is_valid + + except Exception as e: + logger.error(f"Fehler bei OTP-Verifizierung: {str(e)}") return False +class JobOrder(Base): + """ + Job-Reihenfolge für Drucker im Drag & Drop System. + Speichert die benutzerdefinierte Reihenfolge der Jobs pro Drucker. + """ + __tablename__ = "job_orders" + + id = Column(Integer, primary_key=True) + printer_id = Column(Integer, ForeignKey("printers.id"), nullable=False) + job_id = Column(Integer, ForeignKey("jobs.id"), nullable=False) + order_position = Column(Integer, nullable=False) # Position in der Reihenfolge (0-basiert) + created_at = Column(DateTime, default=datetime.now) + updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now) + last_modified_by = Column(Integer, ForeignKey("users.id"), nullable=True) # Wer die Reihenfolge geändert hat + + # Beziehungen + printer = relationship("Printer", foreign_keys=[printer_id]) + job = relationship("Job", foreign_keys=[job_id]) + modified_by_user = relationship("User", foreign_keys=[last_modified_by]) + + # Eindeutige Kombination: Ein Job kann nur eine Position pro Drucker haben + __table_args__ = ( + # Sicherstellen, dass jeder Job nur einmal pro Drucker existiert + # und jede Position pro Drucker nur einmal vergeben wird + ) + + def to_dict(self) -> dict: + """ + 
Konvertiert JobOrder zu Dictionary. + """ + cache_key = get_cache_key("JobOrder", f"{self.printer_id}_{self.job_id}", "dict") + cached_result = get_cache(cache_key) + + if cached_result is not None: + return cached_result + + result = { + "id": self.id, + "printer_id": self.printer_id, + "job_id": self.job_id, + "order_position": self.order_position, + "created_at": self.created_at.isoformat() if self.created_at else None, + "updated_at": self.updated_at.isoformat() if self.updated_at else None, + "last_modified_by": self.last_modified_by + } + + # Ergebnis cachen (2 Minuten) + set_cache(cache_key, result, 120) + return result + + @classmethod + def get_order_for_printer(cls, printer_id: int) -> List['JobOrder']: + """ + Holt die Job-Reihenfolge für einen bestimmten Drucker. + """ + cache_key = get_cache_key("JobOrder", printer_id, "printer_order") + cached_orders = get_cache(cache_key) + + if cached_orders is not None: + return cached_orders + + with get_cached_session() as session: + orders = session.query(cls).filter( + cls.printer_id == printer_id + ).order_by(cls.order_position).all() + + # Ergebnis cachen (1 Minute für häufige Abfragen) + set_cache(cache_key, orders, 60) + + return orders + + @classmethod + def update_printer_order(cls, printer_id: int, job_ids: List[int], + modified_by_user_id: int = None) -> bool: + """ + Aktualisiert die komplette Job-Reihenfolge für einen Drucker. 
+ + Args: + printer_id: ID des Druckers + job_ids: Liste der Job-IDs in der gewünschten Reihenfolge + modified_by_user_id: ID des Users der die Änderung durchführt + + Returns: + bool: True wenn erfolgreich, False bei Fehler + """ + try: + with get_cached_session() as session: + # Validiere dass alle Jobs existieren und zum Drucker gehören + valid_jobs = session.query(Job).filter( + Job.id.in_(job_ids), + Job.printer_id == printer_id, + Job.status.in_(['scheduled', 'paused']) + ).all() + + if len(valid_jobs) != len(job_ids): + logger.warning(f"Nicht alle Jobs gültig für Drucker {printer_id}. " + f"Erwartet: {len(job_ids)}, Gefunden: {len(valid_jobs)}") + return False + + # Alte Reihenfolge-Einträge für diesen Drucker löschen + session.query(cls).filter(cls.printer_id == printer_id).delete() + + # Neue Reihenfolge-Einträge erstellen + for position, job_id in enumerate(job_ids): + order_entry = cls( + printer_id=printer_id, + job_id=job_id, + order_position=position, + last_modified_by=modified_by_user_id + ) + session.add(order_entry) + + session.commit() + + # Cache invalidieren + clear_cache(f"JobOrder:{printer_id}") + + logger.info(f"Job-Reihenfolge für Drucker {printer_id} erfolgreich aktualisiert. " + f"Jobs: {job_ids}, Benutzer: {modified_by_user_id}") + + return True + + except Exception as e: + logger.error(f"Fehler beim Aktualisieren der Job-Reihenfolge für Drucker {printer_id}: {str(e)}") + return False + + @classmethod + def get_ordered_job_ids(cls, printer_id: int) -> List[int]: + """ + Holt die Job-IDs in der korrekten Reihenfolge für einen Drucker. 
+ + Args: + printer_id: ID des Druckers + + Returns: + List[int]: Liste der Job-IDs in der richtigen Reihenfolge + """ + cache_key = get_cache_key("JobOrder", printer_id, "job_ids") + cached_ids = get_cache(cache_key) + + if cached_ids is not None: + return cached_ids + + try: + with get_cached_session() as session: + orders = session.query(cls).filter( + cls.printer_id == printer_id + ).order_by(cls.order_position).all() + + job_ids = [order.job_id for order in orders] + + # Ergebnis cachen (1 Minute) + set_cache(cache_key, job_ids, 60) + + return job_ids + + except Exception as e: + logger.error(f"Fehler beim Laden der Job-Reihenfolge für Drucker {printer_id}: {str(e)}") + return [] + + @classmethod + def remove_job_from_orders(cls, job_id: int): + """ + Entfernt einen Job aus allen Drucker-Reihenfolgen (z.B. wenn Job gelöscht wird). + + Args: + job_id: ID des zu entfernenden Jobs + """ + try: + with get_cached_session() as session: + # Alle Order-Einträge für diesen Job finden + orders_to_remove = session.query(cls).filter(cls.job_id == job_id).all() + printer_ids = {order.printer_id for order in orders_to_remove} + + # Order-Einträge löschen + session.query(cls).filter(cls.job_id == job_id).delete() + + # Positionen neu ordnen für betroffene Drucker + for printer_id in printer_ids: + remaining_orders = session.query(cls).filter( + cls.printer_id == printer_id + ).order_by(cls.order_position).all() + + # Positionen neu setzen (lückenlos) + for new_position, order in enumerate(remaining_orders): + order.order_position = new_position + order.updated_at = datetime.now() + + session.commit() + + # Cache für betroffene Drucker invalidieren + for printer_id in printer_ids: + clear_cache(f"JobOrder:{printer_id}") + + logger.info(f"Job {job_id} aus allen Drucker-Reihenfolgen entfernt. 
" + f"Betroffene Drucker: {list(printer_ids)}") + + except Exception as e: + logger.error(f"Fehler beim Entfernen des Jobs {job_id} aus Reihenfolgen: {str(e)}") + + @classmethod + def cleanup_invalid_orders(cls): + """ + Bereinigt ungültige Order-Einträge (Jobs die nicht mehr existieren oder abgeschlossen sind). + """ + try: + with get_cached_session() as session: + # Finde Order-Einträge mit nicht existierenden oder abgeschlossenen Jobs + invalid_orders = session.query(cls).join(Job).filter( + Job.status.in_(['finished', 'aborted', 'cancelled']) + ).all() + + printer_ids = {order.printer_id for order in invalid_orders} + + # Ungültige Einträge löschen + session.query(cls).join(Job).filter( + Job.status.in_(['finished', 'aborted', 'cancelled']) + ).delete(synchronize_session='fetch') + + # Positionen für betroffene Drucker neu ordnen + for printer_id in printer_ids: + remaining_orders = session.query(cls).filter( + cls.printer_id == printer_id + ).order_by(cls.order_position).all() + + for new_position, order in enumerate(remaining_orders): + order.order_position = new_position + order.updated_at = datetime.now() + + session.commit() + + # Cache für betroffene Drucker invalidieren + for printer_id in printer_ids: + clear_cache(f"JobOrder:{printer_id}") + + logger.info(f"Bereinigung der Job-Reihenfolgen abgeschlossen. 
" + f"Entfernte Einträge: {len(invalid_orders)}, " + f"Betroffene Drucker: {list(printer_ids)}") + + except Exception as e: + logger.error(f"Fehler bei der Bereinigung der Job-Reihenfolgen: {str(e)}") + + # ===== DATENBANK-INITIALISIERUNG MIT OPTIMIERUNGEN ===== def init_db() -> None: diff --git a/backend/templates/socket_test.html b/backend/templates/socket_test.html new file mode 100644 index 00000000..6bc5d329 --- /dev/null +++ b/backend/templates/socket_test.html @@ -0,0 +1,511 @@ +{% extends "base.html" %} + +{% block title %}Steckdosen-Test - Mercedes-Benz TBA Marienfelde{% endblock %} + +{% block extra_css %} + +{% endblock %} + +{% block content %} +
+ +
+
+
+ + + +
+
+

⚡ Steckdosen-Test

+

Sichere Testfunktion für Ausbilder und Administratoren

+
+
+
+ + +
+
+ + + +
+

⚠️ SICHERHEITSHINWEIS

+

+ Diese Funktion ist nur für geschulte Ausbilder und Administratoren bestimmt. + Prüfen Sie immer den Status vor dem Ein-/Ausschalten von Steckdosen. +

+
+
+
+ + +
+
+

Übersicht aller Steckdosen

+ +
+ + +
+ +
+ + +
+
+
+ Lade Steckdosen-Status... +
+
+
+ + +
+

Einzelne Steckdose testen

+ +
+ +
+ + + + +
+ + +
+
+

+ Wählen Sie einen Drucker aus um den Steckdosen-Status zu prüfen. +

+
+
+
+
+
+ + + + + +{% endblock %} \ No newline at end of file diff --git a/backend/utils/drag_drop_system.py b/backend/utils/drag_drop_system.py index 4b7a4c8c..f3cbf340 100644 --- a/backend/utils/drag_drop_system.py +++ b/backend/utils/drag_drop_system.py @@ -21,7 +21,7 @@ from flask import request, jsonify, current_app from flask_login import current_user from utils.logging_config import get_logger -from models import Job, Printer, get_db_session +from models import Job, Printer, JobOrder, get_db_session from utils.file_manager import save_job_file, save_temp_file from config.settings import ALLOWED_EXTENSIONS, MAX_FILE_SIZE, UPLOAD_FOLDER @@ -119,34 +119,282 @@ class DragDropManager: def update_job_order(self, printer_id: int, job_ids: List[int]) -> bool: """Aktualisiert die Job-Reihenfolge für einen Drucker""" try: - with get_db_session() as db_session: - # Validiere dass alle Jobs existieren und zum Drucker gehören - jobs = db_session.query(Job).filter( - Job.id.in_(job_ids), - Job.printer_id == printer_id, - Job.status.in_(['scheduled', 'paused']) - ).all() - - if len(jobs) != len(job_ids): - logger.warning(f"Nicht alle Jobs gefunden oder gehören zu Drucker {printer_id}") - return False - + # Aktuelle Benutzer-ID für Audit-Trail + user_id = current_user.id if current_user.is_authenticated else None + + # Validierung der Eingaben + if not isinstance(printer_id, int) or printer_id <= 0: + logger.error(f"Ungültige Drucker-ID: {printer_id}") + return False + + if not isinstance(job_ids, list) or not job_ids: + logger.error(f"Ungültige Job-IDs Liste: {job_ids}") + return False + + # Duplikate entfernen und Reihenfolge beibehalten + unique_job_ids = [] + seen = set() + for job_id in job_ids: + if job_id not in seen: + unique_job_ids.append(job_id) + seen.add(job_id) + + if len(unique_job_ids) != len(job_ids): + logger.warning(f"Duplikate in Job-IDs entfernt: {job_ids} -> {unique_job_ids}") + job_ids = unique_job_ids + + # Datenbank-Implementierung mit JobOrder-Tabelle + 
success = JobOrder.update_printer_order( + printer_id=printer_id, + job_ids=job_ids, + modified_by_user_id=user_id + ) + + if success: # Cache aktualisieren self.job_order_cache[printer_id] = job_ids - # Optional: In Datenbank speichern (erweiterte Implementierung) - # Hier könnte man ein separates Job-Order-Table verwenden + logger.info(f"Job-Reihenfolge für Drucker {printer_id} erfolgreich aktualisiert: {job_ids}") + logger.info(f"Aktualisiert von Benutzer: {user_id}") + + # Optional: Bereinigung ungültiger Einträge im Hintergrund + self._schedule_cleanup() - logger.info(f"Job-Reihenfolge für Drucker {printer_id} aktualisiert: {job_ids}") return True + else: + logger.error(f"Fehler beim Speichern der Job-Reihenfolge in der Datenbank") + return False except Exception as e: - logger.error(f"Fehler beim Aktualisieren der Job-Reihenfolge: {str(e)}") + logger.error(f"Unerwarteter Fehler beim Aktualisieren der Job-Reihenfolge: {str(e)}") return False def get_job_order(self, printer_id: int) -> List[int]: """Holt die aktuelle Job-Reihenfolge für einen Drucker""" - return self.job_order_cache.get(printer_id, []) + try: + # Erst aus Cache versuchen + if printer_id in self.job_order_cache: + cached_order = self.job_order_cache[printer_id] + logger.debug(f"Job-Reihenfolge aus Cache für Drucker {printer_id}: {cached_order}") + return cached_order + + # Aus Datenbank laden + job_ids = JobOrder.get_ordered_job_ids(printer_id) + + # Cache aktualisieren + self.job_order_cache[printer_id] = job_ids + + logger.debug(f"Job-Reihenfolge aus Datenbank geladen für Drucker {printer_id}: {job_ids}") + return job_ids + + except Exception as e: + logger.error(f"Fehler beim Laden der Job-Reihenfolge für Drucker {printer_id}: {str(e)}") + return [] + + def get_ordered_jobs_for_printer(self, printer_id: int) -> List[Job]: + """ + Holt die Jobs für einen Drucker in der korrekten Reihenfolge. 
+ + Args: + printer_id: ID des Druckers + + Returns: + List[Job]: Jobs sortiert nach der benutzerdefinierten Reihenfolge + """ + try: + # Job-IDs in der korrekten Reihenfolge holen + ordered_job_ids = self.get_job_order(printer_id) + + if not ordered_job_ids: + # Fallback: Jobs nach Standard-Kriterien sortieren + with get_db_session() as db_session: + jobs = db_session.query(Job).filter( + Job.printer_id == printer_id, + Job.status.in_(['scheduled', 'paused']) + ).order_by(Job.created_at).all() + return jobs + + # Jobs in der definierten Reihenfolge laden + with get_db_session() as db_session: + # Alle relevanten Jobs laden + all_jobs = db_session.query(Job).filter( + Job.printer_id == printer_id, + Job.status.in_(['scheduled', 'paused']) + ).all() + + # Dictionary für schnelle Zugriffe + jobs_dict = {job.id: job for job in all_jobs} + + # Jobs in der korrekten Reihenfolge zusammenstellen + ordered_jobs = [] + for job_id in ordered_job_ids: + if job_id in jobs_dict: + ordered_jobs.append(jobs_dict[job_id]) + + # Jobs hinzufügen, die nicht in der Reihenfolge sind (neue Jobs) + ordered_job_ids_set = set(ordered_job_ids) + unordered_jobs = [job for job in all_jobs if job.id not in ordered_job_ids_set] + + if unordered_jobs: + # Neue Jobs nach Erstellungsdatum sortieren und anhängen + unordered_jobs.sort(key=lambda x: x.created_at) + ordered_jobs.extend(unordered_jobs) + + # Reihenfolge automatisch aktualisieren für neue Jobs + new_order = [job.id for job in ordered_jobs] + self.update_job_order(printer_id, new_order) + + logger.debug(f"Jobs für Drucker {printer_id} in Reihenfolge geladen: {len(ordered_jobs)} Jobs") + return ordered_jobs + + except Exception as e: + logger.error(f"Fehler beim Laden der sortierten Jobs für Drucker {printer_id}: {str(e)}") + + # Fallback: Unsortierte Jobs zurückgeben + try: + with get_db_session() as db_session: + jobs = db_session.query(Job).filter( + Job.printer_id == printer_id, + Job.status.in_(['scheduled', 'paused']) + 
).order_by(Job.created_at).all() + return jobs + except Exception as fallback_error: + logger.error(f"Auch Fallback fehlgeschlagen: {str(fallback_error)}") + return [] + + def remove_job_from_order(self, job_id: int) -> bool: + """ + Entfernt einen Job aus allen Drucker-Reihenfolgen. + + Args: + job_id: ID des zu entfernenden Jobs + + Returns: + bool: True wenn erfolgreich + """ + try: + # Aus Datenbank entfernen + JobOrder.remove_job_from_orders(job_id) + + # Cache aktualisieren: Job aus allen Caches entfernen + for printer_id in list(self.job_order_cache.keys()): + if job_id in self.job_order_cache[printer_id]: + self.job_order_cache[printer_id].remove(job_id) + logger.debug(f"Job {job_id} aus Cache für Drucker {printer_id} entfernt") + + logger.info(f"Job {job_id} erfolgreich aus allen Reihenfolgen entfernt") + return True + + except Exception as e: + logger.error(f"Fehler beim Entfernen des Jobs {job_id} aus Reihenfolgen: {str(e)}") + return False + + def cleanup_invalid_orders(self): + """Bereinigt ungültige Job-Reihenfolgen""" + try: + # Datenbank-Bereinigung + JobOrder.cleanup_invalid_orders() + + # Cache komplett leeren (wird bei Bedarf neu geladen) + self.job_order_cache.clear() + + logger.info("Job-Reihenfolgen bereinigt") + + except Exception as e: + logger.error(f"Fehler bei der Bereinigung der Job-Reihenfolgen: {str(e)}") + + def _schedule_cleanup(self): + """Plant eine Bereinigung für später (non-blocking)""" + try: + # In produktiver Umgebung könnte hier ein Background-Task gestartet werden + # Für jetzt führen wir eine schnelle Bereinigung durch + import threading + + def cleanup_worker(): + try: + self.cleanup_invalid_orders() + except Exception as e: + logger.error(f"Hintergrund-Bereinigung fehlgeschlagen: {str(e)}") + + cleanup_thread = threading.Thread(target=cleanup_worker, daemon=True) + cleanup_thread.start() + + except Exception as e: + logger.debug(f"Konnte Hintergrund-Bereinigung nicht starten: {str(e)}") + + def get_printer_summary(self, 
printer_id: int) -> Dict[str, Any]:
+        """
+        Erstellt eine Zusammenfassung der Job-Reihenfolge für einen Drucker.
+
+        Args:
+            printer_id: ID des Druckers
+
+        Returns:
+            Dict: Zusammenfassung mit Jobs, Reihenfolge, Statistiken
+        """
+        try:
+            ordered_jobs = self.get_ordered_jobs_for_printer(printer_id)
+
+            # Statistiken berechnen
+            total_duration = sum(job.duration_minutes for job in ordered_jobs)
+            total_jobs = len(ordered_jobs)
+
+            # Nächster Job
+            next_job = ordered_jobs[0] if ordered_jobs else None
+
+            # Job-Details für die Ausgabe
+            job_details = []
+            for position, job in enumerate(ordered_jobs):
+                job_details.append({
+                    'position': position,
+                    'job_id': job.id,
+                    'name': job.name,
+                    'duration_minutes': job.duration_minutes,
+                    'user_name': job.user.name if job.user else 'Unbekannt',
+                    'created_at': job.created_at.isoformat() if job.created_at else None,
+                    'status': job.status
+                })
+
+            return {
+                'printer_id': printer_id,
+                'total_jobs': total_jobs,
+                'total_duration_minutes': total_duration,
+                'estimated_completion': self._calculate_completion_time(ordered_jobs),
+                'next_job': {
+                    'id': next_job.id,
+                    'name': next_job.name,
+                    'user': next_job.user.name if next_job and next_job.user else None
+                } if next_job else None,
+                'jobs': job_details,
+                'last_updated': datetime.now().isoformat()
+            }
+
+        except Exception as e:
+            logger.error(f"Fehler beim Erstellen der Drucker-Zusammenfassung für {printer_id}: {str(e)}")
+            return {
+                'printer_id': printer_id,
+                'total_jobs': 0,
+                'total_duration_minutes': 0,
+                'error': str(e)
+            }
+
+    def _calculate_completion_time(self, jobs: List[Job]) -> Optional[str]:
+        """Berechnet die geschätzte Fertigstellungszeit"""
+        try:
+            if not jobs:
+                return None
+
+            total_minutes = sum(job.duration_minutes for job in jobs)
+            # Korrekte Zeitarithmetik über Epochen-Sekunden: das frühere
+            # replace()-Muster mit %60/%24 verlor Tagesüberträge und konnte
+            # einen Zeitpunkt in der Vergangenheit liefern
+            completion_time = datetime.fromtimestamp(
+                datetime.now().timestamp() + total_minutes * 60
+            )
+ return completion_time.isoformat() + + except Exception: + return None # Globale Instanz drag_drop_manager = DragDropManager() diff --git a/backend/utils/email_notification.py b/backend/utils/email_notification.py new file mode 100644 index 00000000..fefab350 --- /dev/null +++ b/backend/utils/email_notification.py @@ -0,0 +1,175 @@ +""" +Offline-kompatible E-Mail-Benachrichtigung für MYP-System +======================================================== + +Da das System im Produktionsbetrieb offline läuft, werden alle E-Mail-Benachrichtigungen +nur geloggt aber nicht tatsächlich versendet. +""" + +import logging +from datetime import datetime +from typing import Optional, Dict, Any + +from utils.logging_config import get_logger + +logger = get_logger("email_notification") + +class OfflineEmailNotification: + """ + Offline-E-Mail-Benachrichtigung die nur Logs erstellt. + Simuliert E-Mail-Versand für Offline-Betrieb. + """ + + def __init__(self): + self.enabled = False # Immer deaktiviert im Offline-Modus + logger.info("📧 Offline-E-Mail-Benachrichtigung initialisiert (kein echter E-Mail-Versand)") + + def send_email(self, to: str, subject: str, body: str, **kwargs) -> bool: + """ + Simuliert E-Mail-Versand durch Logging. + + Args: + to: E-Mail-Empfänger + subject: E-Mail-Betreff + body: E-Mail-Inhalt + **kwargs: Zusätzliche Parameter + + Returns: + bool: Immer True (Simulation erfolgreich) + """ + logger.info(f"📧 [OFFLINE-SIMULATION] E-Mail würde versendet werden:") + logger.info(f" 📮 An: {to}") + logger.info(f" 📋 Betreff: {subject}") + logger.info(f" 📝 Inhalt: {body[:100]}{'...' if len(body) > 100 else ''}") + logger.info(f" 🕒 Zeitpunkt: {datetime.now().strftime('%d.%m.%Y %H:%M:%S')}") + + if kwargs: + logger.info(f" ⚙️ Zusätzliche Parameter: {kwargs}") + + return True + + def send_notification_email(self, recipient: str, notification_type: str, + data: Dict[str, Any]) -> bool: + """ + Sendet Benachrichtigungs-E-Mail (Offline-Simulation). 
+ + Args: + recipient: E-Mail-Empfänger + notification_type: Art der Benachrichtigung + data: Daten für die Benachrichtigung + + Returns: + bool: Immer True (Simulation erfolgreich) + """ + subject = f"MYP-Benachrichtigung: {notification_type}" + body = f"Benachrichtigung vom MYP-System:\n\n{data}" + + return self.send_email(recipient, subject, body, notification_type=notification_type) + + def send_maintenance_notification(self, recipient: str, task_title: str, + task_description: str) -> bool: + """ + Sendet Wartungs-Benachrichtigung (Offline-Simulation). + + Args: + recipient: E-Mail-Empfänger + task_title: Titel der Wartungsaufgabe + task_description: Beschreibung der Wartungsaufgabe + + Returns: + bool: Immer True (Simulation erfolgreich) + """ + subject = f"MYP-Wartungsaufgabe: {task_title}" + body = f""" +Neue Wartungsaufgabe im MYP-System: + +Titel: {task_title} +Beschreibung: {task_description} +Erstellt: {datetime.now().strftime('%d.%m.%Y %H:%M:%S')} + +Bitte loggen Sie sich in das MYP-System ein, um weitere Details zu sehen. + """ + + return self.send_email(recipient, subject, body, task_type="maintenance") + +# Globale Instanz für einfache Verwendung +email_notifier = OfflineEmailNotification() + +def send_email_notification(recipient: str, subject: str, body: str, **kwargs) -> bool: + """ + Haupt-Funktion für E-Mail-Versand (Offline-kompatibel). + + Args: + recipient: E-Mail-Empfänger + subject: E-Mail-Betreff + body: E-Mail-Inhalt + **kwargs: Zusätzliche Parameter + + Returns: + bool: True wenn "erfolgreich" (geloggt) + """ + return email_notifier.send_email(recipient, subject, body, **kwargs) + +def send_maintenance_email(recipient: str, task_title: str, task_description: str) -> bool: + """ + Sendet Wartungs-E-Mail (Offline-kompatibel). 
+ + Args: + recipient: E-Mail-Empfänger + task_title: Titel der Wartungsaufgabe + task_description: Beschreibung der Wartungsaufgabe + + Returns: + bool: True wenn "erfolgreich" (geloggt) + """ + return email_notifier.send_maintenance_notification(recipient, task_title, task_description) + +def send_guest_approval_email(recipient: str, otp_code: str, expires_at: str) -> bool: + """ + Sendet Gastauftrags-Genehmigung-E-Mail (Offline-kompatibel). + + Args: + recipient: E-Mail-Empfänger + otp_code: OTP-Code für den Gastauftrag + expires_at: Ablaufzeit des OTP-Codes + + Returns: + bool: True wenn "erfolgreich" (geloggt) + """ + subject = "MYP-Gastauftrag genehmigt" + body = f""" +Ihr Gastauftrag wurde genehmigt! + +OTP-Code: {otp_code} +Gültig bis: {expires_at} + +Bitte verwenden Sie diesen Code am MYP-Terminal, um Ihren Druckauftrag zu starten. + """ + + return email_notifier.send_email(recipient, subject, body, + otp_code=otp_code, expires_at=expires_at) + +def send_guest_rejection_email(recipient: str, reason: str) -> bool: + """ + Sendet Gastauftrags-Ablehnungs-E-Mail (Offline-kompatibel). + + Args: + recipient: E-Mail-Empfänger + reason: Grund für die Ablehnung + + Returns: + bool: True wenn "erfolgreich" (geloggt) + """ + subject = "MYP-Gastauftrag abgelehnt" + body = f""" +Ihr Gastauftrag wurde leider abgelehnt. + +Grund: {reason} + +Bei Fragen wenden Sie sich bitte an das MYP-Team. 
+ """ + + return email_notifier.send_email(recipient, subject, body, rejection_reason=reason) + +# Für Backward-Kompatibilität +send_notification = send_email_notification \ No newline at end of file diff --git a/backend/utils/maintenance_system.py b/backend/utils/maintenance_system.py new file mode 100644 index 00000000..74674cdf --- /dev/null +++ b/backend/utils/maintenance_system.py @@ -0,0 +1,688 @@ +""" +Wartungsplanungs- und Tracking-System für das MYP-System +======================================================== + +Dieses Modul stellt umfassende Wartungsfunktionalität bereit: +- Geplante und ungeplante Wartungen +- Wartungsintervalle und Erinnerungen +- Wartungshistorie und Berichte +- Automatische Wartungsprüfungen +- Ersatzteil-Management +- Techniker-Zuweisungen +""" + +import asyncio +import json +import logging +from datetime import datetime, timedelta +from typing import Dict, List, Any, Optional, Callable +from dataclasses import dataclass, asdict +from enum import Enum +import threading +import schedule +import time + +from utils.logging_config import get_logger +from models import Printer, get_db_session +from utils.email_notification import send_email_notification +from utils.realtime_dashboard import emit_system_alert + +logger = get_logger("maintenance") + +class MaintenanceType(Enum): + """Arten von Wartungen""" + PREVENTIVE = "preventive" # Vorbeugende Wartung + CORRECTIVE = "corrective" # Reparatur/Korrektur + EMERGENCY = "emergency" # Notfall-Wartung + SCHEDULED = "scheduled" # Geplante Wartung + INSPECTION = "inspection" # Inspektion + +class MaintenanceStatus(Enum): + """Status einer Wartung""" + PLANNED = "planned" # Geplant + SCHEDULED = "scheduled" # Terminiert + IN_PROGRESS = "in_progress" # In Bearbeitung + COMPLETED = "completed" # Abgeschlossen + CANCELLED = "cancelled" # Abgebrochen + OVERDUE = "overdue" # Überfällig + +class MaintenancePriority(Enum): + """Priorität einer Wartung""" + LOW = "low" # Niedrig + NORMAL = "normal" # 
Normal + HIGH = "high" # Hoch + CRITICAL = "critical" # Kritisch + EMERGENCY = "emergency" # Notfall + +@dataclass +class MaintenanceTask: + """Wartungsaufgabe""" + id: Optional[int] = None + printer_id: int = None + title: str = "" + description: str = "" + maintenance_type: MaintenanceType = MaintenanceType.PREVENTIVE + priority: MaintenancePriority = MaintenancePriority.NORMAL + status: MaintenanceStatus = MaintenanceStatus.PLANNED + scheduled_date: Optional[datetime] = None + due_date: Optional[datetime] = None + estimated_duration: int = 60 # Minuten + actual_duration: Optional[int] = None + assigned_technician: Optional[str] = None + created_at: datetime = None + started_at: Optional[datetime] = None + completed_at: Optional[datetime] = None + notes: str = "" + required_parts: List[str] = None + actual_parts_used: List[str] = None + cost: Optional[float] = None + checklist: List[Dict[str, Any]] = None + photos: List[str] = None + created_by: Optional[int] = None + +@dataclass +class MaintenanceSchedule: + """Wartungsplan""" + printer_id: int + maintenance_type: MaintenanceType + interval_days: int + next_due: datetime + last_completed: Optional[datetime] = None + is_active: bool = True + description: str = "" + checklist_template: List[str] = None + +@dataclass +class MaintenanceMetrics: + """Wartungsmetriken""" + total_tasks: int = 0 + completed_tasks: int = 0 + overdue_tasks: int = 0 + average_completion_time: float = 0.0 + total_cost: float = 0.0 + mtbf: float = 0.0 # Mean Time Between Failures + mttr: float = 0.0 # Mean Time To Repair + uptime_percentage: float = 0.0 + +class MaintenanceManager: + """Manager für Wartungsplanung und -tracking""" + + def __init__(self): + self.tasks: Dict[int, MaintenanceTask] = {} + self.schedules: Dict[int, List[MaintenanceSchedule]] = {} + self.maintenance_history: List[MaintenanceTask] = [] + self.next_task_id = 1 + self.is_running = False + + self._setup_scheduler() + + def _setup_scheduler(self): + """Richtet 
automatische Wartungsplanung ein""" + schedule.every().day.at("06:00").do(self._check_scheduled_maintenance) + schedule.every().hour.do(self._check_overdue_tasks) + schedule.every().monday.at("08:00").do(self._generate_weekly_report) + + # Scheduler in separatem Thread + def run_scheduler(): + while self.is_running: + schedule.run_pending() + time.sleep(60) # Check every minute + + self.is_running = True + scheduler_thread = threading.Thread(target=run_scheduler, daemon=True) + scheduler_thread.start() + + logger.info("Wartungs-Scheduler gestartet") + + def create_task(self, task: MaintenanceTask) -> int: + """Erstellt eine neue Wartungsaufgabe""" + task.id = self.next_task_id + self.next_task_id += 1 + task.created_at = datetime.now() + + self.tasks[task.id] = task + + # Automatische Terminierung für vorbeugende Wartungen + if task.maintenance_type == MaintenanceType.PREVENTIVE and not task.scheduled_date: + task.scheduled_date = self._calculate_next_maintenance_date(task.printer_id) + + # Benachrichtigungen senden + self._send_task_notifications(task, "created") + + logger.info(f"Wartungsaufgabe erstellt: {task.title} für Drucker {task.printer_id}") + return task.id + + def update_task_status(self, task_id: int, new_status: MaintenanceStatus, notes: str = "") -> bool: + """Aktualisiert den Status einer Wartungsaufgabe""" + if task_id not in self.tasks: + return False + + task = self.tasks[task_id] + old_status = task.status + task.status = new_status + + # Zeitstempel setzen + if new_status == MaintenanceStatus.IN_PROGRESS: + task.started_at = datetime.now() + elif new_status == MaintenanceStatus.COMPLETED: + task.completed_at = datetime.now() + if task.started_at: + task.actual_duration = int((task.completed_at - task.started_at).total_seconds() / 60) + + # Zur Historie hinzufügen + self.maintenance_history.append(task) + + # Nächste Wartung planen + self._schedule_next_maintenance(task) + + if notes: + task.notes += f"\n{datetime.now().strftime('%d.%m.%Y 
%H:%M')}: {notes}" + + # Benachrichtigungen senden + if old_status != new_status: + self._send_task_notifications(task, "status_changed") + + logger.info(f"Wartungsaufgabe {task_id} Status: {old_status.value} → {new_status.value}") + return True + + def schedule_maintenance(self, printer_id: int, maintenance_type: MaintenanceType, + interval_days: int, description: str = "") -> MaintenanceSchedule: + """Plant regelmäßige Wartungen""" + schedule_item = MaintenanceSchedule( + printer_id=printer_id, + maintenance_type=maintenance_type, + interval_days=interval_days, + next_due=datetime.now() + timedelta(days=interval_days), + description=description + ) + + if printer_id not in self.schedules: + self.schedules[printer_id] = [] + + self.schedules[printer_id].append(schedule_item) + + logger.info(f"Wartungsplan erstellt: {maintenance_type.value} alle {interval_days} Tage für Drucker {printer_id}") + return schedule_item + + def get_upcoming_maintenance(self, days_ahead: int = 7) -> List[MaintenanceTask]: + """Holt anstehende Wartungen""" + cutoff_date = datetime.now() + timedelta(days=days_ahead) + + upcoming = [] + for task in self.tasks.values(): + if (task.status in [MaintenanceStatus.PLANNED, MaintenanceStatus.SCHEDULED] and + task.due_date and task.due_date <= cutoff_date): + upcoming.append(task) + + return sorted(upcoming, key=lambda t: t.due_date or datetime.max) + + def get_overdue_tasks(self) -> List[MaintenanceTask]: + """Holt überfällige Wartungen""" + now = datetime.now() + overdue = [] + + for task in self.tasks.values(): + if (task.status in [MaintenanceStatus.PLANNED, MaintenanceStatus.SCHEDULED] and + task.due_date and task.due_date < now): + task.status = MaintenanceStatus.OVERDUE + overdue.append(task) + + return overdue + + def get_maintenance_metrics(self, printer_id: Optional[int] = None, + start_date: Optional[datetime] = None, + end_date: Optional[datetime] = None) -> MaintenanceMetrics: + """Berechnet Wartungsmetriken""" + # Filter tasks + tasks 
= self.maintenance_history.copy() + if printer_id: + tasks = [t for t in tasks if t.printer_id == printer_id] + if start_date: + tasks = [t for t in tasks if t.completed_at and t.completed_at >= start_date] + if end_date: + tasks = [t for t in tasks if t.completed_at and t.completed_at <= end_date] + + if not tasks: + return MaintenanceMetrics() + + completed_tasks = [t for t in tasks if t.status == MaintenanceStatus.COMPLETED] + + # Grundmetriken + total_tasks = len(tasks) + completed_count = len(completed_tasks) + + # Durchschnittliche Bearbeitungszeit + completion_times = [t.actual_duration for t in completed_tasks if t.actual_duration] + avg_completion_time = sum(completion_times) / len(completion_times) if completion_times else 0 + + # Gesamtkosten + total_cost = sum(t.cost for t in completed_tasks if t.cost) + + # MTBF und MTTR berechnen + mtbf = self._calculate_mtbf(tasks, printer_id) + mttr = avg_completion_time / 60 # Konvertiere zu Stunden + + # Verfügbarkeit berechnen + uptime_percentage = self._calculate_uptime(printer_id, start_date, end_date) + + return MaintenanceMetrics( + total_tasks=total_tasks, + completed_tasks=completed_count, + overdue_tasks=len(self.get_overdue_tasks()), + average_completion_time=avg_completion_time, + total_cost=total_cost, + mtbf=mtbf, + mttr=mttr, + uptime_percentage=uptime_percentage + ) + + def create_maintenance_checklist(self, maintenance_type: MaintenanceType) -> List[Dict[str, Any]]: + """Erstellt eine Wartungs-Checkliste""" + checklists = { + MaintenanceType.PREVENTIVE: [ + {"task": "Drucker äußerlich reinigen", "completed": False, "required": True}, + {"task": "Druckbett-Level prüfen", "completed": False, "required": True}, + {"task": "Extruder-Düse reinigen", "completed": False, "required": True}, + {"task": "Riemen-Spannung prüfen", "completed": False, "required": True}, + {"task": "Filament-Führung prüfen", "completed": False, "required": False}, + {"task": "Software-Updates prüfen", "completed": False, 
"required": False}, + {"task": "Lüfter reinigen", "completed": False, "required": True}, + {"task": "Schrauben nachziehen", "completed": False, "required": False} + ], + MaintenanceType.CORRECTIVE: [ + {"task": "Problem-Diagnose durchführen", "completed": False, "required": True}, + {"task": "Defekte Teile identifizieren", "completed": False, "required": True}, + {"task": "Ersatzteile bestellen/bereitstellen", "completed": False, "required": True}, + {"task": "Reparatur durchführen", "completed": False, "required": True}, + {"task": "Funktionstest durchführen", "completed": False, "required": True}, + {"task": "Kalibrierung prüfen", "completed": False, "required": True} + ], + MaintenanceType.INSPECTION: [ + {"task": "Sichtprüfung der Mechanik", "completed": False, "required": True}, + {"task": "Druckqualität testen", "completed": False, "required": True}, + {"task": "Temperaturen prüfen", "completed": False, "required": True}, + {"task": "Bewegungen testen", "completed": False, "required": True}, + {"task": "Verschleiß bewerten", "completed": False, "required": True} + ] + } + + return checklists.get(maintenance_type, []) + + def _check_scheduled_maintenance(self): + """Prüft täglich auf fällige Wartungen""" + logger.info("Prüfe fällige Wartungen...") + + today = datetime.now() + + for printer_id, schedules in self.schedules.items(): + for schedule_item in schedules: + if not schedule_item.is_active: + continue + + if schedule_item.next_due <= today: + # Erstelle Wartungsaufgabe + task = MaintenanceTask( + printer_id=printer_id, + title=f"{schedule_item.maintenance_type.value.title()} Wartung", + description=schedule_item.description, + maintenance_type=schedule_item.maintenance_type, + priority=MaintenancePriority.NORMAL, + due_date=schedule_item.next_due, + checklist=self.create_maintenance_checklist(schedule_item.maintenance_type) + ) + + task_id = self.create_task(task) + + # Nächsten Termin berechnen + schedule_item.next_due = today + 
timedelta(days=schedule_item.interval_days) + + logger.info(f"Automatische Wartungsaufgabe erstellt: {task_id}") + + def _check_overdue_tasks(self): + """Prüft stündlich auf überfällige Aufgaben""" + overdue = self.get_overdue_tasks() + + if overdue: + logger.warning(f"{len(overdue)} überfällige Wartungsaufgaben gefunden") + + for task in overdue: + emit_system_alert( + f"Wartung überfällig: {task.title} (Drucker {task.printer_id})", + "warning", + "high" + ) + + def _generate_weekly_report(self): + """Generiert wöchentlichen Wartungsbericht""" + logger.info("Generiere wöchentlichen Wartungsbericht...") + + # Sammle Daten der letzten Woche + last_week = datetime.now() - timedelta(days=7) + metrics = self.get_maintenance_metrics(start_date=last_week) + + # Sende Report (Implementation abhängig von verfügbaren Services) + # send_maintenance_report(metrics) + + def _calculate_next_maintenance_date(self, printer_id: int) -> datetime: + """Berechnet nächstes Wartungsdatum basierend auf Nutzung""" + # Vereinfachte Implementierung - kann erweitert werden + base_interval = 30 # Tage + + # Hier könnte man Nutzungsstatistiken einbeziehen + with get_db_session() as db_session: + printer = db_session.query(Printer).filter(Printer.id == printer_id).first() + if printer: + # Berücksichtige letzten Check + if printer.last_checked: + days_since_check = (datetime.now() - printer.last_checked).days + if days_since_check < 15: # Kürzlich gecheckt + base_interval += 15 + + return datetime.now() + timedelta(days=base_interval) + + def _schedule_next_maintenance(self, completed_task: MaintenanceTask): + """Plant nächste Wartung nach Abschluss einer Aufgabe""" + if completed_task.maintenance_type == MaintenanceType.PREVENTIVE: + # Finde entsprechenden Schedule + printer_schedules = self.schedules.get(completed_task.printer_id, []) + for schedule_item in printer_schedules: + if schedule_item.maintenance_type == completed_task.maintenance_type: + schedule_item.last_completed = 
completed_task.completed_at + schedule_item.next_due = datetime.now() + timedelta(days=schedule_item.interval_days) + break + + def _calculate_mtbf(self, tasks: List[MaintenanceTask], printer_id: Optional[int]) -> float: + """Berechnet Mean Time Between Failures""" + # Vereinfachte MTBF-Berechnung + failure_tasks = [t for t in tasks if t.maintenance_type == MaintenanceType.CORRECTIVE] + + if len(failure_tasks) < 2: + return 0.0 + + # Zeitspanne zwischen ersten und letzten Ausfall + first_failure = min(failure_tasks, key=lambda t: t.created_at) + last_failure = max(failure_tasks, key=lambda t: t.created_at) + + total_time = (last_failure.created_at - first_failure.created_at).total_seconds() / 3600 # Stunden + failure_count = len(failure_tasks) - 1 + + return total_time / failure_count if failure_count > 0 else 0.0 + + def _calculate_uptime(self, printer_id: Optional[int], start_date: Optional[datetime], + end_date: Optional[datetime]) -> float: + """Berechnet Verfügbarkeit in Prozent""" + # Vereinfachte Uptime-Berechnung + if not start_date: + start_date = datetime.now() - timedelta(days=30) + if not end_date: + end_date = datetime.now() + + total_time = (end_date - start_date).total_seconds() + + # Berechne Downtime aus Wartungszeiten + downtime = 0 + for task in self.maintenance_history: + if printer_id and task.printer_id != printer_id: + continue + + if (task.status == MaintenanceStatus.COMPLETED and + task.started_at and task.completed_at and + task.started_at >= start_date and task.completed_at <= end_date): + downtime += (task.completed_at - task.started_at).total_seconds() + + uptime = ((total_time - downtime) / total_time) * 100 if total_time > 0 else 0 + return max(0, min(100, uptime)) + + def _send_task_notifications(self, task: MaintenanceTask, event_type: str): + """Sendet Benachrichtigungen für Wartungsaufgaben""" + try: + if event_type == "created": + emit_system_alert( + f"Neue Wartungsaufgabe: {task.title} (Drucker {task.printer_id})", + "info", + 
"normal" + ) + elif event_type == "status_changed": + emit_system_alert( + f"Wartungsstatus geändert: {task.title} → {task.status.value}", + "info", + "normal" + ) + except Exception as e: + logger.error(f"Fehler beim Senden der Wartungsbenachrichtigung: {str(e)}") + +# Globale Instanz +maintenance_manager = MaintenanceManager() + +def get_maintenance_dashboard_data() -> Dict[str, Any]: + """Holt Dashboard-Daten für Wartungen""" + upcoming = maintenance_manager.get_upcoming_maintenance() + overdue = maintenance_manager.get_overdue_tasks() + metrics = maintenance_manager.get_maintenance_metrics() + + return { + 'upcoming_count': len(upcoming), + 'overdue_count': len(overdue), + 'upcoming_tasks': [asdict(task) for task in upcoming[:5]], + 'overdue_tasks': [asdict(task) for task in overdue], + 'metrics': asdict(metrics), + 'next_scheduled': upcoming[0] if upcoming else None + } + +def create_emergency_maintenance(printer_id: int, description: str, + priority: MaintenancePriority = MaintenancePriority.CRITICAL) -> int: + """Erstellt eine Notfall-Wartung""" + task = MaintenanceTask( + printer_id=printer_id, + title="Notfall-Wartung", + description=description, + maintenance_type=MaintenanceType.EMERGENCY, + priority=priority, + due_date=datetime.now(), # Sofort fällig + checklist=maintenance_manager.create_maintenance_checklist(MaintenanceType.CORRECTIVE) + ) + + return maintenance_manager.create_task(task) + +def schedule_preventive_maintenance(printer_id: int, interval_days: int = 30) -> MaintenanceSchedule: + """Plant vorbeugende Wartung""" + return maintenance_manager.schedule_maintenance( + printer_id=printer_id, + maintenance_type=MaintenanceType.PREVENTIVE, + interval_days=interval_days, + description="Regelmäßige vorbeugende Wartung" + ) + +# JavaScript für Wartungs-Frontend +def get_maintenance_javascript() -> str: + """JavaScript für Wartungsmanagement""" + return """ + class MaintenanceManager { + constructor() { + this.currentTasks = []; + this.selectedTask 
= null; + + this.init(); + } + + init() { + this.loadTasks(); + this.setupEventListeners(); + this.startAutoRefresh(); + } + + setupEventListeners() { + // Task status updates + document.addEventListener('click', (e) => { + if (e.target.matches('.maintenance-status-btn')) { + const taskId = e.target.dataset.taskId; + const newStatus = e.target.dataset.status; + this.updateTaskStatus(taskId, newStatus); + } + + if (e.target.matches('.maintenance-details-btn')) { + const taskId = e.target.dataset.taskId; + this.showTaskDetails(taskId); + } + }); + + // Create maintenance form + const createForm = document.getElementById('create-maintenance-form'); + createForm?.addEventListener('submit', (e) => { + e.preventDefault(); + this.createTask(new FormData(createForm)); + }); + } + + async loadTasks() { + try { + const response = await fetch('/api/maintenance/tasks'); + const data = await response.json(); + + if (data.success) { + this.currentTasks = data.tasks; + this.renderTasks(); + } + } catch (error) { + console.error('Fehler beim Laden der Wartungsaufgaben:', error); + } + } + + async updateTaskStatus(taskId, newStatus) { + try { + const response = await fetch(`/api/maintenance/tasks/${taskId}/status`, { + method: 'PUT', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ status: newStatus }) + }); + + const result = await response.json(); + + if (result.success) { + this.loadTasks(); // Refresh + this.showNotification('Wartungsstatus aktualisiert', 'success'); + } else { + this.showNotification('Fehler beim Aktualisieren', 'error'); + } + } catch (error) { + console.error('Status-Update fehlgeschlagen:', error); + } + } + + renderTasks() { + const container = document.getElementById('maintenance-tasks-container'); + if (!container) return; + + container.innerHTML = this.currentTasks.map(task => ` +
+
+

${task.title}

+ ${task.priority} +
+
+

Drucker: ${task.printer_id}

+

Typ: ${task.maintenance_type}

+

Fällig: ${this.formatDate(task.due_date)}

+

Status: ${task.status}

+
+
+ + + +
+
+ `).join(''); + } + + showTaskDetails(taskId) { + const task = this.currentTasks.find(t => t.id == taskId); + if (!task) return; + + // Create modal with task details + const modal = document.createElement('div'); + modal.className = 'maintenance-modal'; + modal.innerHTML = ` + + `; + + document.body.appendChild(modal); + + // Close modal handlers + modal.querySelector('.close-modal').onclick = () => modal.remove(); + modal.onclick = (e) => { + if (e.target === modal) modal.remove(); + }; + } + + renderChecklist(checklist) { + return ` +
+

Checkliste:

+ ${checklist.map((item, index) => ` + + `).join('')} +
+ `; + } + + formatDate(dateString) { + if (!dateString) return 'Nicht gesetzt'; + const date = new Date(dateString); + return date.toLocaleDateString('de-DE') + ' ' + date.toLocaleTimeString('de-DE', {hour: '2-digit', minute: '2-digit'}); + } + + showNotification(message, type = 'info') { + const notification = document.createElement('div'); + notification.className = `notification notification-${type}`; + notification.textContent = message; + + document.body.appendChild(notification); + + setTimeout(() => { + notification.remove(); + }, 3000); + } + + startAutoRefresh() { + setInterval(() => { + this.loadTasks(); + }, 30000); // Refresh every 30 seconds + } + } + + // Initialize when DOM is ready + document.addEventListener('DOMContentLoaded', function() { + window.maintenanceManager = new MaintenanceManager(); + }); + """ \ No newline at end of file diff --git a/backend/utils/multi_location_system.py b/backend/utils/multi_location_system.py new file mode 100644 index 00000000..7311ea55 --- /dev/null +++ b/backend/utils/multi_location_system.py @@ -0,0 +1,784 @@ +""" +Multi-Standort-Unterstützungssystem für das MYP-System +====================================================== + +Dieses Modul stellt umfassende Multi-Location-Funktionalität bereit: +- Standort-Management und Hierarchien +- Standort-spezifische Konfigurationen +- Zentrale und dezentrale Verwaltung +- Standort-übergreifende Berichte +- Ressourcen-Sharing zwischen Standorten +- Benutzer-Standort-Zuweisungen +""" + +import json +import logging +from datetime import datetime, timedelta +from typing import Dict, List, Any, Optional, Tuple +from dataclasses import dataclass, asdict +from enum import Enum +import geocoder +import requests + +from utils.logging_config import get_logger +from models import User, Printer, Job, get_db_session + +logger = get_logger("multi_location") + +class LocationType(Enum): + """Arten von Standorten""" + HEADQUARTERS = "headquarters" # Hauptsitz + BRANCH = "branch" # 
Niederlassung + DEPARTMENT = "department" # Abteilung + FLOOR = "floor" # Stockwerk + ROOM = "room" # Raum + AREA = "area" # Bereich + +class AccessLevel(Enum): + """Zugriffslevel für Standorte""" + FULL = "full" # Vollzugriff + READ_WRITE = "read_write" # Lesen und Schreiben + READ_ONLY = "read_only" # Nur Lesen + NO_ACCESS = "no_access" # Kein Zugriff + +@dataclass +class LocationConfig: + """Standort-spezifische Konfiguration""" + timezone: str = "Europe/Berlin" + business_hours: Dict[str, str] = None + maintenance_window: Dict[str, str] = None + auto_approval_enabled: bool = False + max_job_duration: int = 480 # Minuten + contact_info: Dict[str, str] = None + notification_settings: Dict[str, Any] = None + +@dataclass +class Location: + """Standort-Definition""" + id: Optional[int] = None + name: str = "" + code: str = "" # Kurzer Code für den Standort + location_type: LocationType = LocationType.BRANCH + parent_id: Optional[int] = None + address: str = "" + city: str = "" + country: str = "" + postal_code: str = "" + latitude: Optional[float] = None + longitude: Optional[float] = None + description: str = "" + config: LocationConfig = None + is_active: bool = True + created_at: datetime = None + manager_id: Optional[int] = None + + def __post_init__(self): + if self.config is None: + self.config = LocationConfig() + if self.created_at is None: + self.created_at = datetime.now() + +@dataclass +class UserLocationAccess: + """Benutzer-Standort-Zugriff""" + user_id: int + location_id: int + access_level: AccessLevel + granted_by: int + granted_at: datetime + expires_at: Optional[datetime] = None + is_primary: bool = False + +class MultiLocationManager: + """Manager für Multi-Standort-Funktionalität""" + + def __init__(self): + self.locations: Dict[int, Location] = {} + self.user_access: Dict[int, List[UserLocationAccess]] = {} + self.next_location_id = 1 + + # Standard-Standort erstellen + self._create_default_location() + + def _create_default_location(self): + 
"""Erstellt Standard-Standort falls keiner existiert""" + default_location = Location( + id=1, + name="Hauptstandort", + code="HQ", + location_type=LocationType.HEADQUARTERS, + address="Mercedes-Benz Platz", + city="Stuttgart", + country="Deutschland", + description="Hauptstandort des MYP-Systems" + ) + + self.locations[1] = default_location + self.next_location_id = 2 + + logger.info("Standard-Standort erstellt") + + def create_location(self, location: Location) -> int: + """Erstellt einen neuen Standort""" + location.id = self.next_location_id + self.next_location_id += 1 + + # Koordinaten automatisch ermitteln + if not location.latitude or not location.longitude: + self._geocode_location(location) + + self.locations[location.id] = location + + logger.info(f"Standort erstellt: {location.name} ({location.code})") + return location.id + + def update_location(self, location_id: int, updates: Dict[str, Any]) -> bool: + """Aktualisiert einen Standort""" + if location_id not in self.locations: + return False + + location = self.locations[location_id] + + for key, value in updates.items(): + if hasattr(location, key): + setattr(location, key, value) + + # Koordinaten neu ermitteln bei Adressänderung + if 'address' in updates or 'city' in updates: + self._geocode_location(location) + + logger.info(f"Standort aktualisiert: {location.name}") + return True + + def delete_location(self, location_id: int) -> bool: + """Löscht einen Standort (Soft Delete)""" + if location_id not in self.locations: + return False + + location = self.locations[location_id] + + # Prüfe ob Standort Kinder hat + children = self.get_child_locations(location_id) + if children: + logger.warning(f"Standort {location.name} kann nicht gelöscht werden: hat Unterstandorte") + return False + + # Prüfe auf aktive Ressourcen + if self._has_active_resources(location_id): + logger.warning(f"Standort {location.name} kann nicht gelöscht werden: hat aktive Ressourcen") + return False + + location.is_active = False 
+ logger.info(f"Standort deaktiviert: {location.name}") + return True + + def get_location_hierarchy(self, location_id: Optional[int] = None) -> Dict[str, Any]: + """Holt Standort-Hierarchie""" + if location_id: + # Spezifische Hierarchie ab einem Standort + location = self.locations.get(location_id) + if not location: + return {} + + return self._build_hierarchy_node(location) + else: + # Komplette Hierarchie + root_locations = [loc for loc in self.locations.values() + if loc.parent_id is None and loc.is_active] + + return { + 'locations': [self._build_hierarchy_node(loc) for loc in root_locations] + } + + def _build_hierarchy_node(self, location: Location) -> Dict[str, Any]: + """Erstellt einen Hierarchie-Knoten""" + children = self.get_child_locations(location.id) + + return { + 'id': location.id, + 'name': location.name, + 'code': location.code, + 'type': location.location_type.value, + 'children': [self._build_hierarchy_node(child) for child in children], + 'resource_count': self._count_location_resources(location.id) + } + + def get_child_locations(self, parent_id: int) -> List[Location]: + """Holt alle Kinder-Standorte""" + return [loc for loc in self.locations.values() + if loc.parent_id == parent_id and loc.is_active] + + def get_location_path(self, location_id: int) -> List[Location]: + """Holt den Pfad vom Root zum Standort""" + path = [] + current_id = location_id + + while current_id: + location = self.locations.get(current_id) + if not location: + break + + path.insert(0, location) + current_id = location.parent_id + + return path + + def grant_location_access(self, user_id: int, location_id: int, + access_level: AccessLevel, granted_by: int, + expires_at: Optional[datetime] = None, + is_primary: bool = False) -> bool: + """Gewährt Benutzer-Zugriff auf einen Standort""" + if location_id not in self.locations: + return False + + access = UserLocationAccess( + user_id=user_id, + location_id=location_id, + access_level=access_level, + 
granted_by=granted_by, + granted_at=datetime.now(), + expires_at=expires_at, + is_primary=is_primary + ) + + if user_id not in self.user_access: + self.user_access[user_id] = [] + + # Entferne vorherigen Zugriff für diesen Standort + self.user_access[user_id] = [ + acc for acc in self.user_access[user_id] + if acc.location_id != location_id + ] + + # Setze anderen primary-Zugriff zurück falls nötig + if is_primary: + for access_item in self.user_access[user_id]: + access_item.is_primary = False + + self.user_access[user_id].append(access) + + logger.info(f"Standort-Zugriff gewährt: User {user_id} → Location {location_id} ({access_level.value})") + return True + + def revoke_location_access(self, user_id: int, location_id: int) -> bool: + """Entzieht Benutzer-Zugriff auf einen Standort""" + if user_id not in self.user_access: + return False + + original_count = len(self.user_access[user_id]) + self.user_access[user_id] = [ + acc for acc in self.user_access[user_id] + if acc.location_id != location_id + ] + + success = len(self.user_access[user_id]) < original_count + if success: + logger.info(f"Standort-Zugriff entzogen: User {user_id} → Location {location_id}") + + return success + + def get_user_locations(self, user_id: int, access_level: Optional[AccessLevel] = None) -> List[Location]: + """Holt alle Standorte eines Benutzers""" + if user_id not in self.user_access: + return [] + + accessible_locations = [] + now = datetime.now() + + for access in self.user_access[user_id]: + # Prüfe Ablaufzeit + if access.expires_at and access.expires_at < now: + continue + + # Prüfe Access Level + if access_level and access.access_level != access_level: + continue + + location = self.locations.get(access.location_id) + if location and location.is_active: + accessible_locations.append(location) + + return accessible_locations + + def get_user_primary_location(self, user_id: int) -> Optional[Location]: + """Holt den primären Standort eines Benutzers""" + if user_id not in 
self.user_access: + return None + + for access in self.user_access[user_id]: + if access.is_primary: + return self.locations.get(access.location_id) + + # Fallback: ersten verfügbaren Standort nehmen + user_locations = self.get_user_locations(user_id) + return user_locations[0] if user_locations else None + + def check_user_access(self, user_id: int, location_id: int, + required_level: AccessLevel = AccessLevel.READ_ONLY) -> bool: + """Prüft ob Benutzer Zugriff auf Standort hat""" + if user_id not in self.user_access: + return False + + access_levels = { + AccessLevel.NO_ACCESS: 0, + AccessLevel.READ_ONLY: 1, + AccessLevel.READ_WRITE: 2, + AccessLevel.FULL: 3 + } + + required_level_value = access_levels[required_level] + now = datetime.now() + + for access in self.user_access[user_id]: + if access.location_id != location_id: + continue + + # Prüfe Ablaufzeit + if access.expires_at and access.expires_at < now: + continue + + user_level_value = access_levels[access.access_level] + if user_level_value >= required_level_value: + return True + + return False + + def get_location_resources(self, location_id: int) -> Dict[str, Any]: + """Holt alle Ressourcen eines Standorts""" + if location_id not in self.locations: + return {} + + # Simuliere Datenbankabfrage für Drucker und Jobs + resources = { + 'printers': [], + 'active_jobs': [], + 'users': [], + 'pending_maintenance': 0 + } + + # In echter Implementierung würde hier die Datenbank abgefragt + with get_db_session() as db_session: + # Drucker des Standorts (vereinfacht - benötigt location_id in Printer-Model) + # printers = db_session.query(Printer).filter(Printer.location_id == location_id).all() + # resources['printers'] = [p.to_dict() for p in printers] + pass + + return resources + + def get_location_statistics(self, location_id: int, + start_date: Optional[datetime] = None, + end_date: Optional[datetime] = None) -> Dict[str, Any]: + """Holt Statistiken für einen Standort""" + if not start_date: + start_date = 
datetime.now() - timedelta(days=30) + if not end_date: + end_date = datetime.now() + + # Sammle Statistiken + stats = { + 'location': self.locations.get(location_id, {}).name if location_id in self.locations else 'Unbekannt', + 'period': { + 'start': start_date.isoformat(), + 'end': end_date.isoformat() + }, + 'totals': { + 'printers': 0, + 'jobs_completed': 0, + 'jobs_failed': 0, + 'print_time_hours': 0, + 'material_used_kg': 0, + 'users_active': 0 + }, + 'averages': { + 'jobs_per_day': 0, + 'job_duration_minutes': 0, + 'printer_utilization': 0 + }, + 'trends': { + 'daily_jobs': [], + 'printer_usage': [] + } + } + + # In echter Implementierung würden hier Datenbankabfragen stehen + + return stats + + def get_multi_location_report(self, location_ids: List[int] = None) -> Dict[str, Any]: + """Erstellt standortübergreifenden Bericht""" + if not location_ids: + location_ids = list(self.locations.keys()) + + report = { + 'generated_at': datetime.now().isoformat(), + 'locations': [], + 'summary': { + 'total_locations': len(location_ids), + 'total_printers': 0, + 'total_users': 0, + 'total_jobs': 0, + 'cross_location_sharing': [] + } + } + + for location_id in location_ids: + location = self.locations.get(location_id) + if not location: + continue + + location_stats = self.get_location_statistics(location_id) + location_data = { + 'id': location.id, + 'name': location.name, + 'code': location.code, + 'type': location.location_type.value, + 'statistics': location_stats + } + + report['locations'].append(location_data) + + # Summiere für Gesamtübersicht + totals = location_stats.get('totals', {}) + report['summary']['total_printers'] += totals.get('printers', 0) + report['summary']['total_users'] += totals.get('users_active', 0) + report['summary']['total_jobs'] += totals.get('jobs_completed', 0) + + return report + + def find_nearest_locations(self, latitude: float, longitude: float, + radius_km: float = 50, limit: int = 5) -> List[Tuple[Location, float]]: + """Findet 
nächstgelegene Standorte""" + from math import radians, sin, cos, sqrt, atan2 + + def calculate_distance(lat1, lon1, lat2, lon2): + """Berechnet Entfernung zwischen zwei Koordinaten (Haversine)""" + R = 6371 # Erdradius in km + + lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2]) + dlat = lat2 - lat1 + dlon = lon2 - lon1 + + a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2 + c = 2 * atan2(sqrt(a), sqrt(1-a)) + + return R * c + + nearby_locations = [] + + for location in self.locations.values(): + if not location.is_active or not location.latitude or not location.longitude: + continue + + distance = calculate_distance( + latitude, longitude, + location.latitude, location.longitude + ) + + if distance <= radius_km: + nearby_locations.append((location, distance)) + + # Sortiere nach Entfernung + nearby_locations.sort(key=lambda x: x[1]) + + return nearby_locations[:limit] + + def _geocode_location(self, location: Location): + """Ermittelt Koordinaten für einen Standort""" + try: + address_parts = [location.address, location.city, location.country] + full_address = ', '.join(filter(None, address_parts)) + + if not full_address: + return + + # Verwende geocoder library + result = geocoder.osm(full_address) + + if result.ok: + location.latitude = result.lat + location.longitude = result.lng + logger.info(f"Koordinaten ermittelt für {location.name}: {location.latitude}, {location.longitude}") + else: + logger.warning(f"Koordinaten konnten nicht ermittelt werden für {location.name}") + + except Exception as e: + logger.error(f"Fehler bei Geocoding für {location.name}: {str(e)}") + + def _has_active_resources(self, location_id: int) -> bool: + """Prüft ob Standort aktive Ressourcen hat""" + # Vereinfachte Implementierung + # In echter Implementation würde hier die Datenbank geprüft + return False + + def _count_location_resources(self, location_id: int) -> Dict[str, int]: + """Zählt Ressourcen eines Standorts""" + # Vereinfachte Implementierung + 
return { + 'printers': 0, + 'users': 0, + 'jobs': 0 + } + +# Globale Instanz +multi_location_manager = MultiLocationManager() + +def get_location_dashboard_data(user_id: int) -> Dict[str, Any]: + """Holt Dashboard-Daten für Standorte eines Benutzers""" + user_locations = multi_location_manager.get_user_locations(user_id) + primary_location = multi_location_manager.get_user_primary_location(user_id) + + dashboard_data = { + 'user_locations': [asdict(loc) for loc in user_locations], + 'primary_location': asdict(primary_location) if primary_location else None, + 'location_count': len(user_locations), + 'hierarchy': multi_location_manager.get_location_hierarchy() + } + + # Füge Statistiken für jeden Standort hinzu + for location in user_locations: + location_stats = multi_location_manager.get_location_statistics(location.id) + dashboard_data[f'stats_{location.id}'] = location_stats + + return dashboard_data + +def create_location_from_address(name: str, address: str, city: str, + country: str, location_type: LocationType = LocationType.BRANCH) -> int: + """Erstellt Standort aus Adresse mit automatischer Geocodierung""" + location = Location( + name=name, + code=name[:3].upper(), + location_type=location_type, + address=address, + city=city, + country=country + ) + + return multi_location_manager.create_location(location) + +# JavaScript für Multi-Location Frontend +def get_multi_location_javascript() -> str: + """JavaScript für Multi-Location Management""" + return """ + class MultiLocationManager { + constructor() { + this.currentLocation = null; + this.userLocations = []; + this.locationHierarchy = {}; + + this.init(); + } + + init() { + this.loadUserLocations(); + this.setupEventListeners(); + } + + setupEventListeners() { + // Location switcher + document.addEventListener('change', (e) => { + if (e.target.matches('.location-selector')) { + const locationId = parseInt(e.target.value); + this.switchLocation(locationId); + } + }); + + // Location management buttons + 
document.addEventListener('click', (e) => { + if (e.target.matches('.manage-locations-btn')) { + this.showLocationManager(); + } + + if (e.target.matches('.location-hierarchy-btn')) { + this.showLocationHierarchy(); + } + }); + } + + async loadUserLocations() { + try { + const response = await fetch('/api/locations/user'); + const data = await response.json(); + + if (data.success) { + this.userLocations = data.locations; + this.currentLocation = data.primary_location; + this.locationHierarchy = data.hierarchy; + + this.updateLocationSelector(); + this.updateLocationDisplay(); + } + } catch (error) { + console.error('Fehler beim Laden der Standorte:', error); + } + } + + updateLocationSelector() { + const selectors = document.querySelectorAll('.location-selector'); + + selectors.forEach(selector => { + selector.innerHTML = this.userLocations.map(location => + `` + ).join(''); + }); + } + + updateLocationDisplay() { + const displays = document.querySelectorAll('.current-location-display'); + + displays.forEach(display => { + if (this.currentLocation) { + display.innerHTML = ` +
+ ${this.currentLocation.name} + ${this.currentLocation.type} + ${this.currentLocation.city ? `${this.currentLocation.city}` : ''} +
+ `; + } else { + display.innerHTML = 'Kein Standort ausgewählt'; + } + }); + } + + async switchLocation(locationId) { + try { + const response = await fetch('/api/locations/switch', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ location_id: locationId }) + }); + + const result = await response.json(); + + if (result.success) { + this.currentLocation = this.userLocations.find(loc => loc.id === locationId); + this.updateLocationDisplay(); + + // Seite neu laden um location-spezifische Daten zu aktualisieren + window.location.reload(); + } else { + this.showNotification('Fehler beim Wechseln des Standorts', 'error'); + } + } catch (error) { + console.error('Standort-Wechsel fehlgeschlagen:', error); + } + } + + showLocationManager() { + const modal = document.createElement('div'); + modal.className = 'location-manager-modal'; + modal.innerHTML = ` + + `; + + document.body.appendChild(modal); + + // Event handlers + modal.querySelector('.close-modal').onclick = () => modal.remove(); + modal.onclick = (e) => { + if (e.target === modal) modal.remove(); + }; + } + + renderLocationList() { + return this.userLocations.map(location => ` +
+
+

${location.name} (${location.code})

+

Typ: ${location.type}

+

Adresse: ${location.address || 'Nicht angegeben'}

+

Stadt: ${location.city || 'Nicht angegeben'}

+
+
+ + +
+
+ `).join(''); + } + + showLocationHierarchy() { + const modal = document.createElement('div'); + modal.className = 'hierarchy-modal'; + modal.innerHTML = ` + + `; + + document.body.appendChild(modal); + + modal.querySelector('.close-modal').onclick = () => modal.remove(); + modal.onclick = (e) => { + if (e.target === modal) modal.remove(); + }; + } + + renderHierarchyTree(locations, level = 0) { + return locations.map(location => ` +
+
+ ${this.getLocationTypeIcon(location.type)} + ${location.name} + (${location.code}) + ${location.resource_count.printers || 0} Drucker +
+ ${location.children && location.children.length > 0 ? + this.renderHierarchyTree(location.children, level + 1) : ''} +
+ `).join(''); + } + + getLocationTypeIcon(type) { + const icons = { + 'headquarters': '🏢', + 'branch': '🏪', + 'department': '🏬', + 'floor': '🏢', + 'room': '🚪', + 'area': '📍' + }; + return icons[type] || '📍'; + } + + showNotification(message, type = 'info') { + const notification = document.createElement('div'); + notification.className = `notification notification-${type}`; + notification.textContent = message; + + document.body.appendChild(notification); + + setTimeout(() => { + notification.remove(); + }, 3000); + } + } + + // Initialize when DOM is ready + document.addEventListener('DOMContentLoaded', function() { + window.multiLocationManager = new MultiLocationManager(); + }); + """ \ No newline at end of file