🎉 Fix printer monitor complete issue & logs updates 📝
This commit is contained in:
@ -1144,7 +1144,7 @@ def export_logs_api():
|
||||
|
||||
# ===== API-ENDPUNKTE FÜR SYSTEM-INFORMATIONEN =====
|
||||
|
||||
@admin_blueprint.route("/api/system/status", methods=["GET"])
|
||||
@admin_api_blueprint.route("/system/status", methods=["GET"])
|
||||
@admin_required
|
||||
def get_system_status_api():
|
||||
"""API-Endpunkt für System-Status-Informationen"""
|
||||
@ -1473,7 +1473,7 @@ def api_admin_plug_schedules_cleanup():
|
||||
@admin_required
|
||||
def api_admin_plug_schedules_calendar():
|
||||
"""
|
||||
API-Endpoint für Kalender-Daten der Steckdosenschaltzeiten.
|
||||
API-Endpunkt für Kalender-Daten der Steckdosenschaltzeiten.
|
||||
Liefert Events für FullCalendar im JSON-Format.
|
||||
"""
|
||||
try:
|
||||
@ -1549,6 +1549,218 @@ def api_admin_plug_schedules_calendar():
|
||||
admin_logger.error(f"Fehler beim Laden der Kalender-Daten: {str(e)}")
|
||||
return jsonify([])
|
||||
|
||||
@admin_api_blueprint.route('/live-stats', methods=['GET'])
@admin_required
def api_admin_live_stats():
    """
    Return live statistics for the admin dashboard as JSON.

    Aggregates counts over users, printers, jobs, plug-status logs and
    system logs, plus rolling-window job counts (24h / 7d). On success
    responds with ``{'success': True, 'stats': {...}}``; on any error
    responds with HTTP 500 and an empty ``stats`` dict.
    """
    try:
        # Time cutoffs for the rolling windows, computed up front.
        now = datetime.now()
        cutoff_24h = now - timedelta(hours=24)
        cutoff_7d = now - timedelta(days=7)
        cutoff_1h = now - timedelta(hours=1)

        with get_cached_session() as session:
            # --- user counts ---
            user_q = session.query(User)
            users_total = user_q.count()
            users_active = user_q.filter(User.active == True).count()
            users_admin = user_q.filter(User.role == 'admin').count()

            # --- printer counts ---
            printer_q = session.query(Printer)
            printers_total = printer_q.count()
            printers_active = printer_q.filter(Printer.active == True).count()
            printers_online = printer_q.filter(
                Printer.active == True,
                Printer.status == 'online',
            ).count()

            # --- job counts, overall and per status ---
            job_q = session.query(Job)
            jobs_total = job_q.count()
            jobs_active = job_q.filter(
                Job.status.in_(['pending', 'printing', 'paused'])
            ).count()
            jobs_completed = job_q.filter(Job.status == 'completed').count()
            jobs_failed = job_q.filter(Job.status == 'failed').count()

            # --- job counts within the rolling windows ---
            jobs_last_24h = job_q.filter(Job.created_at >= cutoff_24h).count()
            jobs_last_7d = job_q.filter(Job.created_at >= cutoff_7d).count()

            # --- plug + system log activity ---
            plug_logs_24h = session.query(PlugStatusLog).filter(
                PlugStatusLog.timestamp >= cutoff_24h
            ).count()
            system_logs_1h = session.query(SystemLog).filter(
                SystemLog.timestamp >= cutoff_1h
            ).count()

        # Assemble the response payload in the shape the dashboard expects.
        stats = {
            'users': {
                'total': users_total,
                'active': users_active,
                'admins': users_admin,
            },
            'printers': {
                'total': printers_total,
                'active': printers_active,
                'online': printers_online,
                # Offline is derived: active printers that are not online.
                'offline': printers_active - printers_online,
            },
            'jobs': {
                'total': jobs_total,
                'active': jobs_active,
                'completed': jobs_completed,
                'failed': jobs_failed,
                'last_24h': jobs_last_24h,
                'last_7d': jobs_last_7d,
            },
            'system': {
                'plug_logs_24h': plug_logs_24h,
                'system_logs_1h': system_logs_1h,
                'uptime': 'Unbekannt'  # Könnte später implementiert werden
            },
            'timestamp': datetime.now().isoformat(),
        }

        admin_api_logger.info(f"Live-Statistiken abgerufen von Admin {current_user.username}")

        return jsonify({
            'success': True,
            'stats': stats,
            'message': 'Live-Statistiken erfolgreich geladen'
        })

    except Exception as e:
        admin_api_logger.error(f"Fehler beim Abrufen der Live-Statistiken: {str(e)}")
        return jsonify({
            'success': False,
            'error': 'Fehler beim Laden der Statistiken',
            'message': str(e),
            'stats': {}
        }), 500
@admin_api_blueprint.route('/system/health', methods=['GET'])
@admin_required
def api_admin_system_health():
    """
    System health-check endpoint.

    Probes several components and reports a per-component plus overall status:
    - database connectivity (simple ``SELECT 1`` round-trip)
    - filesystem (write access to the important data directories)
    - disk space usage
    - service status (currently mostly placeholders)

    Returns ``{'success': True, 'health': {...}}`` on success, or HTTP 500
    with ``health.overall == 'error'`` if the check itself fails.
    """
    # Local import keeps this endpoint self-contained; shutil is stdlib.
    import shutil

    try:
        health_status = {
            'database': 'unknown',
            'filesystem': 'unknown',
            'storage': {},
            'services': {},
            'timestamp': datetime.now().isoformat()
        }

        # --- database check: a trivial round-trip query ---
        try:
            with get_cached_session() as db_session:
                # NOTE(review): SQLAlchemy 2.x rejects raw SQL strings here;
                # if the project is on 2.x this needs sqlalchemy.text("SELECT 1")
                # — confirm the pinned SQLAlchemy version.
                db_session.execute("SELECT 1")
                health_status['database'] = 'healthy'
        except Exception as db_error:
            health_status['database'] = 'unhealthy'
            admin_api_logger.error(f"Datenbank-Health-Check fehlgeschlagen: {str(db_error)}")

        # --- filesystem check: all important dirs must exist and be writable ---
        try:
            important_dirs = [
                'backend/uploads',
                'backend/database',
                'backend/logs'
            ]

            all_accessible = all(
                os.path.exists(dir_path) and os.access(dir_path, os.W_OK)
                for dir_path in important_dirs
            )

            health_status['filesystem'] = 'healthy' if all_accessible else 'unhealthy'
        except Exception as fs_error:
            health_status['filesystem'] = 'unhealthy'
            admin_api_logger.error(f"Dateisystem-Health-Check fehlgeschlagen: {str(fs_error)}")

        # --- disk-space check ---
        # shutil.disk_usage works on all platforms; the original os.statvfs
        # is Unix-only and would raise AttributeError on Windows.
        try:
            usage = shutil.disk_usage('.')
            total_space = usage.total
            free_space = usage.free
            used_space = total_space - free_space

            health_status['storage'] = {
                'total_gb': round(total_space / (1024**3), 2),
                'used_gb': round(used_space / (1024**3), 2),
                'free_gb': round(free_space / (1024**3), 2),
                # Guard against division by zero on a pathological 0-byte fs.
                'percent_used': round((used_space / total_space) * 100, 1) if total_space else 0.0
            }
        except Exception as storage_error:
            admin_api_logger.error(f"Speicherplatz-Check fehlgeschlagen: {str(storage_error)}")

        # --- service status (simplified placeholders) ---
        health_status['services'] = {
            'web_server': 'running',  # Immer running, da wir antworten
            'job_scheduler': 'unknown',  # Könnte später implementiert werden
            'tapo_controller': 'unknown'  # Könnte später implementiert werden
        }

        # --- overall status: both core components healthy -> healthy;
        #     either one unhealthy -> unhealthy; anything else -> degraded ---
        if health_status['database'] == 'healthy' and health_status['filesystem'] == 'healthy':
            overall_status = 'healthy'
        elif health_status['database'] == 'unhealthy' or health_status['filesystem'] == 'unhealthy':
            overall_status = 'unhealthy'
        else:
            overall_status = 'degraded'

        health_status['overall'] = overall_status

        admin_api_logger.info(f"System-Health-Check durchgeführt: {overall_status}")

        return jsonify({
            'success': True,
            'health': health_status,
            'message': f'System-Status: {overall_status}'
        })

    except Exception as e:
        admin_api_logger.error(f"Fehler beim System-Health-Check: {str(e)}")
        return jsonify({
            'success': False,
            'error': 'Fehler beim Health-Check',
            'message': str(e),
            'health': {
                'overall': 'error',
                'timestamp': datetime.now().isoformat()
            }
        }), 500
# ===== HELPER FUNCTIONS FOR PLUG SCHEDULES =====
|
||||
|
||||
def get_relative_time(timestamp):
|
||||
|
Reference in New Issue
Block a user