2025-06-04 10:03:22 +02:00
commit 785a2b6134
14182 changed files with 1764617 additions and 0 deletions

utils/__init__.py Normal file

@@ -0,0 +1 @@
# Utils package for MYP

Binary files not shown.

View File

@@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""
Script that adds the hard-coded printers to the database.
"""
import sys
import os
sys.path.append('.')
from config.settings import PRINTERS
from database.db_manager import DatabaseManager
from models import Printer
from datetime import datetime
def add_hardcoded_printers():
    """Inserts the hard-coded printers into the database."""
    print("=== Adding hard-coded printers ===")
    print(f"Printers to create: {len(PRINTERS)}")
    try:
        db = DatabaseManager()
        session = db.get_session()
        added_count = 0
        for printer_name, config in PRINTERS.items():
            # Check whether the printer already exists
            existing = session.query(Printer).filter(Printer.name == printer_name).first()
            if existing:
                print(f"⚠️ {printer_name}: already exists (ID: {existing.id})")
                continue
            # Create a new printer
            new_printer = Printer(
                name=printer_name,
                model="P115",  # default model
                location="Werk 040 - Berlin - TBA",  # updated location
                ip_address=config["ip"],
                mac_address=f"98:25:4A:E1:{printer_name[-1]}0:0{printer_name[-1]}",  # dummy MAC
                plug_ip=config["ip"],
                plug_username="admin",
                plug_password="admin",
                status="available",  # available because it is in the configuration
                active=True,
                created_at=datetime.now()
            )
            session.add(new_printer)
            print(f"{printer_name}: added (IP: {config['ip']})")
            added_count += 1
        # Persist the changes
        session.commit()
        session.close()
        print(f"\n{added_count} new printers added")
        print("Printer creation finished!")
    except Exception as e:
        print(f"❌ Error while adding printers: {e}")
        if 'session' in locals():
            session.rollback()
            session.close()
def list_all_printers():
    """Lists all printers stored in the database."""
    print("\n=== All printers in the database ===")
    try:
        db = DatabaseManager()
        session = db.get_session()
        printers = session.query(Printer).all()
        if not printers:
            print("No printers found in the database.")
            return
        print(f"{'ID':<5} {'Name':<15} {'Status':<12} {'IP address':<15} {'Active':<8}")
        print("-" * 60)
        for printer in printers:
            active_str = "✓" if printer.active else "✗"
            print(f"{printer.id:<5} {printer.name:<15} {printer.status:<12} {printer.ip_address:<15} {active_str:<8}")
        session.close()
    except Exception as e:
        print(f"❌ Error while querying printers: {e}")
        if 'session' in locals():
            session.close()
if __name__ == "__main__":
    print("Hard-coded printer creation")
    print("=" * 35)
    # Show the current printers
    list_all_printers()
    # Add the hard-coded printers
    add_hardcoded_printers()
    # Show all printers again
    list_all_printers()
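The script above assumes that `PRINTERS` in `config/settings.py` maps each printer name to a dictionary with at least an `"ip"` key, and that printer names end in a digit (the MAC generator uses `printer_name[-1]`). A minimal sketch of that assumed shape; the names and addresses are illustrative, not taken from the repository:

# Hypothetical excerpt of config/settings.py; add_hardcoded_printers()
# only reads the "ip" key of each entry.
PRINTERS = {
    "Printer1": {"ip": "192.168.0.101"},
    "Printer2": {"ip": "192.168.0.102"},
}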

utils/add_test_printers.py Normal file

@@ -0,0 +1,178 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script that adds test printers to the database
"""
import sys
import os
from datetime import datetime
# Add this script's directory to the Python path so that models can be imported
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from models import get_db_session, Printer
def add_test_printers():
"""Fügt Testdrucker zur Datenbank hinzu"""
test_printers = [
{
"name": "Prusa i3 MK3S+",
"model": "Prusa i3 MK3S+",
"location": "Labor A - Arbeitsplatz 1",
"mac_address": "AA:BB:CC:DD:EE:01",
"plug_ip": "192.168.1.101",
"status": "available",
"active": True
},
{
"name": "Ender 3 V2",
"model": "Creality Ender 3 V2",
"location": "Labor A - Arbeitsplatz 2",
"mac_address": "AA:BB:CC:DD:EE:02",
"plug_ip": "192.168.1.102",
"status": "available",
"active": True
},
{
"name": "Ultimaker S3",
"model": "Ultimaker S3",
"location": "Labor B - Arbeitsplatz 1",
"mac_address": "AA:BB:CC:DD:EE:03",
"plug_ip": "192.168.1.103",
"status": "available",
"active": True
},
{
"name": "Bambu Lab X1 Carbon",
"model": "Bambu Lab X1 Carbon",
"location": "Labor B - Arbeitsplatz 2",
"mac_address": "AA:BB:CC:DD:EE:04",
"plug_ip": "192.168.1.104",
"status": "available",
"active": True
},
{
"name": "Formlabs Form 3",
"model": "Formlabs Form 3",
"location": "Labor C - Harz-Bereich",
"mac_address": "AA:BB:CC:DD:EE:05",
"plug_ip": "192.168.1.105",
"status": "offline",
"active": False
}
]
db_session = get_db_session()
try:
added_count = 0
for printer_data in test_printers:
            # Check whether the printer already exists
            existing = db_session.query(Printer).filter(
                Printer.name == printer_data["name"]
            ).first()
            if existing:
                print(f"⚠️ Printer '{printer_data['name']}' already exists - skipping")
                continue
            # Create a new printer
            new_printer = Printer(
                name=printer_data["name"],
                model=printer_data["model"],
                location=printer_data["location"],
                mac_address=printer_data["mac_address"],
                plug_ip=printer_data["plug_ip"],
                status=printer_data["status"],
                active=printer_data["active"],
                created_at=datetime.now()
            )
            db_session.add(new_printer)
            added_count += 1
            print(f"✅ Printer '{printer_data['name']}' added")
        if added_count > 0:
            db_session.commit()
            print(f"\n🎉 {added_count} test printers successfully added to the database!")
        else:
            print("\n📋 All test printers already exist in the database")
        # List every printer in the database
        all_printers = db_session.query(Printer).all()
        print(f"\n📊 {len(all_printers)} printers in the database in total:")
        print("-" * 80)
        print(f"{'ID':<4} {'Name':<20} {'Model':<20} {'Status':<12} {'Active':<6}")
        print("-" * 80)
        for printer in all_printers:
            active_str = "✓" if printer.active else "✗"
            print(f"{printer.id:<4} {printer.name[:19]:<20} {(printer.model or 'Unknown')[:19]:<20} {printer.status:<12} {active_str:<6}")
        db_session.close()
    except Exception as e:
        db_session.rollback()
        db_session.close()
        print(f"❌ Error while adding the test printers: {str(e)}")
        return False
    return True
def remove_test_printers():
    """Removes all test printers from the database"""
    test_printer_names = [
        "Prusa i3 MK3S+",
        "Ender 3 V2",
        "Ultimaker S3",
        "Bambu Lab X1 Carbon",
        "Formlabs Form 3"
    ]
    db_session = get_db_session()
    try:
        removed_count = 0
        for name in test_printer_names:
            printer = db_session.query(Printer).filter(Printer.name == name).first()
            if printer:
                db_session.delete(printer)
                removed_count += 1
                print(f"🗑️ Printer '{name}' removed")
        if removed_count > 0:
            db_session.commit()
            print(f"\n🧹 {removed_count} test printers successfully removed!")
        else:
            print("\n📋 No test printers found to remove")
        db_session.close()
    except Exception as e:
        db_session.rollback()
        db_session.close()
        print(f"❌ Error while removing the test printers: {str(e)}")
        return False
    return True
if __name__ == "__main__":
    print("=== MYP printer management - test printer administration ===")
    print()
    if len(sys.argv) > 1 and sys.argv[1] == "--remove":
        print("Removing test printers...")
        remove_test_printers()
    else:
        print("Adding test printers...")
        print("(Use --remove to delete the test printers)")
        print()
        add_test_printers()
    print("\nDone! 🚀")
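Both functions return a boolean, so the script can also be driven programmatically instead of via the command line. A minimal sketch of such a caller, assuming the module is importable as `utils.add_test_printers`:

# Hypothetical caller - exercises the add/remove round trip.
from utils.add_test_printers import add_test_printers, remove_test_printers

assert add_test_printers()     # first run inserts the five test printers
assert add_test_printers()     # second run skips them all and still returns True
assert remove_test_printers()  # removes them again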

utils/advanced_tables.py Normal file

@@ -0,0 +1,968 @@
"""
Erweitertes Tabellen-System für das MYP-System
=============================================
Dieses Modul stellt erweiterte Tabellen-Funktionalität bereit:
- Sortierung nach allen Spalten
- Erweiterte Filter-Optionen
- Pagination mit anpassbaren Seitengrößen
- Spalten-Auswahl und -anpassung
- Export-Funktionen
- Responsive Design
"""
import re
import json
import math
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple, Union, Callable
from dataclasses import dataclass, asdict
from enum import Enum
from flask import request, jsonify
from sqlalchemy import func, text, or_, and_, String
from sqlalchemy.orm import Query
from utils.logging_config import get_logger
from models import Job, User, Printer, GuestRequest, get_db_session
logger = get_logger("advanced_tables")
class SortDirection(Enum):
ASC = "asc"
DESC = "desc"
class FilterOperator(Enum):
EQUALS = "eq"
NOT_EQUALS = "ne"
CONTAINS = "contains"
NOT_CONTAINS = "not_contains"
STARTS_WITH = "starts_with"
ENDS_WITH = "ends_with"
GREATER_THAN = "gt"
LESS_THAN = "lt"
GREATER_EQUAL = "gte"
LESS_EQUAL = "lte"
BETWEEN = "between"
IN = "in"
NOT_IN = "not_in"
IS_NULL = "is_null"
IS_NOT_NULL = "is_not_null"
@dataclass
class SortConfig:
"""Sortierung-Konfiguration"""
column: str
direction: SortDirection = SortDirection.ASC
@dataclass
class FilterConfig:
"""Filter-Konfiguration"""
column: str
operator: FilterOperator
value: Any = None
values: List[Any] = None
@dataclass
class PaginationConfig:
"""Pagination-Konfiguration"""
page: int = 1
page_size: int = 25
max_page_size: int = 100
@dataclass
class ColumnConfig:
"""Spalten-Konfiguration"""
key: str
label: str
sortable: bool = True
filterable: bool = True
searchable: bool = True
visible: bool = True
width: Optional[str] = None
align: str = "left" # left, center, right
format_type: str = "text" # text, number, date, datetime, boolean, currency
format_options: Dict[str, Any] = None
@dataclass
class TableConfig:
"""Gesamt-Tabellen-Konfiguration"""
table_id: str
columns: List[ColumnConfig]
default_sort: List[SortConfig] = None
default_filters: List[FilterConfig] = None
pagination: PaginationConfig = None
searchable: bool = True
exportable: bool = True
selectable: bool = False
row_actions: List[Dict[str, Any]] = None
class AdvancedTableQuery:
"""Builder für erweiterte Tabellen-Abfragen"""
def __init__(self, base_query: Query, model_class):
self.base_query = base_query
self.model_class = model_class
self.filters = []
self.sorts = []
self.search_term = None
self.search_columns = []
    def add_filter(self, filter_config: FilterConfig):
        """Adds a filter"""
        self.filters.append(filter_config)
        return self
    def add_sort(self, sort_config: SortConfig):
        """Adds a sort order"""
        self.sorts.append(sort_config)
        return self
    def set_search(self, term: str, columns: List[str]):
        """Sets the global search"""
        self.search_term = term
        self.search_columns = columns
        return self
    def build_query(self) -> Query:
        """Builds the final query"""
        query = self.base_query
        # Apply filters
        for filter_config in self.filters:
            query = self._apply_filter(query, filter_config)
        # Apply the global search
        if self.search_term and self.search_columns:
            query = self._apply_search(query)
        # Apply sorting
        for sort_config in self.sorts:
            query = self._apply_sort(query, sort_config)
        return query
    def _apply_filter(self, query: Query, filter_config: FilterConfig) -> Query:
        """Applies a single filter to the query"""
        column = getattr(self.model_class, filter_config.column, None)
        if not column:
            logger.warning(f"Column {filter_config.column} not found on {self.model_class}")
            return query
op = filter_config.operator
value = filter_config.value
values = filter_config.values
if op == FilterOperator.EQUALS:
return query.filter(column == value)
elif op == FilterOperator.NOT_EQUALS:
return query.filter(column != value)
elif op == FilterOperator.CONTAINS:
return query.filter(column.ilike(f"%{value}%"))
elif op == FilterOperator.NOT_CONTAINS:
return query.filter(~column.ilike(f"%{value}%"))
elif op == FilterOperator.STARTS_WITH:
return query.filter(column.ilike(f"{value}%"))
elif op == FilterOperator.ENDS_WITH:
return query.filter(column.ilike(f"%{value}"))
elif op == FilterOperator.GREATER_THAN:
return query.filter(column > value)
elif op == FilterOperator.LESS_THAN:
return query.filter(column < value)
elif op == FilterOperator.GREATER_EQUAL:
return query.filter(column >= value)
elif op == FilterOperator.LESS_EQUAL:
return query.filter(column <= value)
elif op == FilterOperator.BETWEEN and values and len(values) >= 2:
return query.filter(column.between(values[0], values[1]))
elif op == FilterOperator.IN and values:
return query.filter(column.in_(values))
elif op == FilterOperator.NOT_IN and values:
return query.filter(~column.in_(values))
elif op == FilterOperator.IS_NULL:
return query.filter(column.is_(None))
elif op == FilterOperator.IS_NOT_NULL:
return query.filter(column.isnot(None))
return query
    def _apply_search(self, query: Query) -> Query:
        """Applies the global search"""
        if not self.search_term or not self.search_columns:
            return query
        search_conditions = []
        for column_name in self.search_columns:
            column = getattr(self.model_class, column_name, None)
            if column:
                # Cast to string so numeric columns are searchable as well
                search_conditions.append(
                    func.cast(column, String).ilike(f"%{self.search_term}%")
                )
        if search_conditions:
            return query.filter(or_(*search_conditions))
        return query
    def _apply_sort(self, query: Query, sort_config: SortConfig) -> Query:
        """Applies the sort order"""
        column = getattr(self.model_class, sort_config.column, None)
        if not column:
            logger.warning(f"Column {sort_config.column} not found for sorting")
            return query
if sort_config.direction == SortDirection.DESC:
return query.order_by(column.desc())
else:
return query.order_by(column.asc())
class TableDataProcessor:
    """Processes table data for output"""
    def __init__(self, config: TableConfig):
        self.config = config
    def process_data(self, data: List[Any]) -> List[Dict[str, Any]]:
        """Transforms raw data into the table output format"""
        processed_rows = []
        for item in data:
            row = {}
            for column in self.config.columns:
                if not column.visible:
                    continue
                # Extract the value
                value = self._extract_value(item, column.key)
                # Format it
                formatted_value = self._format_value(value, column)
                row[column.key] = {
                    'raw': value,
                    'formatted': formatted_value,
                    'sortable': column.sortable,
                    'filterable': column.filterable
                }
            # Add row actions
            if self.config.row_actions:
                row['_actions'] = self._get_row_actions(item)
            # Row metadata
            row['_id'] = getattr(item, 'id', None)
            row['_type'] = item.__class__.__name__.lower()
            processed_rows.append(row)
        return processed_rows
    def _extract_value(self, item: Any, key: str) -> Any:
        """Extracts a value from an object"""
        try:
            # Support for nested attributes (e.g. "user.name")
if '.' in key:
obj = item
for part in key.split('.'):
obj = getattr(obj, part, None)
if obj is None:
break
return obj
else:
return getattr(item, key, None)
except AttributeError:
return None
    def _format_value(self, value: Any, column: ColumnConfig) -> str:
        """Formats a value based on the column type"""
if value is None:
return ""
format_type = column.format_type
options = column.format_options or {}
if format_type == "date" and isinstance(value, datetime):
date_format = options.get('format', '%d.%m.%Y')
return value.strftime(date_format)
elif format_type == "datetime" and isinstance(value, datetime):
datetime_format = options.get('format', '%d.%m.%Y %H:%M')
return value.strftime(datetime_format)
elif format_type == "number" and isinstance(value, (int, float)):
decimals = options.get('decimals', 0)
return f"{value:.{decimals}f}"
elif format_type == "currency" and isinstance(value, (int, float)):
currency = options.get('currency', '')
decimals = options.get('decimals', 2)
return f"{value:.{decimals}f} {currency}"
elif format_type == "boolean":
true_text = options.get('true_text', 'Ja')
false_text = options.get('false_text', 'Nein')
return true_text if value else false_text
elif format_type == "truncate":
max_length = options.get('max_length', 50)
text = str(value)
if len(text) > max_length:
return text[:max_length-3] + "..."
return text
return str(value)
    def _get_row_actions(self, item: Any) -> List[Dict[str, Any]]:
        """Generates the available actions for a row"""
        actions = []
        for action_config in self.config.row_actions:
            # Check the action's availability condition
            if self._check_action_condition(item, action_config):
actions.append({
'type': action_config['type'],
'label': action_config['label'],
'icon': action_config.get('icon'),
'url': self._build_action_url(item, action_config),
'method': action_config.get('method', 'GET'),
'confirm': action_config.get('confirm'),
'class': action_config.get('class', '')
})
return actions
    def _check_action_condition(self, item: Any, action_config: Dict[str, Any]) -> bool:
        """Checks whether an action is available for an item"""
        condition = action_config.get('condition')
        if not condition:
            return True
        try:
            # Simple condition check
if isinstance(condition, dict):
for key, expected_value in condition.items():
actual_value = self._extract_value(item, key)
if actual_value != expected_value:
return False
return True
except Exception:
return False
    def _build_action_url(self, item: Any, action_config: Dict[str, Any]) -> str:
        """Builds the URL for an action"""
        url_template = action_config.get('url', '')
        # Replace placeholders in the URL
try:
return url_template.format(id=getattr(item, 'id', ''))
except Exception:
return url_template
def parse_table_request(request_data: Dict[str, Any]) -> Tuple[List[SortConfig], List[FilterConfig], PaginationConfig, str]:
    """Parses table request parameters"""
    # Parse sorting
sorts = []
sort_data = request_data.get('sort', [])
if isinstance(sort_data, dict):
sort_data = [sort_data]
for sort_item in sort_data:
if isinstance(sort_item, dict):
column = sort_item.get('column')
direction = SortDirection(sort_item.get('direction', 'asc'))
if column:
sorts.append(SortConfig(column=column, direction=direction))
    # Parse filters
filters = []
filter_data = request_data.get('filters', [])
if isinstance(filter_data, dict):
filter_data = [filter_data]
for filter_item in filter_data:
if isinstance(filter_item, dict):
column = filter_item.get('column')
operator = FilterOperator(filter_item.get('operator', 'eq'))
value = filter_item.get('value')
values = filter_item.get('values')
if column:
filters.append(FilterConfig(
column=column,
operator=operator,
value=value,
values=values
))
    # Parse pagination
page = int(request_data.get('page', 1))
page_size = min(int(request_data.get('page_size', 25)), 100)
pagination = PaginationConfig(page=page, page_size=page_size)
    # Parse the search term
search = request_data.get('search', '')
return sorts, filters, pagination, search
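# Illustrative example (not part of the original module) of the request body
# that parse_table_request() expects; it mirrors the payload the AdvancedTable
# JavaScript client below sends:
_EXAMPLE_TABLE_REQUEST = {
    "page": 2,
    "page_size": 25,
    "search": "prusa",
    "sort": [{"column": "created_at", "direction": "desc"}],
    "filters": [{"column": "status", "operator": "eq", "value": "completed"}],
}
# parse_table_request(_EXAMPLE_TABLE_REQUEST) yields one SortConfig, one
# FilterConfig, PaginationConfig(page=2, page_size=25) and the search term "prusa".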
def get_advanced_table_javascript() -> str:
"""JavaScript für erweiterte Tabellen"""
return """
class AdvancedTable {
constructor(tableId, config = {}) {
this.tableId = tableId;
this.config = {
apiUrl: '/api/table-data',
pageSize: 25,
searchDelay: 500,
sortable: true,
filterable: true,
searchable: true,
...config
};
this.currentSort = [];
this.currentFilters = [];
this.currentPage = 1;
this.currentSearch = '';
this.totalPages = 1;
this.totalItems = 0;
this.searchTimeout = null;
this.init();
}
init() {
this.setupTable();
this.setupEventListeners();
this.loadData();
}
setupTable() {
const table = document.getElementById(this.tableId);
if (!table) return;
table.classList.add('advanced-table');
// Add table wrapper
const wrapper = document.createElement('div');
wrapper.className = 'table-wrapper';
table.parentNode.insertBefore(wrapper, table);
wrapper.appendChild(table);
// Add controls
this.createControls(wrapper);
}
createControls(wrapper) {
const controls = document.createElement('div');
controls.className = 'table-controls';
controls.innerHTML = `
<div class="table-controls-left">
<div class="search-box">
<input type="text" id="${this.tableId}-search" placeholder="Suchen..." class="search-input">
<span class="search-icon">🔍</span>
</div>
<div class="page-size-selector">
                <label>Entries per page:</label>
<select id="${this.tableId}-page-size">
<option value="10">10</option>
<option value="25" selected>25</option>
<option value="50">50</option>
<option value="100">100</option>
</select>
</div>
</div>
<div class="table-controls-right">
<button class="btn-filter" id="${this.tableId}-filter-btn">Filter</button>
<button class="btn-export" id="${this.tableId}-export-btn">Export</button>
<button class="btn-refresh" id="${this.tableId}-refresh-btn">↻</button>
</div>
`;
wrapper.insertBefore(controls, wrapper.firstChild);
// Add pagination
const pagination = document.createElement('div');
pagination.className = 'table-pagination';
pagination.id = `${this.tableId}-pagination`;
wrapper.appendChild(pagination);
}
setupEventListeners() {
// Search
const searchInput = document.getElementById(`${this.tableId}-search`);
searchInput?.addEventListener('input', (e) => {
clearTimeout(this.searchTimeout);
this.searchTimeout = setTimeout(() => {
this.currentSearch = e.target.value;
this.currentPage = 1;
this.loadData();
}, this.config.searchDelay);
});
// Page size
const pageSizeSelect = document.getElementById(`${this.tableId}-page-size`);
pageSizeSelect?.addEventListener('change', (e) => {
this.config.pageSize = parseInt(e.target.value);
this.currentPage = 1;
this.loadData();
});
// Refresh
const refreshBtn = document.getElementById(`${this.tableId}-refresh-btn`);
refreshBtn?.addEventListener('click', () => {
this.loadData();
});
// Export
const exportBtn = document.getElementById(`${this.tableId}-export-btn`);
exportBtn?.addEventListener('click', () => {
this.exportData();
});
// Table header clicks (sorting)
const table = document.getElementById(this.tableId);
table?.addEventListener('click', (e) => {
const th = e.target.closest('th[data-sortable="true"]');
if (th) {
const column = th.dataset.column;
this.toggleSort(column);
}
});
}
toggleSort(column) {
const existingSort = this.currentSort.find(s => s.column === column);
if (existingSort) {
if (existingSort.direction === 'asc') {
existingSort.direction = 'desc';
} else {
// Remove sort
this.currentSort = this.currentSort.filter(s => s.column !== column);
}
} else {
this.currentSort.push({ column, direction: 'asc' });
}
this.updateSortHeaders();
this.loadData();
}
updateSortHeaders() {
const table = document.getElementById(this.tableId);
const headers = table?.querySelectorAll('th[data-column]');
headers?.forEach(th => {
const column = th.dataset.column;
const sort = this.currentSort.find(s => s.column === column);
th.classList.remove('sort-asc', 'sort-desc');
if (sort) {
th.classList.add(`sort-${sort.direction}`);
}
});
}
async loadData() {
try {
const params = {
page: this.currentPage,
page_size: this.config.pageSize,
search: this.currentSearch,
sort: this.currentSort,
filters: this.currentFilters
};
const response = await fetch(this.config.apiUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify(params)
});
const data = await response.json();
if (data.success) {
this.renderTable(data.data);
this.updatePagination(data.pagination);
} else {
console.error('Table data loading failed:', data.error);
}
} catch (error) {
console.error('Table data loading error:', error);
}
}
renderTable(data) {
const table = document.getElementById(this.tableId);
const tbody = table?.querySelector('tbody');
if (!tbody) return;
tbody.innerHTML = '';
data.forEach(row => {
const tr = document.createElement('tr');
tr.dataset.id = row._id;
// Render cells
Object.keys(row).forEach(key => {
if (key.startsWith('_')) return; // Skip metadata
const td = document.createElement('td');
const cellData = row[key];
if (typeof cellData === 'object' && cellData.formatted !== undefined) {
td.innerHTML = cellData.formatted;
td.dataset.raw = cellData.raw;
} else {
td.textContent = cellData;
}
tr.appendChild(td);
});
// Add actions column if exists
if (row._actions && row._actions.length > 0) {
const actionsTd = document.createElement('td');
actionsTd.className = 'actions-cell';
actionsTd.innerHTML = this.renderActions(row._actions);
tr.appendChild(actionsTd);
}
tbody.appendChild(tr);
});
}
renderActions(actions) {
return actions.map(action => {
const confirmAttr = action.confirm ? `onclick="return confirm('${action.confirm}')"` : '';
const icon = action.icon ? `<span class="action-icon">${action.icon}</span>` : '';
return `<a href="${action.url}" class="action-btn ${action.class}" ${confirmAttr}>
${icon}${action.label}
</a>`;
}).join(' ');
}
updatePagination(pagination) {
this.currentPage = pagination.page;
this.totalPages = pagination.total_pages;
this.totalItems = pagination.total_items;
const paginationEl = document.getElementById(`${this.tableId}-pagination`);
if (!paginationEl) return;
paginationEl.innerHTML = `
<div class="pagination-info">
                Showing ${pagination.start_item}-${pagination.end_item} of ${pagination.total_items} entries
</div>
<div class="pagination-controls">
${this.renderPaginationButtons()}
</div>
`;
        // Event listeners for pagination
paginationEl.querySelectorAll('.page-btn').forEach(btn => {
btn.addEventListener('click', (e) => {
e.preventDefault();
const page = parseInt(btn.dataset.page);
if (page !== this.currentPage) {
this.currentPage = page;
this.loadData();
}
});
});
}
renderPaginationButtons() {
const buttons = [];
const maxButtons = 7;
// Previous button
        buttons.push(`
            <button class="page-btn ${this.currentPage === 1 ? 'disabled' : ''}"
                    data-page="${this.currentPage - 1}" ${this.currentPage === 1 ? 'disabled' : ''}>
                ‹
            </button>
        `);
// Page number buttons
let startPage = Math.max(1, this.currentPage - Math.floor(maxButtons / 2));
let endPage = Math.min(this.totalPages, startPage + maxButtons - 1);
if (endPage - startPage + 1 < maxButtons) {
startPage = Math.max(1, endPage - maxButtons + 1);
}
for (let i = startPage; i <= endPage; i++) {
buttons.push(`
<button class="page-btn ${i === this.currentPage ? 'active' : ''}"
data-page="${i}">
${i}
</button>
`);
}
// Next button
        buttons.push(`
            <button class="page-btn ${this.currentPage === this.totalPages ? 'disabled' : ''}"
                    data-page="${this.currentPage + 1}" ${this.currentPage === this.totalPages ? 'disabled' : ''}>
                ›
            </button>
        `);
return buttons.join('');
}
exportData() {
const params = new URLSearchParams({
search: this.currentSearch,
sort: JSON.stringify(this.currentSort),
filters: JSON.stringify(this.currentFilters),
format: 'csv'
});
window.open(`${this.config.apiUrl}/export?${params}`, '_blank');
}
}
// Auto-initialize tables with data-advanced-table attribute
document.addEventListener('DOMContentLoaded', function() {
document.querySelectorAll('[data-advanced-table]').forEach(table => {
const config = JSON.parse(table.dataset.advancedTable || '{}');
new AdvancedTable(table.id, config);
});
});
"""
def get_advanced_table_css() -> str:
"""CSS für erweiterte Tabellen"""
return """
.table-wrapper {
background: white;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
overflow: hidden;
}
.table-controls {
display: flex;
justify-content: space-between;
align-items: center;
padding: 1rem;
background: #f8f9fa;
border-bottom: 1px solid #e9ecef;
}
.table-controls-left {
display: flex;
align-items: center;
gap: 1rem;
}
.search-box {
position: relative;
}
.search-input {
padding: 0.5rem 0.75rem;
padding-right: 2rem;
border: 1px solid #d1d5db;
border-radius: 6px;
font-size: 0.875rem;
}
.search-icon {
position: absolute;
right: 0.5rem;
top: 50%;
transform: translateY(-50%);
color: #6b7280;
}
.page-size-selector {
display: flex;
align-items: center;
gap: 0.5rem;
font-size: 0.875rem;
}
.table-controls-right {
display: flex;
gap: 0.5rem;
}
.advanced-table {
width: 100%;
border-collapse: collapse;
}
.advanced-table th {
background: #f8f9fa;
padding: 0.75rem;
text-align: left;
font-weight: 600;
border-bottom: 2px solid #e9ecef;
position: relative;
}
.advanced-table th[data-sortable="true"] {
cursor: pointer;
user-select: none;
}
.advanced-table th[data-sortable="true"]:hover {
background: #e9ecef;
}
.advanced-table th.sort-asc::after {
    content: " ▲";
    color: #3b82f6;
}
.advanced-table th.sort-desc::after {
    content: " ▼";
    color: #3b82f6;
}
.advanced-table td {
padding: 0.75rem;
border-bottom: 1px solid #e9ecef;
}
.advanced-table tbody tr:hover {
background: #f8f9fa;
}
.actions-cell {
white-space: nowrap;
}
.action-btn {
display: inline-block;
padding: 0.25rem 0.5rem;
margin: 0 0.125rem;
font-size: 0.75rem;
text-decoration: none;
border-radius: 4px;
background: #e5e7eb;
color: #374151;
}
.action-btn:hover {
background: #d1d5db;
}
.action-btn.btn-primary {
background: #3b82f6;
color: white;
}
.action-btn.btn-danger {
background: #ef4444;
color: white;
}
.table-pagination {
display: flex;
justify-content: space-between;
align-items: center;
padding: 1rem;
background: #f8f9fa;
border-top: 1px solid #e9ecef;
}
.pagination-controls {
display: flex;
gap: 0.25rem;
}
.page-btn {
padding: 0.5rem 0.75rem;
border: 1px solid #d1d5db;
background: white;
cursor: pointer;
border-radius: 4px;
}
.page-btn:hover:not(.disabled) {
background: #f3f4f6;
}
.page-btn.active {
background: #3b82f6;
color: white;
border-color: #3b82f6;
}
.page-btn.disabled {
opacity: 0.5;
cursor: not-allowed;
}
@media (max-width: 768px) {
.table-controls {
flex-direction: column;
gap: 1rem;
align-items: stretch;
}
.table-controls-left,
.table-controls-right {
justify-content: center;
}
.advanced-table {
font-size: 0.875rem;
}
.advanced-table th,
.advanced-table td {
padding: 0.5rem;
}
.table-pagination {
flex-direction: column;
gap: 1rem;
}
}
"""
def create_table_config(table_id: str, columns: List[ColumnConfig], **kwargs) -> TableConfig:
"""
Erstellt eine neue Tabellen-Konfiguration.
Args:
table_id: Eindeutige ID für die Tabelle
columns: Liste der Spalten-Konfigurationen
**kwargs: Zusätzliche Konfigurationsoptionen
Returns:
TableConfig: Konfiguration für die erweiterte Tabelle
"""
return TableConfig(
table_id=table_id,
columns=columns,
default_sort=kwargs.get('default_sort', []),
default_filters=kwargs.get('default_filters', []),
pagination=kwargs.get('pagination', PaginationConfig()),
searchable=kwargs.get('searchable', True),
exportable=kwargs.get('exportable', True),
selectable=kwargs.get('selectable', False),
row_actions=kwargs.get('row_actions', [])
)
def get_advanced_tables_js() -> str:
    """Alias for the existing function"""
    return get_advanced_table_javascript()
def get_advanced_tables_css() -> str:
    """Alias for the existing function"""
    return get_advanced_table_css()
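To show how these pieces fit together, here is a minimal sketch of a Flask endpoint serving table data for the Printer model. The route path matches the default apiUrl used by the JavaScript client above; the Printer column names are assumptions, while parse_table_request, AdvancedTableQuery, TableDataProcessor and create_table_config come from this module:

# Hypothetical endpoint wiring utils/advanced_tables.py into a Flask app.
from flask import Flask, request, jsonify
from models import Printer, get_db_session
from utils.advanced_tables import (
    AdvancedTableQuery, TableDataProcessor, ColumnConfig,
    create_table_config, parse_table_request,
)

app = Flask(__name__)

printer_table = create_table_config("printers", columns=[
    ColumnConfig(key="name", label="Name"),
    ColumnConfig(key="status", label="Status"),
    ColumnConfig(key="location", label="Location", sortable=False),
])

@app.route("/api/table-data", methods=["POST"])
def printer_table_data():
    sorts, filters, pagination, search = parse_table_request(request.get_json() or {})
    session = get_db_session()
    try:
        builder = AdvancedTableQuery(session.query(Printer), Printer)
        for f in filters:
            builder.add_filter(f)
        for s in sorts:
            builder.add_sort(s)
        if search:
            builder.set_search(search, ["name", "location"])
        query = builder.build_query()
        total = query.count()
        items = query.offset((pagination.page - 1) * pagination.page_size) \
                     .limit(pagination.page_size).all()
        rows = TableDataProcessor(printer_table).process_data(items)
        return jsonify({
            "success": True,
            "data": rows,
            "pagination": {
                "page": pagination.page,
                "total_pages": max(1, -(-total // pagination.page_size)),  # ceil division
                "total_items": total,
                "start_item": 0 if total == 0 else (pagination.page - 1) * pagination.page_size + 1,
                "end_item": min(pagination.page * pagination.page_size, total),
            },
        })
    finally:
        session.close()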

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from models import get_db_session, Printer
def aktiviere_alle_drucker():
    """Activates every printer in the database."""
    try:
        session = get_db_session()
        drucker = session.query(Printer).all()
        if not drucker:
            print("No printers found in the database.")
            session.close()
            return
        print(f"Number of printers: {len(drucker)}")
        print("Activating all printers...")
        for d in drucker:
            d.active = True
            print(f"Printer {d.id}: {d.name} - IP: {d.plug_ip} - active: {d.active}")
        session.commit()
        print("All printers were successfully activated!")
        session.close()
    except Exception as e:
        print(f"Error: {str(e)}")
        try:
            session.rollback()
            session.close()
        except Exception:
            pass
if __name__ == "__main__":
    aktiviere_alle_drucker()

utils/analytics.py Normal file

@@ -0,0 +1,667 @@
#!/usr/bin/env python3
"""
Advanced analytics and statistics for the MYP platform
Comprehensive data analysis, reports, and KPI tracking
"""
import json
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple, Any
from sqlalchemy import func, desc, and_, or_, extract
from sqlalchemy.orm import Session
from dataclasses import dataclass, asdict
from enum import Enum
from utils.logging_config import get_logger
logger = get_logger("analytics")
# ===== ANALYTICS ENUMS =====
class MetricType(Enum):
    """Metric types"""
    COUNTER = "counter"      # counter (only increases)
    GAUGE = "gauge"          # instantaneous value
    HISTOGRAM = "histogram"  # distribution of values
    RATE = "rate"            # rate over time
class TimeRange(Enum):
"""Zeiträume für Analysen"""
HOUR = "hour"
DAY = "day"
WEEK = "week"
MONTH = "month"
QUARTER = "quarter"
YEAR = "year"
CUSTOM = "custom"
class ReportFormat(Enum):
"""Ausgabeformate für Berichte"""
JSON = "json"
CSV = "csv"
PDF = "pdf"
EXCEL = "excel"
# ===== DATA CLASSES =====
@dataclass
class Metric:
"""Einzelne Metrik"""
name: str
value: float
unit: str
timestamp: datetime
tags: Dict[str, str] = None
def to_dict(self) -> Dict:
result = asdict(self)
result['timestamp'] = self.timestamp.isoformat()
return result
@dataclass
class AnalyticsData:
"""Container für Analytik-Daten"""
metrics: List[Metric]
timerange: TimeRange
start_date: datetime
end_date: datetime
filters: Dict[str, Any] = None
def to_dict(self) -> Dict:
return {
'metrics': [m.to_dict() for m in self.metrics],
'timerange': self.timerange.value,
'start_date': self.start_date.isoformat(),
'end_date': self.end_date.isoformat(),
'filters': self.filters or {}
}
@dataclass
class KPI:
"""Key Performance Indicator"""
name: str
current_value: float
previous_value: float
target_value: float
unit: str
trend: str # "up", "down", "stable"
change_percent: float
def to_dict(self) -> Dict:
return asdict(self)
# ===== ANALYTICS ENGINE =====
class AnalyticsEngine:
"""Hauptklasse für Analytik und Statistiken"""
def __init__(self):
self.cache = {}
self.cache_timeout = timedelta(minutes=10)
def get_printer_statistics(self, time_range: TimeRange = TimeRange.MONTH,
start_date: datetime = None, end_date: datetime = None) -> Dict:
"""
Drucker-Statistiken abrufen
Args:
time_range: Zeitraum für Analyse
start_date: Startdatum (optional)
end_date: Enddatum (optional)
Returns:
Dict: Drucker-Statistiken
"""
try:
from models import get_db_session, Printer, Job
if not start_date or not end_date:
start_date, end_date = self._get_date_range(time_range)
db_session = get_db_session()
            # Base statistics
total_printers = db_session.query(Printer).filter(Printer.active == True).count()
online_printers = db_session.query(Printer).filter(
and_(Printer.active == True, Printer.status.in_(["online", "idle"]))
).count()
            # Utilisation per printer
printer_usage = db_session.query(
Printer.name,
func.count(Job.id).label('job_count'),
func.sum(Job.duration_minutes).label('total_duration')
).outerjoin(Job, and_(
Job.printer_id == Printer.id,
Job.created_at.between(start_date, end_date)
)).group_by(Printer.id, Printer.name).all()
            # Status distribution
status_distribution = db_session.query(
Printer.status,
func.count(Printer.id).label('count')
).filter(Printer.active == True).group_by(Printer.status).all()
            # Average availability
availability_stats = self._calculate_printer_availability(db_session, start_date, end_date)
db_session.close()
return {
'summary': {
'total_printers': total_printers,
'online_printers': online_printers,
'availability_rate': round((online_printers / total_printers * 100) if total_printers > 0 else 0, 1)
},
'usage_by_printer': [
{
'name': usage.name,
'jobs': usage.job_count or 0,
'total_hours': round((usage.total_duration or 0) / 60, 1),
'utilization_rate': self._calculate_utilization_rate(usage.total_duration, start_date, end_date)
}
for usage in printer_usage
],
'status_distribution': [
{'status': status.status, 'count': status.count}
for status in status_distribution
],
'availability': availability_stats,
'time_range': {
'start': start_date.isoformat(),
'end': end_date.isoformat(),
'type': time_range.value
}
}
except Exception as e:
logger.error(f"Fehler beim Abrufen der Drucker-Statistiken: {e}")
return {'error': str(e)}
def get_job_statistics(self, time_range: TimeRange = TimeRange.MONTH,
start_date: datetime = None, end_date: datetime = None) -> Dict:
"""
Job-Statistiken abrufen
Args:
time_range: Zeitraum für Analyse
start_date: Startdatum (optional)
end_date: Enddatum (optional)
Returns:
Dict: Job-Statistiken
"""
try:
from models import get_db_session, Job, User
if not start_date or not end_date:
start_date, end_date = self._get_date_range(time_range)
db_session = get_db_session()
            # Base statistics
base_query = db_session.query(Job).filter(
Job.created_at.between(start_date, end_date)
)
total_jobs = base_query.count()
completed_jobs = base_query.filter(Job.status == 'completed').count()
failed_jobs = base_query.filter(Job.status == 'failed').count()
cancelled_jobs = base_query.filter(Job.status == 'cancelled').count()
            # Status distribution
status_distribution = db_session.query(
Job.status,
func.count(Job.id).label('count')
).filter(
Job.created_at.between(start_date, end_date)
).group_by(Job.status).all()
            # Average job duration
avg_duration = db_session.query(
func.avg(Job.duration_minutes)
).filter(
and_(
Job.created_at.between(start_date, end_date),
Job.status == 'completed'
)
).scalar() or 0
            # Top users
top_users = db_session.query(
User.username,
User.name,
func.count(Job.id).label('job_count'),
func.sum(Job.duration_minutes).label('total_duration')
).join(Job).filter(
Job.created_at.between(start_date, end_date)
).group_by(User.id, User.username, User.name).order_by(
desc('job_count')
).limit(10).all()
            # Jobs over time (daily)
daily_jobs = self._get_daily_job_trend(db_session, start_date, end_date)
            # Material consumption (if available)
material_usage = db_session.query(
func.sum(Job.material_used)
).filter(
and_(
Job.created_at.between(start_date, end_date),
Job.material_used.isnot(None)
)
).scalar() or 0
db_session.close()
success_rate = round((completed_jobs / total_jobs * 100) if total_jobs > 0 else 0, 1)
return {
'summary': {
'total_jobs': total_jobs,
'completed_jobs': completed_jobs,
'failed_jobs': failed_jobs,
'cancelled_jobs': cancelled_jobs,
'success_rate': success_rate,
'avg_duration_hours': round(avg_duration / 60, 1),
'total_material_g': round(material_usage, 1)
},
'status_distribution': [
{'status': status.status, 'count': status.count}
for status in status_distribution
],
'top_users': [
{
'username': user.username,
'name': user.name,
'jobs': user.job_count,
'total_hours': round((user.total_duration or 0) / 60, 1)
}
for user in top_users
],
'daily_trend': daily_jobs,
'time_range': {
'start': start_date.isoformat(),
'end': end_date.isoformat(),
'type': time_range.value
}
}
except Exception as e:
logger.error(f"Fehler beim Abrufen der Job-Statistiken: {e}")
return {'error': str(e)}
def get_user_statistics(self, time_range: TimeRange = TimeRange.MONTH,
start_date: datetime = None, end_date: datetime = None) -> Dict:
"""
Benutzer-Statistiken abrufen
Args:
time_range: Zeitraum für Analyse
start_date: Startdatum (optional)
end_date: Enddatum (optional)
Returns:
Dict: Benutzer-Statistiken
"""
try:
from models import get_db_session, User, Job
if not start_date or not end_date:
start_date, end_date = self._get_date_range(time_range)
db_session = get_db_session()
            # Base statistics
total_users = db_session.query(User).filter(User.active == True).count()
active_users = db_session.query(func.distinct(Job.user_id)).filter(
Job.created_at.between(start_date, end_date)
).count()
            # New users within the period
new_users = db_session.query(User).filter(
and_(
User.created_at.between(start_date, end_date),
User.active == True
)
).count()
            # User activity
user_activity = db_session.query(
User.username,
User.name,
func.count(Job.id).label('jobs'),
func.max(Job.created_at).label('last_activity'),
func.sum(Job.duration_minutes).label('total_duration')
).outerjoin(Job, and_(
Job.user_id == User.id,
Job.created_at.between(start_date, end_date)
)).filter(User.active == True).group_by(
User.id, User.username, User.name
).all()
            # Role distribution
role_distribution = db_session.query(
User.role,
func.count(User.id).label('count')
).filter(User.active == True).group_by(User.role).all()
db_session.close()
            # Compute the engagement rate
engagement_rate = round((active_users / total_users * 100) if total_users > 0 else 0, 1)
return {
'summary': {
'total_users': total_users,
'active_users': active_users,
'new_users': new_users,
'engagement_rate': engagement_rate
},
'role_distribution': [
{'role': role.role or 'user', 'count': role.count}
for role in role_distribution
],
'user_activity': [
{
'username': user.username,
'name': user.name,
'jobs': user.jobs or 0,
'last_activity': user.last_activity.isoformat() if user.last_activity else None,
'total_hours': round((user.total_duration or 0) / 60, 1)
}
for user in user_activity
],
'time_range': {
'start': start_date.isoformat(),
'end': end_date.isoformat(),
'type': time_range.value
}
}
except Exception as e:
logger.error(f"Fehler beim Abrufen der Benutzer-Statistiken: {e}")
return {'error': str(e)}
def get_system_kpis(self, time_range: TimeRange = TimeRange.MONTH) -> Dict:
"""
System-KPIs abrufen
Args:
time_range: Zeitraum für Vergleich
Returns:
Dict: KPI-Daten
"""
try:
current_start, current_end = self._get_date_range(time_range)
previous_start, previous_end = self._get_previous_period(current_start, current_end)
            # Current period
current_printer_stats = self.get_printer_statistics(TimeRange.CUSTOM, current_start, current_end)
current_job_stats = self.get_job_statistics(TimeRange.CUSTOM, current_start, current_end)
current_user_stats = self.get_user_statistics(TimeRange.CUSTOM, current_start, current_end)
            # Previous period
previous_printer_stats = self.get_printer_statistics(TimeRange.CUSTOM, previous_start, previous_end)
previous_job_stats = self.get_job_statistics(TimeRange.CUSTOM, previous_start, previous_end)
previous_user_stats = self.get_user_statistics(TimeRange.CUSTOM, previous_start, previous_end)
            # Compute the KPIs
            kpis = [
                self._create_kpi(
                    name="Printer availability",
                    current=current_printer_stats['summary']['availability_rate'],
                    previous=previous_printer_stats['summary']['availability_rate'],
                    target=95.0,
                    unit="%"
                ),
                self._create_kpi(
                    name="Job success rate",
                    current=current_job_stats['summary']['success_rate'],
                    previous=previous_job_stats['summary']['success_rate'],
                    target=90.0,
                    unit="%"
                ),
                self._create_kpi(
                    name="Active users",
                    current=current_user_stats['summary']['active_users'],
                    previous=previous_user_stats['summary']['active_users'],
                    target=50,
                    unit="users"
                ),
                self._create_kpi(
                    name="Average job duration",
                    current=current_job_stats['summary']['avg_duration_hours'],
                    previous=previous_job_stats['summary']['avg_duration_hours'],
                    target=4.0,
                    unit="hours"
                ),
                self._create_kpi(
                    name="Material consumption",
                    current=current_job_stats['summary']['total_material_g'],
                    previous=previous_job_stats['summary']['total_material_g'],
                    target=10000,
                    unit="g"
                )
            ]
return {
'kpis': [kpi.to_dict() for kpi in kpis],
'period': {
'current': {
'start': current_start.isoformat(),
'end': current_end.isoformat()
},
'previous': {
'start': previous_start.isoformat(),
'end': previous_end.isoformat()
}
}
}
except Exception as e:
logger.error(f"Fehler beim Abrufen der System-KPIs: {e}")
return {'error': str(e)}
def generate_report(self, report_type: str, time_range: TimeRange = TimeRange.MONTH,
format: ReportFormat = ReportFormat.JSON, **kwargs) -> Dict:
"""
Bericht generieren
Args:
report_type: Art des Berichts
time_range: Zeitraum
format: Ausgabeformat
**kwargs: Zusätzliche Parameter
Returns:
Dict: Bericht-Daten
"""
try:
start_date = kwargs.get('start_date')
end_date = kwargs.get('end_date')
if not start_date or not end_date:
start_date, end_date = self._get_date_range(time_range)
if report_type == "comprehensive":
return self._generate_comprehensive_report(start_date, end_date, format)
elif report_type == "printer_usage":
return self._generate_printer_usage_report(start_date, end_date, format)
elif report_type == "user_activity":
return self._generate_user_activity_report(start_date, end_date, format)
elif report_type == "efficiency":
return self._generate_efficiency_report(start_date, end_date, format)
else:
raise ValueError(f"Unbekannter Berichtstyp: {report_type}")
except Exception as e:
logger.error(f"Fehler beim Generieren des Berichts: {e}")
return {'error': str(e)}
# ===== HELPER METHODS =====
def _get_date_range(self, time_range: TimeRange) -> Tuple[datetime, datetime]:
"""Berechnet Datumsbereich basierend auf TimeRange"""
end_date = datetime.now()
if time_range == TimeRange.HOUR:
start_date = end_date - timedelta(hours=1)
elif time_range == TimeRange.DAY:
start_date = end_date - timedelta(days=1)
elif time_range == TimeRange.WEEK:
start_date = end_date - timedelta(weeks=1)
elif time_range == TimeRange.MONTH:
start_date = end_date - timedelta(days=30)
elif time_range == TimeRange.QUARTER:
start_date = end_date - timedelta(days=90)
elif time_range == TimeRange.YEAR:
start_date = end_date - timedelta(days=365)
else:
start_date = end_date - timedelta(days=30) # Default
return start_date, end_date
def _get_previous_period(self, start_date: datetime, end_date: datetime) -> Tuple[datetime, datetime]:
"""Berechnet vorherige Periode für Vergleiche"""
duration = end_date - start_date
previous_end = start_date
previous_start = previous_end - duration
return previous_start, previous_end
def _create_kpi(self, name: str, current: float, previous: float,
target: float, unit: str) -> KPI:
"""Erstellt KPI-Objekt mit Berechnungen"""
if previous > 0:
change_percent = round(((current - previous) / previous) * 100, 1)
else:
change_percent = 0.0
if abs(change_percent) < 1:
trend = "stable"
elif change_percent > 0:
trend = "up"
else:
trend = "down"
return KPI(
name=name,
current_value=current,
previous_value=previous,
target_value=target,
unit=unit,
trend=trend,
change_percent=change_percent
)
def _calculate_printer_availability(self, db_session: Session,
start_date: datetime, end_date: datetime) -> Dict:
"""Berechnet Drucker-Verfügbarkeit"""
# Vereinfachte Berechnung - kann erweitert werden
from models import Printer
total_printers = db_session.query(Printer).filter(Printer.active == True).count()
online_printers = db_session.query(Printer).filter(
and_(Printer.active == True, Printer.status.in_(["online", "idle"]))
).count()
availability_rate = round((online_printers / total_printers * 100) if total_printers > 0 else 0, 1)
return {
'total_printers': total_printers,
'online_printers': online_printers,
'availability_rate': availability_rate,
            'downtime_hours': 0  # placeholder - could be derived from detailed logging
}
def _calculate_utilization_rate(self, total_minutes: int,
start_date: datetime, end_date: datetime) -> float:
"""Berechnet Auslastungsrate"""
if not total_minutes:
return 0.0
total_hours = (end_date - start_date).total_seconds() / 3600
utilization_rate = (total_minutes / 60) / total_hours * 100
return round(min(utilization_rate, 100), 1)
def _get_daily_job_trend(self, db_session: Session,
start_date: datetime, end_date: datetime) -> List[Dict]:
"""Holt tägliche Job-Trends"""
from models import Job
daily_jobs = db_session.query(
func.date(Job.created_at).label('date'),
func.count(Job.id).label('count')
).filter(
Job.created_at.between(start_date, end_date)
).group_by(
func.date(Job.created_at)
).order_by('date').all()
return [
{
'date': job.date.isoformat(),
'jobs': job.count
}
for job in daily_jobs
]
def _generate_comprehensive_report(self, start_date: datetime,
end_date: datetime, format: ReportFormat) -> Dict:
"""Generiert umfassenden Bericht"""
printer_stats = self.get_printer_statistics(TimeRange.CUSTOM, start_date, end_date)
job_stats = self.get_job_statistics(TimeRange.CUSTOM, start_date, end_date)
user_stats = self.get_user_statistics(TimeRange.CUSTOM, start_date, end_date)
kpis = self.get_system_kpis(TimeRange.CUSTOM)
report = {
            'title': 'Comprehensive system report',
'generated_at': datetime.now().isoformat(),
'period': {
'start': start_date.isoformat(),
'end': end_date.isoformat()
},
'summary': {
'total_jobs': job_stats['summary']['total_jobs'],
'success_rate': job_stats['summary']['success_rate'],
'active_users': user_stats['summary']['active_users'],
'printer_availability': printer_stats['summary']['availability_rate']
},
'sections': {
'printers': printer_stats,
'jobs': job_stats,
'users': user_stats,
'kpis': kpis
}
}
if format == ReportFormat.JSON:
return report
        else:
            # Conversion to other formats would happen here
            return {'error': f'Format {format.value} not implemented yet'}
# ===== GLOBAL INSTANCE =====
analytics_engine = AnalyticsEngine()
# ===== UTILITY FUNCTIONS =====
def get_dashboard_stats() -> Dict:
"""Schnelle Dashboard-Statistiken"""
return analytics_engine.get_system_kpis(TimeRange.DAY)
def export_statistics(report_type: str, time_range: TimeRange, format: ReportFormat = ReportFormat.JSON) -> Dict:
"""Exportiert Statistiken in verschiedenen Formaten"""
return analytics_engine.generate_report(report_type, time_range, format)
def track_event(event_name: str, properties: Dict = None):
    """Tracks an event for analytics"""
    try:
        logger.info(f"📊 Event tracked: {event_name} - {properties or {}}")
        # Actual event tracking could be implemented here
    except Exception as e:
        logger.error(f"Error during event tracking: {e}")
# Log analytics system start-up
logger.info("📈 Analytics engine initialised")
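As a quick orientation, a minimal sketch of how the module-level analytics_engine instance might be consumed from other code; the printed fields follow the dictionaries assembled above:

# Hypothetical consumer of the analytics engine.
from utils.analytics import analytics_engine, TimeRange

stats = analytics_engine.get_job_statistics(TimeRange.WEEK)
if 'error' not in stats:
    summary = stats['summary']
    print(f"{summary['total_jobs']} jobs, "
          f"{summary['success_rate']}% success rate, "
          f"avg {summary['avg_duration_hours']} h per job")

kpis = analytics_engine.get_system_kpis(TimeRange.MONTH)
for kpi in kpis.get('kpis', []):
    print(f"{kpi['name']}: {kpi['current_value']} {kpi['unit']} ({kpi['trend']})")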

utils/backup_manager.py Normal file

@@ -0,0 +1,25 @@
"""
Backup Manager - Datensicherungsverwaltung
Minimal implementation to resolve import dependencies.
"""
from utils.logging_config import get_logger
backup_logger = get_logger("backup")
class BackupManager:
"""Minimale BackupManager-Implementierung"""
def __init__(self):
self.enabled = False
backup_logger.info("BackupManager initialisiert (minimal implementation)")
def create_backup(self, backup_type="manual"):
"""Erstellt ein Backup (Placeholder)"""
backup_logger.info(f"Backup-Erstellung angefordert: {backup_type}")
return {"success": False, "message": "Backup-Funktionalität nicht implementiert"}
def restore_backup(self, backup_path):
"""Stellt ein Backup wieder her (Placeholder)"""
backup_logger.info(f"Backup-Wiederherstellung angefordert: {backup_path}")
return {"success": False, "message": "Restore-Funktionalität nicht implementiert"}

View File

@@ -0,0 +1,110 @@
#!/usr/bin/env python3
"""
Script that cleans the printer database and re-adds the correct hard-coded printers.
"""
import sys
import os
sys.path.append('.')
from config.settings import PRINTERS
from database.db_manager import DatabaseManager
from models import Printer
from datetime import datetime
def clean_and_add_printers():
    """Cleans the printer database and inserts the correct hard-coded printers."""
    print("=== Cleaning and rebuilding the printer database ===")
    print(f"Hard-coded printers: {len(PRINTERS)}")
    try:
        db = DatabaseManager()
        session = db.get_session()
        # Delete all existing printers
        existing_printers = session.query(Printer).all()
        print(f"Deleting {len(existing_printers)} existing printers...")
        for printer in existing_printers:
            session.delete(printer)
        session.commit()
        print("✅ All old printers deleted")
        # Add the new printers
        added_count = 0
        for printer_name, config in PRINTERS.items():
            # Create a new printer
            new_printer = Printer(
                name=printer_name,
                model="P115",  # default model
                location="Werk 040 - Berlin - TBA",  # updated location
                ip_address=config["ip"],
                mac_address=f"98:25:4A:E1:{printer_name[-1]}0:0{printer_name[-1]}",  # dummy MAC
                plug_ip=config["ip"],
                plug_username="admin",
                plug_password="admin",
                status="available",  # available because it is in the configuration
                active=True,
                created_at=datetime.now()
            )
            session.add(new_printer)
            print(f"{printer_name}: added (IP: {config['ip']})")
            added_count += 1
        # Persist the changes
        session.commit()
        session.close()
        print(f"\n{added_count} new printers added")
        print("Printer database successfully cleaned and rebuilt!")
    except Exception as e:
        print(f"❌ Error while cleaning: {e}")
        if 'session' in locals():
            session.rollback()
            session.close()
def list_final_printers():
    """Shows the final set of printers in the database."""
    print("\n=== Final printer list ===")
    try:
        db = DatabaseManager()
        session = db.get_session()
        printers = session.query(Printer).all()
        if not printers:
            print("No printers found in the database.")
            return
        print(f"{'ID':<5} {'Name':<15} {'Status':<12} {'IP address':<15} {'Active':<8}")
        print("-" * 60)
        for printer in printers:
            active_str = "✓" if printer.active else "✗"
            print(f"{printer.id:<5} {printer.name:<15} {printer.status:<12} {printer.ip_address:<15} {active_str:<8}")
        session.close()
        print(f"\nTotal: {len(printers)} printers")
    except Exception as e:
        print(f"❌ Error while querying printers: {e}")
        if 'session' in locals():
            session.close()
if __name__ == "__main__":
    print("Printer database clean-up and rebuild")
    print("=" * 50)
    # Clean the database and add the new printers
    clean_and_add_printers()
    # Show the final list
    list_final_printers()

utils/conflict_manager.py Normal file

@@ -0,0 +1,624 @@
#!/usr/bin/env python3
"""
Advanced printer-conflict management engine - MYP Platform
This module handles every kind of printer conflict:
- time overlaps
- resource conflicts
- priority conflicts
- automatic resolution
- user notifications
"""
import logging
from datetime import datetime, timedelta
from typing import List, Dict, Tuple, Optional, Set
from dataclasses import dataclass
from enum import Enum
from sqlalchemy.orm import Session
from sqlalchemy import and_, or_
from models import Job, Printer, User, get_cached_session
# Logging setup
logger = logging.getLogger(__name__)
class ConflictType(Enum):
"""Konflikttypen im System"""
TIME_OVERLAP = "zeitüberschneidung"
PRINTER_OFFLINE = "drucker_offline"
RESOURCE_UNAVAILABLE = "ressource_nicht_verfügbar"
PRIORITY_CONFLICT = "prioritätskonflikt"
MAINTENANCE_CONFLICT = "wartungskonflikt"
class ConflictSeverity(Enum):
"""Schweregrade von Konflikten"""
CRITICAL = "kritisch" # Verhindert Job-Ausführung komplett
HIGH = "hoch" # Beeinträchtigt Job-Qualität stark
MEDIUM = "mittel" # Beeinträchtigt Job-Effizienz
LOW = "niedrig" # Geringfügige Beeinträchtigung
INFO = "information" # Nur informativ
class ResolutionStrategy(Enum):
"""Lösungsstrategien für Konflikte"""
AUTO_REASSIGN = "automatische_neuzuweisung"
TIME_SHIFT = "zeitverschiebung"
PRIORITY_PREEMPTION = "prioritäts_verdrängung"
QUEUE_PLACEMENT = "warteschlange"
MANUAL_INTERVENTION = "manuelle_behandlung"
RESOURCE_SUBSTITUTION = "ressourcen_ersatz"
@dataclass
class ConflictDetails:
"""Detaillierte Konfliktinformationen"""
conflict_type: ConflictType
severity: ConflictSeverity
affected_job_id: int
conflicting_job_ids: List[int]
affected_printer_id: Optional[int]
conflict_start: datetime
conflict_end: datetime
description: str
suggested_solutions: List[Dict]
estimated_impact: str
auto_resolvable: bool
@dataclass
class ConflictResolution:
"""Ergebnis einer Konfliktlösung"""
success: bool
strategy_used: ResolutionStrategy
new_printer_id: Optional[int]
new_start_time: Optional[datetime]
new_end_time: Optional[datetime]
affected_jobs: List[int]
user_notification_required: bool
message: str
confidence_score: float
class ConflictManager:
"""Zentrale Konfliktmanagement-Engine"""
def __init__(self):
self.priority_weights = {
'urgent': 4,
'high': 3,
'normal': 2,
'low': 1
}
self.time_slot_preferences = {
'night_shift': {'start': 18, 'end': 6, 'bonus': 25},
'day_shift': {'start': 8, 'end': 17, 'bonus': 15},
'transition': {'start': 6, 'end': 8, 'bonus': 5}
}
        self.conflict_resolution_timeout = 300  # 5 minutes
def detect_conflicts(self, job_data: Dict, db_session: Session) -> List[ConflictDetails]:
"""
Erkennt alle möglichen Konflikte für einen geplanten Job
Args:
job_data: Job-Informationen (printer_id, start_time, end_time, priority)
db_session: Datenbankverbindung
Returns:
Liste aller erkannten Konflikte
"""
        conflicts = []
        # 1. Check for time-overlap conflicts
        time_conflicts = self._detect_time_conflicts(job_data, db_session)
        conflicts.extend(time_conflicts)
        # 2. Check printer-availability conflicts
        printer_conflicts = self._detect_printer_conflicts(job_data, db_session)
        conflicts.extend(printer_conflicts)
        # 3. Check resource conflicts
        resource_conflicts = self._detect_resource_conflicts(job_data, db_session)
        conflicts.extend(resource_conflicts)
        # 4. Check priority conflicts
        priority_conflicts = self._detect_priority_conflicts(job_data, db_session)
        conflicts.extend(priority_conflicts)
        logger.info(f"🔍 Conflict analysis finished: {len(conflicts)} conflicts detected")
        return conflicts
def _detect_time_conflicts(self, job_data: Dict, db_session: Session) -> List[ConflictDetails]:
"""Erkennt Zeitüberschneidungs-Konflikte"""
conflicts = []
printer_id = job_data.get('printer_id')
start_time = job_data.get('start_time')
end_time = job_data.get('end_time')
if not all([printer_id, start_time, end_time]):
return conflicts
# Konflikthafte Jobs finden
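# Überschneidung liegt vor, wenn einer von drei Fällen zutrifft:
# (a) der bestehende Job startet innerhalb des neuen Zeitfensters,
# (b) der bestehende Job endet innerhalb des neuen Zeitfensters,
# (c) der bestehende Job umschließt das neue Zeitfenster vollständig.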
conflicting_jobs = db_session.query(Job).filter(
Job.printer_id == printer_id,
Job.status.in_(["scheduled", "running"]),
or_(
and_(Job.start_at >= start_time, Job.start_at < end_time),
and_(Job.end_at > start_time, Job.end_at <= end_time),
and_(Job.start_at <= start_time, Job.end_at >= end_time)
)
).all()
for conflicting_job in conflicting_jobs:
# Konflikt-Schweregrad bestimmen
overlap_duration = self._calculate_overlap_duration(
start_time, end_time,
conflicting_job.start_at, conflicting_job.end_at
)
if overlap_duration.total_seconds() > 3600: # > 1 Stunde
severity = ConflictSeverity.CRITICAL
elif overlap_duration.total_seconds() > 1800: # > 30 Minuten
severity = ConflictSeverity.HIGH
else:
severity = ConflictSeverity.MEDIUM
# Lösungsvorschläge generieren
suggestions = self._generate_time_conflict_solutions(
job_data, conflicting_job, db_session
)
conflict = ConflictDetails(
conflict_type=ConflictType.TIME_OVERLAP,
severity=severity,
affected_job_id=job_data.get('job_id', 0),
conflicting_job_ids=[conflicting_job.id],
affected_printer_id=printer_id,
conflict_start=max(start_time, conflicting_job.start_at),
conflict_end=min(end_time, conflicting_job.end_at),
description=f"Zeitüberschneidung mit Job '{conflicting_job.name}' "
f"({overlap_duration.total_seconds()/60:.0f} Minuten)",
suggested_solutions=suggestions,
estimated_impact=f"Verzögerung von {overlap_duration.total_seconds()/60:.0f} Minuten",
auto_resolvable=len(suggestions) > 0
)
conflicts.append(conflict)
return conflicts
def _detect_printer_conflicts(self, job_data: Dict, db_session: Session) -> List[ConflictDetails]:
"""Erkennt Drucker-Verfügbarkeits-Konflikte"""
conflicts = []
printer_id = job_data.get('printer_id')
if not printer_id:
return conflicts
printer = db_session.query(Printer).filter_by(id=printer_id).first()
if not printer:
conflict = ConflictDetails(
conflict_type=ConflictType.PRINTER_OFFLINE,
severity=ConflictSeverity.CRITICAL,
affected_job_id=job_data.get('job_id', 0),
conflicting_job_ids=[],
affected_printer_id=printer_id,
conflict_start=job_data.get('start_time'),
conflict_end=job_data.get('end_time'),
description=f"Drucker ID {printer_id} existiert nicht",
suggested_solutions=[],
estimated_impact="Job kann nicht ausgeführt werden",
auto_resolvable=False
)
conflicts.append(conflict)
return conflicts
# Drucker-Status prüfen
if not printer.active:
suggestions = self._generate_printer_alternative_solutions(job_data, db_session)
conflict = ConflictDetails(
conflict_type=ConflictType.PRINTER_OFFLINE,
severity=ConflictSeverity.HIGH,
affected_job_id=job_data.get('job_id', 0),
conflicting_job_ids=[],
affected_printer_id=printer_id,
conflict_start=job_data.get('start_time'),
conflict_end=job_data.get('end_time'),
description=f"Drucker '{printer.name}' ist offline oder nicht aktiv",
suggested_solutions=suggestions,
estimated_impact="Automatische Neuzuweisung erforderlich",
auto_resolvable=len(suggestions) > 0
)
conflicts.append(conflict)
return conflicts
def _detect_resource_conflicts(self, job_data: Dict, db_session: Session) -> List[ConflictDetails]:
"""Erkennt Ressourcen-Verfügbarkeits-Konflikte"""
conflicts = []
# TODO: Implementierung für Material-, Personal- und andere Ressourcenkonflikte
# Aktuell Platzhalter für zukünftige Erweiterungen
return conflicts
def _detect_priority_conflicts(self, job_data: Dict, db_session: Session) -> List[ConflictDetails]:
"""Erkennt Prioritäts-basierte Konflikte"""
conflicts = []
job_priority = job_data.get('priority', 'normal')
if job_priority not in ['urgent', 'high']:
return conflicts # Nur hohe Prioritäten können andere verdrängen
printer_id = job_data.get('printer_id')
start_time = job_data.get('start_time')
end_time = job_data.get('end_time')
if not all([printer_id, start_time, end_time]):
return conflicts
# Niedriger priorisierte Jobs im gleichen Zeitraum finden
lower_priority_jobs = db_session.query(Job).filter(
Job.printer_id == printer_id,
Job.status.in_(["scheduled"]),
or_(
and_(Job.start_at >= start_time, Job.start_at < end_time),
and_(Job.end_at > start_time, Job.end_at <= end_time),
and_(Job.start_at <= start_time, Job.end_at >= end_time)
)
).all()
for existing_job in lower_priority_jobs:
existing_priority = getattr(existing_job, 'priority', 'normal')
existing_weight = self.priority_weights.get(existing_priority, 2)
new_weight = self.priority_weights.get(job_priority, 2)
if new_weight > existing_weight:
suggestions = self._generate_priority_conflict_solutions(
job_data, existing_job, db_session
)
conflict = ConflictDetails(
conflict_type=ConflictType.PRIORITY_CONFLICT,
severity=ConflictSeverity.MEDIUM,
affected_job_id=job_data.get('job_id', 0),
conflicting_job_ids=[existing_job.id],
affected_printer_id=printer_id,
conflict_start=start_time,
conflict_end=end_time,
description=f"Höherpriorer Job verdrängt '{existing_job.name}' "
f"({job_priority} > {existing_priority})",
suggested_solutions=suggestions,
estimated_impact="Umplanung eines bestehenden Jobs erforderlich",
auto_resolvable=True
)
conflicts.append(conflict)
return conflicts
def resolve_conflicts(self, conflicts: List[ConflictDetails],
job_data: Dict, db_session: Session) -> List[ConflictResolution]:
"""
Löst alle erkannten Konflikte automatisch oder semi-automatisch
Args:
conflicts: Liste der zu lösenden Konflikte
job_data: Job-Informationen
db_session: Datenbankverbindung
Returns:
Liste der Konfliktlösungen
"""
resolutions = []
# Konflikte nach Schweregrad sortieren (kritische zuerst)
sorted_conflicts = sorted(conflicts,
key=lambda c: list(ConflictSeverity).index(c.severity))
for conflict in sorted_conflicts:
if conflict.auto_resolvable and conflict.suggested_solutions:
resolution = self._auto_resolve_conflict(conflict, job_data, db_session)
resolutions.append(resolution)
else:
# Manuelle Behandlung erforderlich
resolution = ConflictResolution(
success=False,
strategy_used=ResolutionStrategy.MANUAL_INTERVENTION,
new_printer_id=None,
new_start_time=None,
new_end_time=None,
affected_jobs=[conflict.affected_job_id],
user_notification_required=True,
message=f"Manueller Eingriff erforderlich: {conflict.description}",
confidence_score=0.0
)
resolutions.append(resolution)
logger.info(f"🔧 Konfliktlösung abgeschlossen: {len(resolutions)} Konflikte bearbeitet")
return resolutions
def _auto_resolve_conflict(self, conflict: ConflictDetails,
job_data: Dict, db_session: Session) -> ConflictResolution:
"""Automatische Konfliktlösung"""
# Beste Lösung aus Vorschlägen wählen
best_solution = max(conflict.suggested_solutions,
key=lambda s: s.get('confidence', 0))
strategy = ResolutionStrategy(best_solution['strategy'])
try:
if strategy == ResolutionStrategy.AUTO_REASSIGN:
return self._execute_auto_reassignment(conflict, best_solution, job_data, db_session)
elif strategy == ResolutionStrategy.TIME_SHIFT:
return self._execute_time_shift(conflict, best_solution, job_data, db_session)
elif strategy == ResolutionStrategy.PRIORITY_PREEMPTION:
return self._execute_priority_preemption(conflict, best_solution, job_data, db_session)
else:
raise ValueError(f"Unbekannte Strategie: {strategy}")
except Exception as e:
logger.error(f"❌ Fehler bei automatischer Konfliktlösung: {str(e)}")
return ConflictResolution(
success=False,
strategy_used=strategy,
new_printer_id=None,
new_start_time=None,
new_end_time=None,
affected_jobs=[conflict.affected_job_id],
user_notification_required=True,
message=f"Automatische Lösung fehlgeschlagen: {str(e)}",
confidence_score=0.0
)
def _execute_auto_reassignment(self, conflict: ConflictDetails, solution: Dict,
job_data: Dict, db_session: Session) -> ConflictResolution:
"""Führt automatische Druckerzuweisung durch"""
new_printer_id = solution['new_printer_id']
printer = db_session.query(Printer).filter_by(id=new_printer_id).first()
if not printer or not printer.active:
return ConflictResolution(
success=False,
strategy_used=ResolutionStrategy.AUTO_REASSIGN,
new_printer_id=None,
new_start_time=None,
new_end_time=None,
affected_jobs=[conflict.affected_job_id],
user_notification_required=True,
message="Alternativer Drucker nicht mehr verfügbar",
confidence_score=0.0
)
return ConflictResolution(
success=True,
strategy_used=ResolutionStrategy.AUTO_REASSIGN,
new_printer_id=new_printer_id,
new_start_time=job_data.get('start_time'),
new_end_time=job_data.get('end_time'),
affected_jobs=[conflict.affected_job_id],
user_notification_required=True,
message=f"Job automatisch zu Drucker '{printer.name}' verschoben",
confidence_score=solution.get('confidence', 0.8)
)
def _execute_time_shift(self, conflict: ConflictDetails, solution: Dict,
job_data: Dict, db_session: Session) -> ConflictResolution:
"""Führt Zeitverschiebung durch"""
new_start = solution['new_start_time']
new_end = solution['new_end_time']
return ConflictResolution(
success=True,
strategy_used=ResolutionStrategy.TIME_SHIFT,
new_printer_id=job_data.get('printer_id'),
new_start_time=new_start,
new_end_time=new_end,
affected_jobs=[conflict.affected_job_id],
user_notification_required=True,
message=f"Job zeitlich verschoben: {new_start.strftime('%H:%M')} - {new_end.strftime('%H:%M')}",
confidence_score=solution.get('confidence', 0.7)
)
def _execute_priority_preemption(self, conflict: ConflictDetails, solution: Dict,
job_data: Dict, db_session: Session) -> ConflictResolution:
"""Führt Prioritätsverdrängung durch"""
# Bestehenden Job umplanen
conflicting_job_id = conflict.conflicting_job_ids[0]
affected_jobs = [conflict.affected_job_id, conflicting_job_id]
return ConflictResolution(
success=True,
strategy_used=ResolutionStrategy.PRIORITY_PREEMPTION,
new_printer_id=job_data.get('printer_id'),
new_start_time=job_data.get('start_time'),
new_end_time=job_data.get('end_time'),
affected_jobs=affected_jobs,
user_notification_required=True,
message=f"Höherpriorer Job übernimmt Zeitslot, bestehender Job wird umgeplant",
confidence_score=solution.get('confidence', 0.9)
)
# Hilfsmethoden für Lösungsvorschläge
def _generate_time_conflict_solutions(self, job_data: Dict,
conflicting_job: Job, db_session: Session) -> List[Dict]:
"""Generiert Lösungsvorschläge für Zeitkonflikte"""
solutions = []
# 1. Alternative Drucker vorschlagen
alternative_printers = self._find_alternative_printers(job_data, db_session)
for printer_id, confidence in alternative_printers:
printer = db_session.query(Printer).filter_by(id=printer_id).first()
solutions.append({
'strategy': ResolutionStrategy.AUTO_REASSIGN.value,
'new_printer_id': printer_id,
'printer_name': printer.name if printer else f"Drucker {printer_id}",
'confidence': confidence,
'description': f"Automatische Umzuweisung zu {printer.name if printer else f'Drucker {printer_id}'}"
})
# 2. Zeitverschiebung vorschlagen
time_alternatives = self._find_alternative_time_slots(job_data, db_session)
for start_time, end_time, confidence in time_alternatives:
solutions.append({
'strategy': ResolutionStrategy.TIME_SHIFT.value,
'new_start_time': start_time,
'new_end_time': end_time,
'confidence': confidence,
'description': f"Zeitverschiebung: {start_time.strftime('%H:%M')} - {end_time.strftime('%H:%M')}"
})
return solutions
def _generate_printer_alternative_solutions(self, job_data: Dict, db_session: Session) -> List[Dict]:
"""Generiert Lösungsvorschläge für Drucker-Ausfälle"""
solutions = []
alternative_printers = self._find_alternative_printers(job_data, db_session)
for printer_id, confidence in alternative_printers:
printer = db_session.query(Printer).filter_by(id=printer_id).first()
solutions.append({
'strategy': ResolutionStrategy.AUTO_REASSIGN.value,
'new_printer_id': printer_id,
'printer_name': printer.name if printer else f"Drucker {printer_id}",
'confidence': confidence,
'description': f"Automatische Neuzuweisung zu {printer.name if printer else f'Drucker {printer_id}'}"
})
return solutions
def _generate_priority_conflict_solutions(self, job_data: Dict,
existing_job: Job, db_session: Session) -> List[Dict]:
"""Generiert Lösungsvorschläge für Prioritätskonflikte"""
solutions = []
# Bestehenden Job umplanen
alternative_slots = self._find_alternative_time_slots({
'printer_id': existing_job.printer_id,
'start_time': existing_job.start_at,
'end_time': existing_job.end_at,
'duration_minutes': existing_job.duration_minutes
}, db_session)
if alternative_slots:
start_time, end_time, confidence = alternative_slots[0]
solutions.append({
'strategy': ResolutionStrategy.PRIORITY_PREEMPTION.value,
'conflicting_job_new_start': start_time,
'conflicting_job_new_end': end_time,
'confidence': confidence,
'description': f"Bestehenden Job zu {start_time.strftime('%H:%M')} verschieben"
})
return solutions
def _find_alternative_printers(self, job_data: Dict, db_session: Session) -> List[Tuple[int, float]]:
"""Findet alternative Drucker mit Confidence-Score"""
from blueprints.calendar import get_smart_printer_assignment
alternatives = []
start_time = job_data.get('start_time')
end_time = job_data.get('end_time')
priority = job_data.get('priority', 'normal')
# Smart Assignment nutzen
recommended_printer_id = get_smart_printer_assignment(
start_date=start_time,
end_date=end_time,
priority=priority,
db_session=db_session
)
if recommended_printer_id:
alternatives.append((recommended_printer_id, 0.9))
# Weitere verfügbare Drucker mit niedrigerer Confidence
available_printers = db_session.query(Printer).filter(
Printer.active == True,
Printer.id != job_data.get('printer_id'),
Printer.id != recommended_printer_id
).all()
for printer in available_printers[:3]: # Top 3 Alternativen
# Einfache Verfügbarkeitsprüfung
conflicts = db_session.query(Job).filter(
Job.printer_id == printer.id,
Job.status.in_(["scheduled", "running"]),
or_(
and_(Job.start_at >= start_time, Job.start_at < end_time),
and_(Job.end_at > start_time, Job.end_at <= end_time),
and_(Job.start_at <= start_time, Job.end_at >= end_time)
)
).count()
if conflicts == 0:
alternatives.append((printer.id, 0.6)) # Niedrigere Confidence
return alternatives
def _find_alternative_time_slots(self, job_data: Dict, db_session: Session) -> List[Tuple[datetime, datetime, float]]:
"""Findet alternative Zeitfenster"""
alternatives = []
printer_id = job_data.get('printer_id')
original_start = job_data.get('start_time')
duration_minutes = job_data.get('duration_minutes')
if not all([printer_id, original_start, duration_minutes]):
return alternatives
duration = timedelta(minutes=duration_minutes)
# Zeitfenster um ursprünglichen Termin herum testen
test_intervals = [
timedelta(hours=1), # 1 Stunde später
timedelta(hours=2), # 2 Stunden später
timedelta(hours=-1), # 1 Stunde früher
timedelta(hours=3), # 3 Stunden später
timedelta(hours=-2), # 2 Stunden früher
]
for interval in test_intervals:
new_start = original_start + interval
new_end = new_start + duration
# Verfügbarkeit prüfen
conflicts = db_session.query(Job).filter(
Job.printer_id == printer_id,
Job.status.in_(["scheduled", "running"]),
or_(
and_(Job.start_at >= new_start, Job.start_at < new_end),
and_(Job.end_at > new_start, Job.end_at <= new_end),
and_(Job.start_at <= new_start, Job.end_at >= new_end)
)
).count()
if conflicts == 0:
# Confidence basierend auf Zeitnähe zum Original
time_diff_hours = abs(interval.total_seconds() / 3600)
confidence = max(0.3, 1.0 - (time_diff_hours * 0.1))
alternatives.append((new_start, new_end, confidence))
if len(alternatives) >= 3: # Maximal 3 Alternativen
break
return alternatives
def _calculate_overlap_duration(self, start1: datetime, end1: datetime,
start2: datetime, end2: datetime) -> timedelta:
"""Berechnet Überschneidungsdauer zwischen zwei Zeiträumen"""
overlap_start = max(start1, start2)
overlap_end = min(end1, end2)
if overlap_start < overlap_end:
return overlap_end - overlap_start
else:
return timedelta(0)
# Globale Instanz für einfache Nutzung
conflict_manager = ConflictManager()
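# Verwendungsskizze (Annahmen: get_cached_session liefert einen Kontextmanager,
# die Beispielwerte job_id=42 usw. sind rein illustrativ):
#
#     job_data = {
#         "job_id": 42,
#         "printer_id": 3,
#         "start_time": datetime(2025, 6, 4, 10, 0),
#         "end_time": datetime(2025, 6, 4, 12, 0),
#         "priority": "high",
#         "duration_minutes": 120,
#     }
#     with get_cached_session() as session:
#         conflicts = conflict_manager.detect_conflicts(job_data, session)
#         resolutions = conflict_manager.resolve_conflicts(conflicts, job_data, session)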

95
utils/create_ssl_cert.py Normal file
View File

@@ -0,0 +1,95 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SSL-Zertifikat-Generator für die MYP-Plattform
Erstellt selbstsignierte SSL-Zertifikate für die lokale Entwicklung
"""
import os
import datetime
import sys
# Überprüfen, ob die notwendigen Pakete installiert sind
try:
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption
except ImportError:
print("Fehler: Paket 'cryptography' nicht gefunden.")
print("Bitte installieren Sie es mit: pip install cryptography")
sys.exit(1)
def create_self_signed_cert(cert_path, key_path, hostname="localhost"):
"""
Erstellt ein selbstsigniertes SSL-Zertifikat mit dem angegebenen Hostnamen.
Args:
cert_path: Pfad zur Zertifikatsdatei
key_path: Pfad zur privaten Schlüsseldatei
hostname: Hostname für das Zertifikat (Standard: localhost)
"""
# Verzeichnis erstellen, falls es nicht existiert
cert_dir = os.path.dirname(cert_path)
if cert_dir and not os.path.exists(cert_dir):
os.makedirs(cert_dir, exist_ok=True)
# Privaten Schlüssel generieren
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
)
# Schlüsseldatei schreiben
with open(key_path, "wb") as key_file:
key_file.write(private_key.private_bytes(
encoding=Encoding.PEM,
format=PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=NoEncryption()
))
# Name für das Zertifikat erstellen
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, hostname),
])
# Zertifikat erstellen
cert = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
private_key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow()
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=365)
).add_extension(
x509.SubjectAlternativeName([x509.DNSName(hostname)]),
critical=False,
).sign(private_key, hashes.SHA256())
# Zertifikatsdatei schreiben
with open(cert_path, "wb") as cert_file:
cert_file.write(cert.public_bytes(Encoding.PEM))
print(f"Selbstsigniertes SSL-Zertifikat für '{hostname}' erstellt:")
print(f"Zertifikat: {cert_path}")
print(f"Schlüssel: {key_path}")
print(f"Gültig für 1 Jahr.")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Erstellt selbstsignierte SSL-Zertifikate für die lokale Entwicklung")
parser.add_argument("-c", "--cert", default="/home/user/Projektarbeit-MYP/backend/app/certs/myp.crt", help="Pfad zur Zertifikatsdatei")
parser.add_argument("-k", "--key", default="/home/user/Projektarbeit-MYP/backend/app/certs/myp.key", help="Pfad zur Schlüsseldatei")
parser.add_argument("-n", "--hostname", default="localhost", help="Hostname für das Zertifikat")
args = parser.parse_args()
create_self_signed_cert(args.cert, args.key, args.hostname)
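# Hinweis (Skizze; Annahme: eine Flask-App "app" im selben Projekt):
# Die erzeugten Dateien lassen sich direkt als ssl_context verwenden:
#
#     app.run(host="0.0.0.0", port=8443, ssl_context=(args.cert, args.key))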

View File

@@ -0,0 +1,106 @@
#!/usr/bin/env python3
"""
Script zum Erstellen von Test-Druckern für die MYP Plattform
"""
import sys
import os
sys.path.append('.')
from models import get_db_session, Printer
from datetime import datetime
def create_test_printers():
"""Erstellt Test-Drucker in der Datenbank."""
# Verbindung zur Datenbank
db_session = get_db_session()
# Test-Drucker Daten
test_printers = [
{
'name': 'Mercedes-Benz FDM Pro #01',
'model': 'Ultimaker S5 Pro',
'location': 'Werkhalle Sindelfingen',
'plug_ip': '192.168.10.101',
'status': 'available',
'active': True
},
{
'name': 'Mercedes-Benz FDM #02',
'model': 'Prusa MK3S+',
'location': 'Entwicklungszentrum Stuttgart',
'plug_ip': '192.168.10.102',
'status': 'printing',
'active': True
},
{
'name': 'Mercedes-Benz SLA #01',
'model': 'Formlabs Form 3+',
'location': 'Prototypenlabor',
'plug_ip': '192.168.10.103',
'status': 'available',
'active': True
},
{
'name': 'Mercedes-Benz Industrial #01',
'model': 'Stratasys F370',
'location': 'Industriehalle Bremen',
'plug_ip': '192.168.10.104',
'status': 'maintenance',
'active': False
},
{
'name': 'Mercedes-Benz Rapid #01',
'model': 'Bambu Lab X1 Carbon',
'location': 'Designabteilung',
'plug_ip': '192.168.10.105',
'status': 'offline',
'active': True
},
{
'name': 'Mercedes-Benz SLS #01',
'model': 'HP Jet Fusion 5200',
'location': 'Produktionszentrum Berlin',
'plug_ip': '192.168.10.106',
'status': 'available',
'active': True
}
]
try:
created_count = 0
for printer_data in test_printers:
# Prüfen ob Drucker bereits existiert
existing = db_session.query(Printer).filter_by(name=printer_data['name']).first()
if not existing:
printer = Printer(
name=printer_data['name'],
model=printer_data['model'],
location=printer_data['location'],
plug_ip=printer_data['plug_ip'],
status=printer_data['status'],
active=printer_data['active'],
created_at=datetime.now()
)
db_session.add(printer)
created_count += 1
print(f"✅ Drucker '{printer_data['name']}' erstellt")
else:
print(f" Drucker '{printer_data['name']}' existiert bereits")
db_session.commit()
total_count = db_session.query(Printer).count()
print(f"\n🎉 {created_count} neue Test-Drucker erstellt!")
print(f"📊 Insgesamt {total_count} Drucker in der Datenbank.")
except Exception as e:
print(f"❌ Fehler beim Erstellen der Test-Drucker: {str(e)}")
db_session.rollback()
finally:
db_session.close()
if __name__ == "__main__":
print("🚀 Erstelle Test-Drucker für MYP Plattform...")
create_test_printers()
print("✅ Fertig!")

336
utils/database_cleanup.py Normal file
View File

@@ -0,0 +1,336 @@
#!/usr/bin/env python3
"""
Robuste Datenbank-Cleanup-Utilities
Verhindert "database is locked" Fehler durch intelligente Retry-Logik und Verbindungsmanagement
"""
import os
import time
import sqlite3
import threading
from datetime import datetime
from typing import Optional, Tuple, List
from contextlib import contextmanager
from sqlalchemy import text, create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.pool import StaticPool
from config.settings import DATABASE_PATH
from utils.logging_config import get_logger
logger = get_logger("database_cleanup")
class DatabaseCleanupManager:
"""
Verwaltet sichere Datenbank-Cleanup-Operationen mit Retry-Logik
Verhindert "database is locked" Fehler durch intelligente Session-Verwaltung
"""
def __init__(self):
self._cleanup_lock = threading.Lock()
self._cleanup_completed = False
self._active_engines = []
def register_engine(self, engine: Engine):
"""Registriert eine Engine für das Cleanup"""
with self._cleanup_lock:
if engine not in self._active_engines:
self._active_engines.append(engine)
def force_close_all_connections(self, max_wait_seconds: int = 10) -> bool:
"""
Schließt alle aktiven Datenbankverbindungen forciert
Args:
max_wait_seconds: Maximale Wartezeit für graceful shutdown
Returns:
bool: True wenn erfolgreich
"""
try:
logger.info("🔄 Schließe alle aktiven Datenbankverbindungen...")
# Alle registrierten Engines disposen
with self._cleanup_lock:
for engine in self._active_engines:
try:
logger.debug(f"Disposing Engine: {engine}")
engine.dispose()
except Exception as e:
logger.warning(f"Fehler beim Engine Dispose: {e}")
self._active_engines.clear()
# Kurz warten damit alle Verbindungen sich schließen können
time.sleep(1)
# Prüfe, ob exklusiver Zugriff möglich ist (keine verbleibenden Locks)
start_time = time.time()
while time.time() - start_time < max_wait_seconds:
try:
# Teste kurze Verbindung
test_conn = sqlite3.connect(DATABASE_PATH, timeout=2)
test_conn.execute("BEGIN IMMEDIATE") # Teste exklusiven Zugriff
test_conn.rollback()
test_conn.close()
logger.info("✅ Alle Datenbankverbindungen erfolgreich geschlossen")
return True
except sqlite3.OperationalError as e:
if "database is locked" in str(e):
logger.debug(f"Warte auf Verbindungsschließung... ({time.time() - start_time:.1f}s)")
time.sleep(0.5)
continue
else:
raise
logger.warning(f"⚠️ Timeout beim Warten auf Verbindungsschließung ({max_wait_seconds}s)")
return False
except Exception as e:
logger.error(f"❌ Fehler beim Schließen der Verbindungen: {e}")
return False
def safe_wal_checkpoint(self, retry_attempts: int = 5) -> Tuple[bool, Optional[str]]:
"""
Führt sicheren WAL-Checkpoint mit Retry-Logik durch
Args:
retry_attempts: Anzahl der Wiederholungsversuche
Returns:
Tuple[bool, Optional[str]]: (Erfolg, Fehlermeldung)
"""
for attempt in range(retry_attempts):
try:
# Kurze, direkte SQLite-Verbindung für Checkpoint
conn = sqlite3.connect(DATABASE_PATH, timeout=10)
# WAL-Checkpoint mit verschiedenen Strategien
strategies = ["TRUNCATE", "RESTART", "FULL", "PASSIVE"]
for strategy in strategies:
try:
cursor = conn.execute(f"PRAGMA wal_checkpoint({strategy})")
result = cursor.fetchone()
if result and result[0] == 0: # Erfolg (0 = success)
pages_transferred = result[1] if len(result) > 1 else 0
pages_reset = result[2] if len(result) > 2 else 0
if pages_transferred > 0:
logger.info(f"✅ WAL-Checkpoint ({strategy}): {pages_transferred} Seiten übertragen, {pages_reset} Seiten zurückgesetzt")
else:
logger.debug(f"WAL-Checkpoint ({strategy}): Keine Seiten zu übertragen")
conn.close()
return True, None
else:
logger.warning(f"WAL-Checkpoint ({strategy}) unvollständig: {result}")
except Exception as strategy_error:
logger.warning(f"WAL-Checkpoint ({strategy}) fehlgeschlagen: {strategy_error}")
continue
conn.close()
# Wenn alle Strategien fehlschlagen, versuche VACUUM als Fallback
if attempt == 0: # Nur beim ersten Versuch
logger.info("Versuche VACUUM als Fallback...")
conn = sqlite3.connect(DATABASE_PATH, timeout=10)
conn.execute("VACUUM")
conn.close()
logger.info("✅ VACUUM erfolgreich")
return True, None
except sqlite3.OperationalError as e:
if "database is locked" in str(e):
wait_time = (2 ** attempt) * 0.5 # Exponential backoff
logger.warning(f"Database locked - Versuch {attempt + 1}/{retry_attempts}, warte {wait_time}s...")
time.sleep(wait_time)
continue
else:
return False, f"SQLite-Fehler: {e}"
except Exception as e:
return False, f"Unerwarteter Fehler: {e}"
return False, f"Database nach {retry_attempts} Versuchen immer noch gesperrt"
def safe_journal_mode_switch(self, target_mode: str = "DELETE", retry_attempts: int = 3) -> Tuple[bool, Optional[str]]:
"""
Führt sicheren Journal-Mode-Switch mit Retry-Logik durch
Args:
target_mode: Ziel-Journal-Mode (DELETE, WAL, etc.)
retry_attempts: Anzahl der Wiederholungsversuche
Returns:
Tuple[bool, Optional[str]]: (Erfolg, Fehlermeldung)
"""
for attempt in range(retry_attempts):
try:
conn = sqlite3.connect(DATABASE_PATH, timeout=15)
# Prüfe aktuellen Journal-Mode
current_mode = conn.execute("PRAGMA journal_mode").fetchone()[0]
logger.debug(f"Aktueller Journal-Mode: {current_mode}")
if current_mode.upper() == target_mode.upper():
logger.info(f"Journal-Mode bereits auf {target_mode}")
conn.close()
return True, None
# Mode-Switch durchführen
result = conn.execute(f"PRAGMA journal_mode={target_mode}").fetchone()
new_mode = result[0] if result else None
conn.close()
if new_mode and new_mode.upper() == target_mode.upper():
logger.info(f"✅ Journal-Mode erfolgreich auf {new_mode} umgeschaltet")
return True, None
else:
logger.warning(f"Journal-Mode-Switch unvollständig: {new_mode} != {target_mode}")
except sqlite3.OperationalError as e:
if "database is locked" in str(e):
wait_time = (2 ** attempt) * 1.0 # Exponential backoff
logger.warning(f"Database locked bei Mode-Switch - Versuch {attempt + 1}/{retry_attempts}, warte {wait_time}s...")
time.sleep(wait_time)
continue
else:
return False, f"SQLite-Fehler: {e}"
except Exception as e:
return False, f"Unerwarteter Fehler: {e}"
return False, f"Journal-Mode-Switch nach {retry_attempts} Versuchen fehlgeschlagen"
def comprehensive_cleanup(self, force_mode_switch: bool = True) -> dict:
"""
Führt umfassendes, sicheres Datenbank-Cleanup durch
Args:
force_mode_switch: Ob Journal-Mode forciert umgeschaltet werden soll
Returns:
dict: Cleanup-Ergebnis mit Details
"""
with self._cleanup_lock:
if self._cleanup_completed:
logger.info("Datenbank-Cleanup bereits durchgeführt")
return {"success": True, "message": "Bereits durchgeführt", "operations": []}
logger.info("🧹 Starte umfassendes Datenbank-Cleanup...")
operations = []
errors = []
try:
# Schritt 1: Alle Verbindungen schließen
logger.info("📝 Schritt 1: Schließe alle Datenbankverbindungen...")
connection_success = self.force_close_all_connections(max_wait_seconds=15)
if connection_success:
operations.append("Alle Verbindungen geschlossen")
else:
errors.append("Timeout beim Verbindungsschließen")
# Schritt 2: WAL-Checkpoint
logger.info("📝 Schritt 2: Führe WAL-Checkpoint durch...")
checkpoint_success, checkpoint_error = self.safe_wal_checkpoint(retry_attempts=5)
if checkpoint_success:
operations.append("WAL-Checkpoint erfolgreich")
else:
errors.append(f"WAL-Checkpoint fehlgeschlagen: {checkpoint_error}")
# Schritt 3: Journal-Mode-Switch (nur wenn gewünscht und Checkpoint erfolgreich)
if force_mode_switch and checkpoint_success:
logger.info("📝 Schritt 3: Schalte Journal-Mode um...")
mode_success, mode_error = self.safe_journal_mode_switch("DELETE", retry_attempts=3)
if mode_success:
operations.append("Journal-Mode auf DELETE umgeschaltet")
else:
errors.append(f"Journal-Mode-Switch fehlgeschlagen: {mode_error}")
logger.warning(f"Journal-Mode-Switch fehlgeschlagen, aber WAL-Checkpoint war erfolgreich")
# Schritt 4: Finale Optimierungen (nur bei Erfolg)
if checkpoint_success:
logger.info("📝 Schritt 4: Finale Optimierungen...")
try:
conn = sqlite3.connect(DATABASE_PATH, timeout=5)
conn.execute("PRAGMA optimize")
conn.close()
operations.append("Datenbank optimiert")
except Exception as opt_error:
logger.warning(f"Optimierung fehlgeschlagen: {opt_error}")
# Schritt 5: Prüfe Ergebnis
wal_path = DATABASE_PATH + "-wal"
shm_path = DATABASE_PATH + "-shm"
wal_exists = os.path.exists(wal_path)
shm_exists = os.path.exists(shm_path)
if not wal_exists and not shm_exists:
operations.append("WAL/SHM-Dateien erfolgreich entfernt")
logger.info("✅ WAL- und SHM-Dateien erfolgreich entfernt")
elif force_mode_switch:
errors.append(f"WAL/SHM-Dateien bestehen noch (WAL: {wal_exists}, SHM: {shm_exists})")
else:
logger.info("WAL/SHM-Dateien bleiben bestehen (kein Mode-Switch angefordert)")
self._cleanup_completed = True
# Erfolgsstatus bestimmen
success = len(operations) > 0 and (not force_mode_switch or not wal_exists)
result = {
"success": success,
"operations": operations,
"errors": errors,
"timestamp": datetime.now().isoformat(),
"wal_files_removed": not wal_exists and not shm_exists
}
if success:
logger.info(f"✅ Datenbank-Cleanup erfolgreich: {', '.join(operations)}")
else:
logger.error(f"❌ Datenbank-Cleanup mit Fehlern: {', '.join(errors)}")
return result
except Exception as e:
error_msg = f"Kritischer Fehler beim Datenbank-Cleanup: {e}"
logger.error(f"{error_msg}")
return {
"success": False,
"operations": operations,
"errors": errors + [error_msg],
"timestamp": datetime.now().isoformat()
}
# Globale Instanz
cleanup_manager = DatabaseCleanupManager()
def get_cleanup_manager() -> DatabaseCleanupManager:
"""Gibt die globale Cleanup-Manager-Instanz zurück"""
return cleanup_manager
def safe_database_cleanup(force_mode_switch: bool = True) -> dict:
"""
Convenience-Funktion für sicheres Datenbank-Cleanup
Args:
force_mode_switch: Ob Journal-Mode forciert umgeschaltet werden soll
Returns:
dict: Cleanup-Ergebnis
"""
return cleanup_manager.comprehensive_cleanup(force_mode_switch=force_mode_switch)
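# Verwendungsskizze (Annahmen: Aufruf beim Anwendungs-Shutdown; die Engine
# stammt z.B. aus models.create_optimized_engine()):
#
#     manager = get_cleanup_manager()
#     manager.register_engine(engine)
#     ...
#     result = safe_database_cleanup(force_mode_switch=True)
#     if not result["success"]:
#         logger.warning(f"Cleanup-Probleme: {result['errors']}")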

252
utils/database_migration.py Normal file
View File

@@ -0,0 +1,252 @@
#!/usr/bin/env python3
"""
Database Migration Utility für MYP Platform
Überprüft und aktualisiert das Datenbankschema automatisch.
"""
import sqlite3
import logging
from typing import List, Dict, Any, Optional
from datetime import datetime
from config.settings import DATABASE_PATH
from models import init_db
logger = logging.getLogger(__name__)
def get_table_columns(table_name: str) -> List[Dict[str, Any]]:
"""
Ruft die Spalten einer Tabelle ab.
Args:
table_name: Name der Tabelle
Returns:
List[Dict]: Liste der Spalten mit ihren Eigenschaften
"""
try:
conn = sqlite3.connect(DATABASE_PATH)
cursor = conn.cursor()
cursor.execute(f'PRAGMA table_info({table_name})')
columns = cursor.fetchall()
conn.close()
return [
{
'name': col[1],
'type': col[2],
'not_null': bool(col[3]),
'default': col[4],
'primary_key': bool(col[5])
}
for col in columns
]
except Exception as e:
logger.error(f"Fehler beim Abrufen der Spalten für Tabelle {table_name}: {e}")
return []
def table_exists(table_name: str) -> bool:
"""
Prüft, ob eine Tabelle existiert.
Args:
table_name: Name der Tabelle
Returns:
bool: True wenn die Tabelle existiert
"""
try:
conn = sqlite3.connect(DATABASE_PATH)
cursor = conn.cursor()
cursor.execute("""
SELECT name FROM sqlite_master
WHERE type='table' AND name=?
""", (table_name,))
result = cursor.fetchone()
conn.close()
return result is not None
except Exception as e:
logger.error(f"Fehler beim Prüfen der Tabelle {table_name}: {e}")
return False
def column_exists(table_name: str, column_name: str) -> bool:
"""
Prüft, ob eine Spalte in einer Tabelle existiert.
Args:
table_name: Name der Tabelle
column_name: Name der Spalte
Returns:
bool: True wenn die Spalte existiert
"""
columns = get_table_columns(table_name)
return any(col['name'] == column_name for col in columns)
def add_column_if_missing(table_name: str, column_name: str, column_type: str, default_value: Optional[str] = None) -> bool:
"""
Fügt eine Spalte hinzu, falls sie nicht existiert.
Args:
table_name: Name der Tabelle
column_name: Name der Spalte
column_type: Datentyp der Spalte
default_value: Optional - Standardwert
Returns:
bool: True wenn erfolgreich
"""
if column_exists(table_name, column_name):
logger.info(f"Spalte {column_name} existiert bereits in Tabelle {table_name}")
return True
try:
conn = sqlite3.connect(DATABASE_PATH)
cursor = conn.cursor()
sql = f"ALTER TABLE {table_name} ADD COLUMN {column_name} {column_type}"
if default_value:
sql += f" DEFAULT {default_value}"
cursor.execute(sql)
conn.commit()
conn.close()
logger.info(f"Spalte {column_name} erfolgreich zu Tabelle {table_name} hinzugefügt")
return True
except Exception as e:
logger.error(f"Fehler beim Hinzufügen der Spalte {column_name} zu Tabelle {table_name}: {e}")
return False
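# Beispiel (Skizze; die Spalte 'priority' dient nur zur Illustration):
#
#     add_column_if_missing('jobs', 'priority', 'VARCHAR(20)', "'normal'")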
def migrate_database() -> bool:
"""
Führt alle notwendigen Datenbankmigrationen durch.
Returns:
bool: True wenn erfolgreich
"""
logger.info("Starte Datenbankmigration...")
try:
# Prüfe, ob grundlegende Tabellen existieren
required_tables = ['users', 'printers', 'jobs', 'stats']
missing_tables = [table for table in required_tables if not table_exists(table)]
if missing_tables:
logger.warning(f"Fehlende Tabellen gefunden: {missing_tables}")
logger.info("Erstelle alle Tabellen neu...")
init_db()
logger.info("Tabellen erfolgreich erstellt")
return True
# Prüfe spezifische Spalten, die möglicherweise fehlen
migrations = [
# Printers Tabelle
('printers', 'last_checked', 'DATETIME', 'NULL'),
('printers', 'active', 'BOOLEAN', '1'),
('printers', 'created_at', 'DATETIME', 'CURRENT_TIMESTAMP'),
# Jobs Tabelle
('jobs', 'duration_minutes', 'INTEGER', '60'),
('jobs', 'actual_end_time', 'DATETIME', 'NULL'),
('jobs', 'owner_id', 'INTEGER', 'NULL'),
('jobs', 'file_path', 'VARCHAR(500)', 'NULL'),
# Users Tabelle
('users', 'username', 'VARCHAR(100)', 'NULL'),
('users', 'active', 'BOOLEAN', '1'),
('users', 'created_at', 'DATETIME', 'CURRENT_TIMESTAMP'),
]
success = True
for table_name, column_name, column_type, default_value in migrations:
if not add_column_if_missing(table_name, column_name, column_type, default_value):
success = False
if success:
logger.info("Datenbankmigration erfolgreich abgeschlossen")
else:
logger.warning("Datenbankmigration mit Fehlern abgeschlossen")
return success
except Exception as e:
logger.error(f"Fehler bei der Datenbankmigration: {e}")
return False
def check_database_integrity() -> bool:
"""
Überprüft die Integrität der Datenbank.
Returns:
bool: True, wenn die Datenbank konsistent ist
"""
try:
conn = sqlite3.connect(DATABASE_PATH)
cursor = conn.cursor()
cursor.execute('PRAGMA integrity_check')
result = cursor.fetchone()
conn.close()
if result and result[0] == 'ok':
logger.info("Datenbankintegrität: OK")
return True
else:
logger.error(f"Datenbankintegrität: FEHLER - {result}")
return False
except Exception as e:
logger.error(f"Fehler bei der Integritätsprüfung: {e}")
return False
def backup_database(backup_path: Optional[str] = None) -> bool:
"""
Erstellt ein Backup der Datenbank.
Args:
backup_path: Optional - Pfad für das Backup
Returns:
bool: True wenn erfolgreich
"""
if not backup_path:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup_path = f"database/myp_backup_{timestamp}.db"
try:
import shutil
shutil.copy2(DATABASE_PATH, backup_path)
logger.info(f"Datenbank-Backup erstellt: {backup_path}")
return True
except Exception as e:
logger.error(f"Fehler beim Erstellen des Backups: {e}")
return False
if __name__ == "__main__":
# Logging konfigurieren
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
print("=== MYP Platform - Datenbankmigration ===")
# Backup erstellen
if backup_database():
print("✅ Backup erstellt")
else:
print("⚠️ Backup-Erstellung fehlgeschlagen")
# Integrität prüfen
if check_database_integrity():
print("✅ Datenbankintegrität OK")
else:
print("❌ Datenbankintegrität FEHLER")
# Migration durchführen
if migrate_database():
print("✅ Migration erfolgreich")
else:
print("❌ Migration fehlgeschlagen")
print("\nMigration abgeschlossen!")

View File

@@ -0,0 +1,290 @@
#!/usr/bin/env python3
"""
Optimiertes Datenbank-Schema-Migrationsskript
Mit WAL-Checkpoint und ordnungsgemäßer Ressourcenverwaltung
"""
import os
import sys
import sqlite3
import signal
import time
from datetime import datetime
import logging
from contextlib import contextmanager
# Pfad zur App hinzufügen - KORRIGIERT
app_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, app_dir)
# Alternative Datenbankpfad-Definition falls Import fehlschlägt
DATABASE_PATH = None
try:
from config.settings import DATABASE_PATH
except ImportError:
# Fallback: Datenbankpfad manuell setzen
DATABASE_PATH = os.path.join(app_dir, "database", "myp.db")
print(f"⚠️ Fallback: Verwende Datenbankpfad: {DATABASE_PATH}")
# Logging-Setup mit Fallback
try:
from utils.logging_config import get_logger
logger = get_logger("schema_migration")
except ImportError:
# Fallback: Standard-Logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("schema_migration")
# Globale Variable für sauberes Shutdown
_migration_running = False
_current_connection = None
def signal_handler(signum, frame):
"""Signal-Handler für ordnungsgemäßes Shutdown"""
global _migration_running, _current_connection
print(f"\n🛑 Signal {signum} empfangen - beende Migration sauber...")
_migration_running = False
if _current_connection:
try:
print("🔄 Führe WAL-Checkpoint durch...")
_current_connection.execute("PRAGMA wal_checkpoint(TRUNCATE)")
_current_connection.commit()
_current_connection.close()
print("✅ Datenbank ordnungsgemäß geschlossen")
except Exception as e:
print(f"⚠️ Fehler beim Schließen: {e}")
print("🏁 Migration beendet")
sys.exit(0)
# Signal-Handler registrieren
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
@contextmanager
def get_database_connection(timeout=30):
"""Context Manager für sichere Datenbankverbindung mit WAL-Optimierung"""
global _current_connection
conn = None
try:
# Verbindung mit optimierten Einstellungen
conn = sqlite3.connect(
DATABASE_PATH,
timeout=timeout,
isolation_level=None # Autocommit-Modus: Transaktionen werden manuell gesteuert
)
_current_connection = conn
# WAL-Modus und Optimierungen
conn.execute("PRAGMA journal_mode=WAL")
conn.execute("PRAGMA synchronous=NORMAL") # Bessere Performance mit WAL
conn.execute("PRAGMA foreign_keys=ON")
conn.execute("PRAGMA busy_timeout=30000") # 30 Sekunden Timeout
conn.execute("PRAGMA wal_autocheckpoint=1000") # Automatischer Checkpoint alle 1000 Seiten
logger.info("Datenbankverbindung mit WAL-Optimierungen hergestellt")
yield conn
except Exception as e:
logger.error(f"Datenbankverbindungsfehler: {e}")
if conn:
conn.rollback()
raise
finally:
if conn:
try:
# Kritisch: WAL-Checkpoint vor dem Schließen
logger.info("Führe finalen WAL-Checkpoint durch...")
conn.execute("PRAGMA wal_checkpoint(TRUNCATE)")
conn.commit()
# Prüfe WAL-Status
wal_info = conn.execute("PRAGMA wal_checkpoint").fetchone()
if wal_info:
logger.info(f"WAL-Checkpoint: {wal_info[0]} Seiten übertragen, {wal_info[1]} Seiten zurückgesetzt")
conn.close()
logger.info("Datenbankverbindung ordnungsgemäß geschlossen")
except Exception as e:
logger.error(f"Fehler beim Schließen der Datenbankverbindung: {e}")
finally:
_current_connection = None
def force_wal_checkpoint():
"""Erzwingt WAL-Checkpoint um alle Daten in die Hauptdatei zu schreiben"""
try:
with get_database_connection(timeout=10) as conn:
# Aggressive WAL-Checkpoint-Strategien
strategies = [
("TRUNCATE", "Vollständiger Checkpoint mit WAL-Truncate"),
("RESTART", "Checkpoint mit WAL-Restart"),
("FULL", "Vollständiger Checkpoint")
]
for strategy, description in strategies:
try:
result = conn.execute(f"PRAGMA wal_checkpoint({strategy})").fetchone()
if result and result[0] == 0: # Erfolg
logger.info(f"{description} erfolgreich: {result}")
return True
else:
logger.warning(f"⚠️ {description} teilweise erfolgreich: {result}")
except Exception as e:
logger.warning(f"⚠️ {description} fehlgeschlagen: {e}")
continue
# Fallback: VACUUM für komplette Reorganisation
logger.info("Führe VACUUM als Fallback durch...")
conn.execute("VACUUM")
logger.info("✅ VACUUM erfolgreich")
return True
except Exception as e:
logger.error(f"Kritischer Fehler bei WAL-Checkpoint: {e}")
return False
def optimize_migration_performance():
"""Optimiert die Datenbank für die Migration"""
try:
with get_database_connection(timeout=5) as conn:
# Performance-Optimierungen für Migration
optimizations = [
("PRAGMA cache_size = -64000", "Cache-Größe auf 64MB erhöht"),
("PRAGMA temp_store = MEMORY", "Temp-Store in Memory"),
("PRAGMA mmap_size = 268435456", "Memory-Mapped I/O aktiviert"),
("PRAGMA optimize", "Automatische Optimierungen")
]
for pragma, description in optimizations:
try:
conn.execute(pragma)
logger.info(f"{description}")
except Exception as e:
logger.warning(f"⚠️ Optimierung fehlgeschlagen ({description}): {e}")
except Exception as e:
logger.warning(f"Fehler bei Performance-Optimierung: {e}")
def main():
"""Führt die optimierte Schema-Migration aus."""
global _migration_running
_migration_running = True
try:
logger.info("🚀 Starte optimierte Datenbank-Schema-Migration...")
# Überprüfe Datenbankdatei
if not os.path.exists(DATABASE_PATH):
logger.error(f"❌ Datenbankdatei nicht gefunden: {DATABASE_PATH}")
return False
# Initial WAL-Checkpoint um sauberen Zustand sicherzustellen
logger.info("🔄 Führe initialen WAL-Checkpoint durch...")
force_wal_checkpoint()
# Performance-Optimierungen
optimize_migration_performance()
# Eigentliche Migration mit optimierter Verbindung
with get_database_connection(timeout=60) as conn:
cursor = conn.cursor()
# Backup erstellen (mit Timeout)
backup_path = f"{DATABASE_PATH}.backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
try:
logger.info(f"📦 Erstelle Backup: {backup_path}")
cursor.execute(f"VACUUM INTO '{backup_path}'")
logger.info("✅ Backup erfolgreich erstellt")
except Exception as e:
logger.warning(f"⚠️ Backup-Erstellung fehlgeschlagen: {e}")
# Migrationen durchführen (verkürzt für bessere Performance)
migrations_performed = []
if not _migration_running:
return False
# Schnelle Schema-Checks
try:
# Test der kritischen Abfrage
cursor.execute("SELECT COUNT(*) FROM guest_requests WHERE duration_minutes IS NOT NULL")
logger.info("✅ Schema-Integritätstest bestanden")
except Exception:
logger.info("🔧 Führe kritische Schema-Reparaturen durch...")
# Nur die wichtigsten Reparaturen
critical_fixes = [
("ALTER TABLE guest_requests ADD COLUMN duration_minutes INTEGER", "duration_minutes zu guest_requests"),
("ALTER TABLE users ADD COLUMN username VARCHAR(100)", "username zu users"),
("UPDATE users SET username = email WHERE username IS NULL", "Username-Fallback")
]
for sql, description in critical_fixes:
if not _migration_running:
break
try:
cursor.execute(sql)
logger.info(f"{description}")
migrations_performed.append(description)
except sqlite3.OperationalError as e:
if "duplicate column" not in str(e).lower():
logger.warning(f"⚠️ {description}: {e}")
# Commit und WAL-Checkpoint zwischen Operationen
if migrations_performed:
conn.commit()
cursor.execute("PRAGMA wal_checkpoint(PASSIVE)")
# Finale Optimierungen (reduziert)
if _migration_running:
essential_indices = [
"CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)",
"CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status)",
"CREATE INDEX IF NOT EXISTS idx_guest_requests_status ON guest_requests(status)"
]
for index_sql in essential_indices:
try:
cursor.execute(index_sql)
except Exception:
pass # Indices sind nicht kritisch
# Finale Statistiken
cursor.execute("ANALYZE")
migrations_performed.append("optimizations")
# Finale Commit
conn.commit()
logger.info(f"✅ Migration abgeschlossen. Bereiche: {', '.join(migrations_performed)}")
# Abschließender WAL-Checkpoint
logger.info("🔄 Führe abschließenden WAL-Checkpoint durch...")
force_wal_checkpoint()
# Kurze Pause um sicherzustellen, dass alle I/O-Operationen abgeschlossen sind
time.sleep(1)
logger.info("🎉 Optimierte Schema-Migration erfolgreich abgeschlossen!")
return True
except KeyboardInterrupt:
logger.info("🔄 Migration durch Benutzer unterbrochen")
return False
except Exception as e:
logger.error(f"❌ Kritischer Fehler bei der Migration: {str(e)}")
return False
finally:
_migration_running = False
# Finale WAL-Bereinigung
try:
force_wal_checkpoint()
except Exception:
pass
if __name__ == "__main__":
success = main()
if not success:
sys.exit(1)

425
utils/database_utils.py Normal file
View File

@@ -0,0 +1,425 @@
"""
Erweiterte Datenbank-Utilities für Backup, Monitoring und Wartung.
"""
import os
import shutil
import sqlite3
import threading
import time
import gzip
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple
from pathlib import Path
from sqlalchemy import text
from sqlalchemy.engine import Engine
from config.settings import DATABASE_PATH
from utils.logging_config import get_logger
from models import get_cached_session, create_optimized_engine
logger = get_logger("database")
# ===== BACKUP-SYSTEM =====
class DatabaseBackupManager:
"""
Verwaltet automatische Datenbank-Backups mit Rotation.
"""
def __init__(self, backup_dir: str = None):
self.backup_dir = backup_dir or os.path.join(os.path.dirname(DATABASE_PATH), "backups")
self.ensure_backup_directory()
self._backup_lock = threading.Lock()
def ensure_backup_directory(self):
"""Stellt sicher, dass das Backup-Verzeichnis existiert."""
Path(self.backup_dir).mkdir(parents=True, exist_ok=True)
def create_backup(self, compress: bool = True) -> str:
"""
Erstellt ein Backup der Datenbank.
Args:
compress: Ob das Backup komprimiert werden soll
Returns:
str: Pfad zum erstellten Backup
"""
with self._backup_lock:
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
backup_filename = f"myp_backup_{timestamp}.db"
if compress:
backup_filename += ".gz"
backup_path = os.path.join(self.backup_dir, backup_filename)
try:
if compress:
# Komprimiertes Backup erstellen
with open(DATABASE_PATH, 'rb') as f_in:
with gzip.open(backup_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
# Einfache Kopie
shutil.copy2(DATABASE_PATH, backup_path)
logger.info(f"Datenbank-Backup erstellt: {backup_path}")
return backup_path
except Exception as e:
logger.error(f"Fehler beim Erstellen des Backups: {str(e)}")
raise
def restore_backup(self, backup_path: str) -> bool:
"""
Stellt ein Backup wieder her.
Args:
backup_path: Pfad zum Backup
Returns:
bool: True bei Erfolg
"""
with self._backup_lock:
try:
# Aktuelles Backup der bestehenden DB erstellen
current_backup = self.create_backup()
logger.info(f"Sicherheitsbackup erstellt: {current_backup}")
if backup_path.endswith('.gz'):
# Komprimiertes Backup wiederherstellen
with gzip.open(backup_path, 'rb') as f_in:
with open(DATABASE_PATH, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
else:
# Einfache Kopie
shutil.copy2(backup_path, DATABASE_PATH)
logger.info(f"Datenbank aus Backup wiederhergestellt: {backup_path}")
return True
except Exception as e:
logger.error(f"Fehler beim Wiederherstellen des Backups: {str(e)}")
return False
def cleanup_old_backups(self, keep_days: int = 30):
"""
Löscht alte Backups.
Args:
keep_days: Anzahl Tage, die Backups aufbewahrt werden sollen
"""
cutoff_date = datetime.now() - timedelta(days=keep_days)
deleted_count = 0
try:
for filename in os.listdir(self.backup_dir):
if filename.startswith("myp_backup_"):
file_path = os.path.join(self.backup_dir, filename)
file_time = datetime.fromtimestamp(os.path.getctime(file_path))
if file_time < cutoff_date:
os.remove(file_path)
deleted_count += 1
logger.info(f"Altes Backup gelöscht: {filename}")
if deleted_count > 0:
logger.info(f"{deleted_count} alte Backups gelöscht")
except Exception as e:
logger.error(f"Fehler beim Bereinigen alter Backups: {str(e)}")
def get_backup_list(self) -> List[Dict]:
"""
Gibt eine Liste aller verfügbaren Backups zurück.
Returns:
List[Dict]: Liste mit Backup-Informationen
"""
backups = []
try:
for filename in os.listdir(self.backup_dir):
if filename.startswith("myp_backup_"):
file_path = os.path.join(self.backup_dir, filename)
file_stat = os.stat(file_path)
backups.append({
"filename": filename,
"path": file_path,
"size": file_stat.st_size,
"created": datetime.fromtimestamp(file_stat.st_ctime),
"compressed": filename.endswith('.gz')
})
# Nach Erstellungsdatum sortieren (neueste zuerst)
backups.sort(key=lambda x: x['created'], reverse=True)
except Exception as e:
logger.error(f"Fehler beim Abrufen der Backup-Liste: {str(e)}")
return backups
# ===== DATENBANK-MONITORING =====
class DatabaseMonitor:
"""
Überwacht die Datenbank-Performance und -Gesundheit.
"""
def __init__(self):
self.engine = create_optimized_engine()
def get_database_stats(self) -> Dict:
"""
Sammelt Datenbank-Statistiken.
Returns:
Dict: Datenbank-Statistiken
"""
stats = {}
try:
with self.engine.connect() as conn:
# Datenbankgröße
result = conn.execute(text("SELECT page_count * page_size as size FROM pragma_page_count(), pragma_page_size()"))
db_size = result.fetchone()[0]
stats['database_size_bytes'] = db_size
stats['database_size_mb'] = round(db_size / (1024 * 1024), 2)
# WAL-Datei-Größe
wal_path = DATABASE_PATH + "-wal"
if os.path.exists(wal_path):
wal_size = os.path.getsize(wal_path)
stats['wal_size_bytes'] = wal_size
stats['wal_size_mb'] = round(wal_size / (1024 * 1024), 2)
else:
stats['wal_size_bytes'] = 0
stats['wal_size_mb'] = 0
# Journal-Modus
result = conn.execute(text("PRAGMA journal_mode"))
stats['journal_mode'] = result.fetchone()[0]
# Cache-Statistiken
result = conn.execute(text("PRAGMA cache_size"))
stats['cache_size'] = result.fetchone()[0]
# Synchronous-Modus
result = conn.execute(text("PRAGMA synchronous"))
stats['synchronous_mode'] = result.fetchone()[0]
# Tabellen-Statistiken
result = conn.execute(text("""
SELECT name,
(SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name=m.name) as table_count
FROM sqlite_master m WHERE type='table'
"""))
table_stats = {}
for table_name, _ in result.fetchall():
if not table_name.startswith('sqlite_'):
count_result = conn.execute(text(f"SELECT COUNT(*) FROM {table_name}"))
table_stats[table_name] = count_result.fetchone()[0]
stats['table_counts'] = table_stats
# Letzte Wartung
stats['last_analyze'] = self._get_last_analyze_time()
stats['last_vacuum'] = self._get_last_vacuum_time()
except Exception as e:
logger.error(f"Fehler beim Sammeln der Datenbank-Statistiken: {str(e)}")
stats['error'] = str(e)
return stats
def _get_last_analyze_time(self) -> Optional[str]:
"""Ermittelt den Zeitpunkt der letzten ANALYZE-Operation."""
try:
# SQLite speichert keine direkten Timestamps für ANALYZE
# Wir verwenden die Modifikationszeit der Statistik-Tabellen
stat_path = DATABASE_PATH + "-stat"
if os.path.exists(stat_path):
return datetime.fromtimestamp(os.path.getmtime(stat_path)).isoformat()
except Exception:
pass
return None
def _get_last_vacuum_time(self) -> Optional[str]:
"""Ermittelt den Zeitpunkt der letzten VACUUM-Operation."""
try:
# Approximation über Datei-Modifikationszeit
return datetime.fromtimestamp(os.path.getmtime(DATABASE_PATH)).isoformat()
except Exception:
pass
return None
def check_database_health(self) -> Dict:
"""
Führt eine Gesundheitsprüfung der Datenbank durch.
Returns:
Dict: Gesundheitsstatus
"""
health = {
"status": "healthy",
"issues": [],
"recommendations": []
}
try:
with self.engine.connect() as conn:
# Integritätsprüfung
result = conn.execute(text("PRAGMA integrity_check"))
integrity_result = result.fetchone()[0]
if integrity_result != "ok":
health["status"] = "critical"
health["issues"].append(f"Integritätsprüfung fehlgeschlagen: {integrity_result}")
# WAL-Dateigröße prüfen
wal_path = DATABASE_PATH + "-wal"
if os.path.exists(wal_path):
wal_size_mb = os.path.getsize(wal_path) / (1024 * 1024)
if wal_size_mb > 100: # Über 100MB
health["issues"].append(f"WAL-Datei sehr groß: {wal_size_mb:.1f}MB")
health["recommendations"].append("WAL-Checkpoint durchführen")
# Freier Speicherplatz prüfen
db_dir = os.path.dirname(DATABASE_PATH)
free_space = shutil.disk_usage(db_dir).free / (1024 * 1024 * 1024) # GB
if free_space < 1: # Weniger als 1GB
health["status"] = "warning" if health["status"] == "healthy" else health["status"]
health["issues"].append(f"Wenig freier Speicherplatz: {free_space:.1f}GB")
health["recommendations"].append("Speicherplatz freigeben oder alte Backups löschen")
# Connection Pool Status (falls verfügbar)
# Hier könnten weitere Checks hinzugefügt werden
except Exception as e:
health["status"] = "error"
health["issues"].append(f"Fehler bei Gesundheitsprüfung: {str(e)}")
logger.error(f"Fehler bei Datenbank-Gesundheitsprüfung: {str(e)}")
return health
def optimize_database(self) -> Dict:
"""
Führt Optimierungsoperationen auf der Datenbank durch.
Returns:
Dict: Ergebnis der Optimierung
"""
result = {
"operations": [],
"success": True,
"errors": []
}
try:
with self.engine.connect() as conn:
# ANALYZE für bessere Query-Planung
conn.execute(text("ANALYZE"))
result["operations"].append("ANALYZE ausgeführt")
# WAL-Checkpoint
checkpoint_result = conn.execute(text("PRAGMA wal_checkpoint(TRUNCATE)"))
checkpoint_info = checkpoint_result.fetchone()
result["operations"].append(f"WAL-Checkpoint: {checkpoint_info}")
# Incremental Vacuum
conn.execute(text("PRAGMA incremental_vacuum"))
result["operations"].append("Incremental Vacuum ausgeführt")
# Optimize Pragma
conn.execute(text("PRAGMA optimize"))
result["operations"].append("PRAGMA optimize ausgeführt")
conn.commit()
except Exception as e:
result["success"] = False
result["errors"].append(str(e))
logger.error(f"Fehler bei Datenbank-Optimierung: {str(e)}")
return result
# ===== AUTOMATISCHE WARTUNG =====
class DatabaseMaintenanceScheduler:
"""
Plant und führt automatische Wartungsaufgaben durch.
"""
def __init__(self):
self.backup_manager = DatabaseBackupManager()
self.monitor = DatabaseMonitor()
self._running = False
self._thread = None
def start_maintenance_scheduler(self):
"""Startet den Wartungs-Scheduler."""
if self._running:
return
self._running = True
self._thread = threading.Thread(target=self._maintenance_loop, daemon=True)
self._thread.start()
logger.info("Datenbank-Wartungs-Scheduler gestartet")
def stop_maintenance_scheduler(self):
"""Stoppt den Wartungs-Scheduler."""
self._running = False
if self._thread:
self._thread.join(timeout=5)
logger.info("Datenbank-Wartungs-Scheduler gestoppt")
def _maintenance_loop(self):
"""Hauptschleife für Wartungsaufgaben."""
last_backup = datetime.now()
last_cleanup = datetime.now()
last_optimization = datetime.now()
while self._running:
try:
now = datetime.now()
# Tägliches Backup (alle 24 Stunden)
if (now - last_backup).total_seconds() > 86400: # 24 Stunden
self.backup_manager.create_backup()
last_backup = now
# Wöchentliche Bereinigung alter Backups (alle 7 Tage)
if (now - last_cleanup).total_seconds() > 604800: # 7 Tage
self.backup_manager.cleanup_old_backups()
last_cleanup = now
# Tägliche Optimierung (alle 24 Stunden)
if (now - last_optimization).total_seconds() > 86400: # 24 Stunden
self.monitor.optimize_database()
last_optimization = now
# 1 Stunde warten bis zum nächsten Check
time.sleep(3600)
except Exception as e:
logger.error(f"Fehler im Wartungs-Scheduler: {str(e)}")
time.sleep(300) # 5 Minuten warten bei Fehlern
# ===== GLOBALE INSTANZEN =====
# Globale Instanzen für einfachen Zugriff
backup_manager = DatabaseBackupManager()
database_monitor = DatabaseMonitor()
maintenance_scheduler = DatabaseMaintenanceScheduler()
# Automatisch starten
maintenance_scheduler.start_maintenance_scheduler()
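# Hinweis (Skizze, Annahme: die einbettende App besitzt keinen eigenen Shutdown-Hook):
# beim Prozessende sollte der Scheduler sauber gestoppt werden, z.B. via atexit:
#   import atexit
#   atexit.register(maintenance_scheduler.stop_maintenance_scheduler)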

743
utils/debug_cli.py Normal file
View File

@@ -0,0 +1,743 @@
#!/usr/bin/env python3
"""
MYP Debug CLI
Kommandozeilen-Tool für Diagnose und Debugging der MYP-Anwendung
"""
import os
import sys
import argparse
import time
import json
import importlib
import logging
import sqlite3
from datetime import datetime
import traceback
from pprint import pprint
# Eigene Module importieren
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# Farbige Ausgabe für die Konsole
COLORS = {
'RESET': '\033[0m',
'BOLD': '\033[1m',
'RED': '\033[31m',
'GREEN': '\033[32m',
'YELLOW': '\033[33m',
'BLUE': '\033[34m',
'MAGENTA': '\033[35m',
'CYAN': '\033[36m',
}
# Emojis für verschiedene Log-Level und Kategorien
LOG_EMOJIS = {
'DEBUG': '🔍',
'INFO': 'ℹ️',
'WARNING': '⚠️',
'ERROR': '❌',
'CRITICAL': '🔥',
'SUCCESS': '✅',
'DATABASE': '💾',
'NETWORK': '🌐',
'SYSTEM': '💻',
'PRINTER': '🖨️',
'API': '📡',
'USER': '👤'
}
# Prüfen, ob das Terminal Farben unterstützt
def supports_color():
"""Prüft, ob das Terminal Farben unterstützt."""
if os.name == 'nt':
try:
import ctypes
kernel32 = ctypes.windll.kernel32
# Aktiviere VT100-Unterstützung unter Windows
kernel32.SetConsoleMode(kernel32.GetStdHandle(-11), 7)
return True
except:
return False
else:
return sys.stdout.isatty()
USE_COLOR = supports_color()
def colorize(text, color):
"""Färbt den Text ein, wenn Farben unterstützt werden."""
if USE_COLOR and color in COLORS:
return f"{COLORS[color]}{text}{COLORS['RESET']}"
return text
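# Beispiel (Skizze): colorize("OK", "GREEN") liefert den Text in ANSI-Grün zurück;
# auf Terminals ohne Farbunterstützung bleibt er unverändert.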
def print_success(message):
print(f"{LOG_EMOJIS['SUCCESS']} {colorize(message, 'GREEN')}")
def print_error(message):
print(f"{LOG_EMOJIS['ERROR']} {colorize(message, 'RED')}")
def print_warning(message):
print(f"{LOG_EMOJIS['WARNING']} {colorize(message, 'YELLOW')}")
def print_info(message):
print(f"{LOG_EMOJIS['INFO']} {colorize(message, 'BLUE')}")
def print_debug(message):
print(f"{LOG_EMOJIS['DEBUG']} {colorize(message, 'CYAN')}")
def print_database(message):
print(f"{LOG_EMOJIS['DATABASE']} {colorize(message, 'MAGENTA')}")
def print_network(message):
print(f"{LOG_EMOJIS['NETWORK']} {colorize(message, 'CYAN')}")
def print_system(message):
print(f"{LOG_EMOJIS['SYSTEM']} {colorize(message, 'BLUE')}")
def print_printer(message):
print(f"{LOG_EMOJIS['PRINTER']} {colorize(message, 'GREEN')}")
def print_header(message):
print(f"\n{colorize('='*80, 'BOLD')}")
print(f"{colorize(message.center(80), 'BOLD')}")
print(f"{colorize('='*80, 'BOLD')}\n")
def print_section(message):
print(f"\n{colorize('-'*40, 'BOLD')}")
print(f"{colorize(message, 'BOLD')}")
print(f"{colorize('-'*40, 'BOLD')}\n")
# Hilfsfunktionen
def get_database_path():
"""Gibt den Pfad zur Datenbank zurück."""
try:
from config.settings import DATABASE_PATH
return DATABASE_PATH
except ImportError:
# Fallback auf Standard-Pfad
base_dir = os.path.dirname(os.path.abspath(__file__))
return os.path.join(base_dir, "database", "myp.db")
def check_database():
"""Prüft den Zustand der Datenbank."""
db_path = get_database_path()
if not os.path.exists(db_path):
print_error(f"Datenbank nicht gefunden: {db_path}")
return False
try:
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
# Tabellen auflisten
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
print_database(f"Datenbank gefunden: {db_path}")
print_database(f"Größe: {os.path.getsize(db_path) / (1024*1024):.2f} MB")
print_database(f"Tabellen ({len(tables)}):")
for table in tables:
# Anzahl der Datensätze pro Tabelle
cursor.execute(f"SELECT COUNT(*) FROM {table[0]}")
count = cursor.fetchone()[0]
print(f" 📋 {table[0]}: {count} Einträge")
conn.close()
return True
except sqlite3.Error as e:
print_error(f"Datenbankfehler: {e}")
return False
except Exception as e:
print_error(f"Fehler beim Prüfen der Datenbank: {e}")
return False
def check_log_files():
"""Prüft die Log-Dateien und zeigt die neuesten Einträge an."""
try:
from config.settings import LOG_DIR, LOG_SUBDIRS
if not os.path.exists(LOG_DIR):
print_error(f"Log-Verzeichnis nicht gefunden: {LOG_DIR}")
return False
print_info(f"Log-Verzeichnis: {LOG_DIR}")
for subdir in LOG_SUBDIRS:
log_path = os.path.join(LOG_DIR, subdir, f"{subdir}.log")
if not os.path.exists(log_path):
print_warning(f"Log-Datei nicht gefunden: {log_path}")
continue
size = os.path.getsize(log_path) / 1024 # KB
print_info(f"Log-Datei: {subdir}.log ({size:.1f} KB)")
# Letzte Zeilen anzeigen
try:
with open(log_path, 'r') as f:
lines = f.readlines()
last_lines = lines[-5:] # Letzte 5 Zeilen
print(" Letzte Einträge:")
for line in last_lines:
line = line.strip()
# Farbliche Hervorhebung je nach Log-Level
if "ERROR" in line:
print(f" {colorize(line, 'RED')}")
elif "WARNING" in line:
print(f" {colorize(line, 'YELLOW')}")
elif "INFO" in line:
print(f" {colorize(line, 'GREEN')}")
elif "DEBUG" in line:
print(f" {colorize(line, 'CYAN')}")
else:
print(f" {line}")
except Exception as e:
print_warning(f" Fehler beim Lesen der Log-Datei: {e}")
return True
except ImportError:
print_error("Konfiguration für Logs nicht gefunden")
return False
except Exception as e:
print_error(f"Fehler beim Prüfen der Log-Dateien: {e}")
return False
def check_environment():
"""Prüft die Umgebungsvariablen und System-Einstellungen."""
print_info("Umgebungsinformationen:")
print(f" Python-Version: {sys.version.split()[0]}")
print(f" Betriebssystem: {os.name} - {sys.platform}")
print(f" Arbeitsverzeichnis: {os.getcwd()}")
print_info("Wichtige Umgebungsvariablen:")
env_vars = [
"FLASK_ENV", "FLASK_DEBUG", "MYP_SSL_ENABLED",
"MYP_SSL_HOSTNAME", "PYTHONPATH"
]
for var in env_vars:
value = os.environ.get(var, "nicht gesetzt")
print(f" {var}: {value}")
try:
# Flask-Konfiguration prüfen
print_info("Flask-Konfiguration:")
from config.settings import FLASK_HOST, FLASK_PORT, FLASK_DEBUG, SSL_ENABLED
print(f" Host: {FLASK_HOST}")
print(f" Port: {FLASK_PORT}")
print(f" Debug-Modus: {FLASK_DEBUG}")
print(f" SSL aktiviert: {SSL_ENABLED}")
# Module prüfen
required_modules = [
'flask', 'sqlalchemy', 'flask_login', 'werkzeug'
]
print_info("Benötigte Module:")
for module in required_modules:
try:
mod = importlib.import_module(module)
version = getattr(mod, '__version__', 'unbekannt')
print(f" {module}: {colorize('OK', 'GREEN')} (Version {version})")
except ImportError:
print(f" {module}: {colorize('FEHLT', 'RED')}")
except ImportError:
print_warning("Flask-Konfiguration konnte nicht geladen werden")
except Exception as e:
print_error(f"Fehler beim Prüfen der Umgebung: {e}")
def scan_printer(ip_address, timeout=5):
"""Scannt einen Drucker und zeigt Informationen an."""
import socket
print_printer(f"Prüfe Drucker mit IP: {ip_address}")
# Ping testen
import subprocess
try:
if os.name == 'nt': # Windows
cmd = ['ping', '-n', '1', '-w', str(timeout * 1000), ip_address]
else: # Unix/Linux/macOS
cmd = ['ping', '-c', '1', '-W', str(timeout), ip_address]
print(f" 🏓 Ping-Test: ", end="")
result = subprocess.run(cmd, capture_output=True, text=True,
encoding='utf-8', errors='replace')
if result.returncode == 0:
print(colorize("Erreichbar", "GREEN"))
else:
print(colorize("Nicht erreichbar", "RED"))
print(f" 📄 Details: {result.stdout}")
return
except Exception as e:
print(colorize(f"Fehler bei Ping-Test: {e}", "RED"))
# Offene Ports prüfen
common_ports = [80, 443, 8080, 8443, 631, 9100, 9101, 9102]
open_ports = []
print(" 🔍 Port-Scan: ", end="")
for port in common_ports:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((ip_address, port))
if result == 0:
open_ports.append(port)
sock.close()
if open_ports:
print(colorize(f"Offene Ports: {', '.join(map(str, open_ports))}", "GREEN"))
else:
print(colorize("Keine offenen Ports gefunden", "YELLOW"))
# Drucker-Info über Tapo-API testen (wenn vorhanden)
try:
from PyP100 import PyP110
print(" 🔌 Smart Plug Test: ", end="")
try:
# Standardmäßig Anmeldeinformationen aus der Konfiguration verwenden
from config.settings import TAPO_USERNAME, TAPO_PASSWORD
p110 = PyP110.P110(ip_address, TAPO_USERNAME, TAPO_PASSWORD)
p110.handshake()
p110.login()
device_info = p110.getDeviceInfo()
print(colorize("Verbunden", "GREEN"))
print(f" 📛 Gerätename: {device_info.get('nickname', 'Unbekannt')}")
print(f" ⚡ Status: {'Ein' if device_info.get('device_on', False) else 'Aus'}")
if 'on_time' in device_info:
on_time = device_info['on_time']
print(f" ⏱️ Betriebszeit: {on_time // 60} Minuten, {on_time % 60} Sekunden")
except Exception as e:
print(colorize(f"Fehler: {e}", "RED"))
except ImportError:
print_warning(" PyP100-Modul nicht verfügbar - Smart Plug Test übersprungen")
def check_printers_from_db():
"""Prüft die in der Datenbank gespeicherten Drucker."""
db_path = get_database_path()
if not os.path.exists(db_path):
print_error(f"Datenbank nicht gefunden: {db_path}")
return
try:
conn = sqlite3.connect(db_path)
conn.row_factory = sqlite3.Row
cursor = conn.cursor()
# Drucker-Tabelle prüfen
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='printer';")
if not cursor.fetchone():
print_error("Drucker-Tabelle nicht gefunden")
conn.close()
return
# Drucker auslesen
cursor.execute("SELECT * FROM printer;")
printers = cursor.fetchall()
if not printers:
print_warning("Keine Drucker in der Datenbank gefunden")
conn.close()
return
print_info(f"{len(printers)} Drucker gefunden:")
for printer in printers:
status_color = 'GREEN' if printer['status'] == 'online' else 'RED'
print(f" {printer['name']}: {colorize(printer['status'], status_color)}")
print(f" IP: {printer['ip_address']}")
print(f" Plug IP: {printer['plug_ip'] or 'Nicht konfiguriert'}")
# Detaillierteren Status prüfen
if printer['plug_ip']:
ask = input(f" Möchten Sie den Drucker {printer['name']} scannen? (j/n): ")
if ask.lower() in ('j', 'ja', 'y', 'yes'):
scan_printer(printer['plug_ip'])
conn.close()
except Exception as e:
print_error(f"Fehler beim Prüfen der Drucker: {e}")
traceback.print_exc()
def check_flask_routes():
"""Zeigt alle verfügbaren Flask-Routen an."""
try:
# Versuche, die Flask-App zu importieren
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
try:
from app import app as flask_app
except ImportError:
print_error("Flask-App konnte nicht importiert werden")
return
# Alle Routen auflisten
print_info("Verfügbare Flask-Routen:")
routes = []
for rule in flask_app.url_map.iter_rules():
routes.append({
'endpoint': rule.endpoint,
'methods': ', '.join(sorted(rule.methods - {'HEAD', 'OPTIONS'})),
'path': rule.rule
})
# Nach Pfad sortieren
routes = sorted(routes, key=lambda x: x['path'])
# Routen anzeigen
for route in routes:
method_color = 'GREEN' if 'GET' in route['methods'] else 'BLUE'
print(f" {colorize(route['methods'], method_color)} {route['path']}")
print(f"{route['endpoint']}")
print_info(f"Insgesamt {len(routes)} Routen gefunden")
except Exception as e:
print_error(f"Fehler beim Abrufen der Flask-Routen: {e}")
traceback.print_exc()
def print_system_info():
"""Zeigt detaillierte Systeminformationen an."""
print_header("Systeminformationen")
print_section("Basisinformationen")
import platform
print(f"Python-Version: {platform.python_version()}")
print(f"Betriebssystem: {platform.system()} {platform.release()}")
print(f"Architektur: {platform.machine()}")
print(f"Prozessor: {platform.processor()}")
print_section("Speicher")
try:
import psutil
vm = psutil.virtual_memory()
print(f"Gesamter Speicher: {vm.total / (1024**3):.1f} GB")
print(f"Verfügbarer Speicher: {vm.available / (1024**3):.1f} GB")
print(f"Speicherauslastung: {vm.percent}%")
disk = psutil.disk_usage('/')
print(f"Festplatte gesamt: {disk.total / (1024**3):.1f} GB")
print(f"Festplatte frei: {disk.free / (1024**3):.1f} GB")
print(f"Festplattenauslastung: {disk.percent}%")
except ImportError:
print_warning("psutil-Modul nicht verfügbar - eingeschränkte Informationen")
print_section("Netzwerk")
try:
import socket
hostname = socket.gethostname()
ip_address = socket.gethostbyname(hostname)
print(f"Hostname: {hostname}")
print(f"IP-Adresse: {ip_address}")
# Netzwerkschnittstellen
if 'psutil' in sys.modules:
print("Netzwerkschnittstellen:")
for name, addrs in psutil.net_if_addrs().items():
for addr in addrs:
if addr.family == socket.AF_INET:
print(f" {name}: {addr.address}")
except Exception as e:
print_warning(f"Fehler beim Abrufen der Netzwerkinformationen: {e}")
def test_logging_system():
"""Testet das verbesserte Logging-System mit allen Features."""
print_header("Logging-System Test")
try:
# Versuche die neuen Logging-Funktionen zu importieren
from utils.logging_config import get_logger, debug_request, debug_response, measure_execution_time
print_success("Neue Logging-Module erfolgreich importiert")
# Test verschiedener Logger
test_loggers = ['app', 'auth', 'jobs', 'printers', 'errors']
print_section("Logger-Tests")
for logger_name in test_loggers:
try:
logger = get_logger(logger_name)
# Test verschiedener Log-Level
logger.debug(f"🔍 Debug-Test für {logger_name}")
logger.info(f" Info-Test für {logger_name}")
logger.warning(f"⚠️ Warning-Test für {logger_name}")
print_success(f"Logger '{logger_name}' funktioniert korrekt")
except Exception as e:
print_error(f"Fehler beim Testen von Logger '{logger_name}': {e}")
# Test Performance-Monitoring
print_section("Performance-Monitoring Test")
@measure_execution_time(logger=get_logger("app"), task_name="Test-Funktion")
def test_function():
"""Eine Test-Funktion für das Performance-Monitoring."""
import time
time.sleep(0.1) # Simuliere etwas Arbeit
return "Test erfolgreich"
result = test_function()
print_success(f"Performance-Monitoring Test: {result}")
# Test der Debug-Utilities
print_section("Debug-Utilities Test")
try:
from utils.debug_utils import debug_dump, debug_trace, memory_usage
# Test debug_dump
test_data = {
"version": "1.0.0",
"features": ["emojis", "colors", "performance-monitoring"],
"status": "active"
}
debug_dump(test_data, "Test-Konfiguration")
# Test memory_usage
memory_info = memory_usage()
print_system(f"Aktueller Speicherverbrauch: {memory_info['rss']:.2f} MB")
print_success("Debug-Utilities funktionieren korrekt")
except ImportError as e:
print_warning(f"Debug-Utilities nicht verfügbar: {e}")
# Zusammenfassung
print_section("Test-Zusammenfassung")
print_success("🎉 Alle Logging-System-Tests erfolgreich abgeschlossen!")
print_info("Features verfügbar:")
print(" ✅ Farbige Log-Ausgaben mit ANSI-Codes")
print(" ✅ Emoji-Integration für bessere Lesbarkeit")
print(" ✅ HTTP-Request/Response-Logging")
print(" ✅ Performance-Monitoring mit Ausführungszeit")
print(" ✅ Cross-Platform-Unterstützung (Windows/Unix)")
print(" ✅ Strukturierte Debug-Informationen")
except ImportError as e:
print_error(f"Logging-Module nicht verfügbar: {e}")
print_warning("Stelle sicher, dass alle Module korrekt installiert sind")
except Exception as e:
print_error(f"Unerwarteter Fehler beim Logging-Test: {e}")
traceback.print_exc()
# Hauptfunktionen für die Befehlszeile
def diagnose():
"""Führt eine umfassende Diagnose durch."""
print_header("MYP Diagnose-Tool")
print_section("Systemprüfung")
check_environment()
print_section("Datenbankprüfung")
check_database()
print_section("Log-Dateien")
check_log_files()
print_success("Diagnose abgeschlossen!")
def scan_printers():
"""Scannt und prüft alle Drucker."""
print_header("Drucker-Scanner")
# Direkter Scan einer IP-Adresse
ip = input("IP-Adresse zum Scannen (leer lassen, um Drucker aus der Datenbank zu prüfen): ")
if ip:
scan_printer(ip)
else:
check_printers_from_db()
def show_routes():
"""Zeigt alle verfügbaren API-Routen an."""
print_header("API-Routen")
check_flask_routes()
def system_info():
"""Zeigt detaillierte Systeminformationen an."""
print_system_info()
def show_logs():
"""Zeigt und analysiert Log-Dateien."""
print_header("Log-Analyse")
try:
from config.settings import LOG_DIR, LOG_SUBDIRS
if not os.path.exists(LOG_DIR):
print_error(f"Log-Verzeichnis nicht gefunden: {LOG_DIR}")
return
print_info(f"Log-Verzeichnis: {LOG_DIR}")
print_info("Verfügbare Logs:")
for i, subdir in enumerate(LOG_SUBDIRS, 1):
log_path = os.path.join(LOG_DIR, subdir, f"{subdir}.log")
size = "Nicht gefunden"
if os.path.exists(log_path):
size = f"{os.path.getsize(log_path) / 1024:.1f} KB"
print(f" {i}. {subdir}.log ({size})")
choice = input("\nWelches Log möchten Sie anzeigen? (Nummer oder Name): ")
# Nummer in Namen umwandeln
try:
choice_num = int(choice) - 1
if 0 <= choice_num < len(LOG_SUBDIRS):
choice = LOG_SUBDIRS[choice_num]
except ValueError:
pass
# Prüfen, ob die Wahl gültig ist
if choice not in LOG_SUBDIRS:
print_error(f"Ungültige Auswahl: {choice}")
return
log_path = os.path.join(LOG_DIR, choice, f"{choice}.log")
if not os.path.exists(log_path):
print_error(f"Log-Datei nicht gefunden: {log_path}")
return
# Anzahl der anzuzeigenden Zeilen
lines_count = input("Anzahl der anzuzeigenden Zeilen (Standard: 20): ")
lines_count = int(lines_count) if lines_count.isdigit() else 20
# Filter für bestimmte Log-Level
level_filter = input("Nach Log-Level filtern (INFO, WARNING, ERROR oder leer für alle): ").upper()
# Log-Datei anzeigen
with open(log_path, 'r') as f:
lines = f.readlines()
# Filtern nach Log-Level
if level_filter:
lines = [line for line in lines if level_filter in line]
# Letzte n Zeilen auswählen
lines = lines[-lines_count:]
print_section(f"Log-Datei: {choice}.log (letzte {len(lines)} Einträge)")
for line in lines:
line = line.strip()
# Farbliche Hervorhebung je nach Log-Level
if "ERROR" in line:
print(colorize(line, 'RED'))
elif "WARNING" in line:
print(colorize(line, 'YELLOW'))
elif "INFO" in line:
print(colorize(line, 'GREEN'))
elif "DEBUG" in line:
print(colorize(line, 'CYAN'))
else:
print(line)
except ImportError:
print_error("Konfiguration für Logs nicht gefunden")
except Exception as e:
print_error(f"Fehler beim Anzeigen der Log-Dateien: {e}")
traceback.print_exc()
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description="MYP Debug CLI")
subparsers = parser.add_subparsers(dest="command", help="Befehl")
# Diagnose
diag_parser = subparsers.add_parser("diagnose", help="Führt eine umfassende Diagnose durch")
# Drucker scannen
scan_parser = subparsers.add_parser("scan", help="Scannt und prüft alle Drucker")
# Routen anzeigen
routes_parser = subparsers.add_parser("routes", help="Zeigt alle verfügbaren API-Routen an")
# Systeminformationen
sysinfo_parser = subparsers.add_parser("sysinfo", help="Zeigt detaillierte Systeminformationen an")
# Logs anzeigen
logs_parser = subparsers.add_parser("logs", help="Zeigt und analysiert Log-Dateien")
# Logging-System testen
logging_test_parser = subparsers.add_parser("test-logging", help="Testet das verbesserte Logging-System")
return parser.parse_args()
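# Beispielaufrufe (Skizze, aus dem Projektverzeichnis; Subkommandos wie oben definiert):
#   python utils/debug_cli.py diagnose
#   python utils/debug_cli.py scan
#   python utils/debug_cli.py logs
#   python utils/debug_cli.py test-logging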
def main():
"""Hauptfunktion."""
args = parse_args()
if args.command == "diagnose":
diagnose()
elif args.command == "scan":
scan_printers()
elif args.command == "routes":
show_routes()
elif args.command == "sysinfo":
system_info()
elif args.command == "logs":
show_logs()
elif args.command == "test-logging":
test_logging_system()
else:
# Interaktives Menü, wenn kein Befehl angegeben wurde
print_header("MYP Debug CLI")
print("Wählen Sie eine Option:")
print(" 1. Diagnose durchführen")
print(" 2. Drucker scannen")
print(" 3. API-Routen anzeigen")
print(" 4. Systeminformationen anzeigen")
print(" 5. Log-Dateien anzeigen")
print(" 6. Logging-System testen")
print(" 0. Beenden")
choice = input("\nIhre Wahl: ")
if choice == "1":
diagnose()
elif choice == "2":
scan_printers()
elif choice == "3":
show_routes()
elif choice == "4":
system_info()
elif choice == "5":
show_logs()
elif choice == "6":
test_logging_system()
elif choice == "0":
print("Auf Wiedersehen!")
sys.exit(0)
else:
print_error("Ungültige Auswahl")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print_info("\nProgramm wurde durch Benutzer abgebrochen")
except Exception as e:
print_error(f"Unerwarteter Fehler: {e}")
traceback.print_exc()

View File

@@ -0,0 +1,437 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Debug-Skript für Druckererkennung
Testet die Druckererkennung und identifiziert Probleme
"""
import sys
import os
import requests
import json
import time
import threading
from datetime import datetime
import sqlite3
import subprocess
import platform
# Füge das Anwendungsverzeichnis zum Python-Pfad hinzu
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
def log_message(message, level="INFO"):
"""Logge eine Nachricht mit Zeitstempel"""
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f"[{timestamp}] [{level}] {message}")
def test_database_connection():
"""Teste die Datenbankverbindung"""
log_message("Teste Datenbankverbindung...")
try:
# Pfad zur App hinzufügen für korrekten Import
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
from config.settings import DATABASE_PATH
db_file = DATABASE_PATH
except ImportError:
# Fallback für lokale Ausführung
db_file = os.path.join('database', 'myp.db')
if os.path.exists(db_file):
log_message(f"Gefundene Datenbankdatei: {db_file}")
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
# Prüfe ob Printers-Tabelle existiert
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='printers';")
if cursor.fetchone():
log_message("✅ Printers-Tabelle gefunden")
# Zähle Drucker
cursor.execute("SELECT COUNT(*) FROM printers;")
count = cursor.fetchone()[0]
log_message(f"📊 Anzahl Drucker in Datenbank: {count}")
# Zeige Drucker-Details
cursor.execute("SELECT id, name, plug_ip, status FROM printers;")
printers = cursor.fetchall()
for printer in printers:
log_message(f" Drucker {printer[0]}: {printer[1]} ({printer[2]}) - Status: {printer[3]}")
conn.close()
return True
else:
log_message("❌ Printers-Tabelle nicht gefunden")
conn.close()
return False
else:
log_message(f"❌ Datenbankdatei nicht gefunden: {db_file}")
return False
except Exception as e:
log_message(f"❌ Datenbankfehler: {str(e)}", "ERROR")
return False
def test_api_endpoints():
"""Teste die API-Endpunkte"""
log_message("Teste API-Endpunkte...")
base_url = "http://localhost:5000"
endpoints = [
"/api/printers",
"/api/printers/status"
]
for endpoint in endpoints:
try:
log_message(f"Teste {endpoint}...")
response = requests.get(f"{base_url}{endpoint}", timeout=10)
log_message(f" Status Code: {response.status_code}")
if response.status_code == 200:
try:
data = response.json()
if endpoint == "/api/printers":
if 'printers' in data:
log_message(f"{len(data['printers'])} Drucker geladen")
else:
log_message(f" ⚠️ Unerwartete Antwortstruktur: {list(data.keys())}")
else:
if isinstance(data, list):
log_message(f"{len(data)} Drucker mit Status geladen")
else:
log_message(f" ⚠️ Unerwartete Antwortstruktur: {type(data)}")
except json.JSONDecodeError:
log_message(f" ❌ Ungültige JSON-Antwort", "ERROR")
else:
log_message(f" ❌ HTTP-Fehler: {response.status_code}", "ERROR")
try:
error_data = response.json()
log_message(f" Fehlermeldung: {error_data.get('error', 'Unbekannt')}", "ERROR")
except:
log_message(f" Antwort: {response.text[:200]}", "ERROR")
except requests.exceptions.ConnectionError:
log_message(f" ❌ Verbindung zu {base_url} fehlgeschlagen", "ERROR")
log_message(" Ist die Flask-Anwendung gestartet?", "ERROR")
except requests.exceptions.Timeout:
log_message(f" ❌ Timeout bei {endpoint}", "ERROR")
except Exception as e:
log_message(f" ❌ Fehler: {str(e)}", "ERROR")
def test_network_connectivity():
"""Teste Netzwerkverbindung zu Druckern"""
log_message("Teste Netzwerkverbindung zu Druckern...")
# Lade Drucker aus Datenbank
try:
# Verwende konfigurierten Datenbankpfad
try:
from config.settings import DATABASE_PATH
db_file = DATABASE_PATH
except ImportError:
db_file = os.path.join('database', 'myp.db')
printers = []
if os.path.exists(db_file):
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
cursor.execute("SELECT name, plug_ip FROM printers WHERE plug_ip IS NOT NULL;")
printers = cursor.fetchall()
conn.close()
if not printers:
log_message("❌ Keine Drucker mit IP-Adressen gefunden")
return
for name, ip in printers:
log_message(f"Teste Verbindung zu {name} ({ip})...")
# Ping-Test
try:
if platform.system().lower() == "windows":
result = subprocess.run(['ping', '-n', '1', '-w', '3000', ip],
capture_output=True, text=True, timeout=5,
encoding='utf-8', errors='replace')
else:
result = subprocess.run(['ping', '-c', '1', '-W', '3', ip],
capture_output=True, text=True, timeout=5,
encoding='utf-8', errors='replace')
if result.returncode == 0:
log_message(f" ✅ Ping erfolgreich")
else:
log_message(f" ❌ Ping fehlgeschlagen")
except subprocess.TimeoutExpired:
log_message(f" ❌ Ping-Timeout")
except Exception as e:
log_message(f" ❌ Ping-Fehler: {str(e)}")
# HTTP-Test (falls Drucker Webinterface hat)
try:
response = requests.get(f"http://{ip}", timeout=3)
log_message(f" ✅ HTTP-Verbindung erfolgreich (Status: {response.status_code})")
except requests.exceptions.Timeout:
log_message(f" ⚠️ HTTP-Timeout (normal für Drucker ohne Webinterface)")
except requests.exceptions.ConnectionError:
log_message(f" ⚠️ HTTP-Verbindung fehlgeschlagen (normal für Drucker ohne Webinterface)")
except Exception as e:
log_message(f" ⚠️ HTTP-Fehler: {str(e)}")
except Exception as e:
log_message(f"❌ Fehler beim Testen der Netzwerkverbindung: {str(e)}", "ERROR")
def test_tapo_connections():
"""Teste TP-Link Tapo P110-Steckdosen-Verbindungen"""
log_message("Teste TP-Link Tapo P110-Steckdosen-Verbindungen...")
try:
# PyP100 importieren
from PyP100 import PyP110
log_message("✅ PyP100-Modul erfolgreich importiert")
except ImportError:
log_message("❌ PyP100-Modul nicht verfügbar", "ERROR")
log_message(" Installiere mit: pip install PyP100", "INFO")
return
# Lade Drucker aus Datenbank
try:
# Verwende konfigurierten Datenbankpfad
try:
from config.settings import DATABASE_PATH
db_file = DATABASE_PATH
except ImportError:
db_file = os.path.join('database', 'myp.db')
printers = []
if os.path.exists(db_file):
conn = sqlite3.connect(db_file)
cursor = conn.cursor()
cursor.execute("SELECT id, name, plug_ip, plug_username, plug_password FROM printers WHERE plug_ip IS NOT NULL;")
printers = cursor.fetchall()
conn.close()
if not printers:
log_message("❌ Keine Drucker mit Tapo-Konfiguration gefunden")
return
successful_connections = 0
total_printers = len(printers)
for printer_id, name, plug_ip, plug_username, plug_password in printers:
log_message(f"Teste Tapo-Verbindung zu {name} ({plug_ip})...")
# Konfiguration validieren
if not all([plug_ip, plug_username, plug_password]):
log_message(f" ❌ Unvollständige Konfiguration")
missing = []
if not plug_ip: missing.append("IP-Adresse")
if not plug_username: missing.append("Benutzername")
if not plug_password: missing.append("Passwort")
log_message(f" Fehlend: {', '.join(missing)}")
continue
try:
# Tapo-Verbindung herstellen
p110 = PyP110.P110(plug_ip, plug_username, plug_password)
p110.handshake() # Authentifizierung
p110.login() # Login
# Geräteinformationen abrufen
device_info = p110.getDeviceInfo()
log_message(f" ✅ Tapo-Verbindung erfolgreich")
log_message(f" 📛 Gerätename: {device_info.get('nickname', 'Unbekannt')}")
log_message(f" ⚡ Status: {'Ein' if device_info.get('device_on', False) else 'Aus'}")
if 'on_time' in device_info:
on_time = device_info.get('on_time', 0)
hours, minutes = divmod(on_time // 60, 60)
log_message(f" ⏱️ Betriebszeit: {hours}h {minutes}m")
if 'power_usage' in device_info:
power_usage = device_info.get('power_usage', {})
current_power = power_usage.get('power_mw', 0) / 1000 # mW zu W
log_message(f" 🔋 Aktueller Verbrauch: {current_power:.1f}W")
successful_connections += 1
except Exception as e:
log_message(f" ❌ Tapo-Verbindung fehlgeschlagen: {str(e)}")
# Detaillierte Fehleranalyse
if "login" in str(e).lower():
log_message(f" 🔐 Mögliche Ursache: Falsche Anmeldedaten")
elif "timeout" in str(e).lower():
log_message(f" ⏱️ Mögliche Ursache: Netzwerk-Timeout")
elif "connect" in str(e).lower():
log_message(f" 🌐 Mögliche Ursache: Steckdose nicht erreichbar")
elif "handshake" in str(e).lower():
log_message(f" 🤝 Mögliche Ursache: Protokoll-Handshake fehlgeschlagen")
# Zusammenfassung
success_rate = (successful_connections / total_printers * 100) if total_printers > 0 else 0
log_message(f"📊 Tapo-Verbindungs-Zusammenfassung:")
log_message(f" Getestete Drucker: {total_printers}")
log_message(f" Erfolgreiche Verbindungen: {successful_connections}")
log_message(f" Erfolgsrate: {success_rate:.1f}%")
if successful_connections == total_printers:
log_message("🎉 Alle Tapo-Verbindungen erfolgreich!")
elif successful_connections > 0:
log_message("⚠️ Einige Tapo-Verbindungen fehlgeschlagen")
else:
log_message("❌ Keine Tapo-Verbindungen erfolgreich", "ERROR")
except Exception as e:
log_message(f"❌ Fehler beim Testen der Tapo-Verbindungen: {str(e)}", "ERROR")
def test_flask_app_status():
"""Teste den Status der Flask-Anwendung"""
log_message("Teste Flask-Anwendung...")
try:
# Teste Hauptseite
response = requests.get("http://localhost:5000", timeout=5)
if response.status_code == 200:
log_message("✅ Flask-Anwendung läuft")
else:
log_message(f"⚠️ Flask-Anwendung antwortet mit Status {response.status_code}")
except requests.exceptions.ConnectionError:
log_message("❌ Flask-Anwendung nicht erreichbar", "ERROR")
log_message(" Starte die Anwendung mit: python app.py", "INFO")
except Exception as e:
log_message(f"❌ Fehler beim Testen der Flask-Anwendung: {str(e)}", "ERROR")
def test_threading_timeout():
"""Teste die Threading-basierte Timeout-Implementierung"""
log_message("Teste Threading-Timeout-Implementierung...")
def test_function():
"""Simuliere eine langsame Datenbankabfrage"""
time.sleep(2)
return "Erfolgreich"
try:
result = None
timeout_occurred = False
def run_test():
nonlocal result, timeout_occurred
try:
result = test_function()
except Exception as e:
log_message(f"Fehler in Test-Thread: {str(e)}", "ERROR")
timeout_occurred = True
# Starte Test in separatem Thread
thread = threading.Thread(target=run_test)
thread.daemon = True
thread.start()
thread.join(timeout=3) # 3 Sekunden Timeout
if thread.is_alive() or timeout_occurred or result is None:
log_message("❌ Threading-Timeout-Test fehlgeschlagen", "ERROR")
else:
log_message("✅ Threading-Timeout-Implementierung funktioniert")
except Exception as e:
log_message(f"❌ Fehler beim Threading-Test: {str(e)}", "ERROR")
def check_system_requirements():
"""Prüfe Systemanforderungen"""
log_message("Prüfe Systemanforderungen...")
# Python-Version
python_version = sys.version_info
log_message(f"Python-Version: {python_version.major}.{python_version.minor}.{python_version.micro}")
if python_version >= (3, 7):
log_message("✅ Python-Version ist kompatibel")
else:
log_message("❌ Python 3.7+ erforderlich", "ERROR")
# Erforderliche Module
required_modules = ['flask', 'requests', 'sqlite3', 'threading']
for module in required_modules:
try:
__import__(module)
log_message(f"✅ Modul {module} verfügbar")
except ImportError:
log_message(f"❌ Modul {module} nicht verfügbar", "ERROR")
# Betriebssystem
os_name = platform.system()
log_message(f"Betriebssystem: {os_name}")
if os_name == "Windows":
log_message("✅ Windows-spezifische Fixes wurden angewendet")
else:
log_message(" Unix-basiertes System erkannt")
def run_comprehensive_test():
"""Führe alle Tests aus"""
log_message("=== MYP Druckerverwaltung - Diagnose-Tool ===")
log_message("Starte umfassende Systemdiagnose...")
print()
# Systemanforderungen prüfen
check_system_requirements()
print()
# Threading-Test
test_threading_timeout()
print()
# Datenbanktest
test_database_connection()
print()
# Flask-App-Test
test_flask_app_status()
print()
# API-Tests
test_api_endpoints()
print()
# Netzwerk-Tests
test_network_connectivity()
print()
# Tapo-Verbindungen testen
test_tapo_connections()
print()
log_message("=== Diagnose abgeschlossen ===")
print()
# Empfehlungen
log_message("📋 Empfehlungen:")
log_message("1. Stelle sicher, dass die Flask-Anwendung läuft: python app.py")
log_message("2. Prüfe die Datenbankverbindung und Drucker-Konfiguration")
log_message("3. Teste die Netzwerkverbindung zu den Druckern")
log_message("4. Bei Windows: Threading-basierte Timeouts wurden implementiert")
log_message("5. Überprüfe die Logs in logs/app/ für weitere Details")
if __name__ == "__main__":
try:
run_comprehensive_test()
except KeyboardInterrupt:
log_message("Diagnose durch Benutzer abgebrochen", "INFO")
except Exception as e:
log_message(f"Unerwarteter Fehler: {str(e)}", "ERROR")
import traceback
traceback.print_exc()

View File

@@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""
Debug-Script für Gastanträge und Admin-Berechtigungen
"""
from models import get_cached_session, GuestRequest, User, UserPermission
def check_guest_requests():
"""Prüfe Gastanträge nach Status"""
print("=== GASTANTRÄGE STATUS ===")
with get_cached_session() as db:
pending = db.query(GuestRequest).filter_by(status='pending').count()
approved = db.query(GuestRequest).filter_by(status='approved').count()
rejected = db.query(GuestRequest).filter_by(status='rejected').count()
total = db.query(GuestRequest).count()
print(f"Gesamt: {total}")
print(f"Pending (Wird geprüft): {pending}")
print(f"Approved (Genehmigt): {approved}")
print(f"Rejected (Abgelehnt): {rejected}")
if pending == 0:
print("\n⚠️ PROBLEM: Keine Anträge mit Status 'pending' gefunden!")
print(" → Die Genehmigen/Ablehnen-Buttons werden nur bei Status 'pending' angezeigt")
# Erstelle einen Test-Antrag
print("\n🔧 Erstelle Test-Gastantrag...")
test_request = GuestRequest(
name="Test Admin",
email="admin@test.de",
reason="Test für Admin-Buttons",
duration_min=30,
status="pending"
)
db.add(test_request)
db.commit()
print(f"✅ Test-Antrag erstellt (ID: {test_request.id})")
else:
print(f"\n{pending} Anträge mit Status 'pending' gefunden")
# Zeige pending Anträge
pending_requests = db.query(GuestRequest).filter_by(status='pending').all()
for req in pending_requests:
print(f" ID {req.id}: {req.name} - {req.email}")
def check_admin_users():
"""Prüfe Admin-Benutzer und Berechtigungen"""
print("\n=== ADMIN-BENUTZER ===")
with get_cached_session() as db:
# Alle Admins
admins = db.query(User).filter_by(is_admin=True).all()
print(f"Admin-Benutzer: {len(admins)}")
for admin in admins:
print(f" {admin.username} (ID: {admin.id}) - Email: {admin.email}")
# Benutzer mit can_approve_jobs
users_with_approval = db.query(User).join(UserPermission).filter(
UserPermission.can_approve_jobs == True
).all()
print(f"\nBenutzer mit can_approve_jobs: {len(users_with_approval)}")
for user in users_with_approval:
print(f" {user.username} (ID: {user.id}) - Email: {user.email}")
if __name__ == "__main__":
try:
check_guest_requests()
check_admin_users()
print("\n=== LÖSUNG ===")
print("1. Gehen Sie zu: http://127.0.0.1:5000/requests/overview")
print("2. Öffnen Sie die Browser-Konsole (F12)")
print("3. Suchen Sie nach 'Admin-Berechtigungen:' in der Konsole")
print("4. Die Buttons sollten bei Anträgen mit Status 'pending' erscheinen")
except Exception as e:
print(f"❌ Fehler: {e}")
import traceback
traceback.print_exc()

199
utils/debug_login.py Normal file
View File

@@ -0,0 +1,199 @@
#!/usr/bin/env python3.11
"""
Debug-Script für Login-Probleme
Prüft Admin-Benutzer und Passwort-Hashing
"""
import os
import sys
from datetime import datetime
# Path für imports setzen
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from models import get_db_session, User, create_initial_admin
import bcrypt
def debug_admin_user():
"""Prüft den Admin-Benutzer in der Datenbank"""
print("=== DEBUG: Admin-Benutzer Analyse ===")
try:
db_session = get_db_session()
# Alle Benutzer anzeigen
users = db_session.query(User).all()
print(f"\n📊 Gefundene Benutzer: {len(users)}")
for user in users:
print(f"\n👤 Benutzer ID: {user.id}")
print(f" Email: {user.email}")
print(f" Username: {user.username}")
print(f" Name: {user.name}")
print(f" Role: {user.role}")
print(f" Is Admin: {user.is_admin}")
print(f" Active: {user.active}")
print(f" Password Hash: {user.password_hash[:20]}...")
print(f" Created: {user.created_at}")
# Admin-Benutzer spezifisch prüfen
admin_email = "admin@mercedes-benz.com"
admin_username = "admin"
print(f"\n🔍 Suche nach Admin-Benutzer:")
print(f" Email: {admin_email}")
print(f" Username: {admin_username}")
# Suche nach E-Mail
admin_by_email = db_session.query(User).filter(User.email == admin_email).first()
if admin_by_email:
print(f"✅ Admin gefunden per E-Mail: {admin_by_email.email}")
else:
print(f"❌ Kein Admin mit E-Mail {admin_email} gefunden")
# Suche nach Username
admin_by_username = db_session.query(User).filter(User.username == admin_username).first()
if admin_by_username:
print(f"✅ Admin gefunden per Username: {admin_by_username.username}")
else:
print(f"❌ Kein Admin mit Username {admin_username} gefunden")
db_session.close()
return admin_by_email or admin_by_username
except Exception as e:
print(f"❌ Fehler beim Datenbankzugriff: {str(e)}")
return None
def test_password_verification(user, test_password="744563017196A"):
"""Testet die Passwort-Verifikation"""
print(f"\n=== DEBUG: Passwort-Test ===")
print(f"Test-Passwort: {test_password}")
if not user:
print("❌ Kein Benutzer für Passwort-Test vorhanden")
return False
try:
# Manueller bcrypt-Test
password_bytes = test_password.encode('utf-8')
hash_bytes = user.password_hash.encode('utf-8')
print(f"Password Bytes: {password_bytes}")
print(f"Hash (first 50 chars): {user.password_hash[:50]}")
# Test mit bcrypt
is_valid_bcrypt = bcrypt.checkpw(password_bytes, hash_bytes)
print(f"✅ bcrypt.checkpw() Ergebnis: {is_valid_bcrypt}")
# Test mit User-Methode
is_valid_user_method = user.check_password(test_password)
print(f"✅ user.check_password() Ergebnis: {is_valid_user_method}")
return is_valid_bcrypt and is_valid_user_method
except Exception as e:
print(f"❌ Fehler beim Passwort-Test: {str(e)}")
return False
def recreate_admin():
"""Erstellt den Admin-Benutzer neu"""
print(f"\n=== DEBUG: Admin-Benutzer neu erstellen ===")
try:
success = create_initial_admin(
email="admin@mercedes-benz.com",
password="744563017196A",
name="System Administrator",
username="admin"
)
if success:
print("✅ Admin-Benutzer erfolgreich erstellt/aktualisiert")
else:
print("❌ Fehler beim Erstellen des Admin-Benutzers")
return success
except Exception as e:
print(f"❌ Fehler beim Erstellen des Admins: {str(e)}")
return False
def test_login_credentials():
"""Testet verschiedene Login-Kombinationen"""
print(f"\n=== DEBUG: Login-Kombinationen testen ===")
test_combinations = [
("admin@mercedes-benz.com", "744563017196A"),
("admin", "744563017196A"),
]
db_session = get_db_session()
for email_or_username, password in test_combinations:
print(f"\n🔍 Teste: {email_or_username} / {password}")
# Simuliere Login-Logic aus app.py
user = db_session.query(User).filter(
(User.username == email_or_username) | (User.email == email_or_username)
).first()
if user:
print(f"✅ Benutzer gefunden: {user.email} ({user.username})")
if user.check_password(password):
print(f"✅ Passwort korrekt!")
print(f"✅ Login wäre erfolgreich für: {user.email}")
else:
print(f"❌ Passwort falsch!")
else:
print(f"❌ Kein Benutzer mit {email_or_username} gefunden")
db_session.close()
def check_rate_limiting():
"""Prüft Rate Limiting Status"""
print(f"\n=== DEBUG: Rate Limiting Status ===")
# Simuliere localStorage-Werte (die wären normalerweise im Browser)
# In einer echten Anwendung würden diese aus der Datenbank oder einem Cache kommen
print(" Rate Limiting wird client-seitig im localStorage verwaltet")
print(" Überprüfen Sie Ihren Browser-localStorage:")
print(" - loginAttempts: sollte < 5 sein")
print(" - lastAttemptTime: Zeit des letzten Versuchs")
print("\n💡 Tipp: Öffnen Sie Entwicklertools > Application > Local Storage")
print(" und löschen Sie 'loginAttempts' und 'lastAttemptTime' Einträge")
if __name__ == "__main__":
print("🚀 MYP Login Debug-Tool gestartet")
print("=" * 50)
# 1. Admin-Benutzer prüfen
admin_user = debug_admin_user()
# 2. Passwort-Verifikation testen
if admin_user:
test_password_verification(admin_user)
# 3. Admin neu erstellen falls Probleme
if not admin_user:
print("\n⚠️ Kein Admin gefunden - erstelle neuen Admin...")
recreate_admin()
admin_user = debug_admin_user()
if admin_user:
test_password_verification(admin_user)
# 4. Login-Kombinationen testen
test_login_credentials()
# 5. Rate Limiting prüfen
check_rate_limiting()
print("\n" + "=" * 50)
print("🎯 Debug abgeschlossen!")
print("\n💡 Lösungsvorschläge:")
print("1. Verwenden Sie admin@mercedes-benz.com + 744563017196A")
print("2. Oder verwenden Sie admin + 744563017196A")
print("3. Löschen Sie Rate-Limiting im Browser localStorage")
print("4. Prüfen Sie die Browser-Konsole auf JavaScript-Fehler")

392
utils/debug_utils.py Normal file
View File

@@ -0,0 +1,392 @@
"""
Debug-Utilities für die MYP-Anwendung
Hilft bei der Diagnose und Behebung von Problemen in der Anwendung
"""
import os
import sys
import time
import json
import traceback
import inspect
from datetime import datetime
from functools import wraps
import logging
from typing import Any, Dict, List, Optional, Tuple, Union, Callable
from utils.logging_config import get_logger
# Logger für dieses Modul erstellen
debug_logger = get_logger("app")
# Konstanten für Formatierung
DEBUG_SEPARATOR = "=" * 60
DEBUG_SUBSEPARATOR = "-" * 60
class DebugLevel:
"""Enum für Debug-Level"""
MINIMAL = 0 # Nur kritische Fehler
NORMAL = 1 # Standardfehler und wichtige Informationen
VERBOSE = 2 # Ausführliche Informationen
TRACE = 3 # Vollständige Trace-Informationen
# Aktuelles Debug-Level (kann zur Laufzeit geändert werden)
CURRENT_DEBUG_LEVEL = DebugLevel.NORMAL
def set_debug_level(level: int):
"""Setzt das aktuelle Debug-Level für die Anwendung"""
global CURRENT_DEBUG_LEVEL
CURRENT_DEBUG_LEVEL = level
debug_logger.info(f"🔧 Debug-Level gesetzt auf: {level}")
def debug_print(message: str, level: int = DebugLevel.NORMAL):
"""
Gibt eine Debug-Nachricht aus, wenn das aktuelle Debug-Level mindestens dem angegebenen entspricht.
Args:
message: Die auszugebende Nachricht
level: Das erforderliche Debug-Level
"""
if level <= CURRENT_DEBUG_LEVEL:
# Aktuelle Funktion und Zeilennummer ermitteln
frame = inspect.currentframe().f_back
func_name = frame.f_code.co_name
file_name = os.path.basename(frame.f_code.co_filename)
line_no = frame.f_lineno
# Debug-Ausgabe formatieren
timestamp = datetime.now().strftime('%H:%M:%S.%f')[:-3]
debug_prefix = f"[DEBUG {timestamp} {file_name}:{func_name}:{line_no}]"
# Verschiedene Levels mit unterschiedlichen Emojis markieren
level_emoji = "🐞" if level >= DebugLevel.VERBOSE else "🔍"
# Ausgabe
print(f"{level_emoji} {debug_prefix} {message}")
def debug_dump(obj: Any, name: str = "Object", level: int = DebugLevel.VERBOSE):
"""
Gibt den Inhalt eines Objekts für Debug-Zwecke aus.
Args:
obj: Das zu untersuchende Objekt
name: Name des Objekts für die Ausgabe
level: Das erforderliche Debug-Level
"""
if level > CURRENT_DEBUG_LEVEL:
return
debug_print(f"📦 Debug-Dump von {name}:", level)
try:
# Für dict-ähnliche Objekte
if hasattr(obj, 'items'):
for k, v in obj.items():
debug_print(f" {k}: {v}", level)
# Für list/tuple-ähnliche Objekte
elif hasattr(obj, '__iter__') and not isinstance(obj, (str, bytes)):
for i, item in enumerate(obj):
debug_print(f" [{i}]: {item}", level)
# Für einfache Objekte
else:
# Versuche als JSON zu formatieren
try:
json_str = json.dumps(obj, indent=2, default=str)
debug_print(f" {json_str}", level)
except:
# Fallback auf einfache String-Darstellung
debug_print(f" {obj}", level)
except Exception as e:
debug_print(f" Fehler beim Dump: {e}", level)
def debug_trace(message: str = "Execution trace"):
"""
Gibt einen vollständigen Stack-Trace für Debug-Zwecke aus.
Args:
message: Begleitende Nachricht für den Trace
"""
if CURRENT_DEBUG_LEVEL < DebugLevel.TRACE:
return
debug_print(f"🔬 TRACE: {message}", DebugLevel.TRACE)
debug_print(DEBUG_SUBSEPARATOR, DebugLevel.TRACE)
# Stack-Trace sammeln
stack = traceback.extract_stack()
# Letzten Frame (diese Funktion) entfernen
stack = stack[:-1]
for frame in stack:
file_name = os.path.basename(frame.filename)
debug_print(f" {file_name}:{frame.lineno} - {frame.name}", DebugLevel.TRACE)
debug_print(DEBUG_SUBSEPARATOR, DebugLevel.TRACE)
def debug_function(func=None, level: int = DebugLevel.NORMAL):
"""
Dekorator, der Eingang und Ausgang einer Funktion sowie die Ausführungszeit loggt.
Args:
func: Die zu dekorierende Funktion
level: Das erforderliche Debug-Level
Returns:
Dekorierte Funktion
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if CURRENT_DEBUG_LEVEL < level:
return fn(*args, **kwargs)
# Funktionsaufruf loggen
arg_str = ", ".join([
*[str(arg) for arg in args],
*[f"{k}={v}" for k, v in kwargs.items()]
])
if len(arg_str) > 100:
arg_str = arg_str[:97] + "..."
debug_print(f"▶️ Starte {fn.__name__}({arg_str})", level)
# Ausführungszeit messen
start_time = time.time()
try:
# Funktion ausführen
result = fn(*args, **kwargs)
# Ausführungszeit und Ergebnis loggen
end_time = time.time()
duration = (end_time - start_time) * 1000
result_str = str(result)
if len(result_str) > 100:
result_str = result_str[:97] + "..."
duration_emoji = "⏱️" if duration < 1000 else ""
debug_print(f"{duration_emoji} {fn.__name__} beendet in {duration:.2f} ms", level)
debug_print(f"📤 Ergebnis: {result_str}", level)
return result
except Exception as e:
# Fehler loggen
end_time = time.time()
duration = (end_time - start_time) * 1000
debug_print(f"{fn.__name__} fehlgeschlagen nach {duration:.2f} ms: {str(e)}", level)
# Stack-Trace nur bei hohem Debug-Level
if CURRENT_DEBUG_LEVEL >= DebugLevel.VERBOSE:
debug_print(f"🔬 Stack-Trace für {fn.__name__}:", DebugLevel.VERBOSE)
traceback_str = traceback.format_exc()
for line in traceback_str.split('\n'):
debug_print(f" {line}", DebugLevel.VERBOSE)
# Exception weiterleiten
raise
return wrapper
if func:
return decorator(func)
return decorator
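# Anwendungsbeispiel (Skizze; lade_konfiguration ist nur ein hypothetischer Platzhalter):
#   @debug_function(level=DebugLevel.VERBOSE)
#   def lade_konfiguration(pfad):
#       ...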
def debug_timer(name: str = None, level: int = DebugLevel.NORMAL):
"""
Kontext-Manager, der die Ausführungszeit eines Code-Blocks misst.
Args:
name: Name des Code-Blocks für die Ausgabe
level: Das erforderliche Debug-Level
Beispiel:
with debug_timer("Datenbankabfrage"):
result = db.execute_query()
"""
class Timer:
def __init__(self, block_name, debug_level):
self.block_name = block_name
self.debug_level = debug_level
self.start_time = None
def __enter__(self):
if CURRENT_DEBUG_LEVEL >= self.debug_level:
self.start_time = time.time()
block_name = self.block_name or "Code-Block"
debug_print(f"⏱️ Starte Timer für: {block_name}", self.debug_level)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if CURRENT_DEBUG_LEVEL >= self.debug_level and self.start_time:
end_time = time.time()
duration = (end_time - self.start_time) * 1000
block_name = self.block_name or "Code-Block"
if exc_type:
debug_print(f"{block_name} fehlgeschlagen nach {duration:.2f} ms: {exc_val}", self.debug_level)
else:
duration_emoji = "⏱️" if duration < 1000 else ""
debug_print(f"{duration_emoji} {block_name} beendet in {duration:.2f} ms", self.debug_level)
return Timer(name, level)
def debug_exception_handler(logger: Optional[logging.Logger] = None):
"""
Dekorator, der Ausnahmen abfängt und Details loggt.
Args:
logger: Logger-Instanz für die Protokollierung (optional)
Returns:
Dekorierte Funktion
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
# Logger verwenden oder Fallback auf Standardausgabe
log = logger or debug_logger
# Ausnahmedetails loggen
log.error(f"❌ Ausnahme in {func.__name__}: {str(e)}")
# Stack-Trace bei hohem Debug-Level
if CURRENT_DEBUG_LEVEL >= DebugLevel.VERBOSE:
log.error("🔬 Stack-Trace:")
traceback_str = traceback.format_exc()
for line in traceback_str.split('\n'):
if line.strip():
log.error(f" {line}")
# Ausnahme weiterleiten
raise
return wrapper
return decorator
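# Anwendungsbeispiel (Skizze, Logger wie im restlichen Projekt über get_logger bezogen):
#   @debug_exception_handler(logger=get_logger("app"))
#   def kritische_operation():
#       ...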
# Konsolen-Befehle für interaktives Debugging
def dump_all_loggers():
"""Gibt Informationen über alle konfigurierten Logger aus."""
import logging
debug_print("📋 Konfigurierte Logger:", DebugLevel.VERBOSE)
for name, logger in logging.Logger.manager.loggerDict.items():
if isinstance(logger, logging.Logger):
level_name = logging.getLevelName(logger.level)
handlers = len(logger.handlers)
debug_print(f" {name}: Level={level_name}, Handlers={handlers}", DebugLevel.VERBOSE)
def dump_environment():
"""Gibt Umgebungsvariablen und Systeminformationen aus."""
debug_print("🌐 Umgebungsinformationen:", DebugLevel.VERBOSE)
debug_print(f" Python: {sys.version}", DebugLevel.VERBOSE)
debug_print(f" Plattform: {sys.platform}", DebugLevel.VERBOSE)
debug_print(f" Arbeitsverzeichnis: {os.getcwd()}", DebugLevel.VERBOSE)
debug_print("🔑 Umgebungsvariablen:", DebugLevel.VERBOSE)
for key, value in sorted(os.environ.items()):
# Passwörter und Secrets ausblenden
if any(secret_key in key.lower() for secret_key in ['key', 'pass', 'secret', 'token', 'pwd']):
value = "********"
debug_print(f" {key}={value}", DebugLevel.VERBOSE)
def memory_usage(obj: Any = None) -> Dict[str, Any]:
"""
Gibt Informationen über den Speicherverbrauch zurück.
Args:
obj: Optional ein Objekt, dessen Größe gemessen werden soll
Returns:
Dict mit Speicherverbrauchsinformationen
"""
import psutil
process = psutil.Process(os.getpid())
memory_info = process.memory_info()
result = {
"rss": memory_info.rss / (1024 * 1024), # MB
"vms": memory_info.vms / (1024 * 1024), # MB
"percent": process.memory_percent(),
}
if obj is not None:
try:
result["object_size"] = sys.getsizeof(obj) / 1024  # KB
except Exception:
result["object_size"] = "Nicht messbar"
return result
def log_memory_usage(obj_name: str = "Anwendung", obj: Any = None, logger: Optional[logging.Logger] = None):
"""
Loggt den aktuellen Speicherverbrauch.
Args:
obj_name: Name des Objekts oder der Anwendung
obj: Optional ein Objekt, dessen Größe gemessen werden soll
logger: Logger-Instanz für die Protokollierung (optional)
"""
log = logger or debug_logger
memory = memory_usage(obj)
log.info(f"📊 Speicherverbrauch von {obj_name}:")
log.info(f" RSS: {memory['rss']:.2f} MB")
log.info(f" VMS: {memory['vms']:.2f} MB")
log.info(f" Prozent: {memory['percent']:.2f}%")
if 'object_size' in memory:
if isinstance(memory['object_size'], (int, float)):
log.info(f" Objektgröße: {memory['object_size']:.2f} KB")
else:
log.info(f" Objektgröße: {memory['object_size']}")
def profile_function(func):
"""
Dekorator, der eine Funktion profiliert und Statistiken ausgibt.
Args:
func: Die zu profilierende Funktion
Returns:
Dekorierte Funktion
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
import cProfile
import pstats
import io
# Profiler erstellen und Funktion ausführen
profiler = cProfile.Profile()
profiler.enable()
result = func(*args, **kwargs)
profiler.disable()
# Statistiken sammeln
s = io.StringIO()
ps = pstats.Stats(profiler, stream=s).sort_stats('cumulative')
ps.print_stats(20) # Top 20 Zeilen
# Statistiken ausgeben
debug_print(f"📊 Profiling-Ergebnis für {func.__name__}:", DebugLevel.VERBOSE)
for line in s.getvalue().split('\n'):
if line.strip():
debug_print(f" {line}", DebugLevel.VERBOSE)
return result
except ImportError:
debug_print(f"⚠️ cProfile nicht verfügbar, Funktion wird ohne Profiling ausgeführt", DebugLevel.NORMAL)
return func(*args, **kwargs)
return wrapper
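# Anwendungsbeispiel (Skizze): die Top-20-Hotspots einer Funktion ermitteln:
#   @profile_function
#   def berechne_statistiken():
#       ...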

1479
utils/drag_drop_system.py Normal file

File diff suppressed because it is too large Load Diff

175
utils/email_notification.py Normal file
View File

@@ -0,0 +1,175 @@
"""
Offline-kompatible E-Mail-Benachrichtigung für MYP-System
========================================================
Da das System im Produktionsbetrieb offline läuft, werden alle E-Mail-Benachrichtigungen
nur geloggt, aber nicht tatsächlich versendet.
"""
import logging
from datetime import datetime
from typing import Optional, Dict, Any
from utils.logging_config import get_logger
logger = get_logger("email_notification")
class OfflineEmailNotification:
"""
Offline-E-Mail-Benachrichtigung, die nur Logs erstellt.
Simuliert E-Mail-Versand für Offline-Betrieb.
"""
def __init__(self):
self.enabled = False # Immer deaktiviert im Offline-Modus
logger.info("📧 Offline-E-Mail-Benachrichtigung initialisiert (kein echter E-Mail-Versand)")
def send_email(self, to: str, subject: str, body: str, **kwargs) -> bool:
"""
Simuliert E-Mail-Versand durch Logging.
Args:
to: E-Mail-Empfänger
subject: E-Mail-Betreff
body: E-Mail-Inhalt
**kwargs: Zusätzliche Parameter
Returns:
bool: Immer True (Simulation erfolgreich)
"""
logger.info(f"📧 [OFFLINE-SIMULATION] E-Mail würde versendet werden:")
logger.info(f" 📮 An: {to}")
logger.info(f" 📋 Betreff: {subject}")
logger.info(f" 📝 Inhalt: {body[:100]}{'...' if len(body) > 100 else ''}")
logger.info(f" 🕒 Zeitpunkt: {datetime.now().strftime('%d.%m.%Y %H:%M:%S')}")
if kwargs:
logger.info(f" ⚙️ Zusätzliche Parameter: {kwargs}")
return True
def send_notification_email(self, recipient: str, notification_type: str,
data: Dict[str, Any]) -> bool:
"""
Sendet Benachrichtigungs-E-Mail (Offline-Simulation).
Args:
recipient: E-Mail-Empfänger
notification_type: Art der Benachrichtigung
data: Daten für die Benachrichtigung
Returns:
bool: Immer True (Simulation erfolgreich)
"""
subject = f"MYP-Benachrichtigung: {notification_type}"
body = f"Benachrichtigung vom MYP-System:\n\n{data}"
return self.send_email(recipient, subject, body, notification_type=notification_type)
def send_maintenance_notification(self, recipient: str, task_title: str,
task_description: str) -> bool:
"""
Sendet Wartungs-Benachrichtigung (Offline-Simulation).
Args:
recipient: E-Mail-Empfänger
task_title: Titel der Wartungsaufgabe
task_description: Beschreibung der Wartungsaufgabe
Returns:
bool: Immer True (Simulation erfolgreich)
"""
subject = f"MYP-Wartungsaufgabe: {task_title}"
body = f"""
Neue Wartungsaufgabe im MYP-System:
Titel: {task_title}
Beschreibung: {task_description}
Erstellt: {datetime.now().strftime('%d.%m.%Y %H:%M:%S')}
Bitte loggen Sie sich in das MYP-System ein, um weitere Details zu sehen.
"""
return self.send_email(recipient, subject, body, task_type="maintenance")
# Globale Instanz für einfache Verwendung
email_notifier = OfflineEmailNotification()
def send_email_notification(recipient: str, subject: str, body: str, **kwargs) -> bool:
"""
Haupt-Funktion für E-Mail-Versand (Offline-kompatibel).
Args:
recipient: E-Mail-Empfänger
subject: E-Mail-Betreff
body: E-Mail-Inhalt
**kwargs: Zusätzliche Parameter
Returns:
bool: True wenn "erfolgreich" (geloggt)
"""
return email_notifier.send_email(recipient, subject, body, **kwargs)
def send_maintenance_email(recipient: str, task_title: str, task_description: str) -> bool:
"""
Sendet Wartungs-E-Mail (Offline-kompatibel).
Args:
recipient: E-Mail-Empfänger
task_title: Titel der Wartungsaufgabe
task_description: Beschreibung der Wartungsaufgabe
Returns:
bool: True wenn "erfolgreich" (geloggt)
"""
return email_notifier.send_maintenance_notification(recipient, task_title, task_description)
def send_guest_approval_email(recipient: str, otp_code: str, expires_at: str) -> bool:
"""
Sendet Gastauftrags-Genehmigung-E-Mail (Offline-kompatibel).
Args:
recipient: E-Mail-Empfänger
otp_code: OTP-Code für den Gastauftrag
expires_at: Ablaufzeit des OTP-Codes
Returns:
bool: True wenn "erfolgreich" (geloggt)
"""
subject = "MYP-Gastauftrag genehmigt"
body = f"""
Ihr Gastauftrag wurde genehmigt!
OTP-Code: {otp_code}
Gültig bis: {expires_at}
Bitte verwenden Sie diesen Code am MYP-Terminal, um Ihren Druckauftrag zu starten.
"""
return email_notifier.send_email(recipient, subject, body,
otp_code=otp_code, expires_at=expires_at)
def send_guest_rejection_email(recipient: str, reason: str) -> bool:
"""
Sendet Gastauftrags-Ablehnungs-E-Mail (Offline-kompatibel).
Args:
recipient: E-Mail-Empfänger
reason: Grund für die Ablehnung
Returns:
bool: True wenn "erfolgreich" (geloggt)
"""
subject = "MYP-Gastauftrag abgelehnt"
body = f"""
Ihr Gastauftrag wurde leider abgelehnt.
Grund: {reason}
Bei Fragen wenden Sie sich bitte an das MYP-Team.
"""
return email_notifier.send_email(recipient, subject, body, rejection_reason=reason)
# Für Backward-Kompatibilität
send_notification = send_email_notification
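# Beispielnutzung (Skizze, alle Werte rein illustrativ):
#   send_guest_approval_email("gast@example.com", otp_code="123456",
#                             expires_at="31.12.2025 23:59")
# Im Offline-Betrieb landet der Inhalt ausschließlich im Logger "email_notification".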

641
utils/error_recovery.py Normal file
View File

@@ -0,0 +1,641 @@
#!/usr/bin/env python3
"""
Robustes Error-Recovery-System für wartungsfreien Produktionsbetrieb
Automatische Fehlererkennung, -behebung und -prävention
"""
import os
import sys
import time
import threading
import traceback
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Callable, Any
from dataclasses import dataclass, field
from enum import Enum
import logging
import json
import subprocess
import psutil
from contextlib import contextmanager
import signal
# Logging-Setup
try:
from utils.logging_config import get_logger
recovery_logger = get_logger("error_recovery")
except ImportError:
logging.basicConfig(level=logging.INFO)
recovery_logger = logging.getLogger("error_recovery")
class ErrorSeverity(Enum):
"""Schweregrade von Fehlern"""
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
CRITICAL = "critical"
class RecoveryAction(Enum):
"""Verfügbare Recovery-Aktionen"""
LOG_ONLY = "log_only"
RESTART_SERVICE = "restart_service"
RESTART_COMPONENT = "restart_component"
CLEAR_CACHE = "clear_cache"
RESET_DATABASE = "reset_database"
RESTART_SYSTEM = "restart_system"
EMERGENCY_STOP = "emergency_stop"
@dataclass
class ErrorPattern:
"""Definiert ein Fehlermuster und zugehörige Recovery-Aktionen"""
name: str
patterns: List[str] # Regex-Patterns für Fehlererkennung
severity: ErrorSeverity
actions: List[RecoveryAction]
max_occurrences: int = 3 # Maximale Anzahl vor Eskalation
time_window: int = 300 # Zeitfenster in Sekunden
escalation_actions: List[RecoveryAction] = field(default_factory=list)
description: str = ""
@dataclass
class ErrorOccurrence:
"""Einzelnes Auftreten eines Fehlers"""
timestamp: datetime
pattern_name: str
error_message: str
severity: ErrorSeverity
context: Dict[str, Any] = field(default_factory=dict)
recovery_attempted: List[RecoveryAction] = field(default_factory=list)
recovery_successful: bool = False
class ErrorRecoveryManager:
"""
Zentraler Manager für automatische Fehlererkennung und -behebung.
Überwacht kontinuierlich das System und führt automatische Recovery durch.
"""
def __init__(self):
self.is_active = False
self.error_patterns: Dict[str, ErrorPattern] = {}
self.error_history: List[ErrorOccurrence] = []
self.recovery_handlers: Dict[RecoveryAction, Callable] = {}
self.monitoring_thread: Optional[threading.Thread] = None
self.lock = threading.Lock()
# Konfiguration
self.config = {
"check_interval": 30, # Sekunden
"max_history_size": 1000,
"auto_recovery_enabled": True,
"critical_error_threshold": 5,
"system_restart_threshold": 10,
"log_file_paths": [
"logs/app/app.log",
"logs/errors/errors.log",
"logs/database/database.log"
]
}
# Initialisiere Standard-Fehlermuster
self._init_default_patterns()
# Initialisiere Recovery-Handler
self._init_recovery_handlers()
recovery_logger.info("🛡️ Error-Recovery-Manager initialisiert")
def _init_default_patterns(self):
"""Initialisiert Standard-Fehlermuster für häufige Probleme"""
patterns = [
# Datenbank-Fehler
ErrorPattern(
name="database_lock",
patterns=[
r"database is locked",
r"SQLite.*locked",
r"OperationalError.*locked"
],
severity=ErrorSeverity.HIGH,
actions=[RecoveryAction.RESET_DATABASE],
max_occurrences=3,
escalation_actions=[RecoveryAction.RESTART_SERVICE],
description="Datenbank-Sperrung"
),
# Memory-Fehler
ErrorPattern(
name="memory_exhausted",
patterns=[
r"MemoryError",
r"Out of memory",
r"Cannot allocate memory"
],
severity=ErrorSeverity.CRITICAL,
actions=[RecoveryAction.CLEAR_CACHE, RecoveryAction.RESTART_SERVICE],
max_occurrences=2,
escalation_actions=[RecoveryAction.RESTART_SYSTEM],
description="Speicher erschöpft"
),
# Network-Fehler
ErrorPattern(
name="connection_error",
patterns=[
r"ConnectionError",
r"Network is unreachable",
r"Connection refused"
],
severity=ErrorSeverity.MEDIUM,
actions=[RecoveryAction.RESTART_COMPONENT],
max_occurrences=5,
escalation_actions=[RecoveryAction.RESTART_SERVICE],
description="Netzwerk-Verbindungsfehler"
),
# Kiosk-Fehler
ErrorPattern(
name="kiosk_crash",
patterns=[
r"chromium.*crashed",
r"firefox.*crashed",
r"X11.*error",
r"Display.*not found"
],
severity=ErrorSeverity.HIGH,
actions=[RecoveryAction.RESTART_COMPONENT],
max_occurrences=3,
escalation_actions=[RecoveryAction.RESTART_SYSTEM],
description="Kiosk-Display Fehler"
),
# Service-Fehler
ErrorPattern(
name="service_failure",
patterns=[
r"systemctl.*failed",
r"Service.*not found",
r"Failed to start"
],
severity=ErrorSeverity.HIGH,
actions=[RecoveryAction.RESTART_SERVICE],
max_occurrences=3,
escalation_actions=[RecoveryAction.RESTART_SYSTEM],
description="System-Service Fehler"
),
# Disk-Fehler
ErrorPattern(
name="disk_full",
patterns=[
r"No space left on device",
r"Disk full",
r"OSError.*28"
],
severity=ErrorSeverity.CRITICAL,
actions=[RecoveryAction.CLEAR_CACHE],
max_occurrences=1,
escalation_actions=[RecoveryAction.EMERGENCY_STOP],
description="Festplatte voll"
),
# Flask-Fehler
ErrorPattern(
name="flask_error",
patterns=[
r"Internal Server Error",
r"500 Internal Server Error",
r"Application failed to start"
],
severity=ErrorSeverity.HIGH,
actions=[RecoveryAction.RESTART_SERVICE],
max_occurrences=3,
escalation_actions=[RecoveryAction.RESTART_SYSTEM],
description="Flask-Anwendungsfehler"
            ),
            # System-Überlastung (wird von _check_system_metrics gemeldet;
            # ohne dieses Muster liefe die Meldung dort ins Leere)
            ErrorPattern(
                name="system_overload",
                patterns=[
                    r"System-Last kritisch"
                ],
                severity=ErrorSeverity.HIGH,
                actions=[RecoveryAction.CLEAR_CACHE],
                max_occurrences=3,
                escalation_actions=[RecoveryAction.RESTART_SERVICE],
                description="System-Überlastung"
            )
        ]
for pattern in patterns:
self.error_patterns[pattern.name] = pattern
def _init_recovery_handlers(self):
"""Initialisiert Handler für Recovery-Aktionen"""
self.recovery_handlers = {
RecoveryAction.LOG_ONLY: self._handle_log_only,
RecoveryAction.RESTART_SERVICE: self._handle_restart_service,
RecoveryAction.RESTART_COMPONENT: self._handle_restart_component,
RecoveryAction.CLEAR_CACHE: self._handle_clear_cache,
RecoveryAction.RESET_DATABASE: self._handle_reset_database,
RecoveryAction.RESTART_SYSTEM: self._handle_restart_system,
RecoveryAction.EMERGENCY_STOP: self._handle_emergency_stop
}
def start_monitoring(self):
"""Startet kontinuierliche Überwachung"""
if self.is_active:
recovery_logger.warning("Monitoring bereits aktiv")
return
self.is_active = True
self.monitoring_thread = threading.Thread(
target=self._monitor_loop,
daemon=True,
name="ErrorRecoveryMonitor"
)
self.monitoring_thread.start()
recovery_logger.info("🔍 Error-Monitoring gestartet")
def stop_monitoring(self):
"""Stoppt Überwachung"""
self.is_active = False
if self.monitoring_thread and self.monitoring_thread.is_alive():
self.monitoring_thread.join(timeout=5)
recovery_logger.info("🛑 Error-Monitoring gestoppt")
def _monitor_loop(self):
"""Hauptschleife für kontinuierliche Überwachung"""
while self.is_active:
try:
# Log-Dateien prüfen
self._check_log_files()
# System-Metriken prüfen
self._check_system_metrics()
# Service-Status prüfen
self._check_service_status()
# Alte Einträge bereinigen
self._cleanup_old_entries()
time.sleep(self.config["check_interval"])
except Exception as e:
recovery_logger.error(f"Fehler in Monitor-Loop: {e}")
time.sleep(5) # Kurze Pause bei Fehlern
def _check_log_files(self):
"""Prüft Log-Dateien auf Fehlermuster"""
for log_path in self.config["log_file_paths"]:
try:
if not os.path.exists(log_path):
continue
                # Vereinfachter Ansatz: pro Durchlauf werden die letzten 1000 Zeilen
                # erneut geprüft (kein echtes Tail-Following)
                with open(log_path, 'r', encoding='utf-8') as f:
                    lines = f.readlines()
                    recent_lines = lines[-1000:] if len(lines) > 1000 else lines
for line in recent_lines:
self._analyze_log_line(line, log_path)
except Exception as e:
recovery_logger.debug(f"Fehler beim Lesen von {log_path}: {e}")
def _analyze_log_line(self, line: str, source: str):
"""Analysiert einzelne Log-Zeile auf Fehlermuster"""
for pattern_name, pattern in self.error_patterns.items():
for regex in pattern.patterns:
try:
if re.search(regex, line, re.IGNORECASE):
self._handle_error_detection(
pattern_name=pattern_name,
error_message=line.strip(),
context={"source": source, "pattern": regex}
)
break
except Exception as e:
recovery_logger.debug(f"Regex-Fehler für {regex}: {e}")
def _check_system_metrics(self):
"""Prüft System-Metriken auf kritische Werte"""
try:
# Memory-Check
memory = psutil.virtual_memory()
if memory.percent > 95:
self._handle_error_detection(
pattern_name="memory_exhausted",
error_message=f"Speicherverbrauch kritisch: {memory.percent:.1f}%",
context={"memory_percent": memory.percent}
)
# Disk-Check
disk = psutil.disk_usage('/')
if disk.percent > 98:
self._handle_error_detection(
pattern_name="disk_full",
error_message=f"Festplatte fast voll: {disk.percent:.1f}%",
context={"disk_percent": disk.percent}
)
# Load-Check
if hasattr(psutil, 'getloadavg'):
load_avg = psutil.getloadavg()[0]
if load_avg > 5.0: # Sehr hohe Last
self._handle_error_detection(
pattern_name="system_overload",
error_message=f"System-Last kritisch: {load_avg:.2f}",
context={"load_average": load_avg}
)
except Exception as e:
recovery_logger.debug(f"System-Metrics-Check fehlgeschlagen: {e}")
def _check_service_status(self):
"""Prüft Status wichtiger Services"""
services = ["myp-https.service", "myp-kiosk.service"]
for service in services:
try:
result = subprocess.run(
["sudo", "systemctl", "is-active", service],
capture_output=True, text=True, timeout=10
)
if result.returncode != 0:
self._handle_error_detection(
pattern_name="service_failure",
error_message=f"Service {service} nicht aktiv: {result.stdout.strip()}",
context={"service": service, "status": result.stdout.strip()}
)
except Exception as e:
recovery_logger.debug(f"Service-Check für {service} fehlgeschlagen: {e}")
def _handle_error_detection(self, pattern_name: str, error_message: str, context: Dict[str, Any] = None):
"""Behandelt erkannten Fehler und startet Recovery"""
with self.lock:
if pattern_name not in self.error_patterns:
recovery_logger.warning(f"Unbekanntes Fehlermuster: {pattern_name}")
return
pattern = self.error_patterns[pattern_name]
# Prüfe ob bereits kürzlich aufgetreten
recent_occurrences = self._count_recent_occurrences(pattern_name, pattern.time_window)
# Erstelle Error-Occurrence
occurrence = ErrorOccurrence(
timestamp=datetime.now(),
pattern_name=pattern_name,
error_message=error_message,
severity=pattern.severity,
context=context or {}
)
self.error_history.append(occurrence)
recovery_logger.warning(f"🚨 Fehler erkannt: {pattern_name} - {error_message}")
# Entscheide über Recovery-Aktionen
if recent_occurrences >= pattern.max_occurrences:
# Eskalation
actions = pattern.escalation_actions
recovery_logger.error(f"🔥 Eskalation für {pattern_name}: {recent_occurrences} Vorkommen in {pattern.time_window}s")
else:
# Normale Recovery
actions = pattern.actions
# Führe Recovery-Aktionen aus
if self.config["auto_recovery_enabled"]:
self._execute_recovery_actions(occurrence, actions)
def _count_recent_occurrences(self, pattern_name: str, time_window: int) -> int:
"""Zählt kürzliche Vorkommen eines Fehlermusters"""
cutoff_time = datetime.now() - timedelta(seconds=time_window)
return sum(1 for err in self.error_history
if err.pattern_name == pattern_name and err.timestamp > cutoff_time)
def _execute_recovery_actions(self, occurrence: ErrorOccurrence, actions: List[RecoveryAction]):
"""Führt Recovery-Aktionen aus"""
for action in actions:
try:
recovery_logger.info(f"🔧 Führe Recovery-Aktion aus: {action.value}")
handler = self.recovery_handlers.get(action)
if handler:
success = handler(occurrence)
occurrence.recovery_attempted.append(action)
if success:
occurrence.recovery_successful = True
recovery_logger.info(f"✅ Recovery erfolgreich: {action.value}")
break # Stoppe bei erfolgreicher Recovery
else:
recovery_logger.warning(f"❌ Recovery fehlgeschlagen: {action.value}")
else:
recovery_logger.error(f"Kein Handler für Recovery-Aktion: {action.value}")
except Exception as e:
recovery_logger.error(f"Fehler bei Recovery-Aktion {action.value}: {e}")
def _handle_log_only(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: Nur Logging, keine weitere Aktion"""
recovery_logger.info(f"📝 Log-Only für: {occurrence.error_message}")
return True
def _handle_restart_service(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: Service-Neustart"""
try:
from utils.system_control import get_system_control_manager, SystemOperation
manager = get_system_control_manager()
result = manager.schedule_operation(
SystemOperation.SERVICE_RESTART,
delay_seconds=5,
reason=f"Automatische Recovery für: {occurrence.pattern_name}"
)
return result.get("success", False)
except Exception as e:
recovery_logger.error(f"Service-Neustart fehlgeschlagen: {e}")
return False
def _handle_restart_component(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: Komponenten-Neustart (z.B. Kiosk)"""
try:
from utils.system_control import get_system_control_manager, SystemOperation
manager = get_system_control_manager()
result = manager.schedule_operation(
SystemOperation.KIOSK_RESTART,
delay_seconds=5,
reason=f"Automatische Recovery für: {occurrence.pattern_name}"
)
return result.get("success", False)
except Exception as e:
recovery_logger.error(f"Komponenten-Neustart fehlgeschlagen: {e}")
return False
def _handle_clear_cache(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: Cache leeren"""
try:
# App-Caches leeren
from app import clear_user_cache, clear_printer_status_cache
clear_user_cache()
clear_printer_status_cache()
# System-Cache leeren
if os.name != 'nt':
subprocess.run(["sudo", "sync"], timeout=10)
return True
except Exception as e:
recovery_logger.error(f"Cache-Clearing fehlgeschlagen: {e}")
return False
def _handle_reset_database(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: Datenbank-Reset"""
try:
from utils.database_cleanup import safe_database_cleanup
result = safe_database_cleanup(force_mode_switch=True)
return result.get("success", False)
except Exception as e:
recovery_logger.error(f"Database-Reset fehlgeschlagen: {e}")
return False
def _handle_restart_system(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: System-Neustart"""
try:
from utils.system_control import schedule_system_restart
result = schedule_system_restart(
delay_seconds=60,
reason=f"Automatische Recovery für kritischen Fehler: {occurrence.pattern_name}",
force=True
)
return result.get("success", False)
except Exception as e:
recovery_logger.error(f"System-Neustart fehlgeschlagen: {e}")
return False
def _handle_emergency_stop(self, occurrence: ErrorOccurrence) -> bool:
"""Handler: Notfall-Stopp"""
try:
recovery_logger.critical(f"🚨 NOTFALL-STOPP: {occurrence.error_message}")
# Führe sofortigen Shutdown durch
from utils.shutdown_manager import get_shutdown_manager
shutdown_manager = get_shutdown_manager()
shutdown_manager.force_shutdown(1)
return True
except Exception as e:
recovery_logger.error(f"Notfall-Stopp fehlgeschlagen: {e}")
return False
def _cleanup_old_entries(self):
"""Bereinigt alte Error-History-Einträge"""
with self.lock:
if len(self.error_history) > self.config["max_history_size"]:
self.error_history = self.error_history[-self.config["max_history_size"]:]
def get_error_statistics(self) -> Dict[str, Any]:
"""Gibt Fehler-Statistiken zurück"""
with self.lock:
total_errors = len(self.error_history)
# Fehler nach Schweregrad
by_severity = {}
for severity in ErrorSeverity:
by_severity[severity.value] = sum(1 for err in self.error_history
if err.severity == severity)
# Fehler nach Pattern
by_pattern = {}
for pattern_name in self.error_patterns.keys():
by_pattern[pattern_name] = sum(1 for err in self.error_history
if err.pattern_name == pattern_name)
# Letzten 24h
last_24h = datetime.now() - timedelta(hours=24)
recent_errors = sum(1 for err in self.error_history
if err.timestamp > last_24h)
# Recovery-Erfolgsrate
attempted_recoveries = sum(1 for err in self.error_history
if err.recovery_attempted)
successful_recoveries = sum(1 for err in self.error_history
if err.recovery_successful)
success_rate = (successful_recoveries / attempted_recoveries * 100) if attempted_recoveries > 0 else 0
return {
"total_errors": total_errors,
"errors_last_24h": recent_errors,
"by_severity": by_severity,
"by_pattern": by_pattern,
"recovery_success_rate": round(success_rate, 1),
"monitoring_active": self.is_active,
"auto_recovery_enabled": self.config["auto_recovery_enabled"]
}
def get_recent_errors(self, limit: int = 50) -> List[Dict[str, Any]]:
"""Gibt kürzliche Fehler zurück"""
with self.lock:
recent = self.error_history[-limit:] if limit else self.error_history
return [{
"timestamp": err.timestamp.isoformat(),
"pattern_name": err.pattern_name,
"error_message": err.error_message,
"severity": err.severity.value,
"context": err.context,
"recovery_attempted": [action.value for action in err.recovery_attempted],
"recovery_successful": err.recovery_successful
} for err in recent]
# Globaler Error-Recovery-Manager
_error_recovery_manager: Optional[ErrorRecoveryManager] = None
_recovery_lock = threading.Lock()
def get_error_recovery_manager() -> ErrorRecoveryManager:
"""
Singleton-Pattern für globalen Error-Recovery-Manager.
Returns:
ErrorRecoveryManager: Globaler Error-Recovery-Manager
"""
global _error_recovery_manager
with _recovery_lock:
if _error_recovery_manager is None:
_error_recovery_manager = ErrorRecoveryManager()
return _error_recovery_manager
def start_error_monitoring():
"""Startet Error-Monitoring"""
manager = get_error_recovery_manager()
manager.start_monitoring()
def stop_error_monitoring():
"""Stoppt Error-Monitoring"""
manager = get_error_recovery_manager()
manager.stop_monitoring()
def force_error_check(log_message: str = None):
"""Erzwingt manuelle Fehlerprüfung"""
if log_message:
manager = get_error_recovery_manager()
manager._analyze_log_line(log_message, "manual_check")
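
# Minimal-Nutzungsbeispiel (Skizze): Monitoring starten, einen erfundenen
# Testfehler einspeisen und die Statistik ausgeben. Auto-Recovery wird für
# die Demo deaktiviert, damit keine echten Recovery-Aktionen ausgeführt werden.
if __name__ == "__main__":
    manager = get_error_recovery_manager()
    manager.config["auto_recovery_enabled"] = False
    start_error_monitoring()
    force_error_check("OperationalError: database is locked")
    print(json.dumps(manager.get_error_statistics(), indent=2, ensure_ascii=False))
    stop_error_monitoring()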

414
utils/file_manager.py Normal file
View File

@@ -0,0 +1,414 @@
"""
Mercedes-Benz MYP - Datei-Management-System
Organisierte Speicherung von hochgeladenen Dateien mit Verzeichniskonventionen
"""
import os
import shutil
from datetime import datetime
from werkzeug.utils import secure_filename
from typing import Optional, Tuple, Dict, List
from config.settings import UPLOAD_FOLDER, ALLOWED_EXTENSIONS
class FileManager:
"""
Zentrales Datei-Management-System für die MYP-Platform
Organisiert Uploads in strukturierte Unterverzeichnisse
"""
# Verzeichniskonventionen
DIRECTORIES = {
'jobs': 'jobs', # Druckjob-Dateien
'guests': 'guests', # Gastauftrags-Dateien
'avatars': 'avatars', # Benutzer-Avatare
'temp': 'temp', # Temporäre Dateien
'backups': 'backups', # Backup-Dateien
'logs': 'logs', # Exportierte Logs
'assets': 'assets' # Statische Assets
}
def __init__(self, base_upload_folder: str = UPLOAD_FOLDER):
"""
Initialisiert den FileManager
Args:
base_upload_folder: Basis-Upload-Verzeichnis
"""
self.base_folder = base_upload_folder
self.ensure_directories()
def ensure_directories(self) -> None:
"""Erstellt alle erforderlichen Verzeichnisse"""
try:
# Basis-Upload-Ordner erstellen
os.makedirs(self.base_folder, exist_ok=True)
# Alle Unterverzeichnisse erstellen
for category, subdir in self.DIRECTORIES.items():
dir_path = os.path.join(self.base_folder, subdir)
os.makedirs(dir_path, exist_ok=True)
# Jahres-/Monatsverzeichnisse für organisierte Speicherung
current_date = datetime.now()
year_dir = os.path.join(dir_path, str(current_date.year))
month_dir = os.path.join(year_dir, f"{current_date.month:02d}")
os.makedirs(year_dir, exist_ok=True)
os.makedirs(month_dir, exist_ok=True)
except Exception as e:
print(f"Fehler beim Erstellen der Verzeichnisse: {e}")
def allowed_file(self, filename: str) -> bool:
"""
Prüft, ob eine Datei erlaubt ist
Args:
filename: Name der Datei
Returns:
bool: True wenn erlaubt
"""
if '.' not in filename:
return False
extension = filename.rsplit('.', 1)[1].lower()
return extension in ALLOWED_EXTENSIONS
def generate_unique_filename(self, original_filename: str, prefix: str = "") -> str:
"""
Generiert einen eindeutigen Dateinamen
Args:
original_filename: Ursprünglicher Dateiname
prefix: Optionaler Präfix
Returns:
str: Eindeutiger Dateiname
"""
# Dateiname sicher machen
secure_name = secure_filename(original_filename)
# Timestamp hinzufügen für Eindeutigkeit
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
# Dateiname und Erweiterung trennen
if '.' in secure_name:
name, ext = secure_name.rsplit('.', 1)
if prefix:
unique_name = f"{prefix}_{name}_{timestamp}.{ext}"
else:
unique_name = f"{name}_{timestamp}.{ext}"
else:
if prefix:
unique_name = f"{prefix}_{secure_name}_{timestamp}"
else:
unique_name = f"{secure_name}_{timestamp}"
return unique_name
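
    # Beispiel (Skizze): aus "Bauteil A.stl" mit Präfix "job" entsteht etwa
    # "job_Bauteil_A_20250604_100322.stl" (Zeitstempel variiert je Aufruf).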
def save_file(self, file, category: str, user_id: int = None,
prefix: str = "", metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""
Speichert eine Datei in der organisierten Struktur
Args:
file: Werkzeug FileStorage Objekt
category: Kategorie (jobs, guests, avatars, etc.)
user_id: Benutzer-ID für Pfad-Organisation
prefix: Dateiname-Präfix
metadata: Zusätzliche Metadaten
Returns:
Tuple[str, str, Dict]: (relativer_pfad, absoluter_pfad, metadaten) oder None bei Fehler
"""
try:
if not file or not file.filename:
return None
if not self.allowed_file(file.filename):
raise ValueError(f"Dateityp nicht erlaubt: {file.filename}")
if category not in self.DIRECTORIES:
raise ValueError(f"Unbekannte Kategorie: {category}")
# Verzeichnisstruktur aufbauen
current_date = datetime.now()
category_dir = self.DIRECTORIES[category]
year_dir = str(current_date.year)
month_dir = f"{current_date.month:02d}"
# Benutzer-spezifischen Unterordner hinzufügen wenn user_id vorhanden
if user_id:
relative_dir = os.path.join(category_dir, year_dir, month_dir, f"user_{user_id}")
else:
relative_dir = os.path.join(category_dir, year_dir, month_dir)
# Vollständigen Pfad erstellen
full_dir = os.path.join(self.base_folder, relative_dir)
os.makedirs(full_dir, exist_ok=True)
# Eindeutigen Dateinamen generieren
unique_filename = self.generate_unique_filename(file.filename, prefix)
# Pfade definieren
relative_path = os.path.join(relative_dir, unique_filename).replace('\\', '/')
absolute_path = os.path.join(full_dir, unique_filename)
# Datei speichern
file.save(absolute_path)
# Metadaten sammeln
file_metadata = {
'original_filename': file.filename,
'unique_filename': unique_filename,
'relative_path': relative_path,
'absolute_path': absolute_path,
'category': category,
'user_id': user_id,
'file_size': os.path.getsize(absolute_path),
'upload_timestamp': current_date.isoformat(),
'mime_type': file.content_type or 'application/octet-stream'
}
# Zusätzliche Metadaten hinzufügen
if metadata:
file_metadata.update(metadata)
return relative_path, absolute_path, file_metadata
except Exception as e:
print(f"Fehler beim Speichern der Datei: {e}")
return None
def delete_file(self, relative_path: str) -> bool:
"""
Löscht eine Datei
Args:
relative_path: Relativer Pfad zur Datei
Returns:
bool: True wenn erfolgreich gelöscht
"""
try:
if not relative_path:
return False
absolute_path = os.path.join(self.base_folder, relative_path)
if os.path.exists(absolute_path) and os.path.isfile(absolute_path):
os.remove(absolute_path)
return True
return False
except Exception as e:
print(f"Fehler beim Löschen der Datei {relative_path}: {e}")
return False
def move_file(self, old_relative_path: str, new_category: str,
new_prefix: str = "") -> Optional[str]:
"""
Verschiebt eine Datei in eine andere Kategorie
Args:
old_relative_path: Alter relativer Pfad
new_category: Neue Kategorie
new_prefix: Neuer Präfix
Returns:
str: Neuer relativer Pfad oder None bei Fehler
"""
try:
old_absolute_path = os.path.join(self.base_folder, old_relative_path)
if not os.path.exists(old_absolute_path):
return None
# Dateiname extrahieren
filename = os.path.basename(old_absolute_path)
# Neuen Pfad generieren
current_date = datetime.now()
new_category_dir = self.DIRECTORIES.get(new_category)
if not new_category_dir:
return None
year_dir = str(current_date.year)
month_dir = f"{current_date.month:02d}"
new_relative_dir = os.path.join(new_category_dir, year_dir, month_dir)
new_full_dir = os.path.join(self.base_folder, new_relative_dir)
os.makedirs(new_full_dir, exist_ok=True)
# Neuen Dateinamen generieren falls Präfix angegeben
if new_prefix:
new_filename = self.generate_unique_filename(filename, new_prefix)
else:
new_filename = filename
new_relative_path = os.path.join(new_relative_dir, new_filename).replace('\\', '/')
new_absolute_path = os.path.join(new_full_dir, new_filename)
# Datei verschieben
shutil.move(old_absolute_path, new_absolute_path)
return new_relative_path
except Exception as e:
print(f"Fehler beim Verschieben der Datei: {e}")
return None
def get_file_info(self, relative_path: str) -> Optional[Dict]:
"""
Gibt Informationen über eine Datei zurück
Args:
relative_path: Relativer Pfad zur Datei
Returns:
Dict: Datei-Informationen oder None
"""
try:
if not relative_path:
return None
absolute_path = os.path.join(self.base_folder, relative_path)
if not os.path.exists(absolute_path):
return None
stat = os.stat(absolute_path)
return {
'filename': os.path.basename(absolute_path),
'relative_path': relative_path,
'absolute_path': absolute_path,
'size': stat.st_size,
'created': datetime.fromtimestamp(stat.st_ctime).isoformat(),
'modified': datetime.fromtimestamp(stat.st_mtime).isoformat(),
'exists': True
}
except Exception as e:
print(f"Fehler beim Abrufen der Datei-Informationen: {e}")
return None
def cleanup_temp_files(self, max_age_hours: int = 24) -> int:
"""
Räumt temporäre Dateien auf
Args:
max_age_hours: Maximales Alter in Stunden
Returns:
int: Anzahl gelöschte Dateien
"""
try:
temp_dir = os.path.join(self.base_folder, self.DIRECTORIES['temp'])
if not os.path.exists(temp_dir):
return 0
deleted_count = 0
max_age_seconds = max_age_hours * 3600
current_time = datetime.now().timestamp()
for root, dirs, files in os.walk(temp_dir):
for file in files:
file_path = os.path.join(root, file)
try:
file_age = current_time - os.path.getmtime(file_path)
if file_age > max_age_seconds:
os.remove(file_path)
deleted_count += 1
except Exception:
continue
return deleted_count
except Exception as e:
print(f"Fehler beim Aufräumen temporärer Dateien: {e}")
return 0
def get_category_stats(self) -> Dict[str, Dict]:
"""
Gibt Statistiken für alle Kategorien zurück
Returns:
Dict: Statistiken pro Kategorie
"""
stats = {}
try:
for category, subdir in self.DIRECTORIES.items():
category_path = os.path.join(self.base_folder, subdir)
if not os.path.exists(category_path):
stats[category] = {'file_count': 0, 'total_size': 0}
continue
file_count = 0
total_size = 0
for root, dirs, files in os.walk(category_path):
for file in files:
file_path = os.path.join(root, file)
try:
total_size += os.path.getsize(file_path)
file_count += 1
except Exception:
continue
stats[category] = {
'file_count': file_count,
'total_size': total_size,
'total_size_mb': round(total_size / (1024 * 1024), 2)
}
return stats
except Exception as e:
print(f"Fehler beim Abrufen der Kategorie-Statistiken: {e}")
return {}
# Globale FileManager-Instanz
file_manager = FileManager()
# Convenience-Funktionen
def save_job_file(file, user_id: int, metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine Druckjob-Datei"""
return file_manager.save_file(file, 'jobs', user_id, 'job', metadata)
def save_guest_file(file, metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine Gastauftrags-Datei"""
return file_manager.save_file(file, 'guests', None, 'guest', metadata)
def save_avatar_file(file, user_id: int) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine Avatar-Datei"""
return file_manager.save_file(file, 'avatars', user_id, 'avatar')
def save_asset_file(file, user_id: int, metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine Asset-Datei"""
return file_manager.save_file(file, 'assets', user_id, 'asset', metadata)
def save_log_file(file, user_id: int, metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine Log-Datei"""
return file_manager.save_file(file, 'logs', user_id, 'log', metadata)
def save_backup_file(file, user_id: int, metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine Backup-Datei"""
return file_manager.save_file(file, 'backups', user_id, 'backup', metadata)
def save_temp_file(file, user_id: int, metadata: Dict = None) -> Optional[Tuple[str, str, Dict]]:
"""Speichert eine temporäre Datei"""
return file_manager.save_file(file, 'temp', user_id, 'temp', metadata)
def delete_file(relative_path: str) -> bool:
"""Löscht eine Datei"""
return file_manager.delete_file(relative_path)
def get_file_info(relative_path: str) -> Optional[Dict]:
"""Gibt Datei-Informationen zurück"""
return file_manager.get_file_info(relative_path)
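
# Minimal-Nutzungsbeispiel (Skizze): simuliert einen Upload über ein
# Werkzeug-FileStorage-Objekt. Dateiname, Inhalt und user_id sind Platzhalter;
# Annahme: die Endung "gcode" ist in ALLOWED_EXTENSIONS enthalten.
if __name__ == "__main__":
    import io
    from werkzeug.datastructures import FileStorage

    dummy = FileStorage(
        stream=io.BytesIO(b"G28 ; Beispiel-G-Code"),
        filename="beispiel.gcode",
        content_type="text/plain"
    )
    saved = save_job_file(dummy, user_id=1, metadata={"quelle": "demo"})
    if saved:
        rel_path, _abs_path, meta = saved
        print(f"Gespeichert unter: {rel_path} ({meta['file_size']} Bytes)")
        delete_file(rel_path)  # Demo-Datei wieder aufräumen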

22
utils/fix_csrf.py Normal file
View File

@@ -0,0 +1,22 @@
#!/usr/bin/env python3
"""Entferne problematischen CSRF-Error-Handler aus app.py"""
import re
# Lese die Backup-Datei
with open('app_backup.py', 'r', encoding='utf-8') as f:
content = f.read()
# Entferne den CSRF-Error-Handler-Block
# Suche vom @csrf.error_handler-Dekorator bis zur nächsten Leerzeile oder zum nächsten Kommentarblock
pattern = r'@csrf\.error_handler.*?(?=\n\n|\n# [A-Z])'
content = re.sub(pattern, '', content, flags=re.DOTALL)
# Entferne auch mögliche doppelte Leerzeilen
content = re.sub(r'\n\n\n+', '\n\n', content)
# Schreibe die bereinigte Version
with open('app.py', 'w', encoding='utf-8') as f:
f.write(content)
print("CSRF-Error-Handler erfolgreich entfernt!")

View File

@@ -0,0 +1,253 @@
#!/usr/bin/env python3
"""
Sofortige Datenbank-Reparatur für fehlende updated_at Spalte
"""
import os
import sys
import sqlite3
from datetime import datetime
# Pfad zur App hinzufügen
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from config.settings import DATABASE_PATH
def fix_users_table_immediate():
"""Repariert die users Tabelle sofort."""
print(f"Repariere Datenbank: {DATABASE_PATH}")
if not os.path.exists(DATABASE_PATH):
print(f"Datenbankdatei nicht gefunden: {DATABASE_PATH}")
return False
try:
conn = sqlite3.connect(DATABASE_PATH)
cursor = conn.cursor()
# Prüfen, welche Spalten existieren
cursor.execute("PRAGMA table_info(users)")
existing_columns = [row[1] for row in cursor.fetchall()]
print(f"Vorhandene Spalten in users: {existing_columns}")
# Fehlende Spalten hinzufügen
required_columns = [
('updated_at', 'DATETIME'),
('settings', 'TEXT'),
('department', 'VARCHAR(100)'),
('position', 'VARCHAR(100)'),
('phone', 'VARCHAR(50)'),
('bio', 'TEXT')
]
for column_name, column_type in required_columns:
if column_name not in existing_columns:
try:
if column_name == 'updated_at':
# Einfacher Ansatz: NULL erlauben und später updaten
cursor.execute(f"ALTER TABLE users ADD COLUMN {column_name} {column_type}")
print(f"✓ Spalte '{column_name}' hinzugefügt")
# Alle vorhandenen Benutzer mit aktuellem Timestamp updaten
cursor.execute(f"UPDATE users SET {column_name} = CURRENT_TIMESTAMP WHERE {column_name} IS NULL")
print(f"✓ Vorhandene Benutzer mit {column_name} aktualisiert")
# Trigger für automatische Updates erstellen
cursor.execute("""
CREATE TRIGGER IF NOT EXISTS update_users_updated_at
AFTER UPDATE ON users
FOR EACH ROW
BEGIN
UPDATE users SET updated_at = CURRENT_TIMESTAMP WHERE id = NEW.id;
END
""")
print(f"✓ Auto-Update-Trigger für {column_name} erstellt")
else:
cursor.execute(f"ALTER TABLE users ADD COLUMN {column_name} {column_type}")
print(f"✓ Spalte '{column_name}' hinzugefügt")
except Exception as e:
print(f"✗ Fehler bei Spalte '{column_name}': {str(e)}")
else:
print(f"○ Spalte '{column_name}' bereits vorhanden")
# Weitere fehlende Tabellen prüfen und erstellen
create_missing_tables(cursor)
# Optimierungsindizes erstellen
create_performance_indexes(cursor)
conn.commit()
conn.close()
print("✓ Datenbank-Reparatur erfolgreich abgeschlossen")
return True
except Exception as e:
print(f"✗ Fehler bei der Datenbank-Reparatur: {str(e)}")
if 'conn' in locals():
conn.rollback()
conn.close()
return False
def create_missing_tables(cursor):
"""Erstellt fehlende Tabellen."""
# Prüfen, welche Tabellen existieren
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
existing_tables = [row[0] for row in cursor.fetchall()]
print(f"Vorhandene Tabellen: {existing_tables}")
# user_permissions Tabelle
if 'user_permissions' not in existing_tables:
cursor.execute("""
CREATE TABLE user_permissions (
user_id INTEGER PRIMARY KEY,
can_start_jobs BOOLEAN DEFAULT 0,
needs_approval BOOLEAN DEFAULT 1,
can_approve_jobs BOOLEAN DEFAULT 0,
FOREIGN KEY (user_id) REFERENCES users (id)
)
""")
print("✓ Tabelle 'user_permissions' erstellt")
# notifications Tabelle
if 'notifications' not in existing_tables:
cursor.execute("""
CREATE TABLE notifications (
id INTEGER PRIMARY KEY,
user_id INTEGER NOT NULL,
type VARCHAR(50) NOT NULL,
payload TEXT,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
read BOOLEAN DEFAULT 0,
FOREIGN KEY (user_id) REFERENCES users (id)
)
""")
print("✓ Tabelle 'notifications' erstellt")
# stats Tabelle
if 'stats' not in existing_tables:
cursor.execute("""
CREATE TABLE stats (
id INTEGER PRIMARY KEY,
total_print_time INTEGER DEFAULT 0,
total_jobs_completed INTEGER DEFAULT 0,
total_material_used REAL DEFAULT 0.0,
last_updated DATETIME DEFAULT CURRENT_TIMESTAMP
)
""")
print("✓ Tabelle 'stats' erstellt")
# Initial stats record erstellen
cursor.execute("""
INSERT INTO stats (total_print_time, total_jobs_completed, total_material_used, last_updated)
VALUES (0, 0, 0.0, CURRENT_TIMESTAMP)
""")
print("✓ Initial-Statistiken erstellt")
# system_logs Tabelle
if 'system_logs' not in existing_tables:
cursor.execute("""
CREATE TABLE system_logs (
id INTEGER PRIMARY KEY,
timestamp DATETIME DEFAULT CURRENT_TIMESTAMP NOT NULL,
level VARCHAR(20) NOT NULL,
message VARCHAR(1000) NOT NULL,
module VARCHAR(100),
user_id INTEGER,
ip_address VARCHAR(50),
user_agent VARCHAR(500),
FOREIGN KEY (user_id) REFERENCES users (id)
)
""")
print("✓ Tabelle 'system_logs' erstellt")
def create_performance_indexes(cursor):
"""Erstellt Performance-Indices."""
print("Erstelle Performance-Indices...")
indexes = [
("idx_users_email", "users(email)"),
("idx_users_username", "users(username)"),
("idx_users_role", "users(role)"),
("idx_jobs_user_id", "jobs(user_id)"),
("idx_jobs_printer_id", "jobs(printer_id)"),
("idx_jobs_status", "jobs(status)"),
("idx_jobs_start_at", "jobs(start_at)"),
("idx_notifications_user_id", "notifications(user_id)"),
("idx_notifications_read", "notifications(read)"),
("idx_system_logs_timestamp", "system_logs(timestamp)"),
("idx_system_logs_level", "system_logs(level)"),
("idx_guest_requests_status", "guest_requests(status)"),
("idx_printers_status", "printers(status)"),
("idx_printers_active", "printers(active)")
]
for index_name, index_def in indexes:
try:
cursor.execute(f"CREATE INDEX IF NOT EXISTS {index_name} ON {index_def}")
print(f"✓ Index '{index_name}' erstellt")
except Exception as e:
print(f"○ Index '{index_name}': {str(e)}")
def test_database_access():
"""Testet den Datenbankzugriff nach der Reparatur."""
print("\nTeste Datenbankzugriff...")
try:
# Models importieren und testen
from models import get_cached_session, User, Printer, Job
with get_cached_session() as session:
# Test User-Query
users = session.query(User).limit(5).all()
print(f"✓ User-Abfrage erfolgreich - {len(users)} Benutzer gefunden")
# Test Printer-Query
printers = session.query(Printer).limit(5).all()
print(f"✓ Printer-Abfrage erfolgreich - {len(printers)} Drucker gefunden")
# Test Job-Query
jobs = session.query(Job).limit(5).all()
print(f"✓ Job-Abfrage erfolgreich - {len(jobs)} Jobs gefunden")
print("✓ Alle Datenbank-Tests erfolgreich!")
return True
except Exception as e:
print(f"✗ Datenbank-Test fehlgeschlagen: {str(e)}")
return False
def main():
"""Hauptfunktion für die sofortige Datenbank-Reparatur."""
print("=== SOFORTIGE DATENBANK-REPARATUR ===")
print(f"Zeitstempel: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
print(f"Datenbank: {DATABASE_PATH}")
print()
# Backup erstellen
if os.path.exists(DATABASE_PATH):
backup_path = f"{DATABASE_PATH}.backup_immediate_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
try:
import shutil
shutil.copy2(DATABASE_PATH, backup_path)
print(f"✓ Backup erstellt: {backup_path}")
except Exception as e:
print(f"⚠ Backup-Erstellung fehlgeschlagen: {str(e)}")
# Reparatur durchführen
if fix_users_table_immediate():
print("\n=== DATENBANK-TEST ===")
if test_database_access():
print("\n🎉 DATENBANK-REPARATUR ERFOLGREICH!")
print("Die Anwendung sollte jetzt funktionieren.")
else:
print("\n❌ DATENBANK-TEST FEHLGESCHLAGEN!")
print("Weitere Diagnose erforderlich.")
else:
print("\n❌ DATENBANK-REPARATUR FEHLGESCHLAGEN!")
print("Manuelle Intervention erforderlich.")
if __name__ == "__main__":
main()

663
utils/form_validation.py Normal file
View File

@@ -0,0 +1,663 @@
"""
Erweiterte Formular-Validierung für das MYP-System
==================================================
Dieses Modul stellt umfassende Client- und serverseitige Validierung
mit benutzerfreundlichem UI-Feedback bereit.
Funktionen:
- Multi-Level-Validierung (Client/Server)
- Echtzeitvalidierung mit JavaScript
- Barrierefreie Fehlermeldungen
- Custom Validators für spezielle Anforderungen
- Automatische Sanitization von Eingaben
"""
import re
import html
import json
import logging
from typing import Dict, List, Any, Optional, Callable, Union
from datetime import datetime, timedelta
from flask import request, jsonify, session
from functools import wraps
from werkzeug.datastructures import FileStorage
from utils.logging_config import get_logger
from config.settings import ALLOWED_EXTENSIONS, MAX_FILE_SIZE
logger = get_logger("validation")
class ValidationError(Exception):
"""Custom Exception für Validierungsfehler"""
def __init__(self, message: str, field: str = None, code: str = None):
self.message = message
self.field = field
self.code = code
super().__init__(self.message)
class ValidationResult:
"""Ergebnis einer Validierung"""
def __init__(self):
self.is_valid = True
self.errors: Dict[str, List[str]] = {}
self.warnings: Dict[str, List[str]] = {}
self.cleaned_data: Dict[str, Any] = {}
def add_error(self, field: str, message: str):
"""Fügt einen Validierungsfehler hinzu"""
if field not in self.errors:
self.errors[field] = []
self.errors[field].append(message)
self.is_valid = False
def add_warning(self, field: str, message: str):
"""Fügt eine Warnung hinzu"""
if field not in self.warnings:
self.warnings[field] = []
self.warnings[field].append(message)
def to_dict(self) -> Dict[str, Any]:
"""Konvertiert das Ergebnis zu einem Dictionary"""
return {
"is_valid": self.is_valid,
"errors": self.errors,
"warnings": self.warnings,
"cleaned_data": self.cleaned_data
}
class BaseValidator:
"""Basis-Klasse für alle Validatoren"""
def __init__(self, required: bool = False, allow_empty: bool = True):
self.required = required
self.allow_empty = allow_empty
def validate(self, value: Any, field_name: str = None) -> ValidationResult:
"""Führt die Validierung durch"""
result = ValidationResult()
# Prüfung auf erforderliche Felder
if self.required and (value is None or value == ""):
result.add_error(field_name or "field", "Dieses Feld ist erforderlich.")
return result
        # Wenn Wert leer und erlaubt, keine weitere Validierung.
        # Expliziter Vergleich statt "not value", damit gültige Eingaben
        # wie 0 oder 0.0 nicht fälschlich übersprungen werden.
        if (value is None or value == "") and self.allow_empty:
            result.cleaned_data[field_name or "field"] = value
            return result
return self._validate_value(value, field_name, result)
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
"""Überschreibbar für spezifische Validierungslogik"""
result.cleaned_data[field_name or "field"] = value
return result
class StringValidator(BaseValidator):
"""Validator für String-Werte"""
def __init__(self, min_length: int = None, max_length: int = None,
pattern: str = None, trim: bool = True, **kwargs):
super().__init__(**kwargs)
self.min_length = min_length
self.max_length = max_length
self.pattern = re.compile(pattern) if pattern else None
self.trim = trim
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
# String konvertieren und trimmen
str_value = str(value)
if self.trim:
str_value = str_value.strip()
# Längenprüfung
if self.min_length is not None and len(str_value) < self.min_length:
result.add_error(field_name, f"Mindestlänge: {self.min_length} Zeichen")
if self.max_length is not None and len(str_value) > self.max_length:
result.add_error(field_name, f"Maximallänge: {self.max_length} Zeichen")
# Pattern-Prüfung
if self.pattern and not self.pattern.match(str_value):
result.add_error(field_name, "Format ist ungültig")
# HTML-Sanitization
cleaned_value = html.escape(str_value)
result.cleaned_data[field_name] = cleaned_value
return result
class EmailValidator(StringValidator):
"""Validator für E-Mail-Adressen"""
EMAIL_PATTERN = r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$'
def __init__(self, **kwargs):
super().__init__(pattern=self.EMAIL_PATTERN, **kwargs)
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
result = super()._validate_value(value, field_name, result)
if result.is_valid:
# Normalisierung der E-Mail
email = str(value).lower().strip()
result.cleaned_data[field_name] = email
return result
class IntegerValidator(BaseValidator):
"""Validator für Integer-Werte"""
def __init__(self, min_value: int = None, max_value: int = None, **kwargs):
super().__init__(**kwargs)
self.min_value = min_value
self.max_value = max_value
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
try:
int_value = int(value)
except (ValueError, TypeError):
result.add_error(field_name, "Muss eine ganze Zahl sein")
return result
if self.min_value is not None and int_value < self.min_value:
result.add_error(field_name, f"Mindestwert: {self.min_value}")
if self.max_value is not None and int_value > self.max_value:
result.add_error(field_name, f"Maximalwert: {self.max_value}")
result.cleaned_data[field_name] = int_value
return result
class FloatValidator(BaseValidator):
"""Validator für Float-Werte"""
def __init__(self, min_value: float = None, max_value: float = None,
decimal_places: int = None, **kwargs):
super().__init__(**kwargs)
self.min_value = min_value
self.max_value = max_value
self.decimal_places = decimal_places
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
try:
float_value = float(value)
except (ValueError, TypeError):
result.add_error(field_name, "Muss eine Dezimalzahl sein")
return result
if self.min_value is not None and float_value < self.min_value:
result.add_error(field_name, f"Mindestwert: {self.min_value}")
if self.max_value is not None and float_value > self.max_value:
result.add_error(field_name, f"Maximalwert: {self.max_value}")
# Rundung auf bestimmte Dezimalstellen
if self.decimal_places is not None:
float_value = round(float_value, self.decimal_places)
result.cleaned_data[field_name] = float_value
return result
class DateTimeValidator(BaseValidator):
"""Validator für DateTime-Werte"""
def __init__(self, format_string: str = "%Y-%m-%d %H:%M",
min_date: datetime = None, max_date: datetime = None, **kwargs):
super().__init__(**kwargs)
self.format_string = format_string
self.min_date = min_date
self.max_date = max_date
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
if isinstance(value, datetime):
dt_value = value
else:
try:
dt_value = datetime.strptime(str(value), self.format_string)
except ValueError:
result.add_error(field_name, f"Ungültiges Datumsformat. Erwartet: {self.format_string}")
return result
if self.min_date and dt_value < self.min_date:
result.add_error(field_name, f"Datum muss nach {self.min_date.strftime('%d.%m.%Y')} liegen")
if self.max_date and dt_value > self.max_date:
result.add_error(field_name, f"Datum muss vor {self.max_date.strftime('%d.%m.%Y')} liegen")
result.cleaned_data[field_name] = dt_value
return result
class FileValidator(BaseValidator):
"""Validator für Datei-Uploads"""
def __init__(self, allowed_extensions: List[str] = None,
max_size_mb: int = None, min_size_kb: int = None, **kwargs):
super().__init__(**kwargs)
self.allowed_extensions = allowed_extensions or ALLOWED_EXTENSIONS
self.max_size_mb = max_size_mb or (MAX_FILE_SIZE / (1024 * 1024))
self.min_size_kb = min_size_kb
def _validate_value(self, value: Any, field_name: str, result: ValidationResult) -> ValidationResult:
if not isinstance(value, FileStorage):
result.add_error(field_name, "Muss eine gültige Datei sein")
return result
# Dateiname prüfen
if not value.filename:
result.add_error(field_name, "Dateiname ist erforderlich")
return result
# Dateierweiterung prüfen
extension = value.filename.rsplit('.', 1)[-1].lower() if '.' in value.filename else ''
if extension not in self.allowed_extensions:
result.add_error(field_name,
f"Nur folgende Dateiformate sind erlaubt: {', '.join(self.allowed_extensions)}")
# Dateigröße prüfen
value.seek(0, 2) # Zum Ende der Datei
file_size = value.tell()
value.seek(0) # Zurück zum Anfang
if self.max_size_mb and file_size > (self.max_size_mb * 1024 * 1024):
result.add_error(field_name, f"Datei zu groß. Maximum: {self.max_size_mb} MB")
if self.min_size_kb and file_size < (self.min_size_kb * 1024):
result.add_error(field_name, f"Datei zu klein. Minimum: {self.min_size_kb} KB")
result.cleaned_data[field_name] = value
return result
class FormValidator:
"""Haupt-Formular-Validator"""
def __init__(self):
self.fields: Dict[str, BaseValidator] = {}
self.custom_validators: List[Callable] = []
self.rate_limit_key = None
self.csrf_check = True
def add_field(self, name: str, validator: BaseValidator):
"""Fügt ein Feld mit Validator hinzu"""
self.fields[name] = validator
return self
def add_custom_validator(self, validator_func: Callable):
"""Fügt einen benutzerdefinierten Validator hinzu"""
self.custom_validators.append(validator_func)
return self
def set_rate_limit(self, key: str):
"""Setzt einen Rate-Limiting-Schlüssel"""
self.rate_limit_key = key
return self
def disable_csrf(self):
"""Deaktiviert CSRF-Prüfung für dieses Formular"""
self.csrf_check = False
return self
def validate(self, data: Dict[str, Any]) -> ValidationResult:
"""Validiert die gesamten Formulardaten"""
result = ValidationResult()
# Einzelfeldvalidierung
for field_name, validator in self.fields.items():
field_value = data.get(field_name)
field_result = validator.validate(field_value, field_name)
if not field_result.is_valid:
result.errors.update(field_result.errors)
result.is_valid = False
result.warnings.update(field_result.warnings)
result.cleaned_data.update(field_result.cleaned_data)
# Benutzerdefinierte Validierung
if result.is_valid:
for custom_validator in self.custom_validators:
try:
custom_result = custom_validator(result.cleaned_data)
if isinstance(custom_result, ValidationResult):
if not custom_result.is_valid:
result.errors.update(custom_result.errors)
result.is_valid = False
result.warnings.update(custom_result.warnings)
except Exception as e:
logger.error(f"Fehler bei benutzerdefinierter Validierung: {str(e)}")
result.add_error("form", "Unerwarteter Validierungsfehler")
return result
# Vordefinierte Formular-Validatoren
def get_user_registration_validator() -> FormValidator:
"""Validator für Benutzerregistrierung"""
return FormValidator() \
.add_field("username", StringValidator(min_length=3, max_length=50, required=True)) \
.add_field("email", EmailValidator(required=True)) \
.add_field("password", StringValidator(min_length=8, required=True)) \
.add_field("password_confirm", StringValidator(min_length=8, required=True)) \
.add_field("name", StringValidator(min_length=2, max_length=100, required=True)) \
.add_custom_validator(lambda data: _validate_password_match(data))
def get_job_creation_validator() -> FormValidator:
"""Validator für Job-Erstellung"""
return FormValidator() \
.add_field("name", StringValidator(min_length=1, max_length=200, required=True)) \
.add_field("description", StringValidator(max_length=500)) \
.add_field("printer_id", IntegerValidator(min_value=1, required=True)) \
.add_field("duration_minutes", IntegerValidator(min_value=1, max_value=1440, required=True)) \
.add_field("start_at", DateTimeValidator(min_date=datetime.now())) \
.add_field("file", FileValidator(required=True))
def get_printer_creation_validator() -> FormValidator:
"""Validator für Drucker-Erstellung"""
return FormValidator() \
.add_field("name", StringValidator(min_length=1, max_length=100, required=True)) \
.add_field("model", StringValidator(max_length=100)) \
.add_field("location", StringValidator(max_length=100)) \
.add_field("ip_address", StringValidator(pattern=r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')) \
.add_field("mac_address", StringValidator(pattern=r'^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$', required=True)) \
.add_field("plug_ip", StringValidator(pattern=r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', required=True)) \
.add_field("plug_username", StringValidator(min_length=1, required=True)) \
.add_field("plug_password", StringValidator(min_length=1, required=True))
def get_guest_request_validator() -> FormValidator:
"""Validator für Gastanfragen"""
return FormValidator() \
.add_field("name", StringValidator(min_length=2, max_length=100, required=True)) \
.add_field("email", EmailValidator()) \
.add_field("reason", StringValidator(min_length=10, max_length=500, required=True)) \
.add_field("duration_minutes", IntegerValidator(min_value=5, max_value=480, required=True)) \
.add_field("copies", IntegerValidator(min_value=1, max_value=10)) \
.add_field("file", FileValidator(required=True)) \
.set_rate_limit("guest_request")
def _validate_password_match(data: Dict[str, Any]) -> ValidationResult:
"""Validiert, ob Passwörter übereinstimmen"""
result = ValidationResult()
password = data.get("password")
password_confirm = data.get("password_confirm")
if password != password_confirm:
result.add_error("password_confirm", "Passwörter stimmen nicht überein")
return result
# Decorator für automatische Formularvalidierung
def validate_form(validator_func: Callable[[], FormValidator]):
"""Decorator für automatische Formularvalidierung"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
try:
# Validator erstellen
validator = validator_func()
# Daten aus Request extrahieren
if request.is_json:
data = request.get_json() or {}
else:
data = dict(request.form)
# Dateien hinzufügen
for key, file in request.files.items():
data[key] = file
# Validierung durchführen
validation_result = validator.validate(data)
# Bei Fehlern JSON-Response zurückgeben
if not validation_result.is_valid:
logger.warning(f"Validierungsfehler für {request.endpoint}: {validation_result.errors}")
return jsonify({
"success": False,
"errors": validation_result.errors,
"warnings": validation_result.warnings
}), 400
# Gereinigte Daten an die Request anhängen
request.validated_data = validation_result.cleaned_data
request.validation_warnings = validation_result.warnings
return f(*args, **kwargs)
except Exception as e:
logger.error(f"Fehler bei Formularvalidierung: {str(e)}")
return jsonify({
"success": False,
"errors": {"form": ["Unerwarteter Validierungsfehler"]}
}), 500
return decorated_function
return decorator
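
# Beispiel (Skizze): Verwendung des Decorators in einer Flask-Route.
# "app" und der Routenname sind angenommene Platzhalter:
#
#     @app.route("/api/jobs", methods=["POST"])
#     @validate_form(get_job_creation_validator)
#     def create_job():
#         data = request.validated_data  # bereits bereinigte Eingaben
#         ...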
# JavaScript für Client-seitige Validierung
def get_client_validation_js() -> str:
"""Generiert JavaScript für Client-seitige Validierung"""
return """
class FormValidator {
constructor(formId, validationRules = {}) {
this.form = document.getElementById(formId);
this.rules = validationRules;
this.errors = {};
this.setupEventListeners();
}
setupEventListeners() {
if (!this.form) return;
// Echtzeit-Validierung bei Eingabe
this.form.addEventListener('input', (e) => {
this.validateField(e.target);
});
// Formular-Submission
this.form.addEventListener('submit', (e) => {
if (!this.validateForm()) {
e.preventDefault();
}
});
}
validateField(field) {
const fieldName = field.name;
const value = field.value;
const rule = this.rules[fieldName];
if (!rule) return true;
this.clearFieldError(field);
// Required-Prüfung
if (rule.required && (!value || value.trim() === '')) {
this.addFieldError(field, 'Dieses Feld ist erforderlich.');
return false;
}
// Längenprüfung
if (rule.minLength && value.length < rule.minLength) {
this.addFieldError(field, `Mindestlänge: ${rule.minLength} Zeichen`);
return false;
}
if (rule.maxLength && value.length > rule.maxLength) {
this.addFieldError(field, `Maximallänge: ${rule.maxLength} Zeichen`);
return false;
}
// Pattern-Prüfung
if (rule.pattern && !new RegExp(rule.pattern).test(value)) {
this.addFieldError(field, rule.patternMessage || 'Format ist ungültig');
return false;
}
// Email-Prüfung
if (rule.type === 'email' && value) {
const emailPattern = /^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$/;
if (!emailPattern.test(value)) {
this.addFieldError(field, 'Bitte geben Sie eine gültige E-Mail-Adresse ein');
return false;
}
}
// Custom Validierung
if (rule.customValidator) {
const customResult = rule.customValidator(value, field);
if (customResult !== true) {
this.addFieldError(field, customResult);
return false;
}
}
return true;
}
validateForm() {
let isValid = true;
this.errors = {};
// Alle Felder validieren
const fields = this.form.querySelectorAll('input, textarea, select');
fields.forEach(field => {
if (!this.validateField(field)) {
isValid = false;
}
});
// Custom Form-Validierung
if (this.rules._formValidator) {
const formData = new FormData(this.form);
const customResult = this.rules._formValidator(formData, this.form);
if (customResult !== true) {
this.addFormError(customResult);
isValid = false;
}
}
return isValid;
}
addFieldError(field, message) {
const fieldName = field.name;
// Error-Container finden oder erstellen
let errorContainer = field.parentNode.querySelector('.field-error');
if (!errorContainer) {
errorContainer = document.createElement('div');
errorContainer.className = 'field-error text-red-600 text-sm mt-1';
errorContainer.setAttribute('role', 'alert');
errorContainer.setAttribute('aria-live', 'polite');
field.parentNode.appendChild(errorContainer);
}
errorContainer.textContent = message;
field.classList.add('border-red-500');
field.setAttribute('aria-invalid', 'true');
// Für Screen Reader
if (!field.getAttribute('aria-describedby')) {
const errorId = `error-${fieldName}-${Date.now()}`;
errorContainer.id = errorId;
field.setAttribute('aria-describedby', errorId);
}
this.errors[fieldName] = message;
}
clearFieldError(field) {
const errorContainer = field.parentNode.querySelector('.field-error');
if (errorContainer) {
errorContainer.remove();
}
field.classList.remove('border-red-500');
field.removeAttribute('aria-invalid');
field.removeAttribute('aria-describedby');
delete this.errors[field.name];
}
addFormError(message) {
let formErrorContainer = this.form.querySelector('.form-error');
if (!formErrorContainer) {
formErrorContainer = document.createElement('div');
formErrorContainer.className = 'form-error bg-red-100 border border-red-400 text-red-700 px-4 py-3 rounded mb-4';
formErrorContainer.setAttribute('role', 'alert');
this.form.insertBefore(formErrorContainer, this.form.firstChild);
}
formErrorContainer.textContent = message;
}
clearFormErrors() {
const formErrorContainer = this.form.querySelector('.form-error');
if (formErrorContainer) {
formErrorContainer.remove();
}
}
showServerErrors(errors) {
// Server-Fehler anzeigen
for (const [fieldName, messages] of Object.entries(errors)) {
const field = this.form.querySelector(`[name="${fieldName}"]`);
if (field && messages.length > 0) {
this.addFieldError(field, messages[0]);
}
}
}
}
// Utility-Funktionen
window.FormValidationUtils = {
// Passwort-Stärke prüfen
validatePasswordStrength: (password) => {
if (password.length < 8) return 'Passwort muss mindestens 8 Zeichen lang sein';
if (!/[A-Z]/.test(password)) return 'Passwort muss mindestens einen Großbuchstaben enthalten';
if (!/[a-z]/.test(password)) return 'Passwort muss mindestens einen Kleinbuchstaben enthalten';
if (!/[0-9]/.test(password)) return 'Passwort muss mindestens eine Zahl enthalten';
return true;
},
// Passwort-Bestätigung prüfen
validatePasswordConfirm: (password, confirm) => {
return password === confirm ? true : 'Passwörter stimmen nicht überein';
},
// Datei-Validierung
validateFile: (file, allowedTypes = [], maxSizeMB = 10) => {
if (!file) return 'Bitte wählen Sie eine Datei aus';
const fileType = file.name.split('.').pop().toLowerCase();
if (allowedTypes.length > 0 && !allowedTypes.includes(fileType)) {
return `Nur folgende Dateiformate sind erlaubt: ${allowedTypes.join(', ')}`;
}
if (file.size > maxSizeMB * 1024 * 1024) {
return `Datei ist zu groß. Maximum: ${maxSizeMB} MB`;
}
return true;
}
};
"""
def render_validation_errors(errors: Dict[str, List[str]]) -> str:
"""Rendert Validierungsfehler als HTML"""
if not errors:
return ""
html_parts = ['<div class="validation-errors">']
for field, messages in errors.items():
for message in messages:
            html_parts.append(
                f'<div class="error-message bg-red-100 border border-red-400 text-red-700 px-4 py-3 rounded mb-2" role="alert">'
                f'<strong>{html.escape(field)}:</strong> {html.escape(message)}'
                f'</div>'
            )
html_parts.append('</div>')
return '\n'.join(html_parts)
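
# Minimal-Nutzungsbeispiel (Skizze): direkte Validierung eines Dictionaries
# ohne Flask-Request-Kontext; die Feldwerte sind erfundene Beispieldaten
# (beide absichtlich ungültig, um die Fehlerstruktur zu zeigen).
if __name__ == "__main__":
    demo_validator = FormValidator() \
        .add_field("username", StringValidator(min_length=3, max_length=50, required=True)) \
        .add_field("email", EmailValidator(required=True))
    demo_result = demo_validator.validate({"username": "ab", "email": "keine-adresse"})
    print(json.dumps(demo_result.to_dict(), indent=2, ensure_ascii=False))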

25
utils/init_db.py Normal file
View File

@@ -0,0 +1,25 @@
#!/usr/bin/env python3.11
from models import init_database, create_initial_admin
if __name__ == "__main__":
print("Initialisiere Datenbank...")
init_database()
print("Erstelle initialen Admin-Benutzer...")
success = create_initial_admin(
email="admin@mercedes-benz.com",
password="744563017196A",
name="System Administrator",
username="admin"
)
if success:
print("Admin-Benutzer erfolgreich erstellt.")
print("Login-Daten:")
print(" Benutzername: admin")
print(" Passwort: 744563017196A")
else:
print("Admin-Benutzer konnte nicht erstellt werden (existiert bereits?).")
print("Datenbank-Initialisierung abgeschlossen.")

729
utils/job_scheduler.py Normal file
View File

@@ -0,0 +1,729 @@
import threading
import time
import logging
from typing import Dict, Callable, Any, List, Optional, Union
from datetime import datetime, timedelta
from sqlalchemy.orm import joinedload
from utils.logging_config import get_logger
from models import Job, Printer, get_db_session
from config.settings import TAPO_USERNAME, TAPO_PASSWORD
# Lazy logger initialization
_logger = None
def get_scheduler_logger():
"""Lazy initialization of the scheduler logger."""
global _logger
if _logger is None:
_logger = get_logger("scheduler")
return _logger
class BackgroundTaskScheduler:
"""
Ein fortschrittlicher Hintergrund-Task-Scheduler, der registrierbare Worker-Funktionen unterstützt.
Tasks können als Platzhalter registriert und später konfiguriert werden.
"""
def __init__(self):
self._tasks: Dict[str, Dict[str, Any]] = {}
self._thread: Optional[threading.Thread] = None
self._stop_event = threading.Event()
self._running = False
self._start_time: Optional[datetime] = None
self.logger = get_scheduler_logger()
def register_task(self,
task_id: str,
func: Callable,
interval: int = 60,
args: List = None,
kwargs: Dict = None,
enabled: bool = True) -> bool:
"""
Registriert eine neue Hintergrund-Task.
Args:
task_id: Eindeutige ID für die Task
func: Die auszuführende Funktion
interval: Intervall in Sekunden zwischen den Ausführungen
args: Positionsargumente für die Funktion
kwargs: Schlüsselwortargumente für die Funktion
enabled: Ob die Task aktiviert sein soll
Returns:
bool: True wenn erfolgreich, False wenn die ID bereits existiert
"""
if task_id in self._tasks:
self.logger.error(f"Task mit ID {task_id} existiert bereits")
return False
self._tasks[task_id] = {
"func": func,
"interval": interval,
"args": args or [],
"kwargs": kwargs or {},
"enabled": enabled,
"last_run": None,
"next_run": datetime.now() if enabled else None
}
self.logger.info(f"Task {task_id} registriert: Intervall {interval}s, Enabled: {enabled}")
return True
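    # Beispiel (Skizze): Registrierung einer hypothetischen Heartbeat-Task,
    # die alle 2 Minuten läuft:
    #
    #     def heartbeat():
    #         get_scheduler_logger().info("Scheduler lebt")
    #     scheduler.register_task("heartbeat", heartbeat, interval=120)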
def update_task(self,
task_id: str,
interval: Optional[int] = None,
args: Optional[List] = None,
kwargs: Optional[Dict] = None,
enabled: Optional[bool] = None) -> bool:
"""
Aktualisiert die Konfiguration einer bestehenden Task.
Args:
task_id: ID der zu aktualisierenden Task
interval: Neues Intervall in Sekunden
args: Neue Positionsargumente
kwargs: Neue Schlüsselwortargumente
enabled: Neuer Aktivierungsstatus
Returns:
bool: True wenn erfolgreich, False wenn die ID nicht existiert
"""
if task_id not in self._tasks:
self.logger.error(f"Task mit ID {task_id} existiert nicht")
return False
task = self._tasks[task_id]
if interval is not None:
task["interval"] = interval
if args is not None:
task["args"] = args
if kwargs is not None:
task["kwargs"] = kwargs
if enabled is not None and enabled != task["enabled"]:
task["enabled"] = enabled
if enabled:
task["next_run"] = datetime.now()
else:
task["next_run"] = None
self.logger.info(f"Task {task_id} aktualisiert: Intervall {task['interval']}s, Enabled: {task['enabled']}")
return True
def remove_task(self, task_id: str) -> bool:
"""
Entfernt eine Task aus dem Scheduler.
Args:
task_id: ID der zu entfernenden Task
Returns:
bool: True wenn erfolgreich, False wenn die ID nicht existiert
"""
if task_id not in self._tasks:
self.logger.error(f"Task mit ID {task_id} existiert nicht")
return False
del self._tasks[task_id]
self.logger.info(f"Task {task_id} entfernt")
return True
def get_task_info(self, task_id: Optional[str] = None) -> Union[Dict, List[Dict]]:
"""
Gibt Informationen zu einer Task oder allen Tasks zurück.
Args:
task_id: ID der Task oder None für alle Tasks
Returns:
Dict oder List: Task-Informationen
"""
if task_id is not None:
if task_id not in self._tasks:
return {}
task = self._tasks[task_id]
return {
"id": task_id,
"interval": task["interval"],
"enabled": task["enabled"],
"last_run": task["last_run"].isoformat() if task["last_run"] else None,
"next_run": task["next_run"].isoformat() if task["next_run"] else None
}
return [
{
"id": tid,
"interval": task["interval"],
"enabled": task["enabled"],
"last_run": task["last_run"].isoformat() if task["last_run"] else None,
"next_run": task["next_run"].isoformat() if task["next_run"] else None
}
for tid, task in self._tasks.items()
]
def get_tasks(self) -> Dict[str, Dict[str, Any]]:
"""
Gibt alle Tasks mit ihren Konfigurationen zurück.
Returns:
Dict: Dictionary mit Task-IDs als Schlüssel und Task-Konfigurationen als Werte
"""
return {
task_id: {
"interval": task["interval"],
"enabled": task["enabled"],
"last_run": task["last_run"].isoformat() if task["last_run"] else None,
"next_run": task["next_run"].isoformat() if task["next_run"] else None
}
for task_id, task in self._tasks.items()
}
def get_uptime(self) -> Optional[str]:
"""
Gibt die Laufzeit des Schedulers seit dem Start zurück.
Returns:
str: Formatierte Laufzeit oder None, wenn der Scheduler nicht läuft
"""
if not self._running or not self._start_time:
return None
uptime = datetime.now() - self._start_time
days = uptime.days
hours, remainder = divmod(uptime.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
if days > 0:
return f"{days} Tage, {hours} Stunden, {minutes} Minuten"
elif hours > 0:
return f"{hours} Stunden, {minutes} Minuten"
else:
return f"{minutes} Minuten, {seconds} Sekunden"
def start(self) -> bool:
"""
Startet den Scheduler.
Returns:
bool: True wenn erfolgreich gestartet, False wenn bereits läuft
"""
if self._running:
self.logger.warning("Scheduler läuft bereits")
return False
self._stop_event.clear()
self._thread = threading.Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
self._running = True
self._start_time = datetime.now()
self.logger.info("Scheduler gestartet")
return True
def stop(self) -> bool:
"""
Stoppt den Scheduler.
Returns:
bool: True wenn erfolgreich gestoppt, False wenn nicht läuft
"""
if not self._running:
self.logger.warning("Scheduler läuft nicht")
return False
self._stop_event.set()
if self._thread:
self._thread.join(timeout=5.0)
self._running = False
self._start_time = None
self.logger.info("Scheduler gestoppt")
return True
def is_running(self) -> bool:
"""
Prüft, ob der Scheduler läuft.
Returns:
bool: True wenn der Scheduler läuft, sonst False
"""
return self._running
def _run(self) -> None:
"""Hauptloop des Schedulers."""
self.logger.info("Scheduler-Thread gestartet")
while not self._stop_event.is_set():
now = datetime.now()
for task_id, task in self._tasks.items():
if not task["enabled"] or not task["next_run"]:
continue
if now >= task["next_run"]:
try:
self.logger.debug(f"Führe Task {task_id} aus")
task["func"](*task["args"], **task["kwargs"])
task["last_run"] = now
task["next_run"] = now + timedelta(seconds=task["interval"])
self.logger.debug(f"Task {task_id} erfolgreich ausgeführt, nächste Ausführung: {task['next_run']}")
except Exception as e:
self.logger.error(f"Fehler bei Ausführung von Task {task_id}: {str(e)}")
# Trotzdem nächste Ausführung planen
task["next_run"] = now + timedelta(seconds=task["interval"])
# Kurze Pause zwischen den Prüfzyklen, damit Stop-Events zeitnah greifen
time.sleep(1)
self.logger.info("Scheduler-Thread beendet")
def toggle_plug(self, ip: str, state: bool, username: str = None, password: str = None) -> bool:
"""
Schaltet eine TP-Link Tapo P100/P110-Steckdose ein oder aus.
Args:
ip: IP-Adresse der Steckdose
state: True = Ein, False = Aus
username: Benutzername für die Steckdose (wird überschrieben mit globalen Credentials)
password: Passwort für die Steckdose (wird überschrieben mit globalen Credentials)
Returns:
bool: True wenn erfolgreich geschaltet
"""
try:
# PyP100 importieren
try:
from PyP100 import PyP100
except ImportError:
self.logger.error("❌ PyP100-Modul nicht installiert - Steckdose kann nicht geschaltet werden")
return False
# IMMER globale Anmeldedaten verwenden (da diese funktionieren)
from config.settings import TAPO_USERNAME, TAPO_PASSWORD
username = TAPO_USERNAME
password = TAPO_PASSWORD
self.logger.debug(f"🔧 Verwende globale Tapo-Anmeldedaten für {ip}")
# P100-Verbindung herstellen (P100 statt P110 verwenden)
p100 = PyP100.P100(ip, username, password)
# Handshake und Login durchführen
p100.handshake()
p100.login()
# Steckdose schalten
if state:
p100.turnOn()
self.logger.info(f"✅ Tapo-Steckdose {ip} erfolgreich eingeschaltet")
else:
p100.turnOff()
self.logger.info(f"✅ Tapo-Steckdose {ip} erfolgreich ausgeschaltet")
return True
except Exception as e:
action = "ein" if state else "aus"
self.logger.error(f"❌ Fehler beim {action}schalten der Tapo-Steckdose {ip}: {str(e)}")
return False
def toggle_printer_plug(self, printer_id: int, state: bool) -> bool:
"""
Schaltet die Steckdose eines Druckers ein oder aus mit korrektem Status-Mapping:
- Steckdose AUS = Drucker ONLINE (bereit zum Drucken)
- Steckdose AN = Drucker PRINTING (druckt gerade)
Args:
printer_id: ID des Druckers
state: True für ein, False für aus
Returns:
bool: True wenn erfolgreich, False wenn fehlgeschlagen
"""
try:
# Drucker aus Datenbank holen
db_session = get_db_session()
printer = db_session.get(Printer, printer_id)
if not printer:
self.logger.error(f"❌ Drucker mit ID {printer_id} nicht gefunden")
db_session.close()
return False
# Konfiguration validieren
if not printer.plug_ip:
self.logger.error(f"❌ Unvollständige Steckdosen-Konfiguration für Drucker {printer.name}")
db_session.close()
return False
# Steckdose schalten
success = self.toggle_plug(
ip=printer.plug_ip,
state=state,
username=printer.plug_username, # Wird überschrieben mit globalen Credentials
password=printer.plug_password # Wird überschrieben mit globalen Credentials
)
if success:
# Status in Datenbank aktualisieren entsprechend der neuen Logik
if state:
# Steckdose eingeschaltet = Drucker druckt
printer.status = "printing"
self.logger.info(f"🖨️ Drucker {printer.name}: Status auf 'printing' gesetzt (Steckdose eingeschaltet)")
else:
# Steckdose ausgeschaltet = Drucker bereit
printer.status = "online"
self.logger.info(f"✅ Drucker {printer.name}: Status auf 'online' gesetzt (Steckdose ausgeschaltet - bereit)")
printer.last_checked = datetime.now()
db_session.commit()
self.logger.info(f"✅ Status für Drucker {printer.name} erfolgreich aktualisiert")
db_session.close()
return success
except Exception as e:
action = "ein" if state else "aus"
self.logger.error(f"❌ Fehler beim {action}schalten der Steckdose für Drucker {printer_id}: {str(e)}")
try:
db_session.close()
except Exception:
pass
return False
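    # Beispiel (Skizze): Steckdose von Drucker 3 (hypothetische ID) einschalten;
    # der Drucker-Status wechselt dabei gemäß Mapping auf "printing":
    #
    #     scheduler.toggle_printer_plug(printer_id=3, state=True)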
def _check_jobs(self) -> None:
"""
Überprüft und verwaltet Druckjobs mit intelligentem Power Management:
- Startet anstehende Jobs (geplante Jobs)
- Beendet abgelaufene Jobs (schaltet Steckdose aus)
- Schaltet Drucker automatisch aus bei Leerlauf
- Schaltet Drucker automatisch ein bei neuen Jobs
"""
db_session = get_db_session()
try:
now = datetime.now()
# 1. Anstehende Jobs starten (geplante Jobs)
pending_jobs = db_session.query(Job).filter(
Job.status == "scheduled",
Job.start_at <= now
).all()
for job in pending_jobs:
self.logger.info(f"🚀 Starte geplanten Job {job.id}: {job.name}")
# Steckdose einschalten
if self.toggle_printer_plug(job.printer_id, True):
# Job als laufend markieren
job.status = "running"
db_session.commit()
self.logger.info(f"✅ Job {job.id} gestartet - Drucker eingeschaltet")
else:
self.logger.error(f"❌ Konnte Steckdose für Job {job.id} nicht einschalten")
# 2. Sofort-Jobs starten (Jobs die bereits hätten starten sollen)
immediate_jobs = db_session.query(Job).filter(
Job.status == "waiting_for_printer",
Job.start_at <= now
).all()
for job in immediate_jobs:
self.logger.info(f"⚡ Starte Sofort-Job {job.id}: {job.name}")
# Steckdose einschalten
if self.toggle_printer_plug(job.printer_id, True):
# Job als laufend markieren
job.status = "running"
db_session.commit()
self.logger.info(f"✅ Sofort-Job {job.id} gestartet - Drucker automatisch eingeschaltet")
else:
self.logger.error(f"❌ Konnte Steckdose für Sofort-Job {job.id} nicht einschalten")
# 3. Abgelaufene Jobs beenden
running_jobs = db_session.query(Job).filter(
Job.status == "running",
Job.end_at <= now
).all()
for job in running_jobs:
self.logger.info(f"🏁 Beende Job {job.id}: {job.name}")
# Job als beendet markieren
job.status = "finished"
job.actual_end_time = now
db_session.commit()
self.logger.info(f"✅ Job {job.id} beendet")
# Prüfen ob weitere Jobs für diesen Drucker anstehen
pending_jobs_for_printer = db_session.query(Job).filter(
Job.printer_id == job.printer_id,
Job.status.in_(["scheduled", "running", "waiting_for_printer"])
).count()
if pending_jobs_for_printer == 0:
# Keine weiteren Jobs - Drucker ausschalten (Leerlauf-Management)
if self.toggle_printer_plug(job.printer_id, False):
self.logger.info(f"💤 Drucker {job.printer_id} automatisch ausgeschaltet - Leerlauf erkannt")
else:
self.logger.warning(f"⚠️ Konnte Drucker {job.printer_id} nicht ausschalten")
else:
self.logger.info(f"🔄 Drucker {job.printer_id} bleibt eingeschaltet - {pending_jobs_for_printer} weitere Jobs anstehend")
# 4. Intelligentes Leerlauf-Management für alle aktiven Drucker
active_printers = db_session.query(Printer).filter(
Printer.active == True,
Printer.plug_ip.isnot(None),
Printer.status == "online"
).all()
for printer in active_printers:
# Prüfen ob Jobs für diesen Drucker anstehen
active_jobs_count = db_session.query(Job).filter(
Job.printer_id == printer.id,
Job.status.in_(["scheduled", "running", "waiting_for_printer"])
).count()
if active_jobs_count == 0:
# Keine Jobs anstehend - prüfen ob Drucker schon längere Zeit im Leerlauf ist
if printer.last_checked:
idle_time = now - printer.last_checked
# Drucker ausschalten wenn länger als 5 Minuten im Leerlauf
if idle_time.total_seconds() > 300: # 5 Minuten
if self.toggle_printer_plug(printer.id, False):
self.logger.info(f"💤 Drucker {printer.name} nach {idle_time.total_seconds()//60:.0f} Min Leerlauf ausgeschaltet")
else:
self.logger.warning(f"⚠️ Konnte Drucker {printer.name} nach Leerlauf nicht ausschalten")
except Exception as e:
self.logger.error(f"❌ Fehler bei Überprüfung der Jobs: {str(e)}")
try:
db_session.rollback()
except Exception:
pass
finally:
db_session.close()
def handle_immediate_job(self, job_id: int) -> bool:
"""
Behandelt einen Job sofort (für Sofort-Start bei Job-Erstellung).
Args:
job_id: ID des zu startenden Jobs
Returns:
bool: True wenn Job erfolgreich gestartet wurde
"""
db_session = get_db_session()
try:
now = datetime.now()
# Job aus Datenbank laden
job = db_session.get(Job, job_id)
if not job:
self.logger.error(f"❌ Job {job_id} nicht gefunden")
db_session.close()
return False
# Nur Jobs behandeln die sofort starten sollen
if job.start_at > now:
self.logger.info(f"⏰ Job {job_id} ist für später geplant ({job.start_at}) - kein Sofort-Start")
db_session.close()
return False
# Nur Jobs in passenden Status
if job.status not in ["scheduled", "waiting_for_printer"]:
self.logger.info(f" Job {job_id} hat Status '{job.status}' - kein Sofort-Start nötig")
db_session.close()
return False
self.logger.info(f"⚡ Starte Sofort-Job {job_id}: {job.name} für Drucker {job.printer_id}")
# Steckdose einschalten
if self.toggle_printer_plug(job.printer_id, True):
# Job als laufend markieren
job.status = "running"
db_session.commit()
db_session.close()
self.logger.info(f"✅ Sofort-Job {job_id} erfolgreich gestartet - Drucker automatisch eingeschaltet")
return True
else:
self.logger.error(f"❌ Konnte Steckdose für Sofort-Job {job_id} nicht einschalten")
db_session.close()
return False
except Exception as e:
self.logger.error(f"❌ Fehler beim Starten von Sofort-Job {job_id}: {str(e)}")
try:
db_session.rollback()
db_session.close()
except Exception:
pass
return False
def check_and_manage_printer_power(self, printer_id: int) -> bool:
"""
Prüft und verwaltet die Stromversorgung eines spezifischen Druckers.
Args:
printer_id: ID des zu prüfenden Druckers
Returns:
bool: True wenn Power-Management erfolgreich
"""
db_session = get_db_session()
try:
now = datetime.now()
# Drucker laden
printer = db_session.get(Printer, printer_id)
if not printer or not printer.plug_ip:
db_session.close()
return False
# Aktive Jobs für diesen Drucker prüfen
active_jobs = db_session.query(Job).filter(
Job.printer_id == printer_id,
Job.status.in_(["scheduled", "running", "waiting_for_printer"])
).all()
current_jobs = [job for job in active_jobs if job.start_at <= now]
future_jobs = [job for job in active_jobs if job.start_at > now]
if current_jobs:
# Jobs laufen oder sollten laufen - Drucker einschalten
self.logger.info(f"🔋 Drucker {printer.name} benötigt Strom - {len(current_jobs)} aktive Jobs")
success = self.toggle_printer_plug(printer_id, True)
# Jobs von waiting_for_printer auf running umstellen
for job in current_jobs:
if job.status == "waiting_for_printer":
job.status = "running"
self.logger.info(f"🚀 Job {job.id} von 'waiting_for_printer' auf 'running' umgestellt")
db_session.commit()
db_session.close()
return success
elif future_jobs:
# Nur zukünftige Jobs - Drucker kann ausgeschaltet bleiben
next_job_time = min(job.start_at for job in future_jobs)
time_until_next = (next_job_time - now).total_seconds() / 60
self.logger.info(f"⏳ Drucker {printer.name} hat {len(future_jobs)} zukünftige Jobs, nächster in {time_until_next:.1f} Min")
# Drucker ausschalten wenn nächster Job erst in mehr als 10 Minuten
if time_until_next > 10:
success = self.toggle_printer_plug(printer_id, False)
db_session.close()
return success
else:
self.logger.info(f"🔄 Drucker {printer.name} bleibt eingeschaltet - nächster Job bald")
db_session.close()
return True
else:
# Keine Jobs - Drucker ausschalten (Leerlauf)
self.logger.info(f"💤 Drucker {printer.name} hat keine anstehenden Jobs - ausschalten")
success = self.toggle_printer_plug(printer_id, False)
db_session.close()
return success
except Exception as e:
self.logger.error(f"❌ Fehler beim Power-Management für Drucker {printer_id}: {str(e)}")
try:
db_session.close()
except Exception:
pass
return False
def test_tapo_connection(ip_address: str, username: str = None, password: str = None) -> dict:
"""
Testet die Verbindung zu einer TP-Link Tapo P100/P110-Steckdose.
Args:
ip_address: IP-Adresse der Steckdose
username: Benutzername für die Steckdose (optional)
password: Passwort für die Steckdose (optional)
Returns:
dict: Ergebnis mit Status und Informationen
"""
logger = get_logger("tapo")
result = {
"success": False,
"message": "",
"device_info": None,
"error": None
}
try:
# Importiere PyP100 für Tapo-Unterstützung
try:
from PyP100 import PyP100
except ImportError:
result["message"] = "PyP100-Modul nicht verfügbar"
result["error"] = "ModuleNotFound"
logger.error("PyP100-Modul nicht verfügbar - kann Tapo-Steckdosen nicht testen")
return result
# Verwende globale Anmeldedaten falls nicht angegeben
if not username or not password:
from config.settings import TAPO_USERNAME, TAPO_PASSWORD
username = TAPO_USERNAME
password = TAPO_PASSWORD
logger.debug(f"Verwende globale Tapo-Anmeldedaten für {ip_address}")
# TP-Link Tapo P100 Verbindung herstellen
p100 = PyP100.P100(ip_address, username, password)
p100.handshake() # Authentifizierung
p100.login() # Login
# Geräteinformationen abrufen
device_info = p100.getDeviceInfo()
result["success"] = True
result["message"] = "Verbindung erfolgreich"
result["device_info"] = device_info
logger.info(f"Tapo-Verbindung zu {ip_address} erfolgreich: {device_info.get('nickname', 'Unbekannt')}")
except Exception as e:
result["success"] = False
result["message"] = f"Verbindungsfehler: {str(e)}"
result["error"] = str(e)
logger.error(f"Fehler bei Tapo-Test zu {ip_address}: {str(e)}")
return result
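# Beispiel (Skizze): Verbindungstest mit den globalen Anmeldedaten
# (IP-Adresse hypothetisch):
#
#     result = test_tapo_connection("192.168.0.100")
#     if result["success"]:
#         print(result["device_info"].get("nickname"))
#     else:
#         print(result["message"])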
# Scheduler-Instanz erzeugen
scheduler = BackgroundTaskScheduler()
# Standardaufgaben registrieren - reduziertes Intervall für bessere Reaktionszeit
scheduler.register_task("check_jobs", scheduler._check_jobs, interval=30)
# Alias für Kompatibilität
JobScheduler = BackgroundTaskScheduler
def get_job_scheduler() -> BackgroundTaskScheduler:
"""
Gibt den globalen Job-Scheduler zurück.
Returns:
BackgroundTaskScheduler: Der globale Scheduler
"""
return scheduler
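# Beispiel (Skizze): Standalone-Betrieb des Schedulers, etwa via
# "python -m utils.job_scheduler" aus dem App-Hauptverzeichnis gestartet.
# Die Endlosschleife dient nur der Demonstration.
if __name__ == "__main__":
    s = get_job_scheduler()
    s.start()
    try:
        while s.is_running():
            time.sleep(10)
    except KeyboardInterrupt:
        s.stop()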

374
utils/logging_config.py Normal file
View File

@@ -0,0 +1,374 @@
# -*- coding: utf-8 -*-
"""
Windows-sichere Logging-Konfiguration für MYP Platform
======================================================
Robuste Logging-Konfiguration mit Windows-spezifischen Fixes für File-Locking-Probleme.
"""
import os
import sys
import time
import logging
import threading
from datetime import datetime
from functools import wraps
from typing import Optional, Dict, Any
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
# ===== WINDOWS-SICHERE LOGGING-KLASSE =====
class WindowsSafeRotatingFileHandler(RotatingFileHandler):
"""
Windows-sichere Implementierung von RotatingFileHandler.
Behebt das WinError 32 Problem bei gleichzeitigen Log-Dateizugriffen.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=False):
# Verwende UTF-8 Encoding standardmäßig
if encoding is None:
encoding = 'utf-8'
# Windows-spezifische Konfiguration
self._windows_safe_mode = os.name == 'nt'
self._rotation_lock = threading.Lock()
super().__init__(filename, mode, maxBytes, backupCount, encoding, delay)
def doRollover(self):
"""
Windows-sichere Log-Rotation mit verbessertem Error-Handling.
"""
if not self._windows_safe_mode:
# Normale Rotation für Unix-Systeme
return super().doRollover()
# Windows-spezifische sichere Rotation
with self._rotation_lock:
try:
if self.stream:
self.stream.close()
self.stream = None
# Warte kurz bevor Rotation versucht wird
time.sleep(0.1)
# Versuche Rotation mehrmals mit exponentialem Backoff
max_attempts = 5
for attempt in range(max_attempts):
try:
# Rotation durchführen
super().doRollover()
break
except (PermissionError, OSError) as e:
if attempt == max_attempts - 1:
# Bei letztem Versuch: Erstelle neue Log-Datei ohne Rotation
print(f"WARNUNG: Log-Rotation fehlgeschlagen - erstelle neue Datei: {e}")
self._create_new_log_file()
break
else:
# Warte exponentiell länger bei jedem Versuch
wait_time = 0.5 * (2 ** attempt)
time.sleep(wait_time)
except Exception as e:
print(f"KRITISCHER FEHLER bei Log-Rotation: {e}")
# Notfall: Erstelle neue Log-Datei
self._create_new_log_file()
def _create_new_log_file(self):
"""
Erstellt eine neue Log-Datei als Fallback wenn Rotation fehlschlägt.
"""
try:
# Füge Timestamp zum Dateinamen hinzu
timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
base_name, ext = os.path.splitext(self.baseFilename)
new_filename = f"{base_name}_{timestamp}{ext}"
# Öffne neue Datei
self.baseFilename = new_filename
self.stream = self._open()
except Exception as e:
print(f"NOTFALL: Kann keine neue Log-Datei erstellen: {e}")
# Letzter Ausweg: Console-Logging
self.stream = sys.stderr
# ===== GLOBALE LOGGING-KONFIGURATION =====
# Logger-Registry für Singleton-Pattern
_logger_registry: Dict[str, logging.Logger] = {}
_logging_initialized = False
_init_lock = threading.Lock()
def setup_logging(log_level: str = "INFO", base_log_dir: str = None) -> None:
"""
Initialisiert das zentrale Logging-System mit Windows-sicherer Konfiguration.
Args:
log_level: Logging-Level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
base_log_dir: Basis-Verzeichnis für Log-Dateien
"""
global _logging_initialized
with _init_lock:
if _logging_initialized:
return
try:
# Bestimme Log-Verzeichnis
if base_log_dir is None:
current_dir = os.path.dirname(os.path.abspath(__file__))
base_log_dir = os.path.join(current_dir, '..', 'logs')
# Erstelle Log-Verzeichnisse
log_dirs = ['app', 'auth', 'jobs', 'printers', 'scheduler', 'errors']
for log_dir in log_dirs:
full_path = os.path.join(base_log_dir, log_dir)
os.makedirs(full_path, exist_ok=True)
# Konfiguriere Root-Logger
root_logger = logging.getLogger()
root_logger.setLevel(getattr(logging, log_level.upper(), logging.INFO))
# Entferne existierende Handler um Duplikate zu vermeiden
for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
# Console-Handler für kritische Meldungen
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.WARNING)
console_formatter = logging.Formatter(
'%(asctime)s - %(name)s - [%(levelname)s] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
console_handler.setFormatter(console_formatter)
root_logger.addHandler(console_handler)
_logging_initialized = True
print(f"✅ Logging-System erfolgreich initialisiert (Level: {log_level})")
except Exception as e:
print(f"❌ KRITISCHER FEHLER bei Logging-Initialisierung: {e}")
# Notfall-Konfiguration
logging.basicConfig(
level=getattr(logging, log_level.upper(), logging.INFO),
format='%(asctime)s - %(name)s - [%(levelname)s] - %(message)s',
handlers=[logging.StreamHandler(sys.stdout)]
)
_logging_initialized = True
def get_logger(name: str, log_level: str = None) -> logging.Logger:
"""
Erstellt oder gibt einen konfigurierten Logger zurück.
Args:
name: Name des Loggers (z.B. 'app', 'auth', 'jobs')
log_level: Optionaler spezifischer Log-Level für diesen Logger
Returns:
Konfigurierter Logger
"""
global _logger_registry
# Stelle sicher, dass Logging initialisiert ist
if not _logging_initialized:
setup_logging()
# Prüfe Registry für existierenden Logger
if name in _logger_registry:
return _logger_registry[name]
try:
# Erstelle neuen Logger
logger = logging.getLogger(name)
# Setze spezifischen Level falls angegeben
if log_level:
logger.setLevel(getattr(logging, log_level.upper(), logging.INFO))
# Erstelle File-Handler mit Windows-sicherer Rotation
log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'logs', name)
os.makedirs(log_dir, exist_ok=True)
log_file = os.path.join(log_dir, f'{name}.log')
# Windows-sicherer RotatingFileHandler
file_handler = WindowsSafeRotatingFileHandler(
log_file,
maxBytes=10*1024*1024, # 10MB
backupCount=5,
encoding='utf-8'
)
# Detaillierter Formatter für File-Logs
file_formatter = logging.Formatter(
'%(asctime)s - [%(name)s] - [%(levelname)s] - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
file_handler.setFormatter(file_formatter)
# Handler hinzufügen
logger.addHandler(file_handler)
# Verhindere Propagation zu Root-Logger um Duplikate zu vermeiden
logger.propagate = False
# In Registry speichern
_logger_registry[name] = logger
return logger
except Exception as e:
print(f"❌ Fehler beim Erstellen des Loggers '{name}': {e}")
# Fallback: Einfacher Logger ohne File-Handler
fallback_logger = logging.getLogger(name)
if name not in _logger_registry:
_logger_registry[name] = fallback_logger
return fallback_logger
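# Beispiel (Skizze): Subsystem-Logger anfordern und nutzen; die Ausgabe
# landet rotierend in logs/<name>/<name>.log (UTF-8, max. 10 MB, 5 Backups).
#
#     jobs_logger = get_logger("jobs")
#     jobs_logger.info("Job 42 gestartet")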
# ===== PERFORMANCE-MEASUREMENT DECORATOR =====
def measure_execution_time(logger: logging.Logger = None, task_name: str = "Task"):
"""
Decorator zur Messung und Protokollierung der Ausführungszeit von Funktionen.
Args:
logger: Logger-Instanz für die Ausgabe
task_name: Bezeichnung der Aufgabe für die Logs
Returns:
Decorator-Funktion
"""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
start_time = time.time()
# Verwende provided Logger oder erstelle Standard-Logger
log = logger or get_logger("performance")
try:
# Führe Funktion aus
result = func(*args, **kwargs)
# Berechne Ausführungszeit
execution_time = (time.time() - start_time) * 1000 # in Millisekunden
# Protokolliere Erfolg
log.info(f"{task_name} '{func.__name__}' erfolgreich in {execution_time:.2f}ms")
return result
except Exception as e:
# Berechne Ausführungszeit auch bei Fehlern
execution_time = (time.time() - start_time) * 1000
# Protokolliere Fehler
log.error(f"{task_name} '{func.__name__}' fehlgeschlagen nach {execution_time:.2f}ms: {str(e)}")
# Exception weiterleiten
raise
return wrapper
return decorator
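# Beispiel (Skizze): Anwendung des Decorators auf eine hypothetische Funktion.
#
#     @measure_execution_time(task_name="Datenbank-Abfrage")
#     def load_jobs():
#         ...
#
# Jeder Aufruf protokolliert Erfolg oder Fehler samt Laufzeit in Millisekunden
# im "performance"-Logger.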
# ===== STARTUP/DEBUG LOGGING =====
def log_startup_info():
"""
Protokolliert System-Startup-Informationen.
"""
startup_logger = get_logger("startup")
try:
startup_logger.info("=" * 50)
startup_logger.info("🚀 MYP Platform Backend wird gestartet...")
startup_logger.info(f"🐍 Python Version: {sys.version}")
startup_logger.info(f"💻 Betriebssystem: {os.name} ({sys.platform})")
startup_logger.info(f"📁 Arbeitsverzeichnis: {os.getcwd()}")
startup_logger.info(f"⏰ Startzeit: {datetime.now().isoformat()}")
# Windows-spezifische Informationen
if os.name == 'nt':
startup_logger.info("🪟 Windows-Modus: Aktiviert")
startup_logger.info("🔒 Windows-sichere Log-Rotation: Aktiviert")
startup_logger.info("=" * 50)
except Exception as e:
print(f"❌ Fehler beim Startup-Logging: {e}")
def debug_request(logger: logging.Logger, request) -> None:
"""
Detailliertes Request-Debugging.
Args:
logger: Logger für die Ausgabe
request: Flask Request-Objekt
"""
try:
logger.debug(f"📨 REQUEST: {request.method} {request.path}")
logger.debug(f"🌐 Remote-Adresse: {request.remote_addr}")
logger.debug(f"🔤 Content-Type: {request.content_type}")
if request.args:
logger.debug(f"❓ Query-Parameter: {dict(request.args)}")
if request.form and logger.level <= logging.DEBUG:
# Filtere sensible Daten aus Form-Daten
safe_form = {k: '***' if 'password' in k.lower() else v for k, v in request.form.items()}
logger.debug(f"📝 Form-Daten: {safe_form}")
except Exception as e:
logger.error(f"❌ Fehler beim Request-Debugging: {str(e)}")
def debug_response(logger: logging.Logger, response, duration_ms: Optional[float] = None) -> None:
"""
Detailliertes Response-Debugging.
Args:
logger: Logger für die Ausgabe
response: Flask Response-Objekt
duration_ms: Optionale Ausführungszeit in Millisekunden
"""
try:
status_emoji = "" if response.status_code < 400 else "" if response.status_code >= 500 else "⚠️"
log_msg = f"📤 RESPONSE: {status_emoji} {response.status_code}"
if duration_ms is not None:
log_msg += f" ({duration_ms:.2f}ms)"
logger.debug(log_msg)
logger.debug(f"📏 Content-Length: {response.content_length or 'Unbekannt'}")
except Exception as e:
logger.error(f"❌ Fehler beim Response-Debugging: {str(e)}")
# ===== NOTFALL-LOGGING =====
def emergency_log(message: str, level: str = "ERROR") -> None:
"""
Notfall-Logging das auch funktioniert wenn das Hauptsystem fehlschlägt.
Args:
message: Nachricht
level: Log-Level
"""
try:
# Versuche normales Logging
logger = get_logger("emergency")
getattr(logger, level.lower(), logger.error)(message)
except Exception:
# Fallback zu Print
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"[NOTFALL {timestamp}] [{level}] {message}")
# Auto-Initialisierung beim Import
if __name__ != "__main__":
try:
setup_logging()
except Exception as e:
print(f"❌ Auto-Initialisierung des Logging-Systems fehlgeschlagen: {e}")

790
utils/maintenance_system.py Normal file
View File

@@ -0,0 +1,790 @@
"""
Wartungsplanungs- und Tracking-System für das MYP-System
========================================================
Dieses Modul stellt umfassende Wartungsfunktionalität bereit:
- Geplante und ungeplante Wartungen
- Wartungsintervalle und Erinnerungen
- Wartungshistorie und Berichte
- Automatische Wartungsprüfungen
- Ersatzteil-Management
- Techniker-Zuweisungen
"""
import asyncio
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Callable
from dataclasses import dataclass, asdict
from enum import Enum
import threading
import schedule
import time
from utils.logging_config import get_logger
from models import Printer, get_db_session
from utils.email_notification import send_email_notification
from utils.realtime_dashboard import emit_system_alert
logger = get_logger("maintenance")
class MaintenanceType(Enum):
"""Arten von Wartungen"""
PREVENTIVE = "preventive" # Vorbeugende Wartung
CORRECTIVE = "corrective" # Reparatur/Korrektur
EMERGENCY = "emergency" # Notfall-Wartung
SCHEDULED = "scheduled" # Geplante Wartung
INSPECTION = "inspection" # Inspektion
class MaintenanceStatus(Enum):
"""Status einer Wartung"""
PLANNED = "planned" # Geplant
SCHEDULED = "scheduled" # Terminiert
IN_PROGRESS = "in_progress" # In Bearbeitung
COMPLETED = "completed" # Abgeschlossen
CANCELLED = "cancelled" # Abgebrochen
OVERDUE = "overdue" # Überfällig
class MaintenancePriority(Enum):
"""Priorität einer Wartung"""
LOW = "low" # Niedrig
NORMAL = "normal" # Normal
HIGH = "high" # Hoch
CRITICAL = "critical" # Kritisch
EMERGENCY = "emergency" # Notfall
@dataclass
class MaintenanceTask:
"""Wartungsaufgabe"""
id: Optional[int] = None
printer_id: int = None
title: str = ""
description: str = ""
maintenance_type: MaintenanceType = MaintenanceType.PREVENTIVE
priority: MaintenancePriority = MaintenancePriority.NORMAL
status: MaintenanceStatus = MaintenanceStatus.PLANNED
scheduled_date: Optional[datetime] = None
due_date: Optional[datetime] = None
estimated_duration: int = 60 # Minuten
actual_duration: Optional[int] = None
assigned_technician: Optional[str] = None
created_at: datetime = None
started_at: Optional[datetime] = None
completed_at: Optional[datetime] = None
notes: str = ""
required_parts: List[str] = None
actual_parts_used: List[str] = None
cost: Optional[float] = None
checklist: List[Dict[str, Any]] = None
photos: List[str] = None
created_by: Optional[int] = None
@dataclass
class MaintenanceSchedule:
"""Wartungsplan"""
printer_id: int
maintenance_type: MaintenanceType
interval_days: int
next_due: datetime
last_completed: Optional[datetime] = None
is_active: bool = True
description: str = ""
checklist_template: List[str] = None
@dataclass
class MaintenanceMetrics:
"""Wartungsmetriken"""
total_tasks: int = 0
completed_tasks: int = 0
overdue_tasks: int = 0
average_completion_time: float = 0.0
total_cost: float = 0.0
mtbf: float = 0.0 # Mean Time Between Failures
mttr: float = 0.0 # Mean Time To Repair
uptime_percentage: float = 0.0
class MaintenanceManager:
"""Manager für Wartungsplanung und -tracking"""
def __init__(self):
self.tasks: Dict[int, MaintenanceTask] = {}
self.schedules: Dict[int, List[MaintenanceSchedule]] = {}
self.maintenance_history: List[MaintenanceTask] = []
self.next_task_id = 1
self.is_running = False
self._setup_scheduler()
def _setup_scheduler(self):
"""Richtet automatische Wartungsplanung ein"""
schedule.every().day.at("06:00").do(self._check_scheduled_maintenance)
schedule.every().hour.do(self._check_overdue_tasks)
schedule.every().monday.at("08:00").do(self._generate_weekly_report)
# Scheduler in separatem Thread
def run_scheduler():
while self.is_running:
schedule.run_pending()
time.sleep(60) # Check every minute
self.is_running = True
scheduler_thread = threading.Thread(target=run_scheduler, daemon=True)
scheduler_thread.start()
logger.info("Wartungs-Scheduler gestartet")
def create_task(self, task: MaintenanceTask) -> int:
"""Erstellt eine neue Wartungsaufgabe"""
task.id = self.next_task_id
self.next_task_id += 1
task.created_at = datetime.now()
self.tasks[task.id] = task
# Automatische Terminierung für vorbeugende Wartungen
if task.maintenance_type == MaintenanceType.PREVENTIVE and not task.scheduled_date:
task.scheduled_date = self._calculate_next_maintenance_date(task.printer_id)
# Benachrichtigungen senden
self._send_task_notifications(task, "created")
logger.info(f"Wartungsaufgabe erstellt: {task.title} für Drucker {task.printer_id}")
return task.id
def update_task_status(self, task_id: int, new_status: MaintenanceStatus, notes: str = "") -> bool:
"""Aktualisiert den Status einer Wartungsaufgabe"""
if task_id not in self.tasks:
return False
task = self.tasks[task_id]
old_status = task.status
task.status = new_status
# Zeitstempel setzen
if new_status == MaintenanceStatus.IN_PROGRESS:
task.started_at = datetime.now()
elif new_status == MaintenanceStatus.COMPLETED:
task.completed_at = datetime.now()
if task.started_at:
task.actual_duration = int((task.completed_at - task.started_at).total_seconds() / 60)
# Zur Historie hinzufügen
self.maintenance_history.append(task)
# Nächste Wartung planen
self._schedule_next_maintenance(task)
if notes:
task.notes += f"\n{datetime.now().strftime('%d.%m.%Y %H:%M')}: {notes}"
# Benachrichtigungen senden
if old_status != new_status:
self._send_task_notifications(task, "status_changed")
logger.info(f"Wartungsaufgabe {task_id} Status: {old_status.value}{new_status.value}")
return True
def schedule_maintenance(self, printer_id: int, maintenance_type: MaintenanceType,
interval_days: int, description: str = "") -> MaintenanceSchedule:
"""Plant regelmäßige Wartungen"""
schedule_item = MaintenanceSchedule(
printer_id=printer_id,
maintenance_type=maintenance_type,
interval_days=interval_days,
next_due=datetime.now() + timedelta(days=interval_days),
description=description
)
if printer_id not in self.schedules:
self.schedules[printer_id] = []
self.schedules[printer_id].append(schedule_item)
logger.info(f"Wartungsplan erstellt: {maintenance_type.value} alle {interval_days} Tage für Drucker {printer_id}")
return schedule_item
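    # Beispiel (Skizze): Monatliche Inspektion für Drucker 2 (hypothetische ID)
    # einplanen; der tägliche 06:00-Uhr-Lauf erzeugt daraus fällige Aufgaben:
    #
    #     maintenance_manager.schedule_maintenance(
    #         printer_id=2,
    #         maintenance_type=MaintenanceType.INSPECTION,
    #         interval_days=30,
    #         description="Monatliche Sichtprüfung",
    #     )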
def get_upcoming_maintenance(self, days_ahead: int = 7) -> List[MaintenanceTask]:
"""Holt anstehende Wartungen"""
cutoff_date = datetime.now() + timedelta(days=days_ahead)
upcoming = []
for task in self.tasks.values():
if (task.status in [MaintenanceStatus.PLANNED, MaintenanceStatus.SCHEDULED] and
task.due_date and task.due_date <= cutoff_date):
upcoming.append(task)
return sorted(upcoming, key=lambda t: t.due_date or datetime.max)
def get_overdue_tasks(self) -> List[MaintenanceTask]:
"""Holt überfällige Wartungen"""
now = datetime.now()
overdue = []
for task in self.tasks.values():
if (task.status in [MaintenanceStatus.PLANNED, MaintenanceStatus.SCHEDULED] and
task.due_date and task.due_date < now):
task.status = MaintenanceStatus.OVERDUE
overdue.append(task)
return overdue
def get_maintenance_metrics(self, printer_id: Optional[int] = None,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None) -> MaintenanceMetrics:
"""Berechnet Wartungsmetriken"""
# Filter tasks
tasks = self.maintenance_history.copy()
if printer_id:
tasks = [t for t in tasks if t.printer_id == printer_id]
if start_date:
tasks = [t for t in tasks if t.completed_at and t.completed_at >= start_date]
if end_date:
tasks = [t for t in tasks if t.completed_at and t.completed_at <= end_date]
if not tasks:
return MaintenanceMetrics()
completed_tasks = [t for t in tasks if t.status == MaintenanceStatus.COMPLETED]
# Grundmetriken
total_tasks = len(tasks)
completed_count = len(completed_tasks)
# Durchschnittliche Bearbeitungszeit
completion_times = [t.actual_duration for t in completed_tasks if t.actual_duration]
avg_completion_time = sum(completion_times) / len(completion_times) if completion_times else 0
# Gesamtkosten
total_cost = sum(t.cost for t in completed_tasks if t.cost)
# MTBF und MTTR berechnen
mtbf = self._calculate_mtbf(tasks, printer_id)
mttr = avg_completion_time / 60 # Konvertiere zu Stunden
# Verfügbarkeit berechnen
uptime_percentage = self._calculate_uptime(printer_id, start_date, end_date)
return MaintenanceMetrics(
total_tasks=total_tasks,
completed_tasks=completed_count,
overdue_tasks=len(self.get_overdue_tasks()),
average_completion_time=avg_completion_time,
total_cost=total_cost,
mtbf=mtbf,
mttr=mttr,
uptime_percentage=uptime_percentage
)
def create_maintenance_checklist(self, maintenance_type: MaintenanceType) -> List[Dict[str, Any]]:
"""Erstellt eine Wartungs-Checkliste"""
checklists = {
MaintenanceType.PREVENTIVE: [
{"task": "Drucker äußerlich reinigen", "completed": False, "required": True},
{"task": "Druckbett-Level prüfen", "completed": False, "required": True},
{"task": "Extruder-Düse reinigen", "completed": False, "required": True},
{"task": "Riemen-Spannung prüfen", "completed": False, "required": True},
{"task": "Filament-Führung prüfen", "completed": False, "required": False},
{"task": "Software-Updates prüfen", "completed": False, "required": False},
{"task": "Lüfter reinigen", "completed": False, "required": True},
{"task": "Schrauben nachziehen", "completed": False, "required": False}
],
MaintenanceType.CORRECTIVE: [
{"task": "Problem-Diagnose durchführen", "completed": False, "required": True},
{"task": "Defekte Teile identifizieren", "completed": False, "required": True},
{"task": "Ersatzteile bestellen/bereitstellen", "completed": False, "required": True},
{"task": "Reparatur durchführen", "completed": False, "required": True},
{"task": "Funktionstest durchführen", "completed": False, "required": True},
{"task": "Kalibrierung prüfen", "completed": False, "required": True}
],
MaintenanceType.INSPECTION: [
{"task": "Sichtprüfung der Mechanik", "completed": False, "required": True},
{"task": "Druckqualität testen", "completed": False, "required": True},
{"task": "Temperaturen prüfen", "completed": False, "required": True},
{"task": "Bewegungen testen", "completed": False, "required": True},
{"task": "Verschleiß bewerten", "completed": False, "required": True}
]
}
return checklists.get(maintenance_type, [])
def _check_scheduled_maintenance(self):
"""Prüft täglich auf fällige Wartungen"""
logger.info("Prüfe fällige Wartungen...")
today = datetime.now()
for printer_id, schedules in self.schedules.items():
for schedule_item in schedules:
if not schedule_item.is_active:
continue
if schedule_item.next_due <= today:
# Erstelle Wartungsaufgabe
task = MaintenanceTask(
printer_id=printer_id,
title=f"{schedule_item.maintenance_type.value.title()} Wartung",
description=schedule_item.description,
maintenance_type=schedule_item.maintenance_type,
priority=MaintenancePriority.NORMAL,
due_date=schedule_item.next_due,
checklist=self.create_maintenance_checklist(schedule_item.maintenance_type)
)
task_id = self.create_task(task)
# Nächsten Termin berechnen
schedule_item.next_due = today + timedelta(days=schedule_item.interval_days)
logger.info(f"Automatische Wartungsaufgabe erstellt: {task_id}")
def _check_overdue_tasks(self):
"""Prüft stündlich auf überfällige Aufgaben"""
overdue = self.get_overdue_tasks()
if overdue:
logger.warning(f"{len(overdue)} überfällige Wartungsaufgaben gefunden")
for task in overdue:
emit_system_alert(
f"Wartung überfällig: {task.title} (Drucker {task.printer_id})",
"warning",
"high"
)
def _generate_weekly_report(self):
"""Generiert wöchentlichen Wartungsbericht"""
logger.info("Generiere wöchentlichen Wartungsbericht...")
# Sammle Daten der letzten Woche
last_week = datetime.now() - timedelta(days=7)
metrics = self.get_maintenance_metrics(start_date=last_week)
# Sende Report (Implementation abhängig von verfügbaren Services)
# send_maintenance_report(metrics)
def _calculate_next_maintenance_date(self, printer_id: int) -> datetime:
"""Berechnet nächstes Wartungsdatum basierend auf Nutzung"""
# Vereinfachte Implementierung - kann erweitert werden
base_interval = 30 # Tage
# Hier könnte man Nutzungsstatistiken einbeziehen
with get_db_session() as db_session:
printer = db_session.query(Printer).filter(Printer.id == printer_id).first()
if printer:
# Berücksichtige letzten Check
if printer.last_checked:
days_since_check = (datetime.now() - printer.last_checked).days
if days_since_check < 15: # Kürzlich gecheckt
base_interval += 15
return datetime.now() + timedelta(days=base_interval)
def _schedule_next_maintenance(self, completed_task: MaintenanceTask):
"""Plant nächste Wartung nach Abschluss einer Aufgabe"""
if completed_task.maintenance_type == MaintenanceType.PREVENTIVE:
# Finde entsprechenden Schedule
printer_schedules = self.schedules.get(completed_task.printer_id, [])
for schedule_item in printer_schedules:
if schedule_item.maintenance_type == completed_task.maintenance_type:
schedule_item.last_completed = completed_task.completed_at
schedule_item.next_due = datetime.now() + timedelta(days=schedule_item.interval_days)
break
def _calculate_mtbf(self, tasks: List[MaintenanceTask], printer_id: Optional[int]) -> float:
"""Berechnet Mean Time Between Failures"""
# Vereinfachte MTBF-Berechnung
failure_tasks = [t for t in tasks if t.maintenance_type == MaintenanceType.CORRECTIVE]
if len(failure_tasks) < 2:
return 0.0
# Zeitspanne zwischen ersten und letzten Ausfall
first_failure = min(failure_tasks, key=lambda t: t.created_at)
last_failure = max(failure_tasks, key=lambda t: t.created_at)
total_time = (last_failure.created_at - first_failure.created_at).total_seconds() / 3600 # Stunden
failure_count = len(failure_tasks) - 1
return total_time / failure_count if failure_count > 0 else 0.0
def _calculate_uptime(self, printer_id: Optional[int], start_date: Optional[datetime],
end_date: Optional[datetime]) -> float:
"""Berechnet Verfügbarkeit in Prozent"""
# Vereinfachte Uptime-Berechnung
if not start_date:
start_date = datetime.now() - timedelta(days=30)
if not end_date:
end_date = datetime.now()
total_time = (end_date - start_date).total_seconds()
# Berechne Downtime aus Wartungszeiten
downtime = 0
for task in self.maintenance_history:
if printer_id and task.printer_id != printer_id:
continue
if (task.status == MaintenanceStatus.COMPLETED and
task.started_at and task.completed_at and
task.started_at >= start_date and task.completed_at <= end_date):
downtime += (task.completed_at - task.started_at).total_seconds()
uptime = ((total_time - downtime) / total_time) * 100 if total_time > 0 else 0
return max(0, min(100, uptime))
def _send_task_notifications(self, task: MaintenanceTask, event_type: str):
"""Sendet Benachrichtigungen für Wartungsaufgaben"""
try:
if event_type == "created":
emit_system_alert(
f"Neue Wartungsaufgabe: {task.title} (Drucker {task.printer_id})",
"info",
"normal"
)
elif event_type == "status_changed":
emit_system_alert(
f"Wartungsstatus geändert: {task.title}{task.status.value}",
"info",
"normal"
)
except Exception as e:
logger.error(f"Fehler beim Senden der Wartungsbenachrichtigung: {str(e)}")
# Globale Instanz
maintenance_manager = MaintenanceManager()
def get_maintenance_dashboard_data() -> Dict[str, Any]:
"""Holt Dashboard-Daten für Wartungen"""
upcoming = maintenance_manager.get_upcoming_maintenance()
overdue = maintenance_manager.get_overdue_tasks()
metrics = maintenance_manager.get_maintenance_metrics()
return {
'upcoming_count': len(upcoming),
'overdue_count': len(overdue),
'upcoming_tasks': [asdict(task) for task in upcoming[:5]],
'overdue_tasks': [asdict(task) for task in overdue],
'metrics': asdict(metrics),
'next_scheduled': upcoming[0] if upcoming else None
}
def create_emergency_maintenance(printer_id: int, description: str,
priority: MaintenancePriority = MaintenancePriority.CRITICAL) -> int:
"""Erstellt eine Notfall-Wartung"""
task = MaintenanceTask(
printer_id=printer_id,
title="Notfall-Wartung",
description=description,
maintenance_type=MaintenanceType.EMERGENCY,
priority=priority,
due_date=datetime.now(), # Sofort fällig
checklist=maintenance_manager.create_maintenance_checklist(MaintenanceType.CORRECTIVE)
)
return maintenance_manager.create_task(task)
def schedule_preventive_maintenance(printer_id: int, interval_days: int = 30) -> MaintenanceSchedule:
"""Plant vorbeugende Wartung"""
return maintenance_manager.schedule_maintenance(
printer_id=printer_id,
maintenance_type=MaintenanceType.PREVENTIVE,
interval_days=interval_days,
description="Regelmäßige vorbeugende Wartung"
)
# JavaScript für Wartungs-Frontend
def get_maintenance_javascript() -> str:
"""JavaScript für Wartungsmanagement"""
return """
class MaintenanceManager {
constructor() {
this.currentTasks = [];
this.selectedTask = null;
this.init();
}
init() {
this.loadTasks();
this.setupEventListeners();
this.startAutoRefresh();
}
setupEventListeners() {
// Task status updates
document.addEventListener('click', (e) => {
if (e.target.matches('.maintenance-status-btn')) {
const taskId = e.target.dataset.taskId;
const newStatus = e.target.dataset.status;
this.updateTaskStatus(taskId, newStatus);
}
if (e.target.matches('.maintenance-details-btn')) {
const taskId = e.target.dataset.taskId;
this.showTaskDetails(taskId);
}
});
// Create maintenance form
const createForm = document.getElementById('create-maintenance-form');
createForm?.addEventListener('submit', (e) => {
e.preventDefault();
this.createTask(new FormData(createForm));
});
}
async loadTasks() {
try {
const response = await fetch('/api/maintenance/tasks');
const data = await response.json();
if (data.success) {
this.currentTasks = data.tasks;
this.renderTasks();
}
} catch (error) {
console.error('Fehler beim Laden der Wartungsaufgaben:', error);
}
}
async updateTaskStatus(taskId, newStatus) {
try {
const response = await fetch(`/api/maintenance/tasks/${taskId}/status`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ status: newStatus })
});
const result = await response.json();
if (result.success) {
this.loadTasks(); // Refresh
this.showNotification('Wartungsstatus aktualisiert', 'success');
} else {
this.showNotification('Fehler beim Aktualisieren', 'error');
}
} catch (error) {
console.error('Status-Update fehlgeschlagen:', error);
}
}
renderTasks() {
const container = document.getElementById('maintenance-tasks-container');
if (!container) return;
container.innerHTML = this.currentTasks.map(task => `
<div class="maintenance-task-card ${task.status} priority-${task.priority}">
<div class="task-header">
<h3>${task.title}</h3>
<span class="task-priority">${task.priority}</span>
</div>
<div class="task-info">
<p><strong>Drucker:</strong> ${task.printer_id}</p>
<p><strong>Typ:</strong> ${task.maintenance_type}</p>
<p><strong>Fällig:</strong> ${this.formatDate(task.due_date)}</p>
<p><strong>Status:</strong> ${task.status}</p>
</div>
<div class="task-actions">
<button class="maintenance-status-btn" data-task-id="${task.id}" data-status="in_progress">
Starten
</button>
<button class="maintenance-status-btn" data-task-id="${task.id}" data-status="completed">
Abschließen
</button>
<button class="maintenance-details-btn" data-task-id="${task.id}">
Details
</button>
</div>
</div>
`).join('');
}
showTaskDetails(taskId) {
const task = this.currentTasks.find(t => t.id == taskId);
if (!task) return;
// Create modal with task details
const modal = document.createElement('div');
modal.className = 'maintenance-modal';
modal.innerHTML = `
<div class="modal-content">
<div class="modal-header">
<h2>${task.title}</h2>
<button class="close-modal">&times;</button>
</div>
<div class="modal-body">
<div class="task-details">
<p><strong>Beschreibung:</strong> ${task.description}</p>
<p><strong>Techniker:</strong> ${task.assigned_technician || 'Nicht zugewiesen'}</p>
<p><strong>Geschätzte Dauer:</strong> ${task.estimated_duration} Minuten</p>
${task.checklist ? this.renderChecklist(task.checklist) : ''}
<div class="task-notes">
<h4>Notizen:</h4>
<textarea id="task-notes-${taskId}" rows="4" cols="50">${task.notes || ''}</textarea>
<button onclick="maintenanceManager.saveNotes(${taskId})">Notizen speichern</button>
</div>
</div>
</div>
</div>
`;
document.body.appendChild(modal);
// Close modal handlers
modal.querySelector('.close-modal').onclick = () => modal.remove();
modal.onclick = (e) => {
if (e.target === modal) modal.remove();
};
}
renderChecklist(checklist) {
return `
<div class="maintenance-checklist">
<h4>Checkliste:</h4>
${checklist.map((item, index) => `
<label class="checklist-item">
<input type="checkbox" ${item.completed ? 'checked' : ''}
onchange="maintenanceManager.updateChecklistItem(${index}, this.checked)">
${item.task}
${item.required ? '<span class="required">*</span>' : ''}
</label>
`).join('')}
</div>
`;
}
formatDate(dateString) {
if (!dateString) return 'Nicht gesetzt';
const date = new Date(dateString);
return date.toLocaleDateString('de-DE') + ' ' + date.toLocaleTimeString('de-DE', {hour: '2-digit', minute: '2-digit'});
}
showNotification(message, type = 'info') {
const notification = document.createElement('div');
notification.className = `notification notification-${type}`;
notification.textContent = message;
document.body.appendChild(notification);
setTimeout(() => {
notification.remove();
}, 3000);
}
startAutoRefresh() {
setInterval(() => {
this.loadTasks();
}, 30000); // Refresh every 30 seconds
}
}
// Initialize when DOM is ready
document.addEventListener('DOMContentLoaded', function() {
window.maintenanceManager = new MaintenanceManager();
});
"""
def create_maintenance_task(printer_id: int, title: str, description: str = "",
maintenance_type: MaintenanceType = MaintenanceType.PREVENTIVE,
priority: MaintenancePriority = MaintenancePriority.NORMAL) -> int:
"""
Erstellt eine neue Wartungsaufgabe.
Args:
printer_id: ID des Druckers
title: Titel der Wartungsaufgabe
description: Beschreibung der Aufgabe
maintenance_type: Art der Wartung
priority: Priorität der Aufgabe
Returns:
int: ID der erstellten Aufgabe
"""
task = MaintenanceTask(
printer_id=printer_id,
title=title,
description=description,
maintenance_type=maintenance_type,
priority=priority,
checklist=maintenance_manager.create_maintenance_checklist(maintenance_type)
)
return maintenance_manager.create_task(task)
def schedule_maintenance(printer_id: int, maintenance_type: MaintenanceType,
interval_days: int, description: str = "") -> MaintenanceSchedule:
"""
Plant regelmäßige Wartungen (Alias für maintenance_manager.schedule_maintenance).
Args:
printer_id: ID des Druckers
maintenance_type: Art der Wartung
interval_days: Intervall in Tagen
description: Beschreibung
Returns:
MaintenanceSchedule: Erstellter Wartungsplan
"""
return maintenance_manager.schedule_maintenance(
printer_id=printer_id,
maintenance_type=maintenance_type,
interval_days=interval_days,
description=description
)
def get_maintenance_overview() -> Dict[str, Any]:
"""
Holt eine Übersicht aller Wartungsaktivitäten.
Returns:
Dict: Wartungsübersicht mit Statistiken und anstehenden Aufgaben
"""
upcoming = maintenance_manager.get_upcoming_maintenance()
overdue = maintenance_manager.get_overdue_tasks()
metrics = maintenance_manager.get_maintenance_metrics()
# Aktive Tasks
active_tasks = [task for task in maintenance_manager.tasks.values()
if task.status == MaintenanceStatus.IN_PROGRESS]
# Completed tasks in last 30 days
thirty_days_ago = datetime.now() - timedelta(days=30)
recent_completed = [task for task in maintenance_manager.maintenance_history
if task.completed_at and task.completed_at >= thirty_days_ago]
return {
'summary': {
'total_tasks': len(maintenance_manager.tasks),
'active_tasks': len(active_tasks),
'upcoming_tasks': len(upcoming),
'overdue_tasks': len(overdue),
'completed_this_month': len(recent_completed)
},
'upcoming_tasks': [asdict(task) for task in upcoming[:10]],
'overdue_tasks': [asdict(task) for task in overdue],
'active_tasks': [asdict(task) for task in active_tasks],
'recent_completed': [asdict(task) for task in recent_completed[:5]],
'metrics': asdict(metrics),
'schedules': {
printer_id: [asdict(schedule) for schedule in schedules]
for printer_id, schedules in maintenance_manager.schedules.items()
}
}
def update_maintenance_status(task_id: int, new_status: MaintenanceStatus,
notes: str = "") -> bool:
"""
Aktualisiert den Status einer Wartungsaufgabe (Alias für maintenance_manager.update_task_status).
Args:
task_id: ID der Wartungsaufgabe
new_status: Neuer Status
notes: Optionale Notizen
Returns:
bool: True wenn erfolgreich aktualisiert
"""
return maintenance_manager.update_task_status(task_id, new_status, notes)
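# Beispiel (Skizze): Typischer Lebenszyklus einer Wartungsaufgabe, etwa via
# "python -m utils.maintenance_system" aus dem App-Hauptverzeichnis gestartet.
# Drucker-ID 1, Titel und Notiz sind hypothetische Platzhalter.
if __name__ == "__main__":
    task_id = create_maintenance_task(
        printer_id=1,
        title="Düse reinigen",
        maintenance_type=MaintenanceType.PREVENTIVE,
    )
    update_maintenance_status(task_id, MaintenanceStatus.IN_PROGRESS)
    update_maintenance_status(task_id, MaintenanceStatus.COMPLETED, notes="Ohne Befund")
    print(get_maintenance_overview()["summary"])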

153
utils/migrate_db.py Normal file
View File

@@ -0,0 +1,153 @@
#!/usr/bin/env python3
"""
Datenbank-Migrationsskript für Guest-Requests, UserPermissions und Notifications
"""
import os
import sys
import sqlite3
from datetime import datetime
# Pfad zum App-Hauptverzeichnis hinzufügen (dieses Skript liegt in utils/)
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from models import init_db, get_cached_session, GuestRequest, UserPermission, Notification, User
from utils.logging_config import get_logger
from config.settings import DATABASE_PATH
logger = get_logger("migrate")
def column_exists(cursor, table_name, column_name):
"""Prüft, ob eine Spalte in einer Tabelle existiert."""
cursor.execute(f"PRAGMA table_info({table_name})")
columns = [row[1] for row in cursor.fetchall()]
return column_name in columns
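# Beispiel (Skizze): column_exists macht Spalten-Migrationen idempotent;
# Tabellen- und Spaltenname hier rein hypothetisch.
#
#     if not column_exists(cursor, 'users', 'last_login'):
#         cursor.execute("ALTER TABLE users ADD COLUMN last_login DATETIME")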
def get_database_path():
"""Ermittelt den Pfad zur Datenbankdatei."""
# Verwende den korrekten Datenbankpfad aus der Konfiguration
if os.path.exists(DATABASE_PATH):
return DATABASE_PATH
# Fallback für alternative Pfade mit korrektem Dateinamen
alternative_paths = [
os.path.join('database', 'myp.db'),
'myp.db',
'../database/myp.db',
'./database/myp.db'
]
for path in alternative_paths:
if os.path.exists(path):
return path
# Falls keine Datei gefunden wird, verwende den konfigurierten Pfad
return DATABASE_PATH
def migrate_guest_requests_table():
"""Migriert die guest_requests Tabelle für neue Spalten."""
db_path = get_database_path()
if not os.path.exists(db_path):
logger.warning(f"Datenbankdatei nicht gefunden: {db_path}")
return False
try:
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
# Prüfen ob otp_used_at Spalte bereits existiert
if not column_exists(cursor, 'guest_requests', 'otp_used_at'):
cursor.execute("""
ALTER TABLE guest_requests
ADD COLUMN otp_used_at DATETIME
""")
logger.info("Spalte 'otp_used_at' zur guest_requests Tabelle hinzugefügt")
else:
logger.info("Spalte 'otp_used_at' existiert bereits")
conn.commit()
conn.close()
return True
except Exception as e:
logger.error(f"Fehler bei der Migration der guest_requests Tabelle: {str(e)}")
if 'conn' in locals():
conn.rollback()
conn.close()
return False
def main():
"""Führt die Datenbank-Migration aus."""
try:
logger.info("Starte Datenbank-Migration...")
# Datenbank initialisieren (erstellt neue Tabellen)
init_db()
# Spezifische Spalten-Migrationen
logger.info("Führe spezifische Tabellen-Migrationen aus...")
migrate_guest_requests_table()
logger.info("Datenbank-Migration erfolgreich abgeschlossen")
# Testen, ob die neuen Tabellen funktionieren
test_new_tables()
except Exception as e:
logger.error(f"Fehler bei der Datenbank-Migration: {str(e)}")
sys.exit(1)
def test_new_tables():
"""Testet, ob die neuen Tabellen korrekt erstellt wurden."""
try:
with get_cached_session() as session:
# Test der GuestRequest-Tabelle
test_request = GuestRequest(
name="Test User",
email="test@example.com",
reason="Test migration",
duration_min=60
)
session.add(test_request)
session.flush()
# Test der UserPermission-Tabelle (mit Admin-User falls vorhanden)
admin_user = session.query(User).filter_by(role="admin").first()
if admin_user:
# Prüfen, ob bereits Permissions für diesen User existieren
existing_permission = session.query(UserPermission).filter_by(user_id=admin_user.id).first()
if not existing_permission:
permission = UserPermission(
user_id=admin_user.id,
can_start_jobs=True,
needs_approval=False,
can_approve_jobs=True
)
session.add(permission)
session.flush()
logger.info(f"UserPermission für Admin-User {admin_user.id} erstellt")
else:
logger.info(f"UserPermission für Admin-User {admin_user.id} existiert bereits")
# Test der Notification-Tabelle
notification = Notification(
user_id=admin_user.id,
type="test",
payload='{"message": "Test notification"}'
)
session.add(notification)
session.flush()
# Test-Daten wieder löschen
session.rollback()
logger.info("Alle neuen Tabellen wurden erfolgreich getestet")
except Exception as e:
logger.error(f"Fehler beim Testen der neuen Tabellen: {str(e)}")
raise
if __name__ == "__main__":
main()

899
utils/multi_location.py Normal file
View File

@@ -0,0 +1,899 @@
"""
Multi-Location Support System for the MYP Platform
===================================================
This module provides comprehensive multi-location functionality:
- Location management and hierarchies
- Location-specific configurations
- Centralized and decentralized administration
- Cross-location reporting
- Resource sharing between locations
- User-to-location assignments
"""
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple
from dataclasses import dataclass, asdict
from enum import Enum
import geocoder
import requests
from utils.logging_config import get_logger
from models import User, Printer, Job, get_db_session
logger = get_logger("multi_location")
class LocationType(Enum):
"""Arten von Standorten"""
HEADQUARTERS = "headquarters" # Hauptsitz
BRANCH = "branch" # Niederlassung
DEPARTMENT = "department" # Abteilung
FLOOR = "floor" # Stockwerk
ROOM = "room" # Raum
AREA = "area" # Bereich
class AccessLevel(Enum):
"""Zugriffslevel für Standorte"""
FULL = "full" # Vollzugriff
READ_WRITE = "read_write" # Lesen und Schreiben
READ_ONLY = "read_only" # Nur Lesen
NO_ACCESS = "no_access" # Kein Zugriff
@dataclass
class LocationConfig:
"""Standort-spezifische Konfiguration"""
timezone: str = "Europe/Berlin"
business_hours: Dict[str, str] = None
maintenance_window: Dict[str, str] = None
auto_approval_enabled: bool = False
max_job_duration: int = 480 # Minuten
contact_info: Dict[str, str] = None
notification_settings: Dict[str, Any] = None
@dataclass
class Location:
"""Standort-Definition"""
id: Optional[int] = None
name: str = ""
code: str = "" # Kurzer Code für den Standort
location_type: LocationType = LocationType.BRANCH
parent_id: Optional[int] = None
address: str = ""
city: str = ""
country: str = ""
postal_code: str = ""
latitude: Optional[float] = None
longitude: Optional[float] = None
description: str = ""
config: LocationConfig = None
is_active: bool = True
created_at: datetime = None
manager_id: Optional[int] = None
def __post_init__(self):
if self.config is None:
self.config = LocationConfig()
if self.created_at is None:
self.created_at = datetime.now()
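# Example (sketch): a branch location with only the required fields; all other
# attributes fall back to the defaults above.
#
#   berlin = Location(name="Werk Berlin", code="BER",
#                     location_type=LocationType.BRANCH, city="Berlin")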
@dataclass
class UserLocationAccess:
"""Benutzer-Standort-Zugriff"""
user_id: int
location_id: int
access_level: AccessLevel
granted_by: int
granted_at: datetime
expires_at: Optional[datetime] = None
is_primary: bool = False
class MultiLocationManager:
"""Manager für Multi-Standort-Funktionalität"""
def __init__(self):
self.locations: Dict[int, Location] = {}
self.user_access: Dict[int, List[UserLocationAccess]] = {}
self.next_location_id = 1
# Standard-Standort erstellen
self._create_default_location()
def _create_default_location(self):
"""Erstellt Standard-Standort falls keiner existiert"""
default_location = Location(
id=1,
name="Hauptstandort",
code="HQ",
location_type=LocationType.HEADQUARTERS,
address="Mercedes-Benz Platz",
city="Stuttgart",
country="Deutschland",
description="Hauptstandort des MYP-Systems"
)
self.locations[1] = default_location
self.next_location_id = 2
logger.info("Standard-Standort erstellt")
def create_location(self, location: Location) -> int:
"""Erstellt einen neuen Standort"""
location.id = self.next_location_id
self.next_location_id += 1
# Koordinaten automatisch ermitteln
if not location.latitude or not location.longitude:
self._geocode_location(location)
self.locations[location.id] = location
logger.info(f"Standort erstellt: {location.name} ({location.code})")
return location.id
def update_location(self, location_id: int, updates: Dict[str, Any]) -> bool:
"""Aktualisiert einen Standort"""
if location_id not in self.locations:
return False
location = self.locations[location_id]
for key, value in updates.items():
if hasattr(location, key):
setattr(location, key, value)
# Koordinaten neu ermitteln bei Adressänderung
if 'address' in updates or 'city' in updates:
self._geocode_location(location)
logger.info(f"Standort aktualisiert: {location.name}")
return True
def delete_location(self, location_id: int) -> bool:
"""Löscht einen Standort (Soft Delete)"""
if location_id not in self.locations:
return False
location = self.locations[location_id]
# Prüfe ob Standort Kinder hat
children = self.get_child_locations(location_id)
if children:
logger.warning(f"Standort {location.name} kann nicht gelöscht werden: hat Unterstandorte")
return False
# Prüfe auf aktive Ressourcen
if self._has_active_resources(location_id):
logger.warning(f"Standort {location.name} kann nicht gelöscht werden: hat aktive Ressourcen")
return False
location.is_active = False
logger.info(f"Standort deaktiviert: {location.name}")
return True
def get_location_hierarchy(self, location_id: Optional[int] = None) -> Dict[str, Any]:
"""Holt Standort-Hierarchie"""
if location_id:
# Spezifische Hierarchie ab einem Standort
location = self.locations.get(location_id)
if not location:
return {}
return self._build_hierarchy_node(location)
else:
# Komplette Hierarchie
root_locations = [loc for loc in self.locations.values()
if loc.parent_id is None and loc.is_active]
return {
'locations': [self._build_hierarchy_node(loc) for loc in root_locations]
}
def _build_hierarchy_node(self, location: Location) -> Dict[str, Any]:
"""Erstellt einen Hierarchie-Knoten"""
children = self.get_child_locations(location.id)
return {
'id': location.id,
'name': location.name,
'code': location.code,
'type': location.location_type.value,
'children': [self._build_hierarchy_node(child) for child in children],
'resource_count': self._count_location_resources(location.id)
}
def get_child_locations(self, parent_id: int) -> List[Location]:
"""Holt alle Kinder-Standorte"""
return [loc for loc in self.locations.values()
if loc.parent_id == parent_id and loc.is_active]
def get_location_path(self, location_id: int) -> List[Location]:
"""Holt den Pfad vom Root zum Standort"""
path = []
current_id = location_id
while current_id:
location = self.locations.get(current_id)
if not location:
break
path.insert(0, location)
current_id = location.parent_id
return path
def grant_location_access(self, user_id: int, location_id: int,
access_level: AccessLevel, granted_by: int,
expires_at: Optional[datetime] = None,
is_primary: bool = False) -> bool:
"""Gewährt Benutzer-Zugriff auf einen Standort"""
if location_id not in self.locations:
return False
access = UserLocationAccess(
user_id=user_id,
location_id=location_id,
access_level=access_level,
granted_by=granted_by,
granted_at=datetime.now(),
expires_at=expires_at,
is_primary=is_primary
)
if user_id not in self.user_access:
self.user_access[user_id] = []
# Entferne vorherigen Zugriff für diesen Standort
self.user_access[user_id] = [
acc for acc in self.user_access[user_id]
if acc.location_id != location_id
]
# Setze anderen primary-Zugriff zurück falls nötig
if is_primary:
for access_item in self.user_access[user_id]:
access_item.is_primary = False
self.user_access[user_id].append(access)
logger.info(f"Standort-Zugriff gewährt: User {user_id} → Location {location_id} ({access_level.value})")
return True
def revoke_location_access(self, user_id: int, location_id: int) -> bool:
"""Entzieht Benutzer-Zugriff auf einen Standort"""
if user_id not in self.user_access:
return False
original_count = len(self.user_access[user_id])
self.user_access[user_id] = [
acc for acc in self.user_access[user_id]
if acc.location_id != location_id
]
success = len(self.user_access[user_id]) < original_count
if success:
logger.info(f"Standort-Zugriff entzogen: User {user_id} → Location {location_id}")
return success
def get_user_locations(self, user_id: int, access_level: Optional[AccessLevel] = None) -> List[Location]:
"""Holt alle Standorte eines Benutzers"""
if user_id not in self.user_access:
return []
accessible_locations = []
now = datetime.now()
for access in self.user_access[user_id]:
# Prüfe Ablaufzeit
if access.expires_at and access.expires_at < now:
continue
# Prüfe Access Level
if access_level and access.access_level != access_level:
continue
location = self.locations.get(access.location_id)
if location and location.is_active:
accessible_locations.append(location)
return accessible_locations
def get_user_primary_location(self, user_id: int) -> Optional[Location]:
"""Holt den primären Standort eines Benutzers"""
if user_id not in self.user_access:
return None
for access in self.user_access[user_id]:
if access.is_primary:
return self.locations.get(access.location_id)
# Fallback: ersten verfügbaren Standort nehmen
user_locations = self.get_user_locations(user_id)
return user_locations[0] if user_locations else None
def check_user_access(self, user_id: int, location_id: int,
required_level: AccessLevel = AccessLevel.READ_ONLY) -> bool:
"""Prüft ob Benutzer Zugriff auf Standort hat"""
if user_id not in self.user_access:
return False
access_levels = {
AccessLevel.NO_ACCESS: 0,
AccessLevel.READ_ONLY: 1,
AccessLevel.READ_WRITE: 2,
AccessLevel.FULL: 3
}
required_level_value = access_levels[required_level]
now = datetime.now()
for access in self.user_access[user_id]:
if access.location_id != location_id:
continue
# Prüfe Ablaufzeit
if access.expires_at and access.expires_at < now:
continue
user_level_value = access_levels[access.access_level]
if user_level_value >= required_level_value:
return True
return False
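# Example (sketch): READ_WRITE access satisfies a READ_ONLY requirement because
# the levels are compared numerically (NO_ACCESS=0 ... FULL=3):
#
#   location_manager.check_user_access(user_id=7, location_id=1,
#                                      required_level=AccessLevel.READ_ONLY)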
def get_location_resources(self, location_id: int) -> Dict[str, Any]:
"""Holt alle Ressourcen eines Standorts"""
if location_id not in self.locations:
return {}
# Simuliere Datenbankabfrage für Drucker und Jobs
resources = {
'printers': [],
'active_jobs': [],
'users': [],
'pending_maintenance': 0
}
# In echter Implementierung würde hier die Datenbank abgefragt
with get_db_session() as db_session:
# Drucker des Standorts (vereinfacht - benötigt location_id in Printer-Model)
# printers = db_session.query(Printer).filter(Printer.location_id == location_id).all()
# resources['printers'] = [p.to_dict() for p in printers]
pass
return resources
def get_location_statistics(self, location_id: int,
start_date: Optional[datetime] = None,
end_date: Optional[datetime] = None) -> Dict[str, Any]:
"""Holt Statistiken für einen Standort"""
if not start_date:
start_date = datetime.now() - timedelta(days=30)
if not end_date:
end_date = datetime.now()
# Sammle Statistiken
stats = {
'location': self.locations[location_id].name if location_id in self.locations else 'Unbekannt',
'period': {
'start': start_date.isoformat(),
'end': end_date.isoformat()
},
'totals': {
'printers': 0,
'jobs_completed': 0,
'jobs_failed': 0,
'print_time_hours': 0,
'material_used_kg': 0,
'users_active': 0
},
'averages': {
'jobs_per_day': 0,
'job_duration_minutes': 0,
'printer_utilization': 0
},
'trends': {
'daily_jobs': [],
'printer_usage': []
}
}
# In echter Implementierung würden hier Datenbankabfragen stehen
return stats
def get_multi_location_report(self, location_ids: List[int] = None) -> Dict[str, Any]:
"""Erstellt standortübergreifenden Bericht"""
if not location_ids:
location_ids = list(self.locations.keys())
report = {
'generated_at': datetime.now().isoformat(),
'locations': [],
'summary': {
'total_locations': len(location_ids),
'total_printers': 0,
'total_users': 0,
'total_jobs': 0,
'cross_location_sharing': []
}
}
for location_id in location_ids:
location = self.locations.get(location_id)
if not location:
continue
location_stats = self.get_location_statistics(location_id)
location_data = {
'id': location.id,
'name': location.name,
'code': location.code,
'type': location.location_type.value,
'statistics': location_stats
}
report['locations'].append(location_data)
# Summiere für Gesamtübersicht
totals = location_stats.get('totals', {})
report['summary']['total_printers'] += totals.get('printers', 0)
report['summary']['total_users'] += totals.get('users_active', 0)
report['summary']['total_jobs'] += totals.get('jobs_completed', 0)
return report
def find_nearest_locations(self, latitude: float, longitude: float,
radius_km: float = 50, limit: int = 5) -> List[Tuple[Location, float]]:
"""Findet nächstgelegene Standorte"""
from math import radians, sin, cos, sqrt, atan2
def calculate_distance(lat1, lon1, lat2, lon2):
"""Berechnet Entfernung zwischen zwei Koordinaten (Haversine)"""
R = 6371 # Erdradius in km
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
dlat = lat2 - lat1
dlon = lon2 - lon1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
return R * c
nearby_locations = []
for location in self.locations.values():
if not location.is_active or not location.latitude or not location.longitude:
continue
distance = calculate_distance(
latitude, longitude,
location.latitude, location.longitude
)
if distance <= radius_km:
nearby_locations.append((location, distance))
# Sortiere nach Entfernung
nearby_locations.sort(key=lambda x: x[1])
return nearby_locations[:limit]
def _geocode_location(self, location: Location):
"""Ermittelt Koordinaten für einen Standort"""
try:
address_parts = [location.address, location.city, location.country]
full_address = ', '.join(filter(None, address_parts))
if not full_address:
return
# Verwende geocoder library
result = geocoder.osm(full_address)
if result.ok:
location.latitude = result.lat
location.longitude = result.lng
logger.info(f"Koordinaten ermittelt für {location.name}: {location.latitude}, {location.longitude}")
else:
logger.warning(f"Koordinaten konnten nicht ermittelt werden für {location.name}")
except Exception as e:
logger.error(f"Fehler bei Geocoding für {location.name}: {str(e)}")
def _has_active_resources(self, location_id: int) -> bool:
"""Prüft ob Standort aktive Ressourcen hat"""
# Vereinfachte Implementierung
# In echter Implementation würde hier die Datenbank geprüft
return False
def _count_location_resources(self, location_id: int) -> Dict[str, int]:
"""Zählt Ressourcen eines Standorts"""
# Vereinfachte Implementierung
return {
'printers': 0,
'users': 0,
'jobs': 0
}
# Globale Instanz
location_manager = MultiLocationManager()
# Alias für Import-Kompatibilität
LocationManager = MultiLocationManager
def create_location(name: str, code: str, location_type: LocationType = LocationType.BRANCH,
address: str = "", city: str = "", country: str = "",
parent_id: Optional[int] = None) -> int:
"""
Erstellt einen neuen Standort (globale Funktion).
Args:
name: Name des Standorts
code: Kurzer Code für den Standort
location_type: Art des Standorts
address: Adresse
city: Stadt
country: Land
parent_id: Parent-Standort ID
Returns:
int: ID des erstellten Standorts
"""
location = Location(
name=name,
code=code,
location_type=location_type,
address=address,
city=city,
country=country,
parent_id=parent_id
)
return location_manager.create_location(location)
def assign_user_to_location(user_id: int, location_id: int,
access_level: AccessLevel = AccessLevel.READ_WRITE,
granted_by: int = 1, is_primary: bool = False) -> bool:
"""
Weist einen Benutzer einem Standort zu.
Args:
user_id: ID des Benutzers
location_id: ID des Standorts
access_level: Zugriffslevel
granted_by: ID des gewährenden Benutzers
is_primary: Ob dies der primäre Standort ist
Returns:
bool: True wenn erfolgreich
"""
return location_manager.grant_location_access(
user_id=user_id,
location_id=location_id,
access_level=access_level,
granted_by=granted_by,
is_primary=is_primary
)
def get_user_locations(user_id: int) -> List[Location]:
"""
Holt alle Standorte eines Benutzers (globale Funktion).
Args:
user_id: ID des Benutzers
Returns:
List[Location]: Liste der zugänglichen Standorte
"""
return location_manager.get_user_locations(user_id)
def calculate_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
"""
Berechnet die Entfernung zwischen zwei Koordinaten (Haversine-Formel).
Args:
lat1, lon1: Koordinaten des ersten Punkts
lat2, lon2: Koordinaten des zweiten Punkts
Returns:
float: Entfernung in Kilometern
"""
from math import radians, sin, cos, sqrt, atan2
R = 6371 # Erdradius in km
lat1, lon1, lat2, lon2 = map(radians, [lat1, lon1, lat2, lon2])
dlat = lat2 - lat1
dlon = lon2 - lon1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
return R * c
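# Worked example (sketch): Stuttgart (48.78, 9.18) to Berlin (52.52, 13.40)
# comes out at roughly 511 km:
#
#   calculate_distance(48.78, 9.18, 52.52, 13.40)  # ≈ 511.4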
def find_nearest_location(latitude: float, longitude: float,
radius_km: float = 50) -> Optional[Location]:
"""
Findet den nächstgelegenen Standort.
Args:
latitude: Breitengrad
longitude: Längengrad
radius_km: Suchradius in Kilometern
Returns:
Optional[Location]: Nächstgelegener Standort oder None
"""
nearest_locations = location_manager.find_nearest_locations(
latitude=latitude,
longitude=longitude,
radius_km=radius_km,
limit=1
)
return nearest_locations[0][0] if nearest_locations else None
def get_location_dashboard_data(user_id: int) -> Dict[str, Any]:
"""Holt Dashboard-Daten für Standorte eines Benutzers"""
user_locations = location_manager.get_user_locations(user_id)
primary_location = location_manager.get_user_primary_location(user_id)
dashboard_data = {
'user_locations': [asdict(loc) for loc in user_locations],
'primary_location': asdict(primary_location) if primary_location else None,
'location_count': len(user_locations),
'hierarchy': location_manager.get_location_hierarchy()
}
# Füge Statistiken für jeden Standort hinzu
for location in user_locations:
location_stats = location_manager.get_location_statistics(location.id)
dashboard_data[f'stats_{location.id}'] = location_stats
return dashboard_data
def create_location_from_address(name: str, address: str, city: str,
country: str, location_type: LocationType = LocationType.BRANCH) -> int:
"""Erstellt Standort aus Adresse mit automatischer Geocodierung"""
location = Location(
name=name,
code=name[:3].upper(),
location_type=location_type,
address=address,
city=city,
country=country
)
return location_manager.create_location(location)
# JavaScript für Multi-Location Frontend
def get_multi_location_javascript() -> str:
"""JavaScript für Multi-Location Management"""
return """
class MultiLocationManager {
constructor() {
this.currentLocation = null;
this.userLocations = [];
this.locationHierarchy = {};
this.init();
}
init() {
this.loadUserLocations();
this.setupEventListeners();
}
setupEventListeners() {
// Location switcher
document.addEventListener('change', (e) => {
if (e.target.matches('.location-selector')) {
const locationId = parseInt(e.target.value);
this.switchLocation(locationId);
}
});
// Location management buttons
document.addEventListener('click', (e) => {
if (e.target.matches('.manage-locations-btn')) {
this.showLocationManager();
}
if (e.target.matches('.location-hierarchy-btn')) {
this.showLocationHierarchy();
}
});
}
async loadUserLocations() {
try {
const response = await fetch('/api/locations/user');
const data = await response.json();
if (data.success) {
this.userLocations = data.locations;
this.currentLocation = data.primary_location;
this.locationHierarchy = data.hierarchy;
this.updateLocationSelector();
this.updateLocationDisplay();
}
} catch (error) {
console.error('Fehler beim Laden der Standorte:', error);
}
}
updateLocationSelector() {
const selectors = document.querySelectorAll('.location-selector');
selectors.forEach(selector => {
selector.innerHTML = this.userLocations.map(location =>
`<option value="${location.id}" ${location.id === this.currentLocation?.id ? 'selected' : ''}>
${location.name} (${location.code})
</option>`
).join('');
});
}
updateLocationDisplay() {
const displays = document.querySelectorAll('.current-location-display');
displays.forEach(display => {
if (this.currentLocation) {
display.innerHTML = `
<div class="location-info">
<strong>${this.currentLocation.name}</strong>
<span class="location-type">${this.currentLocation.type}</span>
${this.currentLocation.city ? `<span class="location-city">${this.currentLocation.city}</span>` : ''}
</div>
`;
} else {
display.innerHTML = '<span class="no-location">Kein Standort ausgewählt</span>';
}
});
}
async switchLocation(locationId) {
try {
const response = await fetch('/api/locations/switch', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ location_id: locationId })
});
const result = await response.json();
if (result.success) {
this.currentLocation = this.userLocations.find(loc => loc.id === locationId);
this.updateLocationDisplay();
// Seite neu laden um location-spezifische Daten zu aktualisieren
window.location.reload();
} else {
this.showNotification('Fehler beim Wechseln des Standorts', 'error');
}
} catch (error) {
console.error('Standort-Wechsel fehlgeschlagen:', error);
}
}
showLocationManager() {
const modal = document.createElement('div');
modal.className = 'location-manager-modal';
modal.innerHTML = `
<div class="modal-content">
<div class="modal-header">
<h2>Standort-Verwaltung</h2>
<button class="close-modal">&times;</button>
</div>
<div class="modal-body">
<div class="location-list">
${this.renderLocationList()}
</div>
<div class="location-actions">
<button class="btn-create-location">Neuen Standort erstellen</button>
</div>
</div>
</div>
`;
document.body.appendChild(modal);
// Event handlers
modal.querySelector('.close-modal').onclick = () => modal.remove();
modal.onclick = (e) => {
if (e.target === modal) modal.remove();
};
}
renderLocationList() {
return this.userLocations.map(location => `
<div class="location-item">
<div class="location-details">
<h4>${location.name} (${location.code})</h4>
<p><strong>Typ:</strong> ${location.type}</p>
<p><strong>Adresse:</strong> ${location.address || 'Nicht angegeben'}</p>
<p><strong>Stadt:</strong> ${location.city || 'Nicht angegeben'}</p>
</div>
<div class="location-actions">
<button class="btn-edit-location" data-location-id="${location.id}">Bearbeiten</button>
<button class="btn-view-stats" data-location-id="${location.id}">Statistiken</button>
</div>
</div>
`).join('');
}
showLocationHierarchy() {
const modal = document.createElement('div');
modal.className = 'hierarchy-modal';
modal.innerHTML = `
<div class="modal-content">
<div class="modal-header">
<h2>Standort-Hierarchie</h2>
<button class="close-modal">&times;</button>
</div>
<div class="modal-body">
<div class="hierarchy-tree">
${this.renderHierarchyTree(this.locationHierarchy.locations || [])}
</div>
</div>
</div>
`;
document.body.appendChild(modal);
modal.querySelector('.close-modal').onclick = () => modal.remove();
modal.onclick = (e) => {
if (e.target === modal) modal.remove();
};
}
renderHierarchyTree(locations, level = 0) {
return locations.map(location => `
<div class="hierarchy-node" style="margin-left: ${level * 20}px;">
<div class="node-content">
<span class="node-icon">${this.getLocationTypeIcon(location.type)}</span>
<span class="node-name">${location.name}</span>
<span class="node-code">(${location.code})</span>
<span class="resource-count">${location.resource_count.printers || 0} Drucker</span>
</div>
${location.children && location.children.length > 0 ?
this.renderHierarchyTree(location.children, level + 1) : ''}
</div>
`).join('');
}
getLocationTypeIcon(type) {
const icons = {
'headquarters': '🏢',
'branch': '🏪',
'department': '🏬',
'floor': '🏢',
'room': '🚪',
'area': '📍'
};
return icons[type] || '📍';
}
showNotification(message, type = 'info') {
const notification = document.createElement('div');
notification.className = `notification notification-${type}`;
notification.textContent = message;
document.body.appendChild(notification);
setTimeout(() => {
notification.remove();
}, 3000);
}
}
// Initialize when DOM is ready
document.addEventListener('DOMContentLoaded', function() {
window.multiLocationManager = new MultiLocationManager();
});
"""

229
utils/offline_config.py Normal file
View File

@@ -0,0 +1,229 @@
"""
Offline Configuration for the MYP Platform
==========================================
Configures the system for offline operation without an internet connection.
Provides fallback solutions for internet-dependent features.
"""
import os
import logging
from typing import Dict, List, Optional
from utils.logging_config import get_logger
logger = get_logger("offline_config")
# ===== OFFLINE-MODUS KONFIGURATION =====
OFFLINE_MODE = True # Produktionseinstellung - System läuft offline
# ===== OFFLINE-KOMPATIBILITÄT PRÜFUNGEN =====
def check_internet_connectivity() -> bool:
"""
Prüft ob eine Internetverbindung verfügbar ist.
Im Offline-Modus gibt immer False zurück.
Returns:
bool: True wenn Internet verfügbar, False im Offline-Modus
"""
if OFFLINE_MODE:
return False
# In einem echten Online-Modus könnte hier eine echte Prüfung stehen
try:
import socket
socket.create_connection(("8.8.8.8", 53), timeout=3)
return True
except OSError:
return False
def is_oauth_available() -> bool:
"""
Prüft ob OAuth-Funktionalität verfügbar ist.
Returns:
bool: False im Offline-Modus
"""
return not OFFLINE_MODE and check_internet_connectivity()
def is_email_sending_available() -> bool:
"""
Prüft ob E-Mail-Versand verfügbar ist.
Returns:
bool: False im Offline-Modus (nur Logging)
"""
return not OFFLINE_MODE and check_internet_connectivity()
def is_cdn_available() -> bool:
"""
Prüft ob CDN-Links verfügbar sind.
Returns:
bool: False im Offline-Modus (lokale Fallbacks verwenden)
"""
return not OFFLINE_MODE and check_internet_connectivity()
# ===== CDN FALLBACK-KONFIGURATION =====
CDN_FALLBACKS = {
# Chart.js CDN -> Lokale Datei
"https://cdnjs.cloudflare.com/ajax/libs/Chart.js/4.4.0/chart.min.js": "/static/js/charts/chart.min.js",
"https://cdn.jsdelivr.net/npm/chart.js": "/static/js/charts/chart.min.js",
# FontAwesome (bereits lokal verfügbar)
"https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css": "/static/fontawesome/css/all.min.css",
# Weitere CDN-Fallbacks können hier hinzugefügt werden
}
def get_local_asset_path(cdn_url: str) -> Optional[str]:
"""
Gibt den lokalen Pfad für eine CDN-URL zurück.
Args:
cdn_url: URL des CDN-Assets
Returns:
str: Lokaler Pfad oder None wenn kein Fallback verfügbar
"""
return CDN_FALLBACKS.get(cdn_url)
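# Example (sketch):
#
#   get_local_asset_path("https://cdn.jsdelivr.net/npm/chart.js")
#   # -> "/static/js/charts/chart.min.js"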
def replace_cdn_links(html_content: str) -> str:
"""
Ersetzt CDN-Links durch lokale Fallbacks im HTML-Inhalt.
Args:
html_content: HTML-Inhalt mit CDN-Links
Returns:
str: HTML-Inhalt mit lokalen Links
"""
if not OFFLINE_MODE:
return html_content
modified_content = html_content
for cdn_url, local_path in CDN_FALLBACKS.items():
if cdn_url in modified_content:
modified_content = modified_content.replace(cdn_url, local_path)
logger.info(f"🔄 CDN-Link ersetzt: {cdn_url} -> {local_path}")
return modified_content
# ===== SECURITY POLICY ANPASSUNGEN =====
def get_offline_csp_policy() -> Dict[str, List[str]]:
"""
Gibt CSP-Policy für Offline-Modus zurück.
Entfernt externe CDN-Domains aus der Policy.
Returns:
Dict: CSP-Policy ohne externe Domains
"""
if not OFFLINE_MODE:
# Online-Modus: Originale Policy mit CDNs
return {
"script-src": [
"'self'",
"'unsafe-inline'",
"'unsafe-eval'",
"https://cdn.jsdelivr.net",
"https://unpkg.com",
"https://cdnjs.cloudflare.com"
],
"style-src": [
"'self'",
"'unsafe-inline'",
"https://fonts.googleapis.com",
"https://cdn.jsdelivr.net"
],
"font-src": [
"'self'",
"https://fonts.gstatic.com"
]
}
else:
# Offline-Modus: Nur lokale Ressourcen
return {
"script-src": [
"'self'",
"'unsafe-inline'",
"'unsafe-eval'"
],
"style-src": [
"'self'",
"'unsafe-inline'"
],
"font-src": [
"'self'"
],
"img-src": [
"'self'",
"data:"
]
}
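# A policy dict from get_offline_csp_policy() can be serialized into a
# Content-Security-Policy header value like this (sketch):
#
#   policy = get_offline_csp_policy()
#   csp_header = "; ".join(f"{directive} {' '.join(sources)}"
#                          for directive, sources in policy.items())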
# ===== OFFLINE-MODUS HILFSFUNKTIONEN =====
def log_offline_mode_status():
"""Loggt den aktuellen Offline-Modus Status."""
if OFFLINE_MODE:
logger.info("🌐 System läuft im OFFLINE-MODUS")
logger.info(" ❌ OAuth deaktiviert")
logger.info(" ❌ E-Mail-Versand deaktiviert (nur Logging)")
logger.info(" ❌ CDN-Links werden durch lokale Dateien ersetzt")
logger.info(" ✅ Alle Kernfunktionen verfügbar")
else:
logger.info("🌐 System läuft im ONLINE-MODUS")
logger.info(" ✅ OAuth verfügbar")
logger.info(" ✅ E-Mail-Versand verfügbar")
logger.info(" ✅ CDN-Links verfügbar")
def get_feature_availability() -> Dict[str, bool]:
"""
Gibt die Verfügbarkeit verschiedener Features zurück.
Returns:
Dict: Feature-Verfügbarkeit
"""
return {
"oauth": is_oauth_available(),
"email_sending": is_email_sending_available(),
"cdn_resources": is_cdn_available(),
"offline_mode": OFFLINE_MODE,
"core_functionality": True, # Kernfunktionen immer verfügbar
"printer_control": True, # Drucker-Steuerung immer verfügbar
"job_management": True, # Job-Verwaltung immer verfügbar
"user_management": True # Benutzer-Verwaltung immer verfügbar
}
# ===== STARTUP-FUNKTIONEN =====
def initialize_offline_mode():
"""Initialisiert den Offline-Modus beim System-Start."""
log_offline_mode_status()
if OFFLINE_MODE:
logger.info("🔧 Initialisiere Offline-Modus-Anpassungen...")
# Prüfe ob lokale Chart.js verfügbar ist
chart_js_path = "static/js/charts/chart.min.js"
if not os.path.exists(chart_js_path):
logger.warning(f"⚠️ Lokale Chart.js nicht gefunden: {chart_js_path}")
logger.warning(" Diagramme könnten nicht funktionieren")
else:
logger.info(f"✅ Lokale Chart.js gefunden: {chart_js_path}")
# Prüfe weitere lokale Assets
fontawesome_path = "static/fontawesome/css/all.min.css"
if not os.path.exists(fontawesome_path):
logger.warning(f"⚠️ Lokale FontAwesome nicht gefunden: {fontawesome_path}")
else:
logger.info(f"✅ Lokale FontAwesome gefunden: {fontawesome_path}")
logger.info("✅ Offline-Modus erfolgreich initialisiert")
# Beim Import automatisch initialisieren
initialize_offline_mode()

216
utils/optimize_frontend.py Normal file
View File

@@ -0,0 +1,216 @@
#!/usr/bin/env python3
"""
Frontend Optimization Script for MYP Platform
Optimizes JavaScript and CSS files for better performance
"""
import os
import re
import gzip
import shutil
from pathlib import Path
def minify_file(content, file_type='js'):
"""Basic minification - removes comments and extra whitespace"""
if file_type == 'js':
# Remove single-line comments
lines = content.split('\n')
cleaned_lines = []
for line in lines:
# Skip lines that are only comments
stripped = line.strip()
if stripped.startswith('//'):
continue
# Remove inline comments
if '//' in line:
line = line.split('//')[0].rstrip()
cleaned_lines.append(line)
content = '\n'.join(cleaned_lines)
# Remove multi-line comments
content = re.sub(r'/\*[\s\S]*?\*/', '', content)
# Remove extra whitespace
content = re.sub(r'\s+', ' ', content)
content = re.sub(r'\s*([{}();,:])\s*', r'\1', content)
elif file_type == 'css':
# Remove CSS comments
content = re.sub(r'/\*[\s\S]*?\*/', '', content)
# Remove extra whitespace
content = re.sub(r'\s+', ' ', content)
content = re.sub(r'\s*([{}:;,])\s*', r'\1', content)
return content.strip()
def compress_file(file_path, force=False):
"""Compress file with gzip"""
gz_path = file_path + '.gz'
# Skip if already compressed and not forcing
if os.path.exists(gz_path) and not force:
return False
with open(file_path, 'rb') as f_in:
with gzip.open(gz_path, 'wb', compresslevel=9) as f_out:
shutil.copyfileobj(f_in, f_out)
return True
def optimize_js_files(js_dir):
"""Optimize JavaScript files"""
js_path = Path(js_dir)
optimized_count = 0
for js_file in js_path.glob('*.js'):
# Skip already minified files
if js_file.name.endswith('.min.js'):
continue
min_file = js_file.with_suffix('.min.js')
# Skip if minified version already exists
if min_file.exists():
continue
print(f"Optimizing {js_file.name}...")
# Read and minify
content = js_file.read_text(encoding='utf-8')
minified = minify_file(content, 'js')
# Write minified version
min_file.write_text(minified, encoding='utf-8')
# Compress both versions
compress_file(str(js_file))
compress_file(str(min_file))
optimized_count += 1
return optimized_count
def optimize_css_files(css_dir):
"""Optimize CSS files"""
css_path = Path(css_dir)
optimized_count = 0
for css_file in css_path.glob('*.css'):
# Skip already minified files
if css_file.name.endswith('.min.css'):
continue
min_file = css_file.with_suffix('.min.css')
# Skip if minified version already exists
if min_file.exists():
continue
print(f"Optimizing {css_file.name}...")
# Read and minify
content = css_file.read_text(encoding='utf-8')
minified = minify_file(content, 'css')
# Write minified version
min_file.write_text(minified, encoding='utf-8')
# Compress both versions
compress_file(str(css_file))
compress_file(str(min_file))
optimized_count += 1
return optimized_count
def create_bundle_js(js_dir):
"""Create bundled JavaScript file with core utilities"""
js_path = Path(js_dir)
# Core files to bundle in order
core_files = [
'core-utilities.js',
'dark-mode.js',
'user-dropdown.js'
]
bundle_content = []
for file_name in core_files:
file_path = js_path / file_name
if file_path.exists():
content = file_path.read_text(encoding='utf-8')
bundle_content.append(f"/* === {file_name} === */\n{content}\n")
if bundle_content:
bundle_path = js_path / 'core-bundle.min.js'
bundled = '\n'.join(bundle_content)
minified = minify_file(bundled, 'js')
bundle_path.write_text(minified, encoding='utf-8')
compress_file(str(bundle_path))
print(f"Created core bundle: {bundle_path.name}")
def main():
"""Main optimization function"""
base_dir = Path(__file__).parent.parent
static_dir = base_dir / 'static'
js_dir = static_dir / 'js'
css_dir = static_dir / 'css'
print("Starting frontend optimization...")
# Optimize JavaScript
js_count = optimize_js_files(js_dir)
print(f"Optimized {js_count} JavaScript files")
# Optimize CSS
css_count = optimize_css_files(css_dir)
print(f"Optimized {css_count} CSS files")
# Create JavaScript bundle
create_bundle_js(js_dir)
# Compress performance-optimized.css if not already done
perf_css = css_dir / 'performance-optimized.css'
if perf_css.exists():
compress_file(str(perf_css), force=True)
# Create minified version
min_perf_css = css_dir / 'performance-optimized.min.css'
if not min_perf_css.exists():
content = perf_css.read_text(encoding='utf-8')
minified = minify_file(content, 'css')
min_perf_css.write_text(minified, encoding='utf-8')
compress_file(str(min_perf_css))
# Compress core-utilities files
core_js = js_dir / 'core-utilities.js'
core_css = css_dir / 'core-utilities.css'
if core_js.exists():
compress_file(str(core_js), force=True)
# Create minified version
min_core_js = js_dir / 'core-utilities.min.js'
if not min_core_js.exists():
content = core_js.read_text(encoding='utf-8')
minified = minify_file(content, 'js')
min_core_js.write_text(minified, encoding='utf-8')
compress_file(str(min_core_js))
if core_css.exists():
compress_file(str(core_css), force=True)
# Create minified version
min_core_css = css_dir / 'core-utilities.min.css'
if not min_core_css.exists():
content = core_css.read_text(encoding='utf-8')
minified = minify_file(content, 'css')
min_core_css.write_text(minified, encoding='utf-8')
compress_file(str(min_core_css))
print("\nOptimization complete!")
print("Remember to update templates to use minified versions in production.")
if __name__ == "__main__":
main()

637
utils/permissions.py Normal file
View File

@@ -0,0 +1,637 @@
#!/usr/bin/env python3
"""
Extended permission management for the MYP Platform
Granular roles and permissions for fine-grained access control
"""
from enum import Enum
from functools import wraps
from typing import List, Dict, Set, Optional
from flask import request, jsonify, abort
from flask_login import login_required, current_user
from sqlalchemy import Column, Integer, String, Boolean, ForeignKey, Table, DateTime, MetaData
from sqlalchemy.orm import relationship
from datetime import datetime, timedelta
from utils.logging_config import get_logger
logger = get_logger("permissions")
# ===== PERMISSION DEFINITIONS =====
class Permission(Enum):
"""Alle verfügbaren Berechtigungen im System"""
# Basis-Berechtigungen
LOGIN = "login"
VIEW_DASHBOARD = "view_dashboard"
# Drucker-Berechtigungen
VIEW_PRINTERS = "view_printers"
CREATE_PRINTER = "create_printer"
EDIT_PRINTER = "edit_printer"
DELETE_PRINTER = "delete_printer"
CONTROL_PRINTER = "control_printer" # Ein-/Ausschalten
VIEW_PRINTER_DETAILS = "view_printer_details"
# Job-Berechtigungen
VIEW_JOBS = "view_jobs"
CREATE_JOB = "create_job"
EDIT_OWN_JOB = "edit_own_job"
EDIT_ALL_JOBS = "edit_all_jobs"
DELETE_OWN_JOB = "delete_own_job"
DELETE_ALL_JOBS = "delete_all_jobs"
EXTEND_JOB = "extend_job"
CANCEL_JOB = "cancel_job"
VIEW_JOB_HISTORY = "view_job_history"
APPROVE_JOBS = "approve_jobs" # Berechtigung zum Genehmigen und Verwalten von Jobs
# Benutzer-Berechtigungen
VIEW_USERS = "view_users"
CREATE_USER = "create_user"
EDIT_USER = "edit_user"
DELETE_USER = "delete_user"
MANAGE_ROLES = "manage_roles"
VIEW_USER_DETAILS = "view_user_details"
# Admin-Berechtigungen
VIEW_ADMIN_PANEL = "view_admin_panel"
MANAGE_SYSTEM = "manage_system"
VIEW_LOGS = "view_logs"
EXPORT_DATA = "export_data"
BACKUP_DATABASE = "backup_database"
MANAGE_SETTINGS = "manage_settings"
ADMIN = "admin" # Allgemeine Admin-Berechtigung für administrative Funktionen
# Gast-Berechtigungen
VIEW_GUEST_REQUESTS = "view_guest_requests"
CREATE_GUEST_REQUEST = "create_guest_request"
APPROVE_GUEST_REQUEST = "approve_guest_request"
DENY_GUEST_REQUEST = "deny_guest_request"
MANAGE_GUEST_REQUESTS = "manage_guest_requests"
# Statistik-Berechtigungen
VIEW_STATS = "view_stats"
VIEW_DETAILED_STATS = "view_detailed_stats"
EXPORT_STATS = "export_stats"
# Kalender-Berechtigungen
VIEW_CALENDAR = "view_calendar"
EDIT_CALENDAR = "edit_calendar"
MANAGE_SHIFTS = "manage_shifts"
# Wartung-Berechtigungen
SCHEDULE_MAINTENANCE = "schedule_maintenance"
VIEW_MAINTENANCE = "view_maintenance"
PERFORM_MAINTENANCE = "perform_maintenance"
class Role(Enum):
"""Vordefinierte Rollen mit Standard-Berechtigungen"""
GUEST = "guest"
USER = "user"
POWER_USER = "power_user"
TECHNICIAN = "technician"
SUPERVISOR = "supervisor"
ADMIN = "admin"
SUPER_ADMIN = "super_admin"
# ===== ROLE PERMISSIONS MAPPING =====
ROLE_PERMISSIONS = {
Role.GUEST: {
Permission.LOGIN,
Permission.VIEW_PRINTERS,
Permission.CREATE_GUEST_REQUEST,
Permission.VIEW_CALENDAR,
},
Role.USER: {
Permission.LOGIN,
Permission.VIEW_DASHBOARD,
Permission.VIEW_PRINTERS,
Permission.VIEW_JOBS,
Permission.CREATE_JOB,
Permission.EDIT_OWN_JOB,
Permission.DELETE_OWN_JOB,
Permission.EXTEND_JOB,
Permission.CANCEL_JOB,
Permission.VIEW_STATS,
Permission.VIEW_CALENDAR,
Permission.CREATE_GUEST_REQUEST,
},
}
# Power User erweitert User-Permissions
ROLE_PERMISSIONS[Role.POWER_USER] = ROLE_PERMISSIONS[Role.USER] | {
Permission.VIEW_PRINTER_DETAILS,
Permission.VIEW_JOB_HISTORY,
Permission.VIEW_DETAILED_STATS,
Permission.EXPORT_STATS,
Permission.VIEW_GUEST_REQUESTS,
}
# Technician erweitert Power User-Permissions
ROLE_PERMISSIONS[Role.TECHNICIAN] = ROLE_PERMISSIONS[Role.POWER_USER] | {
Permission.CONTROL_PRINTER,
Permission.EDIT_PRINTER,
Permission.SCHEDULE_MAINTENANCE,
Permission.VIEW_MAINTENANCE,
Permission.PERFORM_MAINTENANCE,
Permission.EDIT_CALENDAR,
}
# Supervisor erweitert Technician-Permissions
ROLE_PERMISSIONS[Role.SUPERVISOR] = ROLE_PERMISSIONS[Role.TECHNICIAN] | {
Permission.CREATE_PRINTER,
Permission.EDIT_ALL_JOBS,
Permission.DELETE_ALL_JOBS,
Permission.VIEW_USERS,
Permission.APPROVE_GUEST_REQUEST,
Permission.DENY_GUEST_REQUEST,
Permission.MANAGE_GUEST_REQUESTS,
Permission.MANAGE_SHIFTS,
Permission.VIEW_USER_DETAILS,
Permission.APPROVE_JOBS, # Jobs genehmigen und verwalten
}
# Admin erweitert Supervisor-Permissions
ROLE_PERMISSIONS[Role.ADMIN] = ROLE_PERMISSIONS[Role.SUPERVISOR] | {
Permission.DELETE_PRINTER,
Permission.VIEW_ADMIN_PANEL,
Permission.CREATE_USER,
Permission.EDIT_USER,
Permission.DELETE_USER,
Permission.EXPORT_DATA,
Permission.VIEW_LOGS,
Permission.MANAGE_SETTINGS,
Permission.ADMIN, # Allgemeine Admin-Berechtigung hinzufügen
}
# Super Admin hat alle Berechtigungen
ROLE_PERMISSIONS[Role.SUPER_ADMIN] = {perm for perm in Permission}
# ===== DATABASE MODELS EXTENSIONS =====
# Metadata für die Tabellen erstellen
metadata = MetaData()
# Many-to-Many Tabelle für User-Permissions
user_permissions = Table('user_permissions', metadata,
Column('user_id', Integer, ForeignKey('users.id'), primary_key=True),
Column('permission_id', Integer, ForeignKey('permissions.id'), primary_key=True)
)
# Many-to-Many Tabelle für User-Roles
user_roles = Table('user_roles', metadata,
Column('user_id', Integer, ForeignKey('users.id'), primary_key=True),
Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True)
)
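# The RoleModel.permissions relationship below references a "role_permissions"
# secondary table that is not defined anywhere in this module; this is the
# minimal shape it would need (sketch):
role_permissions = Table('role_permissions', metadata,
    Column('role_id', Integer, ForeignKey('roles.id'), primary_key=True),
    Column('permission_id', Integer, ForeignKey('permissions.id'), primary_key=True)
)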
class PermissionModel:
"""Datenbank-Model für Berechtigungen"""
__tablename__ = 'permissions'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
description = Column(String(255))
category = Column(String(50)) # Gruppierung von Berechtigungen
created_at = Column(DateTime, default=datetime.now)
class RoleModel:
"""Datenbank-Model für Rollen"""
__tablename__ = 'roles'
id = Column(Integer, primary_key=True)
name = Column(String(50), unique=True, nullable=False)
display_name = Column(String(100))
description = Column(String(255))
is_system_role = Column(Boolean, default=False) # System-Rollen können nicht gelöscht werden
created_at = Column(DateTime, default=datetime.now)
# Relationships
permissions = relationship("PermissionModel", secondary="role_permissions", back_populates="roles")
class UserPermissionOverride:
"""Temporäre oder spezielle Berechtigungsüberschreibungen"""
__tablename__ = 'user_permission_overrides'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False)
permission = Column(String(100), nullable=False)
granted = Column(Boolean, nullable=False) # True = gewährt, False = verweigert
reason = Column(String(255))
granted_by = Column(Integer, ForeignKey('users.id'))
expires_at = Column(DateTime, nullable=True) # NULL = permanent
created_at = Column(DateTime, default=datetime.now)
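# Note: PermissionModel, RoleModel and UserPermissionOverride are declared as
# plain classes here; to actually be mapped they would have to inherit from the
# project's declarative Base (presumably defined in models.py).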
# ===== PERMISSION CHECKER CLASS =====
class PermissionChecker:
"""Hauptklasse für Berechtigungsprüfungen"""
def __init__(self, user=None):
self.user = user or current_user
self._permission_cache = {}
self._cache_timeout = timedelta(minutes=5)
self._cache_timestamp = None
def has_permission(self, permission: Permission) -> bool:
"""
Prüft ob der Benutzer eine bestimmte Berechtigung hat
Args:
permission: Die zu prüfende Berechtigung
Returns:
bool: True wenn Berechtigung vorhanden
"""
if not self.user or not self.user.is_authenticated:
return False
# Cache prüfen
if self._is_cache_valid() and permission.value in self._permission_cache:
return self._permission_cache[permission.value]
# Berechtigungen neu berechnen
has_perm = self._calculate_permission(permission)
# Cache aktualisieren
self._update_cache(permission.value, has_perm)
return has_perm
def _calculate_permission(self, permission: Permission) -> bool:
"""Berechnet ob eine Berechtigung vorhanden ist"""
# Super Admin hat alle Rechte
if hasattr(self.user, 'is_super_admin') and self.user.is_super_admin:
return True
# Explizite Überschreibungen prüfen
override = self._check_permission_override(permission)
if override is not None:
return override
# Rollen-basierte Berechtigungen prüfen
user_roles = self._get_user_roles()
for role in user_roles:
if permission in ROLE_PERMISSIONS.get(role, set()):
return True
# Direkte Benutzer-Berechtigungen prüfen
if hasattr(self.user, 'permissions'):
user_permissions = [Permission(p.name) for p in self.user.permissions if hasattr(Permission, p.name.upper())]
if permission in user_permissions:
return True
return False
def _check_permission_override(self, permission: Permission) -> Optional[bool]:
"""Prüft ob es eine Berechtigungsüberschreibung gibt"""
if not hasattr(self.user, 'permission_overrides'):
return None
now = datetime.now()
for override in self.user.permission_overrides:
if (override.permission == permission.value and
(override.expires_at is None or override.expires_at > now)):
logger.info(f"Permission override angewendet: {permission.value} = {override.granted} für User {self.user.id}")
return override.granted
return None
def _get_user_roles(self) -> List[Role]:
"""Holt die Rollen des Benutzers"""
roles = []
# Legacy Admin-Check
if hasattr(self.user, 'is_admin') and self.user.is_admin:
roles.append(Role.ADMIN)
# Neue Rollen-System
if hasattr(self.user, 'roles'):
for role_model in self.user.roles:
try:
role = Role(role_model.name)
roles.append(role)
except ValueError:
logger.warning(f"Unbekannte Rolle: {role_model.name}")
# Standard-Rolle wenn keine andere definiert
if not roles:
roles.append(Role.USER)
return roles
def _is_cache_valid(self) -> bool:
"""Prüft ob der Permission-Cache noch gültig ist"""
if self._cache_timestamp is None:
return False
return datetime.now() - self._cache_timestamp < self._cache_timeout
def _update_cache(self, permission: str, has_permission: bool):
"""Aktualisiert den Permission-Cache"""
if self._cache_timestamp is None or not self._is_cache_valid():
self._permission_cache = {}
self._cache_timestamp = datetime.now()
self._permission_cache[permission] = has_permission
def get_all_permissions(self) -> Set[Permission]:
"""Gibt alle Berechtigungen des Benutzers zurück"""
permissions = set()
for permission in Permission:
if self.has_permission(permission):
permissions.add(permission)
return permissions
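# Example (sketch): gating a UI element on a single permission.
#
#   checker = PermissionChecker()
#   if checker.has_permission(Permission.CONTROL_PRINTER):
#       ...  # render the power-toggle control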
def can_access_resource(self, resource_type: str, resource_id: int = None, action: str = "view") -> bool:
"""
Prüft Zugriff auf spezifische Ressourcen
Args:
resource_type: Art der Ressource (job, printer, user, etc.)
resource_id: ID der Ressource (optional)
action: Aktion (view, edit, delete, etc.)
Returns:
bool: True wenn Zugriff erlaubt
"""
# Resource-spezifische Logik
if resource_type == "job":
return self._check_job_access(resource_id, action)
elif resource_type == "printer":
return self._check_printer_access(resource_id, action)
elif resource_type == "user":
return self._check_user_access(resource_id, action)
return False
def _check_job_access(self, job_id: int, action: str) -> bool:
"""Prüft Job-spezifische Zugriffsrechte"""
if action == "view":
if self.has_permission(Permission.VIEW_JOBS):
return True
elif action == "edit":
if self.has_permission(Permission.EDIT_ALL_JOBS):
return True
if self.has_permission(Permission.EDIT_OWN_JOB) and job_id:
# Prüfen ob eigener Job (vereinfacht)
return self._is_own_job(job_id)
elif action == "delete":
if self.has_permission(Permission.DELETE_ALL_JOBS):
return True
if self.has_permission(Permission.DELETE_OWN_JOB) and job_id:
return self._is_own_job(job_id)
return False
def _check_printer_access(self, printer_id: int, action: str) -> bool:
"""Prüft Drucker-spezifische Zugriffsrechte"""
if action == "view":
return self.has_permission(Permission.VIEW_PRINTERS)
elif action == "edit":
return self.has_permission(Permission.EDIT_PRINTER)
elif action == "delete":
return self.has_permission(Permission.DELETE_PRINTER)
elif action == "control":
return self.has_permission(Permission.CONTROL_PRINTER)
return False
def _check_user_access(self, user_id: int, action: str) -> bool:
"""Prüft Benutzer-spezifische Zugriffsrechte"""
if action == "view":
if self.has_permission(Permission.VIEW_USERS):
return True
# Eigenes Profil ansehen
if user_id == self.user.id:
return True
elif action == "edit":
if self.has_permission(Permission.EDIT_USER):
return True
# Eigenes Profil bearbeiten (begrenzt)
if user_id == self.user.id:
return True
elif action == "delete":
if self.has_permission(Permission.DELETE_USER) and user_id != self.user.id:
return True
return False
def _is_own_job(self, job_id: int) -> bool:
"""Hilfsfunktion um zu prüfen ob Job dem Benutzer gehört"""
# Vereinfachte Implementierung - sollte mit DB-Query implementiert werden
try:
from models import Job, get_db_session
db_session = get_db_session()
job = db_session.query(Job).filter(Job.id == job_id).first()
db_session.close()
return job and (job.user_id == self.user.id or job.owner_id == self.user.id)
except Exception as e:
logger.error(f"Fehler bei Job-Ownership-Check: {e}")
return False
# ===== DECORATORS =====
def require_permission(permission: Permission):
"""
Decorator der eine bestimmte Berechtigung erfordert
Args:
permission: Die erforderliche Berechtigung
"""
def decorator(f):
@wraps(f)
@login_required
def wrapper(*args, **kwargs):
checker = PermissionChecker()
if not checker.has_permission(permission):
logger.warning(f"Zugriff verweigert: User {current_user.id} hat keine Berechtigung {permission.value}")
if request.path.startswith('/api/'):
return jsonify({
'error': 'Insufficient permissions',
'message': f'Berechtigung "{permission.value}" erforderlich',
'required_permission': permission.value
}), 403
else:
abort(403)
return f(*args, **kwargs)
return wrapper
return decorator
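# Example usage (sketch; route and handler names are illustrative):
#
#   @app.route('/api/printers/<int:id>/toggle', methods=['POST'])
#   @require_permission(Permission.CONTROL_PRINTER)
#   def toggle_printer(id):
#       ...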
def require_role(role: Role):
"""
Decorator der eine bestimmte Rolle erfordert
Args:
role: Die erforderliche Rolle
"""
def decorator(f):
@wraps(f)
@login_required
def wrapper(*args, **kwargs):
checker = PermissionChecker()
user_roles = checker._get_user_roles()
if role not in user_roles:
logger.warning(f"Zugriff verweigert: User {current_user.id} hat nicht die Rolle {role.value}")
if request.path.startswith('/api/'):
return jsonify({
'error': 'Insufficient role',
'message': f'Rolle "{role.value}" erforderlich',
'required_role': role.value
}), 403
else:
abort(403)
return f(*args, **kwargs)
return wrapper
return decorator
def require_resource_access(resource_type: str, action: str = "view"):
"""
Decorator für ressourcen-spezifische Berechtigungsprüfung
Args:
resource_type: Art der Ressource
action: Erforderliche Aktion
"""
def decorator(f):
@wraps(f)
@login_required
def wrapper(*args, **kwargs):
# Resource ID aus URL-Parametern extrahieren
resource_id = kwargs.get('id') or kwargs.get(f'{resource_type}_id')
checker = PermissionChecker()
if not checker.can_access_resource(resource_type, resource_id, action):
logger.warning(f"Ressourcen-Zugriff verweigert: User {current_user.id}, {resource_type}:{resource_id}, Action: {action}")
if request.path.startswith('/api/'):
return jsonify({
'error': 'Resource access denied',
'message': f'Zugriff auf {resource_type} nicht erlaubt',
'resource_type': resource_type,
'action': action
}), 403
else:
abort(403)
return f(*args, **kwargs)
return wrapper
return decorator
# ===== UTILITY FUNCTIONS =====
def check_permission(permission: Permission, user=None) -> bool:
"""
Standalone-Funktion zur Berechtigungsprüfung
Args:
permission: Die zu prüfende Berechtigung
user: Benutzer (optional, default: current_user)
Returns:
bool: True wenn Berechtigung vorhanden
"""
checker = PermissionChecker(user)
return checker.has_permission(permission)
def get_user_permissions(user=None) -> Set[Permission]:
"""
Gibt alle Berechtigungen eines Benutzers zurück
Args:
user: Benutzer (optional, default: current_user)
Returns:
Set[Permission]: Alle Berechtigungen des Benutzers
"""
checker = PermissionChecker(user)
return checker.get_all_permissions()
def grant_temporary_permission(user_id: int, permission: Permission, duration_hours: int = 24, reason: str = "", granted_by_id: int = None):
"""
Gewährt temporäre Berechtigung
Args:
user_id: ID des Benutzers
permission: Die zu gewährende Berechtigung
duration_hours: Dauer in Stunden
reason: Begründung
granted_by_id: ID des gewährenden Benutzers
"""
try:
from models import get_db_session
db_session = get_db_session()
override = UserPermissionOverride(
user_id=user_id,
permission=permission.value,
granted=True,
reason=reason,
granted_by=granted_by_id or (current_user.id if current_user.is_authenticated else None),
expires_at=datetime.now() + timedelta(hours=duration_hours)
)
db_session.add(override)
db_session.commit()
db_session.close()
logger.info(f"Temporäre Berechtigung gewährt: {permission.value} für User {user_id} ({duration_hours}h)")
except Exception as e:
logger.error(f"Fehler beim Gewähren temporärer Berechtigung: {e}")
# ===== TEMPLATE HELPERS =====
def init_permission_helpers(app):
"""
Registriert Template-Helper für Berechtigungen
Args:
app: Flask-App-Instanz
"""
@app.template_global()
def has_permission(permission_name: str) -> bool:
"""Template Helper für Berechtigungsprüfung"""
try:
permission = Permission(permission_name)
return check_permission(permission)
except ValueError:
return False
@app.template_global()
def has_role(role_name: str) -> bool:
"""Template Helper für Rollenprüfung"""
try:
role = Role(role_name)
checker = PermissionChecker()
return role in checker._get_user_roles()
except ValueError:
return False
@app.template_global()
def can_access(resource_type: str, resource_id: int = None, action: str = "view") -> bool:
"""Template Helper für Ressourcen-Zugriff"""
checker = PermissionChecker()
return checker.can_access_resource(resource_type, resource_id, action)
logger.info("🔐 Permission Template Helpers registriert")

824
utils/printer_monitor.py Normal file
View File

@@ -0,0 +1,824 @@
"""
Live printer monitor for the MYP Platform
Monitors printer status in real time with session caching and automatic smart-plug initialization.
"""
import time
import threading
import requests
import subprocess
import ipaddress
from datetime import datetime, timedelta
from typing import Dict, Tuple, List, Optional
from flask import session
from sqlalchemy import func
from sqlalchemy.orm import Session
import os
from models import get_db_session, Printer, PlugStatusLog
from utils.logging_config import get_logger
from config.settings import PRINTERS, TAPO_USERNAME, TAPO_PASSWORD, DEFAULT_TAPO_IPS, TAPO_AUTO_DISCOVERY
# TP-Link Tapo P110 Unterstützung hinzufügen
try:
from PyP100 import PyP100
TAPO_AVAILABLE = True
except ImportError:
TAPO_AVAILABLE = False
# Logger initialisieren
monitor_logger = get_logger("printer_monitor")
class PrinterMonitor:
"""
Live-Drucker-Monitor mit Session-Caching und automatischer Initialisierung.
"""
def __init__(self):
self.session_cache = {} # Session-basierter Cache für schnelle Zugriffe
self.db_cache = {} # Datenbank-Cache für persistente Daten
self.cache_lock = threading.Lock()
self.last_db_sync = datetime.now()
self.monitoring_active = False
self.monitor_thread = None
self.startup_initialized = False
self.auto_discovered_tapo = False
# Cache-Konfiguration
self.session_cache_ttl = 30 # 30 Sekunden für Session-Cache
self.db_cache_ttl = 300 # 5 Minuten für DB-Cache
monitor_logger.info("🖨️ Drucker-Monitor initialisiert")
# Automatische Steckdosenerkennung in separatem Thread starten, falls aktiviert
if TAPO_AUTO_DISCOVERY:
discovery_thread = threading.Thread(
target=self._run_auto_discovery,
daemon=True,
name="TapoAutoDiscovery"
)
discovery_thread.start()
monitor_logger.info("🔍 Automatische Tapo-Erkennung in separatem Thread gestartet")
def _run_auto_discovery(self):
"""
Führt die automatische Tapo-Erkennung in einem separaten Thread aus.
"""
try:
# Kurze Verzögerung um sicherzustellen, dass die Hauptanwendung Zeit hat zu starten
time.sleep(2)
self.auto_discover_tapo_outlets()
except Exception as e:
monitor_logger.error(f"❌ Fehler bei automatischer Tapo-Erkennung: {str(e)}")
def initialize_all_outlets_on_startup(self) -> Dict[str, bool]:
"""
Schaltet beim Programmstart alle gespeicherten Steckdosen aus (gleicher Startzustand).
Returns:
Dict[str, bool]: Ergebnis der Initialisierung pro Drucker
"""
if self.startup_initialized:
monitor_logger.info("🔄 Steckdosen bereits beim Start initialisiert")
return {}
monitor_logger.info("🚀 Starte Steckdosen-Initialisierung beim Programmstart...")
results = {}
try:
db_session = get_db_session()
printers = db_session.query(Printer).filter(Printer.active == True).all()
if not printers:
monitor_logger.warning("⚠️ Keine aktiven Drucker zur Initialisierung gefunden")
db_session.close()
self.startup_initialized = True
return results
# Alle Steckdosen ausschalten für einheitlichen Startzustand
for printer in printers:
try:
if printer.plug_ip and printer.plug_username and printer.plug_password:
success = self._turn_outlet_off(
printer.plug_ip,
printer.plug_username,
printer.plug_password,
printer_id=printer.id
)
results[printer.name] = success
if success:
monitor_logger.info(f"{printer.name}: Steckdose ausgeschaltet")
# Status in Datenbank aktualisieren
printer.status = "offline"
printer.last_checked = datetime.now()
else:
monitor_logger.warning(f"{printer.name}: Steckdose konnte nicht ausgeschaltet werden")
else:
monitor_logger.warning(f"⚠️ {printer.name}: Unvollständige Steckdosen-Konfiguration")
results[printer.name] = False
except Exception as e:
monitor_logger.error(f"❌ Fehler bei Initialisierung von {printer.name}: {str(e)}")
results[printer.name] = False
# Änderungen speichern
db_session.commit()
db_session.close()
success_count = sum(1 for success in results.values() if success)
total_count = len(results)
monitor_logger.info(f"🎯 Steckdosen-Initialisierung abgeschlossen: {success_count}/{total_count} erfolgreich")
self.startup_initialized = True
except Exception as e:
    monitor_logger.error(f"❌ Kritischer Fehler bei Steckdosen-Initialisierung: {str(e)}")
    if 'db_session' in locals():
        db_session.close()
    results = {}
return results
def _turn_outlet_off(self, ip_address: str, username: str, password: str, timeout: int = 5, printer_id: int = None) -> bool:
"""
Schaltet eine TP-Link Tapo P110-Steckdose aus.
Args:
ip_address: IP-Adresse der Steckdose
username: Benutzername für die Steckdose (wird überschrieben)
password: Passwort für die Steckdose (wird überschrieben)
timeout: Timeout in Sekunden (wird ignoriert, da PyP100 eigenes Timeout hat)
printer_id: ID des zugehörigen Druckers (für Logging)
Returns:
bool: True wenn erfolgreich ausgeschaltet
"""
if not TAPO_AVAILABLE:
monitor_logger.error("⚠️ PyP100-Modul nicht verfügbar - kann Tapo-Steckdose nicht schalten")
# Logging: Fehlgeschlagener Versuch
if printer_id:
try:
PlugStatusLog.log_status_change(
printer_id=printer_id,
status="disconnected",
source="system",
ip_address=ip_address,
error_message="PyP100-Modul nicht verfügbar",
notes="Startup-Initialisierung fehlgeschlagen"
)
except Exception as log_error:
monitor_logger.warning(f"Fehler beim Loggen des Steckdosen-Status: {log_error}")
return False
# IMMER globale Anmeldedaten verwenden (da diese funktionieren)
username = TAPO_USERNAME
password = TAPO_PASSWORD
monitor_logger.debug(f"🔧 Verwende globale Tapo-Anmeldedaten für {ip_address}")
start_time = time.time()
try:
# TP-Link Tapo P100 Verbindung herstellen (P100 statt P110)
from PyP100 import PyP100
p100 = PyP100.P100(ip_address, username, password)
p100.handshake() # Authentifizierung
p100.login() # Login
# Steckdose ausschalten
p100.turnOff()
response_time = int((time.time() - start_time) * 1000) # in Millisekunden
monitor_logger.debug(f"✅ Tapo-Steckdose {ip_address} erfolgreich ausgeschaltet")
# Logging: Erfolgreich ausgeschaltet
if printer_id:
try:
PlugStatusLog.log_status_change(
printer_id=printer_id,
status="off",
source="system",
ip_address=ip_address,
response_time_ms=response_time,
notes="Startup-Initialisierung: Steckdose ausgeschaltet"
)
except Exception as log_error:
monitor_logger.warning(f"Fehler beim Loggen des Steckdosen-Status: {log_error}")
return True
except Exception as e:
response_time = int((time.time() - start_time) * 1000) # in Millisekunden
monitor_logger.debug(f"⚠️ Fehler beim Ausschalten der Tapo-Steckdose {ip_address}: {str(e)}")
# Logging: Fehlgeschlagener Versuch
if printer_id:
try:
PlugStatusLog.log_status_change(
printer_id=printer_id,
status="disconnected",
source="system",
ip_address=ip_address,
response_time_ms=response_time,
error_message=str(e),
notes="Startup-Initialisierung fehlgeschlagen"
)
except Exception as log_error:
monitor_logger.warning(f"Fehler beim Loggen des Steckdosen-Status: {log_error}")
return False
def get_live_printer_status(self, use_session_cache: bool = True) -> Dict[int, Dict]:
"""
Holt Live-Druckerstatus mit Session- und DB-Caching.
Args:
use_session_cache: Ob Session-Cache verwendet werden soll
Returns:
Dict[int, Dict]: Status-Dict mit Drucker-ID als Key
"""
current_time = datetime.now()
# Session-Cache prüfen (nur wenn aktiviert)
if use_session_cache and has_request_context():
session_key = "printer_status_cache"
session_timestamp_key = "printer_status_timestamp"
cached_data = session.get(session_key)
cached_timestamp = session.get(session_timestamp_key)
if cached_data and cached_timestamp:
    cache_age = (current_time - datetime.fromisoformat(cached_timestamp)).total_seconds()
    if cache_age < self.session_cache_ttl:
        monitor_logger.debug("📋 Verwende Session-Cache für Druckerstatus")
        # Session-Inhalte werden JSON-serialisiert, wodurch int-Keys zu
        # Strings werden - für konsistente Rückgabe zurückkonvertieren
        return {int(pid): info for pid, info in cached_data.items()}
# DB-Cache prüfen
with self.cache_lock:
if self.db_cache and (current_time - self.last_db_sync).total_seconds() < self.db_cache_ttl:
monitor_logger.debug("🗃️ Verwende DB-Cache für Druckerstatus")
# Session-Cache aktualisieren
if use_session_cache and has_request_context():
session["printer_status_cache"] = self.db_cache
session["printer_status_timestamp"] = current_time.isoformat()
return self.db_cache
# Live-Status von Druckern abrufen
monitor_logger.info("🔄 Aktualisiere Live-Druckerstatus...")
status_dict = self._fetch_live_printer_status()
# Caches aktualisieren
with self.cache_lock:
self.db_cache = status_dict
self.last_db_sync = current_time
if use_session_cache and has_request_context():
session["printer_status_cache"] = status_dict
session["printer_status_timestamp"] = current_time.isoformat()
return status_dict
def _fetch_live_printer_status(self) -> Dict[int, Dict]:
"""
Holt den aktuellen Status aller Drucker direkt von den Geräten.
Returns:
Dict[int, Dict]: Status-Dict mit umfassenden Informationen
"""
status_dict = {}
try:
db_session = get_db_session()
printers = db_session.query(Printer).filter(Printer.active == True).all()
# Wenn keine aktiven Drucker vorhanden sind, gebe leeres Dict zurück
if not printers:
monitor_logger.info(" Keine aktiven Drucker gefunden")
db_session.close()
return status_dict
monitor_logger.info(f"🔍 Prüfe Status von {len(printers)} aktiven Druckern...")
# Parallel-Status-Prüfung mit ThreadPoolExecutor
from concurrent.futures import ThreadPoolExecutor, as_completed
# Sicherstellen, dass max_workers mindestens 1 ist
max_workers = min(max(len(printers), 1), 8)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_printer = {
executor.submit(self._check_single_printer_status, printer): printer
for printer in printers
}
for future in as_completed(future_to_printer, timeout=15):
printer = future_to_printer[future]
try:
status_info = future.result()
status_dict[printer.id] = status_info
# Status in Datenbank aktualisieren
printer.status = status_info["status"]
printer.last_checked = datetime.now()
except Exception as e:
monitor_logger.error(f"❌ Fehler bei Status-Check für Drucker {printer.name}: {str(e)}")
status_dict[printer.id] = {
"id": printer.id,
"name": printer.name,
"status": "offline",
"active": False,
"ip_address": printer.ip_address,
"plug_ip": printer.plug_ip,
"location": printer.location,
"last_checked": datetime.now().isoformat(),
"error": str(e)
}
# Änderungen in Datenbank speichern
db_session.commit()
db_session.close()
monitor_logger.info(f"✅ Status-Update abgeschlossen für {len(status_dict)} Drucker")
except Exception as e:
    monitor_logger.error(f"❌ Kritischer Fehler beim Abrufen des Live-Status: {str(e)}")
    if 'db_session' in locals():
        db_session.close()
return status_dict
def _check_single_printer_status(self, printer: Printer, timeout: int = 7) -> Dict:
"""
Überprüft den Status eines einzelnen Druckers basierend auf der Steckdosen-Logik:
- Steckdose erreichbar aber AUS = Drucker ONLINE (bereit zum Drucken)
- Steckdose erreichbar und AN = Drucker PRINTING (druckt gerade)
- Steckdose nicht erreichbar = Drucker OFFLINE (kritischer Fehler)
Args:
printer: Printer-Objekt aus der Datenbank
timeout: Timeout in Sekunden
Returns:
Dict: Umfassende Status-Informationen
"""
status_info = {
"id": printer.id,
"name": printer.name,
"status": "offline",
"active": False,
"ip_address": printer.ip_address,
"plug_ip": printer.plug_ip,
"location": printer.location,
"last_checked": datetime.now().isoformat(),
"ping_successful": False,
"outlet_reachable": False,
"outlet_state": "unknown"
}
try:
# 1. Ping-Test für Grundkonnektivität
if printer.plug_ip:
ping_success = self._ping_address(printer.plug_ip, timeout=3)
status_info["ping_successful"] = ping_success
if ping_success:
# 2. Smart Plug Status prüfen
outlet_reachable, outlet_state = self._check_outlet_status(
printer.plug_ip,
printer.plug_username,
printer.plug_password,
timeout,
printer_id=printer.id
)
status_info["outlet_reachable"] = outlet_reachable
status_info["outlet_state"] = outlet_state
# 🎯 KORREKTE LOGIK: Steckdose erreichbar = Drucker funktionsfähig
if outlet_reachable:
if outlet_state == "off":
# Steckdose aus = Drucker ONLINE (bereit zum Drucken)
status_info["status"] = "online"
status_info["active"] = True
monitor_logger.debug(f"{printer.name}: ONLINE (Steckdose aus - bereit zum Drucken)")
elif outlet_state == "on":
# Steckdose an = Drucker PRINTING (druckt gerade)
status_info["status"] = "printing"
status_info["active"] = True
monitor_logger.debug(f"🖨️ {printer.name}: PRINTING (Steckdose an - druckt gerade)")
else:
# Unbekannter Steckdosen-Status
status_info["status"] = "error"
status_info["active"] = False
monitor_logger.warning(f"⚠️ {printer.name}: Unbekannter Steckdosen-Status '{outlet_state}'")
else:
# Steckdose nicht erreichbar = kritischer Fehler
status_info["status"] = "offline"
status_info["active"] = False
monitor_logger.warning(f"{printer.name}: OFFLINE (Steckdose nicht erreichbar)")
else:
# Ping fehlgeschlagen = Netzwerkproblem
status_info["status"] = "unreachable"
status_info["active"] = False
monitor_logger.warning(f"🔌 {printer.name}: UNREACHABLE (Ping fehlgeschlagen)")
else:
# Keine Steckdosen-IP konfiguriert
status_info["status"] = "unconfigured"
status_info["active"] = False
monitor_logger.info(f"⚙️ {printer.name}: UNCONFIGURED (keine Steckdosen-IP)")
except Exception as e:
monitor_logger.error(f"❌ Fehler bei Status-Check für {printer.name}: {str(e)}")
status_info["error"] = str(e)
status_info["status"] = "error"
status_info["active"] = False
return status_info
def _ping_address(self, ip_address: str, timeout: int = 3) -> bool:
"""
Führt einen Konnektivitätstest zu einer IP-Adresse durch.
Verwendet ausschließlich TCP-Verbindung statt Ping, um Encoding-Probleme zu vermeiden.
Args:
ip_address: Zu testende IP-Adresse
timeout: Timeout in Sekunden
Returns:
bool: True wenn Verbindung erfolgreich
"""
try:
# IP-Adresse validieren
ipaddress.ip_address(ip_address.strip())
import socket
# Erst Port 9999 versuchen (klassischer TP-Link-Smart-Plug-Port; Tapo-Geräte antworten meist erst auf 80/443)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((ip_address.strip(), 9999))
sock.close()
if result == 0:
return True
# Falls Port 9999 nicht erfolgreich, Port 80 versuchen (HTTP)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((ip_address.strip(), 80))
sock.close()
if result == 0:
return True
# Falls Port 80 nicht erfolgreich, Port 443 versuchen (HTTPS)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
result = sock.connect_ex((ip_address.strip(), 443))
sock.close()
return result == 0
except Exception as e:
monitor_logger.debug(f"❌ Fehler beim Verbindungstest zu {ip_address}: {str(e)}")
return False
def _check_outlet_status(self, ip_address: str, username: str, password: str, timeout: int = 5, printer_id: int = None) -> Tuple[bool, str]:
"""
Überprüft den Status einer TP-Link Tapo P110-Steckdose.
Args:
ip_address: IP-Adresse der Steckdose
username: Benutzername für die Steckdose
password: Passwort für die Steckdose
timeout: Timeout in Sekunden (wird ignoriert, da PyP100 eigenes Timeout hat)
printer_id: ID des zugehörigen Druckers (für Logging)
Returns:
Tuple[bool, str]: (Erreichbar, Status) - Status: "on", "off", "unknown"
"""
if not TAPO_AVAILABLE:
monitor_logger.debug("⚠️ PyP100-Modul nicht verfügbar - kann Tapo-Steckdosen-Status nicht abfragen")
# Logging: Modul nicht verfügbar
if printer_id:
try:
PlugStatusLog.log_status_change(
printer_id=printer_id,
status="disconnected",
source="system",
ip_address=ip_address,
error_message="PyP100-Modul nicht verfügbar",
notes="Status-Check fehlgeschlagen"
)
except Exception as log_error:
monitor_logger.warning(f"Fehler beim Loggen des Steckdosen-Status: {log_error}")
return False, "unknown"
# IMMER globale Anmeldedaten verwenden (da diese funktionieren)
username = TAPO_USERNAME
password = TAPO_PASSWORD
monitor_logger.debug(f"🔧 Verwende globale Tapo-Anmeldedaten für {ip_address}")
start_time = time.time()
try:
# TP-Link Tapo P100 Verbindung herstellen (P100 statt P110)
from PyP100 import PyP100
p100 = PyP100.P100(ip_address, username, password)
p100.handshake() # Authentifizierung
p100.login() # Login
# Geräteinformationen abrufen
device_info = p100.getDeviceInfo()
# Status auswerten
device_on = device_info.get('device_on', False)
status = "on" if device_on else "off"
response_time = int((time.time() - start_time) * 1000) # in Millisekunden
monitor_logger.debug(f"✅ Tapo-Steckdose {ip_address}: Status = {status}")
# Logging: Erfolgreicher Status-Check
if printer_id:
try:
# Hole zusätzliche Geräteinformationen falls verfügbar
power_consumption = None
voltage = None
current = None
firmware_version = None
try:
# Versuche Energiedaten zu holen (P110 spezifisch)
energy_usage = p100.getEnergyUsage()
if energy_usage:
power_consumption = energy_usage.get('current_power', None)
voltage = energy_usage.get('voltage', None)
current = energy_usage.get('current', None)
except Exception:
    pass  # P100 unterstützt keine Energiedaten
try:
firmware_version = device_info.get('fw_ver', None)
except Exception:
    pass
PlugStatusLog.log_status_change(
printer_id=printer_id,
status=status,
source="system",
ip_address=ip_address,
power_consumption=power_consumption,
voltage=voltage,
current=current,
response_time_ms=response_time,
firmware_version=firmware_version,
notes="Automatischer Status-Check"
)
except Exception as log_error:
monitor_logger.warning(f"Fehler beim Loggen des Steckdosen-Status: {log_error}")
return True, status
except Exception as e:
response_time = int((time.time() - start_time) * 1000) # in Millisekunden
monitor_logger.debug(f"⚠️ Fehler bei Tapo-Steckdosen-Status-Check {ip_address}: {str(e)}")
# Logging: Fehlgeschlagener Status-Check
if printer_id:
try:
PlugStatusLog.log_status_change(
printer_id=printer_id,
status="disconnected",
source="system",
ip_address=ip_address,
response_time_ms=response_time,
error_message=str(e),
notes="Status-Check fehlgeschlagen"
)
except Exception as log_error:
monitor_logger.warning(f"Fehler beim Loggen des Steckdosen-Status: {log_error}")
return False, "unknown"
def clear_all_caches(self):
"""Löscht alle Caches (Session und DB)."""
with self.cache_lock:
self.db_cache = {}
self.last_db_sync = datetime.now()
if has_request_context():
session.pop("printer_status_cache", None)
session.pop("printer_status_timestamp", None)
monitor_logger.info("🧹 Alle Drucker-Caches gelöscht")
def get_printer_summary(self) -> Dict[str, int]:
"""
Gibt eine Zusammenfassung der Druckerstatus zurück.
Returns:
Dict[str, int]: Anzahl Drucker pro Status
"""
status_dict = self.get_live_printer_status()
summary = {
"total": len(status_dict),
"online": 0,
"offline": 0,
"printing": 0, # Neuer Status: Drucker druckt gerade
"standby": 0,
"unreachable": 0,
"unconfigured": 0,
"error": 0 # Status für unbekannte Fehler
}
for printer_info in status_dict.values():
status = printer_info.get("status", "offline")
if status in summary:
summary[status] += 1
else:
# Fallback für unbekannte Status
summary["offline"] += 1
return summary
def auto_discover_tapo_outlets(self) -> Dict[str, bool]:
"""
Automatische Erkennung und Konfiguration von TP-Link Tapo P110-Steckdosen im Netzwerk.
Robuste Version mit Timeout-Behandlung und Fehler-Resilience.
Returns:
Dict[str, bool]: Ergebnis der Steckdosenerkennung mit IP als Schlüssel
"""
if self.auto_discovered_tapo:
monitor_logger.info("🔍 Tapo-Steckdosen wurden bereits erkannt")
return {}
monitor_logger.info("🔍 Starte automatische Tapo-Steckdosenerkennung...")
results = {}
start_time = time.time()
# 1. Zuerst die Standard-IPs aus der Konfiguration testen
monitor_logger.info(f"🔄 Teste {len(DEFAULT_TAPO_IPS)} Standard-IPs aus der Konfiguration")
for i, ip in enumerate(DEFAULT_TAPO_IPS):
try:
# Fortschrittsmeldung
monitor_logger.info(f"🔍 Teste IP {i+1}/{len(DEFAULT_TAPO_IPS)}: {ip}")
# Reduzierte Timeouts für schnellere Erkennung
ping_success = self._ping_address(ip, timeout=2)
if ping_success:
monitor_logger.info(f"✅ Steckdose mit IP {ip} ist erreichbar")
# Tapo-Verbindung testen mit Timeout-Schutz
if TAPO_AVAILABLE:
try:
# Timeout für Tapo-Verbindung - SIGALRM lässt sich nur im Main-Thread
# registrieren; die Discovery läuft jedoch in einem eigenen Thread,
# daher den Alarm nur dort aktivieren, wo er erlaubt ist
import signal
def timeout_handler(signum, frame):
    raise TimeoutError("Tapo-Verbindung Timeout")
use_alarm = (hasattr(signal, 'SIGALRM') and
             threading.current_thread() is threading.main_thread())
if use_alarm:
    signal.signal(signal.SIGALRM, timeout_handler)
    signal.alarm(5)  # 5 Sekunden Timeout
try:
from PyP100 import PyP100
p100 = PyP100.P100(ip, TAPO_USERNAME, TAPO_PASSWORD)
p100.handshake()
p100.login()
device_info = p100.getDeviceInfo()
# Timeout zurücksetzen
if use_alarm:
    signal.alarm(0)
# Steckdose gefunden und verbunden
nickname = device_info.get('nickname', f"Tapo P110 ({ip})")
state = "on" if device_info.get('device_on', False) else "off"
monitor_logger.info(f"✅ Tapo-Steckdose '{nickname}' ({ip}) gefunden - Status: {state}")
results[ip] = True
# Steckdose in Datenbank speichern/aktualisieren (nicht-blockierend)
try:
self._ensure_tapo_in_database(ip, nickname)
except Exception as db_error:
monitor_logger.warning(f"⚠️ Fehler beim Speichern in DB für {ip}: {str(db_error)}")
except (TimeoutError, Exception) as tapo_error:
if use_alarm:
    signal.alarm(0)  # Timeout zurücksetzen
monitor_logger.debug(f"❌ IP {ip} ist erreichbar, aber keine Tapo-Steckdose oder Timeout: {str(tapo_error)}")
results[ip] = False
except Exception as outer_error:
monitor_logger.debug(f"❌ Fehler bei Tapo-Test für {ip}: {str(outer_error)}")
results[ip] = False
else:
monitor_logger.warning("⚠️ PyP100-Modul nicht verfügbar - kann Tapo-Verbindung nicht testen")
results[ip] = False
else:
monitor_logger.debug(f"❌ IP {ip} nicht erreichbar")
results[ip] = False
except Exception as e:
monitor_logger.warning(f"❌ Fehler bei Steckdosen-Erkennung für IP {ip}: {str(e)}")
results[ip] = False
# Weiter mit nächster IP - nicht abbrechen
continue
# Erfolgsstatistik berechnen
success_count = sum(1 for success in results.values() if success)
elapsed_time = time.time() - start_time
monitor_logger.info(f"✅ Steckdosen-Erkennung abgeschlossen: {success_count}/{len(results)} Steckdosen gefunden in {elapsed_time:.1f}s")
# Markieren, dass automatische Erkennung durchgeführt wurde
self.auto_discovered_tapo = True
return results
def _ensure_tapo_in_database(self, ip_address: str, nickname: str = None) -> bool:
"""
Stellt sicher, dass eine erkannte Tapo-Steckdose in der Datenbank existiert.
Args:
ip_address: IP-Adresse der Steckdose
nickname: Name der Steckdose (optional)
Returns:
bool: True wenn erfolgreich in Datenbank gespeichert/aktualisiert
"""
try:
db_session = get_db_session()
# Prüfen, ob Drucker mit dieser IP bereits existiert
existing_printer = db_session.query(Printer).filter(Printer.plug_ip == ip_address).first()
if existing_printer:
# Drucker aktualisieren, falls nötig
if not existing_printer.plug_username or not existing_printer.plug_password:
existing_printer.plug_username = TAPO_USERNAME
existing_printer.plug_password = TAPO_PASSWORD
monitor_logger.info(f"✅ Drucker {existing_printer.name} mit Tapo-Anmeldedaten aktualisiert")
if nickname and existing_printer.name != nickname and "Tapo P110" not in existing_printer.name:
old_name = existing_printer.name
existing_printer.name = nickname
monitor_logger.info(f"✅ Drucker {old_name} umbenannt zu {nickname}")
# Drucker als aktiv markieren, da Tapo-Steckdose gefunden wurde
if not existing_printer.active:
existing_printer.active = True
monitor_logger.info(f"✅ Drucker {existing_printer.name} als aktiv markiert")
# Status aktualisieren
existing_printer.last_checked = datetime.now()
db_session.commit()
db_session.close()
return True
else:
# Neuen Drucker erstellen, falls keiner existiert
printer_name = nickname or f"Tapo P110 ({ip_address})"
mac_address = f"tapo:{ip_address.replace('.', '-')}" # Pseudo-MAC-Adresse
new_printer = Printer(
name=printer_name,
model="TP-Link Tapo P110",
location="Automatisch erkannt",
ip_address=ip_address, # Drucker-IP setzen wir gleich Steckdosen-IP
mac_address=mac_address,
plug_ip=ip_address,
plug_username=TAPO_USERNAME,
plug_password=TAPO_PASSWORD,
status="offline",
active=True,
last_checked=datetime.now()
)
db_session.add(new_printer)
db_session.commit()
monitor_logger.info(f"✅ Neuer Drucker '{printer_name}' mit Tapo-Steckdose {ip_address} erstellt")
db_session.close()
return True
except Exception as e:
monitor_logger.error(f"❌ Fehler beim Speichern der Tapo-Steckdose {ip_address}: {str(e)}")
try:
db_session.rollback()
db_session.close()
except Exception:
    pass
return False
# Globale Instanz
printer_monitor = PrinterMonitor()
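Eine kurze Nutzungs-Skizze (nicht Teil des Commits) für den typischen Zugriff auf die globale Instanz; angenommen wird, dass der Aufruf innerhalb eines Flask-Request-Kontexts erfolgt, damit der Session-Cache greifen kann:

status = printer_monitor.get_live_printer_status(use_session_cache=True)
for printer_id, info in status.items():
    print(f"{info['name']}: {info['status']} (Steckdose: {info['outlet_state']})")

summary = printer_monitor.get_printer_summary()
print(f"{summary['online']}/{summary['total']} online, {summary['printing']} drucken gerade")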

497
utils/queue_manager.py Normal file

@@ -0,0 +1,497 @@
"""
Queue Manager für die Verwaltung von Druckjobs in Warteschlangen.
Überwacht offline Drucker und aktiviert Jobs automatisch.
"""
import threading
import time
import subprocess
import os
import signal
import atexit
from datetime import datetime
from typing import Dict, Tuple
from models import get_db_session, Job, Printer, User, Notification
from utils.logging_config import get_logger
# Windows-spezifische Imports
if os.name == 'nt':
try:
from utils.windows_fixes import get_windows_thread_manager
except ImportError:
get_windows_thread_manager = None
else:
get_windows_thread_manager = None
# Logger für Queue-Manager
queue_logger = get_logger("queue_manager")
def check_printer_status(ip_address: str, timeout: int = 5) -> Tuple[str, bool]:
"""
Vereinfachte Drucker-Status-Prüfung für den Queue Manager.
Args:
ip_address: IP-Adresse der Drucker-Steckdose
timeout: Timeout in Sekunden (Standard: 5)
Returns:
Tuple[str, bool]: (Status, Aktiv) - Status ist "online" oder "offline", Aktiv ist True/False
"""
if not ip_address or ip_address.strip() == "":
return "offline", False
try:
# Ping-Test um Erreichbarkeit zu prüfen
if os.name == 'nt': # Windows
cmd = ['ping', '-n', '1', '-w', str(timeout * 1000), ip_address.strip()]
else:  # Unix/Linux (unter macOS erwartet -W Millisekunden statt Sekunden)
cmd = ['ping', '-c', '1', '-W', str(timeout), ip_address.strip()]
result = subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=timeout + 1,
encoding='utf-8',
errors='replace'
)
# Wenn Ping erfolgreich ist, als online betrachten
if result.returncode == 0:
queue_logger.debug(f"✅ Drucker {ip_address} ist erreichbar (Ping erfolgreich)")
return "online", True
else:
queue_logger.debug(f"❌ Drucker {ip_address} nicht erreichbar (Ping fehlgeschlagen)")
return "offline", False
except subprocess.TimeoutExpired:
queue_logger.warning(f"⏱️ Ping-Timeout für Drucker {ip_address} nach {timeout} Sekunden")
return "offline", False
except Exception as e:
queue_logger.error(f"❌ Fehler beim Status-Check für Drucker {ip_address}: {str(e)}")
return "offline", False
class PrinterQueueManager:
"""
Verwaltet die Warteschlangen für offline Drucker und überwacht deren Status.
Verbesserte Version mit ordnungsgemäßem Thread-Management für Windows.
"""
def __init__(self, register_signal_handlers: bool = True):
self.is_running = False
self.monitor_thread = None
self.shutdown_event = threading.Event() # Sauberes Shutdown-Signal
self.check_interval = 120 # 2 Minuten zwischen Status-Checks
self.last_status_cache = {} # Cache für letzten bekannten Status
self.notification_cooldown = {} # Verhindert Spam-Benachrichtigungen
self._lock = threading.Lock() # Thread-Sicherheit
self._signal_handlers_registered = False
# Signal-Handler nur registrieren wenn explizit gewünscht
# (Verhindert Interferenzen mit zentralem Shutdown-Manager)
if register_signal_handlers and os.name == 'nt':
self._register_signal_handlers()
def _register_signal_handlers(self):
"""Windows-spezifische Signal-Handler registrieren (nur wenn gewünscht)"""
if self._signal_handlers_registered:
return
try:
# Prüfe ob bereits zentrale Signal-Handler existieren -
# gelingt der Import, übernimmt der zentrale Shutdown-Manager
try:
    from utils.shutdown_manager import is_shutdown_requested  # noqa: F401
    queue_logger.info("🔄 Zentrale Signal-Handler erkannt - deaktiviere lokale Handler")
    return
except ImportError:
    pass  # Kein zentraler Manager verfügbar, verwende lokale Handler
signal.signal(signal.SIGINT, self._signal_handler)
signal.signal(signal.SIGTERM, self._signal_handler)
self._signal_handlers_registered = True
queue_logger.debug("✅ Lokale Signal-Handler für Queue Manager registriert")
except Exception as e:
queue_logger.warning(f"⚠️ Lokale Signal-Handler konnten nicht registriert werden: {e}")
def _signal_handler(self, signum, frame):
"""Signal-Handler für ordnungsgemäßes Shutdown (nur als Fallback)."""
queue_logger.warning(f"🛑 Signal {signum} empfangen - stoppe Queue Manager...")
self.stop()
def start(self):
"""Startet den Queue-Manager mit verbessertem Shutdown-Handling."""
with self._lock:
if self.is_running:
queue_logger.warning("Queue-Manager läuft bereits")
return self
queue_logger.info("🚀 Starte Printer Queue Manager...")
self.is_running = True
self.shutdown_event.clear()
# Monitor-Thread mit Daemon-Flag für automatische Beendigung
self.monitor_thread = threading.Thread(
target=self._monitor_loop,
name="PrinterQueueMonitor",
daemon=True # Automatische Beendigung bei Programm-Ende
)
self.monitor_thread.start()
queue_logger.info("✅ Printer Queue Manager gestartet")
return self
def stop(self):
"""Stoppt den Queue-Manager ordnungsgemäß mit verbessertem Timeout-Handling."""
with self._lock:
if not self.is_running:
queue_logger.debug("Queue-Manager ist bereits gestoppt")
return
queue_logger.info("🔄 Beende Queue Manager...")
self.is_running = False
self.shutdown_event.set()
if self.monitor_thread and self.monitor_thread.is_alive():
queue_logger.debug("⏳ Warte auf Thread-Beendigung...")
# Verbessertes Timeout-Handling
try:
self.monitor_thread.join(timeout=5.0) # Reduziertes Timeout
if self.monitor_thread.is_alive():
    # Daemon-Status kann bei laufenden Threads nicht mehr gesetzt werden
    # (RuntimeError); der Thread wurde bereits als Daemon gestartet und
    # endet daher spätestens mit dem Prozess
    queue_logger.warning("⚠️ Thread konnte nicht in 5s beendet werden - endet als Daemon mit dem Prozess")
else:
queue_logger.info("✅ Monitor-Thread erfolgreich beendet")
except Exception as e:
queue_logger.error(f"❌ Fehler beim Thread-Join: {e}")
self.monitor_thread = None
queue_logger.info("❌ Printer Queue Manager gestoppt")
def _monitor_loop(self):
"""Hauptschleife für die Überwachung der Drucker mit verbessertem Shutdown-Handling."""
queue_logger.info(f"🔄 Queue-Überwachung gestartet (Intervall: {self.check_interval} Sekunden)")
while self.is_running and not self.shutdown_event.is_set():
try:
# Prüfe auf zentrales Shutdown-Signal
try:
from utils.shutdown_manager import is_shutdown_requested
if is_shutdown_requested():
queue_logger.info("🛑 Zentrales Shutdown-Signal empfangen - beende Monitor-Loop")
break
except ImportError:
pass # Kein zentraler Manager verfügbar
self._check_waiting_jobs()
# Verwende Event.wait() statt time.sleep() für unterbrechbares Warten
if self.shutdown_event.wait(timeout=self.check_interval):
# Shutdown-Signal erhalten
queue_logger.info("🛑 Shutdown-Signal empfangen - beende Monitor-Loop")
break
except Exception as e:
queue_logger.error(f"❌ Fehler in Monitor-Schleife: {str(e)}")
# Kürzere Wartezeit bei Fehlern, aber auch unterbrechbar
if self.shutdown_event.wait(timeout=30):
break
queue_logger.info("🔚 Monitor-Loop beendet")
def _check_waiting_jobs(self):
"""Überprüft alle wartenden Jobs und aktiviert sie bei verfügbaren Druckern."""
if self.shutdown_event.is_set():
return
db_session = get_db_session()
try:
# Alle wartenden Jobs abrufen
waiting_jobs = db_session.query(Job).filter(
Job.status == "waiting_for_printer"
).all()
if not waiting_jobs:
return
queue_logger.info(f"🔍 Überprüfe {len(waiting_jobs)} wartende Jobs...")
activated_jobs = []
for job in waiting_jobs:
# Shutdown-Check zwischen Jobs
if self.shutdown_event.is_set():
break
# Drucker-Status prüfen
printer = db_session.get(Printer, job.printer_id)
if not printer:
continue
# Status-Check mit Cache-Optimierung
printer_key = f"printer_{printer.id}"
current_status = None
try:
if printer.plug_ip:
status, active = check_printer_status(printer.plug_ip, timeout=5)
current_status = "online" if (status == "online" and active) else "offline"
else:
current_status = "offline"
except Exception as e:
queue_logger.warning(f"⚠️ Status-Check für Drucker {printer.name} fehlgeschlagen: {str(e)}")
current_status = "offline"
# Prüfen, ob Drucker online geworden ist
last_status = self.last_status_cache.get(printer_key, "offline")
self.last_status_cache[printer_key] = current_status
if current_status == "online" and last_status == "offline":
# Drucker ist online geworden!
queue_logger.info(f"🟢 Drucker {printer.name} ist ONLINE geworden - aktiviere wartende Jobs")
# Job aktivieren
job.status = "scheduled"
printer.status = "available"
printer.active = True
printer.last_checked = datetime.now()
activated_jobs.append({
"job": job,
"printer": printer
})
elif current_status == "online":
# Drucker ist bereits online, Job kann aktiviert werden
job.status = "scheduled"
printer.status = "available"
printer.active = True
printer.last_checked = datetime.now()
activated_jobs.append({
"job": job,
"printer": printer
})
else:
# Drucker bleibt offline
printer.status = "offline"
printer.active = False
printer.last_checked = datetime.now()
# Speichere alle Änderungen
if activated_jobs:
db_session.commit()
queue_logger.info(f"{len(activated_jobs)} Jobs erfolgreich aktiviert")
# Benachrichtigungen versenden (nur wenn nicht im Shutdown)
if not self.shutdown_event.is_set():
for item in activated_jobs:
self._send_job_activation_notification(item["job"], item["printer"])
else:
# Auch offline-Status speichern
db_session.commit()
except Exception as e:
db_session.rollback()
queue_logger.error(f"❌ Fehler beim Überprüfen wartender Jobs: {str(e)}")
finally:
db_session.close()
def _send_job_activation_notification(self, job: Job, printer: Printer):
"""Sendet eine Benachrichtigung, wenn ein Job aktiviert wird."""
if self.shutdown_event.is_set():
return
try:
# Cooldown prüfen (keine Spam-Benachrichtigungen)
cooldown_key = f"job_{job.id}_activated"
now = datetime.now()
if cooldown_key in self.notification_cooldown:
last_notification = self.notification_cooldown[cooldown_key]
if (now - last_notification).total_seconds() < 300: # 5 Minuten Cooldown
return
self.notification_cooldown[cooldown_key] = now
# Benachrichtigung erstellen
db_session = get_db_session()
try:
user = db_session.get(User, job.user_id)
if not user:
return
notification = Notification(
user_id=user.id,
type="job_activated",
payload={
"job_id": job.id,
"job_name": job.name,
"printer_id": printer.id,
"printer_name": printer.name,
"start_time": job.start_at.isoformat() if job.start_at else None,
"message": f"🎉 Gute Nachrichten! Drucker '{printer.name}' ist online. Ihr Job '{job.name}' wurde aktiviert und startet bald."
}
)
db_session.add(notification)
db_session.commit()
queue_logger.info(f"📧 Benachrichtigung für User {user.name} gesendet: Job {job.name} aktiviert")
except Exception as e:
db_session.rollback()
queue_logger.error(f"❌ Fehler beim Erstellen der Benachrichtigung: {str(e)}")
finally:
db_session.close()
except Exception as e:
queue_logger.error(f"❌ Fehler beim Senden der Aktivierungs-Benachrichtigung: {str(e)}")
def get_queue_status(self) -> Dict:
"""Gibt den aktuellen Status der Warteschlangen zurück."""
db_session = get_db_session()
try:
# Wartende Jobs zählen
waiting_jobs = db_session.query(Job).filter(
Job.status == "waiting_for_printer"
).count()
# Offline Drucker mit wartenden Jobs
offline_printers_with_queue = db_session.query(Printer).join(Job).filter(
Printer.status == "offline",
Job.status == "waiting_for_printer"
).distinct().count()
# Online Drucker
online_printers = db_session.query(Printer).filter(
Printer.status == "available"
).count()
total_printers = db_session.query(Printer).count()
return {
"waiting_jobs": waiting_jobs,
"offline_printers_with_queue": offline_printers_with_queue,
"online_printers": online_printers,
"total_printers": total_printers,
"queue_manager_running": self.is_running,
"last_check": datetime.now().isoformat(),
"check_interval_seconds": self.check_interval
}
except Exception as e:
queue_logger.error(f"❌ Fehler beim Abrufen des Queue-Status: {str(e)}")
return {
"error": str(e),
"queue_manager_running": self.is_running
}
finally:
db_session.close()
def is_healthy(self) -> bool:
"""Prüft, ob der Queue Manager ordnungsgemäß läuft."""
return (self.is_running and
self.monitor_thread is not None and
self.monitor_thread.is_alive() and
not self.shutdown_event.is_set())
# Globale Instanz des Queue-Managers
_queue_manager_instance = None
_queue_manager_lock = threading.Lock()
def get_queue_manager() -> PrinterQueueManager:
"""Gibt die globale Instanz des Queue-Managers zurück."""
global _queue_manager_instance
with _queue_manager_lock:
if _queue_manager_instance is None:
_queue_manager_instance = PrinterQueueManager()
return _queue_manager_instance
def start_queue_manager():
"""Startet den globalen Queue-Manager sicher und ohne Signal-Handler-Interferenzen."""
global _queue_manager_instance
with _queue_manager_lock:
if _queue_manager_instance is not None:
queue_logger.warning("Queue-Manager läuft bereits")
return _queue_manager_instance
try:
queue_logger.info("🚀 Initialisiere neuen Queue-Manager...")
# Prüfe ob zentraler Shutdown-Manager verfügbar ist
register_signals = True
try:
    # Gelingt der Import, existiert eine zentrale Shutdown-Verwaltung
    from utils.shutdown_manager import is_shutdown_requested  # noqa: F401
    queue_logger.info("🔄 Zentrale Shutdown-Verwaltung erkannt - deaktiviere lokale Signal-Handler")
    register_signals = False
except ImportError:
    queue_logger.debug("Kein zentraler Shutdown-Manager verfügbar - verwende lokale Signal-Handler")
# Erstelle Queue-Manager ohne Signal-Handler wenn zentraler Manager vorhanden
_queue_manager_instance = PrinterQueueManager(register_signal_handlers=register_signals)
_queue_manager_instance.start()
queue_logger.info("✅ Queue-Manager erfolgreich gestartet")
return _queue_manager_instance
except Exception as e:
queue_logger.error(f"❌ Fehler beim Starten des Queue-Managers: {str(e)}")
_queue_manager_instance = None
raise
def stop_queue_manager():
"""Stoppt den globalen Queue-Manager definitiv und sicher."""
global _queue_manager_instance
with _queue_manager_lock:
if _queue_manager_instance:
try:
queue_logger.info("🔄 Stoppe Queue-Manager...")
# Shutdown-Event setzen
_queue_manager_instance.shutdown_event.set()
# Monitor-Thread beenden
if (_queue_manager_instance.monitor_thread and
_queue_manager_instance.monitor_thread.is_alive()):
queue_logger.info("⏳ Warte auf Monitor-Thread...")
_queue_manager_instance.monitor_thread.join(timeout=5.0)
# Falls der Thread nicht beendet wurde: Daemon-Status ist bei laufenden
# Threads nicht mehr änderbar - der Thread wurde bereits als Daemon
# gestartet und endet mit dem Prozess
if _queue_manager_instance.monitor_thread.is_alive():
    queue_logger.warning("⚠️ Monitor-Thread reagiert nicht - endet als Daemon mit dem Prozess")
# Status auf gestoppt setzen
_queue_manager_instance.is_running = False
# Explizit stop() aufrufen
_queue_manager_instance.stop()
queue_logger.info("✅ Queue-Manager erfolgreich gestoppt")
except Exception as e:
queue_logger.error(f"❌ Fehler beim Stoppen des Queue-Managers: {str(e)}")
finally:
# Instanz definitiv auf None setzen
_queue_manager_instance = None
# Automatisches Cleanup bei Prozess-Ende registrieren
atexit.register(stop_queue_manager)
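Zur Veranschaulichung eine Nutzungs-Skizze (nicht Teil des Commits) für den typischen Lebenszyklus des Queue-Managers:

manager = start_queue_manager()      # startet den Monitor-Thread (thread-sicher, idempotent)
print(manager.get_queue_status())    # z.B. {'waiting_jobs': 0, 'online_printers': 3, ...}
print("healthy:", manager.is_healthy())
stop_queue_manager()                 # wird zusätzlich über atexit beim Prozessende aufgerufen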

233
utils/quick_fix.py Normal file

@@ -0,0 +1,233 @@
#!/usr/bin/env python3
"""
Schnelle Datenbank-Reparatur für kritische Fehler
"""
import sqlite3
import os
import sys
from datetime import datetime
# Pfad zur App hinzufügen
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
try:
from config.settings import DATABASE_PATH
except ImportError:
# Fallback falls Import fehlschlägt
DATABASE_PATH = "database/myp.db"
def quick_fix_database():
"""Behebt die kritischsten Datenbankprobleme sofort"""
print("🔧 Starte schnelle Datenbank-Reparatur...")
if not os.path.exists(DATABASE_PATH):
print(f"❌ Datenbankdatei nicht gefunden: {DATABASE_PATH}")
return False
try:
# Backup erstellen
backup_path = f"{DATABASE_PATH}.emergency_backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
import shutil
shutil.copy2(DATABASE_PATH, backup_path)
print(f"✅ Emergency-Backup erstellt: {backup_path}")
# Verbindung zur Datenbank
conn = sqlite3.connect(DATABASE_PATH)
cursor = conn.cursor()
print("🔧 Repariere Datenbank-Schema...")
# 1. Fehlende Spalte duration_minutes zu guest_requests hinzufügen
try:
cursor.execute("ALTER TABLE guest_requests ADD COLUMN duration_minutes INTEGER")
print("✅ Spalte duration_minutes zu guest_requests hinzugefügt")
except sqlite3.OperationalError as e:
if "duplicate column name" in str(e).lower():
print(" Spalte duration_minutes bereits vorhanden")
else:
print(f"⚠️ Fehler bei duration_minutes: {e}")
# 2. Fehlende Spalten zu users hinzufügen
# Hinweis: SQLite erlaubt bei ALTER TABLE weder UNIQUE-Spalten noch
# nicht-konstante Defaults wie CURRENT_TIMESTAMP - username wird daher
# ohne UNIQUE angelegt und weiter unten per UPDATE befüllt und indiziert
user_columns = [
    ("username", "VARCHAR(100)"),
    ("updated_at", "DATETIME"),
    ("department", "VARCHAR(100)"),
    ("position", "VARCHAR(100)"),
    ("phone", "VARCHAR(50)"),
    ("bio", "TEXT")
]
for column_name, column_def in user_columns:
try:
cursor.execute(f"ALTER TABLE users ADD COLUMN {column_name} {column_def}")
print(f"✅ Spalte {column_name} zu users hinzugefügt")
except sqlite3.OperationalError as e:
if "duplicate column name" in str(e).lower():
print(f" Spalte {column_name} bereits vorhanden")
else:
print(f"⚠️ Fehler bei {column_name}: {e}")
# 3. Fehlende Spalten zu printers hinzufügen
printer_columns = [
("plug_username", "VARCHAR(100) DEFAULT 'admin'"),
("plug_password", "VARCHAR(100) DEFAULT 'admin'"),
("last_checked", "DATETIME")
]
for column_name, column_def in printer_columns:
try:
cursor.execute(f"ALTER TABLE printers ADD COLUMN {column_name} {column_def}")
print(f"✅ Spalte {column_name} zu printers hinzugefügt")
except sqlite3.OperationalError as e:
if "duplicate column name" in str(e).lower():
print(f" Spalte {column_name} bereits vorhanden")
else:
print(f"⚠️ Fehler bei {column_name}: {e}")
# 4. Username für bestehende User setzen (falls NULL)
try:
cursor.execute("UPDATE users SET username = email WHERE username IS NULL")
updated_users = cursor.rowcount
if updated_users > 0:
print(f"✅ Username für {updated_users} Benutzer gesetzt")
except Exception as e:
print(f"⚠️ Fehler beim Setzen der Usernames: {e}")
# 5. Drucker-Daten nachtragen
print("🖨️ Trage Drucker nach...")
# Prüfen ob bereits Drucker vorhanden sind
cursor.execute("SELECT COUNT(*) FROM printers")
printer_count = cursor.fetchone()[0]
if printer_count == 0:
# Standard-Drucker hinzufügen
printers_to_add = [
{
'name': 'Printer 1',
'model': 'P115',
'location': 'Werk 040 - Berlin - TBA',
'ip_address': '192.168.0.100',
'mac_address': '98:25:4A:E1:20:01',
'plug_ip': '192.168.0.100',
'plug_username': 'admin',
'plug_password': 'admin',
'status': 'offline',
'active': 1
},
{
'name': 'Printer 2',
'model': 'P115',
'location': 'Werk 040 - Berlin - TBA',
'ip_address': '192.168.0.101',
'mac_address': '98:25:4A:E1:20:02',
'plug_ip': '192.168.0.101',
'plug_username': 'admin',
'plug_password': 'admin',
'status': 'offline',
'active': 1
},
{
'name': 'Printer 3',
'model': 'P115',
'location': 'Werk 040 - Berlin - TBA',
'ip_address': '192.168.0.102',
'mac_address': '98:25:4A:E1:20:03',
'plug_ip': '192.168.0.102',
'plug_username': 'admin',
'plug_password': 'admin',
'status': 'offline',
'active': 1
},
{
'name': 'Printer 4',
'model': 'P115',
'location': 'Werk 040 - Berlin - TBA',
'ip_address': '192.168.0.103',
'mac_address': '98:25:4A:E1:20:04',
'plug_ip': '192.168.0.103',
'plug_username': 'admin',
'plug_password': 'admin',
'status': 'offline',
'active': 1
},
{
'name': 'Printer 5',
'model': 'P115',
'location': 'Werk 040 - Berlin - TBA',
'ip_address': '192.168.0.104',
'mac_address': '98:25:4A:E1:20:05',
'plug_ip': '192.168.0.104',
'plug_username': 'admin',
'plug_password': 'admin',
'status': 'offline',
'active': 1
},
{
'name': 'Printer 6',
'model': 'P115',
'location': 'Werk 040 - Berlin - TBA',
'ip_address': '192.168.0.106',
'mac_address': '98:25:4A:E1:20:06',
'plug_ip': '192.168.0.106',
'plug_username': 'admin',
'plug_password': 'admin',
'status': 'offline',
'active': 1
}
]
for printer in printers_to_add:
try:
cursor.execute("""
INSERT INTO printers (name, model, location, ip_address, mac_address, plug_ip, plug_username, plug_password, status, active, created_at)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""", (
printer['name'], printer['model'], printer['location'],
printer['ip_address'], printer['mac_address'], printer['plug_ip'],
printer['plug_username'], printer['plug_password'],
printer['status'], printer['active'], datetime.now()
))
print(f"✅ Drucker {printer['name']} hinzugefügt")
except Exception as e:
print(f"⚠️ Fehler beim Hinzufügen von {printer['name']}: {e}")
else:
print(f" {printer_count} Drucker bereits vorhanden")
# 6. Optimierungen
print("🔧 Führe Datenbankoptimierungen durch...")
try:
# Indizes erstellen
indices = [
"CREATE INDEX IF NOT EXISTS idx_users_email ON users(email)",
"CREATE INDEX IF NOT EXISTS idx_users_username ON users(username)",
"CREATE INDEX IF NOT EXISTS idx_jobs_user_id ON jobs(user_id)",
"CREATE INDEX IF NOT EXISTS idx_jobs_printer_id ON jobs(printer_id)",
"CREATE INDEX IF NOT EXISTS idx_jobs_status ON jobs(status)",
"CREATE INDEX IF NOT EXISTS idx_guest_requests_status ON guest_requests(status)"
]
for index_sql in indices:
cursor.execute(index_sql)
# Statistiken aktualisieren
cursor.execute("ANALYZE")
print("✅ Datenbankoptimierungen abgeschlossen")
except Exception as e:
print(f"⚠️ Fehler bei Optimierungen: {e}")
# Änderungen speichern
conn.commit()
conn.close()
print("✅ Schnelle Datenbank-Reparatur erfolgreich abgeschlossen!")
return True
except Exception as e:
print(f"❌ Kritischer Fehler bei der Reparatur: {str(e)}")
return False
if __name__ == "__main__":
quick_fix_database()
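Eine Verifikations-Skizze (nicht Teil des Commits): Nach der Reparatur lässt sich per PRAGMA prüfen, ob die nachgetragenen Spalten und Drucker tatsächlich vorhanden sind.

import sqlite3

conn = sqlite3.connect(DATABASE_PATH)
user_cols = {row[1] for row in conn.execute("PRAGMA table_info(users)")}
assert {"username", "department", "phone"} <= user_cols, "users-Schema unvollständig"
print("Drucker in DB:", conn.execute("SELECT COUNT(*) FROM printers").fetchone()[0])
conn.close()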

244
utils/rate_limiter.py Normal file

@@ -0,0 +1,244 @@
#!/usr/bin/env python3
"""
Rate Limiting System für MYP Platform
Schutz vor API-Missbrauch und DDoS-Attacken
"""
import time
import hashlib
from functools import wraps
from flask import request, jsonify
from typing import Dict
from dataclasses import dataclass
from utils.logging_config import get_logger
# Hinweis: "redis" wird bewusst erst in RateLimiter.__init__ importiert,
# damit das Modul auch ohne installiertes redis-Paket lädt (In-Memory-Fallback)
logger = get_logger("security")
@dataclass
class RateLimit:
"""Konfiguration für Rate-Limiting-Regeln"""
requests: int # Anzahl erlaubter Anfragen
per: int # Zeitraum in Sekunden
message: str # Fehlermeldung bei Überschreitung
# Rate-Limiting-Konfiguration
RATE_LIMITS = {
# API-Endpunkte
'api_general': RateLimit(100, 300, "Zu viele API-Anfragen. Versuchen Sie es in 5 Minuten erneut."),
'api_auth': RateLimit(10, 300, "Zu viele Anmeldeversuche. Versuchen Sie es in 5 Minuten erneut."),
'api_upload': RateLimit(20, 3600, "Zu viele Upload-Anfragen. Versuchen Sie es in einer Stunde erneut."),
'api_admin': RateLimit(200, 300, "Zu viele Admin-Anfragen. Versuchen Sie es in 5 Minuten erneut."),
# Spezielle Endpunkte
'printer_status': RateLimit(300, 300, "Zu viele Drucker-Status-Anfragen."),
'job_creation': RateLimit(50, 3600, "Zu viele Job-Erstellungen. Versuchen Sie es in einer Stunde erneut."),
# Drucker-Monitor Rate-Limits (gelockert für Live-Updates)
'printer_monitor_live': RateLimit(30, 60, "Zu viele Live-Status-Anfragen. Versuchen Sie es in einer Minute erneut."),
'printer_monitor_summary': RateLimit(60, 60, "Zu viele Zusammenfassungs-Anfragen. Versuchen Sie es in einer Minute erneut."),
'printer_monitor_cache': RateLimit(10, 120, "Zu viele Cache-Lösch-Anfragen. Versuchen Sie es in 2 Minuten erneut."),
'printer_monitor_init': RateLimit(5, 300, "Zu viele Initialisierungs-Anfragen. Versuchen Sie es in 5 Minuten erneut."),
# Sicherheitskritische Endpunkte
'password_reset': RateLimit(3, 3600, "Zu viele Passwort-Reset-Anfragen. Versuchen Sie es in einer Stunde erneut."),
'user_creation': RateLimit(10, 3600, "Zu viele Benutzer-Erstellungen.")
}
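# Eigene Limits lassen sich hier ergänzen (Skizze, nicht Teil des Commits):
# RATE_LIMITS['report_export'] = RateLimit(5, 600, "Zu viele Export-Anfragen.")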
class RateLimiter:
"""
In-Memory Rate Limiter mit optionaler Redis-Unterstützung
"""
def __init__(self, use_redis: bool = False, redis_url: str = None):
self.use_redis = use_redis
self.redis_client = None
self.memory_store: Dict[str, Dict] = {}
if use_redis and redis_url:
try:
import redis
self.redis_client = redis.from_url(redis_url, decode_responses=True)
logger.info("✅ Redis-basiertes Rate Limiting aktiviert")
except ImportError:
logger.warning("⚠️ Redis nicht verfügbar, verwende In-Memory Rate Limiting")
self.use_redis = False
except Exception as e:
logger.error(f"❌ Redis-Verbindung fehlgeschlagen: {e}")
self.use_redis = False
def _get_client_id(self) -> str:
"""
Generiert eine eindeutige Client-ID basierend auf IP und User-Agent
"""
# X-Forwarded-For kann eine Kette von IPs enthalten - erste (Client-)IP nutzen
forwarded_for = request.environ.get('HTTP_X_FORWARDED_FOR', '')
ip = forwarded_for.split(',')[0].strip() if forwarded_for else request.remote_addr
user_agent = request.headers.get('User-Agent', '')
# Hash für Anonymisierung
client_string = f"{ip}:{user_agent}"
return hashlib.sha256(client_string.encode()).hexdigest()[:16]
def _get_key(self, limit_type: str, client_id: str) -> str:
"""Erstellt Redis/Memory-Key für Rate-Limiting"""
return f"rate_limit:{limit_type}:{client_id}"
def _get_current_requests(self, key: str, window_start: int) -> int:
"""Holt aktuelle Anfragen-Anzahl"""
if self.use_redis and self.redis_client:
try:
# Redis-basierte Implementierung
pipe = self.redis_client.pipeline()
pipe.zremrangebyscore(key, 0, window_start)
pipe.zcard(key)
_, count = pipe.execute()
return count
except Exception as e:
logger.error(f"Redis-Fehler: {e}, fallback zu Memory")
self.use_redis = False
# In-Memory Implementierung
if key not in self.memory_store:
self.memory_store[key] = {'requests': [], 'last_cleanup': time.time()}
# Alte Einträge bereinigen
current_time = time.time()
data = self.memory_store[key]
data['requests'] = [req_time for req_time in data['requests'] if req_time > window_start]
return len(data['requests'])
def _add_request(self, key: str, current_time: int, expire_time: int):
"""Fügt neue Anfrage hinzu"""
if self.use_redis and self.redis_client:
try:
pipe = self.redis_client.pipeline()
pipe.zadd(key, {str(current_time): current_time})
pipe.expire(key, expire_time)
pipe.execute()
return
except Exception as e:
logger.error(f"Redis-Fehler: {e}, fallback zu Memory")
self.use_redis = False
# In-Memory Implementierung
if key not in self.memory_store:
self.memory_store[key] = {'requests': [], 'last_cleanup': time.time()}
self.memory_store[key]['requests'].append(current_time)
def is_allowed(self, limit_type: str) -> tuple[bool, Dict]:
"""
Prüft ob eine Anfrage erlaubt ist
Returns:
(is_allowed, info_dict)
"""
if limit_type not in RATE_LIMITS:
return True, {}
rate_limit = RATE_LIMITS[limit_type]
client_id = self._get_client_id()
key = self._get_key(limit_type, client_id)
current_time = int(time.time())
window_start = current_time - rate_limit.per
# Aktuelle Anfragen zählen
current_requests = self._get_current_requests(key, window_start)
# Limite prüfen
if current_requests >= rate_limit.requests:
logger.warning(f"🚨 Rate limit exceeded: {limit_type} für Client {client_id[:8]}...")
return False, {
'limit': rate_limit.requests,
'remaining': 0,
'reset_time': current_time + rate_limit.per,
'message': rate_limit.message
}
# Anfrage hinzufügen
self._add_request(key, current_time, rate_limit.per)
return True, {
'limit': rate_limit.requests,
'remaining': rate_limit.requests - current_requests - 1,
'reset_time': current_time + rate_limit.per
}
def cleanup_memory(self):
"""Bereinigt alte In-Memory-Einträge"""
if self.use_redis:
return
current_time = time.time()
keys_to_delete = []
for key, data in self.memory_store.items():
# Bereinige alle Einträge älter als 24 Stunden
if current_time - data.get('last_cleanup', 0) > 86400:
keys_to_delete.append(key)
for key in keys_to_delete:
del self.memory_store[key]
# Globale Rate-Limiter-Instanz
rate_limiter = RateLimiter()
def limit_requests(limit_type: str):
"""
Decorator für Rate-Limiting von API-Endpunkten
Args:
limit_type: Art des Limits (siehe RATE_LIMITS)
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
# Rate-Limiting prüfen
is_allowed, info = rate_limiter.is_allowed(limit_type)
if not is_allowed:
response = jsonify({
'error': 'Rate limit exceeded',
'message': info['message'],
'retry_after': info['reset_time'] - int(time.time())
})
response.status_code = 429
response.headers['Retry-After'] = str(info['reset_time'] - int(time.time()))
response.headers['X-RateLimit-Limit'] = str(info['limit'])
response.headers['X-RateLimit-Remaining'] = str(info['remaining'])
response.headers['X-RateLimit-Reset'] = str(info['reset_time'])
return response
# Rate-Limiting-Headers zu Response hinzufügen
response = f(*args, **kwargs)
if hasattr(response, 'headers'):
response.headers['X-RateLimit-Limit'] = str(info['limit'])
response.headers['X-RateLimit-Remaining'] = str(info['remaining'])
response.headers['X-RateLimit-Reset'] = str(info['reset_time'])
return response
return wrapper
return decorator
def get_client_info() -> Dict:
"""
Gibt Client-Informationen für Rate-Limiting zurück
"""
client_id = rate_limiter._get_client_id()
forwarded_for = request.environ.get('HTTP_X_FORWARDED_FOR', '')
ip = forwarded_for.split(',')[0].strip() if forwarded_for else request.remote_addr
return {
'client_id': client_id,
'ip_address': ip,
'user_agent': request.headers.get('User-Agent', ''),
'timestamp': int(time.time())
}
# Maintenance-Task für Memory-Cleanup
def cleanup_rate_limiter():
"""Periodische Bereinigung des Rate-Limiters"""
rate_limiter.cleanup_memory()
logger.debug("🧹 Rate-Limiter Memory bereinigt")
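Abschließend eine Nutzungs-Skizze (nicht Teil des Commits): Der Decorator wird direkt an Flask-Endpunkte gehängt; Route und Endpunktname sind hier frei gewählt.

from flask import Flask

app = Flask(__name__)

@app.route("/api/printers/status")
@limit_requests("printer_status")  # max. 300 Anfragen pro 300 Sekunden (siehe RATE_LIMITS)
def printers_status():
    return jsonify({"status": "ok"})

# Bei Überschreitung antwortet der Decorator mit HTTP 429 samt Retry-After- und
# X-RateLimit-Headern; andernfalls werden die Limit-Header an die Antwort angehängt.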

1161
utils/realtime_dashboard.py Normal file

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff