#!/usr/bin/env python3
"""
Automatic test protocol generator for the MYP backend
======================================================
Runs systematic tests and uses the Anthropic API to generate a
compact, professional test protocol (1-2 pages).
Created for: Mercedes-Benz project work
IHK-compliant: Fachinformatiker Systemintegration
"""
import os
import sys
import time
import json
import subprocess
import traceback
from datetime import datetime
from pathlib import Path
from typing import Tuple

import requests

# Anthropic API integration. The key is read from the environment rather than
# hardcoded, so credentials never end up in version control.
API_KEY = os.getenv("ANTHROPIC_API_KEY", "")
ANTHROPIC_API_URL = "https://api.anthropic.com/v1/messages"
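
# Minimal setup sketch (assumes a POSIX shell; the variable name matches the
# lookup above):
#     export ANTHROPIC_API_KEY="sk-ant-..."
#     python test_protocol_generator.py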


class TestProtocolGenerator:
    def __init__(self):
        self.start_time = datetime.now()
        self.test_results = {}
        self.detailed_logs = []
        self.summary_stats = {
            'total_tests': 0,
            'passed': 0,
            'failed': 0,
            'warnings': 0,
            'critical_issues': []
        }
        # Make sure we operate in the directory this script resides in
        # (assumed to be the backend directory)
        self.backend_dir = Path(__file__).parent
        os.chdir(self.backend_dir)
        print("🚀 MYP Backend Test Protocol Generator")
        print("=" * 60)
        print(f"Start: {self.start_time.strftime('%d.%m.%Y %H:%M:%S')}")
        print(f"Directory: {self.backend_dir}")
        print("=" * 60)

    def log_test(self, test_name: str, status: str, details: str, execution_time: float = 0):
        """Logs a single test with its details."""
        self.test_results[test_name] = {
            'status': status,
            'details': details,
            'execution_time': execution_time,
            'timestamp': datetime.now().isoformat()
        }
        self.summary_stats['total_tests'] += 1
        if status == 'PASSED':
            self.summary_stats['passed'] += 1
            icon = "✅"
        elif status == 'FAILED':
            self.summary_stats['failed'] += 1
            icon = "❌"
            if "kritisch" in details.lower() or "critical" in details.lower():
                self.summary_stats['critical_issues'].append(test_name)
        elif status == 'WARNING':
            self.summary_stats['warnings'] += 1
            icon = "⚠️"
        else:
            icon = "ℹ️"
        log_entry = f"{icon} {test_name}: {status}"
        if execution_time > 0:
            log_entry += f" ({execution_time:.2f}s)"
        print(log_entry)
        self.detailed_logs.append(f"{log_entry}\n Details: {details}")
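
    # Illustrative console output from log_test (values are examples only):
    #     ✅ Syntax_app_cleaned: PASSED (0.21s)
    #     ❌ Import_app: FAILED (1.05s)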

    def run_command(self, command: str, test_name: str, expect_success: bool = True) -> Tuple[bool, str, float]:
        """Runs a shell command and measures its execution time."""
        start = time.time()
        try:
            result = subprocess.run(
                command,
                shell=True,
                capture_output=True,
                text=True,
                timeout=30
            )
            execution_time = time.time() - start
            success = (result.returncode == 0) if expect_success else (result.returncode != 0)
            output = result.stdout + result.stderr
            return success, output, execution_time
        except subprocess.TimeoutExpired:
            execution_time = time.time() - start
            return False, "Timeout after 30s", execution_time
        except Exception as e:
            execution_time = time.time() - start
            return False, f"Exception: {str(e)}", execution_time

    def test_syntax_validation(self):
        """Test 1: syntax validation of both app versions."""
        print("\n🔍 Test 1: Syntax validation")
        # Check app_cleaned.py
        success, output, exec_time = self.run_command(
            "python -m py_compile app_cleaned.py",
            "Syntax app_cleaned.py"
        )
        if success:
            self.log_test(
                "Syntax_app_cleaned",
                "PASSED",
                "No syntax errors detected",
                exec_time
            )
        else:
            self.log_test(
                "Syntax_app_cleaned",
                "FAILED",
                f"Syntax error: {output}",
                exec_time
            )
        # Check app.py
        success, output, exec_time = self.run_command(
            "python -m py_compile app.py",
            "Syntax app.py"
        )
        if success:
            self.log_test(
                "Syntax_app",
                "PASSED",
                "Compilation successful (syntax correct)",
                exec_time
            )
        else:
            self.log_test(
                "Syntax_app",
                "FAILED",
                f"Syntax error: {output}",
                exec_time
            )
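
    # py_compile only byte-compiles the files: it catches syntax errors but
    # not import-time or runtime failures, which the import tests below cover.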

    def test_import_functionality(self):
        """Test 2: import functionality."""
        print("\n🔍 Test 2: Import tests")
        # Import app_cleaned.py
        success, output, exec_time = self.run_command(
            'python -c "import app_cleaned; print(\'SUCCESS\')"',
            "Import app_cleaned"
        )
        if success and "SUCCESS" in output:
            self.log_test(
                "Import_app_cleaned",
                "PASSED",
                "Imported successfully without errors",
                exec_time
            )
        else:
            self.log_test(
                "Import_app_cleaned",
                "FAILED",
                f"Import failed: {output}",
                exec_time
            )
        # Import app.py (a SystemExit is expected here)
        success, output, exec_time = self.run_command(
            'python -c "import app; print(\'SUCCESS\')" 2>&1',
            "Import app.py",
            expect_success=False  # failure is the expected outcome
        )
        # With expect_success=False, success=True means the import failed
        # as expected; an unexpectedly working import lands in the else branch.
        if "SystemExit" in output or success:
            self.log_test(
                "Import_app",
                "FAILED",
                "CRITICAL: SystemExit on import - shutdown manager problem",
                exec_time
            )
        else:
            self.log_test(
                "Import_app",
                "WARNING",
                "Unexpected success or a different error",
                exec_time
            )
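
    # Note: importing app.py is deliberately a negative test. The known
    # shutdown-manager defect raises SystemExit during import, so the logged
    # "FAILED" status documents that defect rather than a broken test run.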

    def test_models_and_blueprints(self):
        """Test 3: model and blueprint imports."""
        print("\n🔍 Test 3: Models and blueprints")
        # Database models
        success, output, exec_time = self.run_command(
            'python -c "from models import User, Printer, Job; print(\'Models OK\')"',
            "Models Import"
        )
        if success and "Models OK" in output:
            self.log_test(
                "Models_Import",
                "PASSED",
                "All database models imported successfully",
                exec_time
            )
        else:
            self.log_test(
                "Models_Import",
                "FAILED",
                f"Model import failed: {output}",
                exec_time
            )
        # Blueprints
        success, output, exec_time = self.run_command(
            'python -c "from blueprints.auth import auth_blueprint; print(\'Auth OK\')"',
            "Blueprint Import"
        )
        if success and "Auth OK" in output:
            self.log_test(
                "Blueprint_Import",
                "PASSED",
                "Blueprint architecture functional",
                exec_time
            )
        else:
            self.log_test(
                "Blueprint_Import",
                "FAILED",
                f"Blueprint import failed: {output}",
                exec_time
            )

    def test_flask_app_creation(self):
        """Test 4: Flask app creation."""
        print("\n🔍 Test 4: Flask app object creation")
        success, output, exec_time = self.run_command(
            'python -c "from app_cleaned import app; print(\'Flask App:\', type(app).__name__)"',
            "Flask App Creation"
        )
        if success and "Flask App: Flask" in output:
            self.log_test(
                "Flask_App_Creation",
                "PASSED",
                "Flask app object created successfully",
                exec_time
            )
        else:
            self.log_test(
                "Flask_App_Creation",
                "FAILED",
                f"Flask app creation failed: {output}",
                exec_time
            )

    def test_dependency_versions(self):
        """Test 5: check dependency versions."""
        print("\n🔍 Test 5: Dependency validation")
        dependencies = [
            ('Flask', 'import flask; print(flask.__version__)'),
            ('SQLAlchemy', 'import sqlalchemy; print(sqlalchemy.__version__)'),
            ('Python', 'import sys; print(sys.version)')
        ]
        for dep_name, command in dependencies:
            success, output, exec_time = self.run_command(
                f'python -c "{command}"',
                f"{dep_name} Version"
            )
            if success:
                version = output.strip()
                self.log_test(
                    f"Dependency_{dep_name}",
                    "PASSED",
                    f"Version: {version}",
                    exec_time
                )
            else:
                self.log_test(
                    f"Dependency_{dep_name}",
                    "FAILED",
                    f"Dependency not available: {output}",
                    exec_time
                )
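
    # Further packages can be checked by extending the list above, e.g.:
    #     ('Requests', 'import requests; print(requests.__version__)'),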

    def analyze_code_metrics(self):
        """Test 6: analyze code metrics."""
        print("\n🔍 Test 6: Code analysis")
        try:
            # Count lines in both app versions (context managers close the files)
            with open('app.py', 'r', encoding='utf-8') as f:
                app_lines = len(f.readlines())
            with open('app_cleaned.py', 'r', encoding='utf-8') as f:
                cleaned_lines = len(f.readlines())
            reduction_percent = ((app_lines - cleaned_lines) / app_lines) * 100
            self.log_test(
                "Code_Metrics",
                "PASSED",
                f"app.py: {app_lines} lines, app_cleaned.py: {cleaned_lines} lines. Reduction: {reduction_percent:.1f}%",
                0
            )
            # Count print statements in pure Python (grep is not available on
            # the Windows environment this script targets)
            start = time.time()
            with open('app_cleaned.py', 'r', encoding='utf-8') as f:
                print_count = sum(1 for line in f if 'print(' in line)
            self.log_test(
                "Print_Statements",
                "WARNING",
                f"{print_count} print() statements found in app_cleaned.py - should be replaced by the logger",
                time.time() - start
            )
        except Exception as e:
            self.log_test(
                "Code_Metrics",
                "FAILED",
                f"Code analysis failed: {str(e)}",
                0
            )

    def generate_ai_summary(self) -> str:
        """Generates a compact summary via the Anthropic API."""
        print("\n🤖 Generating AI summary...")
        # Prepare the test data for the model
        test_data = {
            "test_results": self.test_results,
            "summary_stats": self.summary_stats,
            "execution_time": (datetime.now() - self.start_time).total_seconds(),
            "environment": {
                "os": "Windows 10",
                "python_version": "3.13.3",
                "backend_path": str(self.backend_dir)
            }
        }
        prompt = f"""
You are an expert in software quality and IHK-compliant documentation.
Analyze these backend test results and write a professional, compact test protocol for a Mercedes-Benz project (max. 1-2 pages).
TEST DATA:
{json.dumps(test_data, indent=2, default=str)}
REQUIREMENTS:
- IHK-compliant for Fachinformatiker Systemintegration
- Professional and precise
- Focus on critical findings
- Clear recommendations for action
- German language
- Structure: summary, test results, problems, recommendations
Write a concise protocol that highlights the key findings:
1. app_cleaned.py is production-ready
2. app.py has critical problems (SystemExit)
3. Concrete improvement suggestions
"""
        try:
            headers = {
                "x-api-key": API_KEY,
                "content-type": "application/json",
                "anthropic-version": "2023-06-01"
            }
            data = {
                "model": "claude-3-sonnet-20240229",
                "max_tokens": 4000,
                "messages": [
                    {
                        "role": "user",
                        "content": prompt
                    }
                ]
            }
            response = requests.post(
                ANTHROPIC_API_URL,
                headers=headers,
                json=data,
                timeout=60
            )
            if response.status_code == 200:
                result = response.json()
                ai_summary = result['content'][0]['text']
                print("✅ AI summary generated")
                return ai_summary
            else:
                error_msg = f"API Error {response.status_code}: {response.text}"
                print(f"❌ AI generation failed: {error_msg}")
                return self.generate_fallback_summary()
        except Exception as e:
            print(f"❌ AI API error: {str(e)}")
            return self.generate_fallback_summary()
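
    # For reference, a successful Messages API response is shaped roughly as
    #     {"content": [{"type": "text", "text": "<protocol>"}], ...}
    # which is why generate_ai_summary() reads result['content'][0]['text'].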

    def generate_fallback_summary(self) -> str:
        """Fallback summary used when the AI is unavailable."""
        critical = "\n".join(f"- {issue}" for issue in self.summary_stats['critical_issues'])
        return f"""
# BACKEND TEST PROTOCOL (fallback)
## Mercedes-Benz 3D Printing Management System
**Date:** {datetime.now().strftime('%d.%m.%Y %H:%M')}
**Tests:** {self.summary_stats['total_tests']} executed
**Duration:** {(datetime.now() - self.start_time).total_seconds():.1f}s
### RESULTS
✅ Passed: {self.summary_stats['passed']}
❌ Failed: {self.summary_stats['failed']}
⚠️ Warnings: {self.summary_stats['warnings']}
### CRITICAL PROBLEMS
{critical}
### RECOMMENDATION
Migrate to app_cleaned.py for production use.
"""

    def save_results(self, ai_summary: str):
        """Saves the results in several formats."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        # Make sure the output directory exists
        Path("docs").mkdir(exist_ok=True)
        # Save the AI summary
        summary_file = f"docs/Testprotokoll_Kompakt_{timestamp}.md"
        with open(summary_file, 'w', encoding='utf-8') as f:
            f.write(ai_summary)
        # Save the detailed raw data
        raw_data_file = f"docs/Testprotokoll_Raw_{timestamp}.json"
        with open(raw_data_file, 'w', encoding='utf-8') as f:
            json.dump({
                "test_results": self.test_results,
                "summary_stats": self.summary_stats,
                "detailed_logs": self.detailed_logs,
                "execution_info": {
                    "start_time": self.start_time.isoformat(),
                    "end_time": datetime.now().isoformat(),
                    "duration_seconds": (datetime.now() - self.start_time).total_seconds()
                }
            }, f, indent=2, default=str, ensure_ascii=False)
        print("\n📄 Results saved:")
        print(f" 📋 Compact: {summary_file}")
        print(f" 📊 Raw data: {raw_data_file}")
        return summary_file, raw_data_file
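
    # The raw JSON file preserves every log entry and timing value, so the
    # compact protocol can be audited or regenerated later without rerunning
    # the tests.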

    def run_all_tests(self):
        """Runs the complete test suite."""
        try:
            self.test_syntax_validation()
            self.test_import_functionality()
            self.test_models_and_blueprints()
            self.test_flask_app_creation()
            self.test_dependency_versions()
            self.analyze_code_metrics()
            print(f"\n{'=' * 60}")
            print("📊 TEST SUMMARY")
            print(f"{'=' * 60}")
            print(f"✅ Passed: {self.summary_stats['passed']}")
            print(f"❌ Failed: {self.summary_stats['failed']}")
            print(f"⚠️ Warnings: {self.summary_stats['warnings']}")
            print(f"🕒 Total duration: {(datetime.now() - self.start_time).total_seconds():.1f}s")
            if self.summary_stats['critical_issues']:
                print("\n🚨 CRITICAL PROBLEMS:")
                for issue in self.summary_stats['critical_issues']:
                    print(f" - {issue}")
            # Generate the AI summary
            ai_summary = self.generate_ai_summary()
            # Save the results
            summary_file, raw_data_file = self.save_results(ai_summary)
            print("\n🎉 Test protocol created successfully!")
            print(f"📖 Read: {summary_file}")
            return True
        except KeyboardInterrupt:
            print("\n⚠️ Tests interrupted by the user")
            return False
        except Exception as e:
            print(f"\n❌ Critical error: {str(e)}")
            traceback.print_exc()
            return False


def main():
    """Entry point."""
    print("🔧 Initializing test environment...")
    # Check that we are running inside the backend directory
    if not Path("app_cleaned.py").exists():
        print("❌ Error: app_cleaned.py not found!")
        print("   Please run this script from the backend/ directory")
        sys.exit(1)
    # Create and run the test generator
    generator = TestProtocolGenerator()
    success = generator.run_all_tests()
    if success:
        print("\n✅ All tests completed - protocol generated")
        sys.exit(0)
    else:
        print("\n❌ Tests did not complete successfully")
        sys.exit(1)


if __name__ == "__main__":
    main()
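
# Exit codes: 0 = all tests ran and the protocol was generated;
# 1 = app_cleaned.py missing, user interrupt, or an unexpected error.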