This repository has been archived on 2026-02-15. You can view files and clone it, but you cannot open issues, create pull requests, or push commits.
Files
breakpilot-pwa/backend/tests/test_llm_gateway/test_config.py
Benjamin Admin 21a844cb8a fix: Restore all files lost during destructive rebase
A previous `git pull --rebase origin main` dropped 177 local commits,
losing 3400+ files across admin-v2, backend, studio-v2, website,
klausur-service, and many other services. The partial restore attempt
(660295e2) only recovered some files.

This commit restores all missing files from pre-rebase ref 98933f5e
while preserving post-rebase additions (night-scheduler, night-mode UI,
NightModeWidget dashboard integration).

Restored features include:
- AI Module Sidebar (FAB), OCR Labeling, OCR Compare
- GPU Dashboard, RAG Pipeline, Magic Help
- Klausur-Korrektur (8 files), Abitur-Archiv (5+ files)
- Companion, Zeugnisse-Crawler, Screen Flow
- Full backend, studio-v2, website, klausur-service
- All compliance SDKs, agent-core, voice-service
- CI/CD configs, documentation, scripts

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-09 09:51:32 +01:00

176 lines
5.9 KiB
Python

"""
Tests für LLM Gateway Config.
"""
import pytest
import os
from unittest.mock import patch
from llm_gateway.config import (
GatewayConfig,
LLMBackendConfig,
load_config,
get_config,
)
class TestGatewayConfig:
    """Unit tests for the GatewayConfig dataclass."""

    def test_default_values(self):
        """A bare GatewayConfig() carries the documented defaults."""
        cfg = GatewayConfig()
        # Listen address/port defaults for the gateway service.
        assert cfg.host == "0.0.0.0"
        assert cfg.port == 8002
        assert cfg.debug is False
        # Rate limiting and logging defaults.
        assert cfg.rate_limit_requests_per_minute == 60
        assert cfg.log_level == "INFO"

    def test_custom_values(self):
        """Explicit constructor arguments override every default."""
        cfg = GatewayConfig(
            host="127.0.0.1",
            port=9000,
            debug=True,
            rate_limit_requests_per_minute=100,
        )
        assert (cfg.host, cfg.port) == ("127.0.0.1", 9000)
        assert cfg.debug is True
        assert cfg.rate_limit_requests_per_minute == 100
class TestLLMBackendConfig:
    """Unit tests for the LLMBackendConfig dataclass."""

    def test_minimal_config(self):
        """Only name and base_url are required; the rest fall back to defaults."""
        backend = LLMBackendConfig(
            name="test",
            base_url="http://localhost:8000",
        )
        assert (backend.name, backend.base_url) == (
            "test",
            "http://localhost:8000",
        )
        # Optional fields: no API key by default, backend enabled by default.
        assert backend.api_key is None
        assert backend.enabled is True

    def test_full_config(self):
        """All optional fields are stored as given."""
        backend = LLMBackendConfig(
            name="vllm",
            base_url="http://gpu-server:8000",
            api_key="secret-key",
            default_model="llama-3.1-8b",
            timeout=180,
            enabled=True,
        )
        assert backend.api_key == "secret-key"
        assert backend.default_model == "llama-3.1-8b"
        assert backend.timeout == 180
class TestLoadConfig:
    """Unit tests for the load_config() function (environment-driven)."""

    @staticmethod
    def _load(env):
        """Run load_config() with *env* as the entire process environment.

        clear=True ensures no variable from the real environment leaks in,
        so each test sees exactly the variables it declares.
        """
        with patch.dict(os.environ, env, clear=True):
            return load_config()

    def test_load_config_defaults(self):
        """An empty environment yields the built-in defaults."""
        cfg = self._load({})
        assert cfg.host == "0.0.0.0"
        assert cfg.port == 8002
        assert cfg.debug is False

    def test_load_config_with_env_vars(self):
        """LLM_GATEWAY_* variables override the core settings."""
        cfg = self._load(
            {
                "LLM_GATEWAY_HOST": "127.0.0.1",
                "LLM_GATEWAY_PORT": "9000",
                "LLM_GATEWAY_DEBUG": "true",
                "LLM_RATE_LIMIT_RPM": "120",
                "LLM_LOG_LEVEL": "DEBUG",
            }
        )
        assert cfg.host == "127.0.0.1"
        # String env values are parsed into their typed counterparts.
        assert cfg.port == 9000
        assert cfg.debug is True
        assert cfg.rate_limit_requests_per_minute == 120
        assert cfg.log_level == "DEBUG"

    def test_load_config_ollama_backend(self):
        """OLLAMA_* variables populate the ollama backend config."""
        cfg = self._load(
            {
                "OLLAMA_BASE_URL": "http://localhost:11434",
                "OLLAMA_DEFAULT_MODEL": "mistral:7b",
                "OLLAMA_TIMEOUT": "60",
                "OLLAMA_ENABLED": "true",
            }
        )
        assert cfg.ollama is not None
        assert cfg.ollama.base_url == "http://localhost:11434"
        assert cfg.ollama.default_model == "mistral:7b"
        assert cfg.ollama.timeout == 60
        assert cfg.ollama.enabled is True

    def test_load_config_vllm_backend(self):
        """VLLM_* variables populate the vllm backend config."""
        cfg = self._load(
            {
                "VLLM_BASE_URL": "http://gpu-server:8000",
                "VLLM_API_KEY": "secret-key",
                "VLLM_DEFAULT_MODEL": "meta-llama/Llama-3.1-8B-Instruct",
                "VLLM_ENABLED": "true",
            }
        )
        assert cfg.vllm is not None
        assert cfg.vllm.base_url == "http://gpu-server:8000"
        assert cfg.vllm.api_key == "secret-key"
        assert cfg.vllm.enabled is True

    def test_load_config_anthropic_backend(self):
        """ANTHROPIC_* variables populate the anthropic backend config."""
        cfg = self._load(
            {
                "ANTHROPIC_API_KEY": "sk-ant-xxx",
                "ANTHROPIC_DEFAULT_MODEL": "claude-3-5-sonnet-20241022",
                "ANTHROPIC_ENABLED": "true",
            }
        )
        assert cfg.anthropic is not None
        assert cfg.anthropic.api_key == "sk-ant-xxx"
        assert cfg.anthropic.default_model == "claude-3-5-sonnet-20241022"
        assert cfg.anthropic.enabled is True

    def test_load_config_no_anthropic_without_key(self):
        """Without ANTHROPIC_API_KEY the anthropic backend stays unset."""
        assert self._load({}).anthropic is None

    def test_load_config_backend_priority(self):
        """LLM_BACKEND_PRIORITY is split on commas into an ordered list."""
        cfg = self._load({"LLM_BACKEND_PRIORITY": "vllm,anthropic,ollama"})
        assert cfg.backend_priority == ["vllm", "anthropic", "ollama"]

    def test_load_config_api_keys(self):
        """LLM_API_KEYS is split on commas into a list of keys."""
        cfg = self._load({"LLM_API_KEYS": "key1,key2,key3"})
        assert cfg.api_keys == ["key1", "key2", "key3"]

    def test_load_config_jwt_secret(self):
        """JWT_SECRET is passed through verbatim."""
        cfg = self._load({"JWT_SECRET": "my-secret-key"})
        assert cfg.jwt_secret == "my-secret-key"