Some checks failed
Tests / Go Tests (push) Has been cancelled
Tests / Python Tests (push) Has been cancelled
Tests / Integration Tests (push) Has been cancelled
Tests / Go Lint (push) Has been cancelled
Tests / Python Lint (push) Has been cancelled
Tests / Security Scan (push) Has been cancelled
Tests / All Checks Passed (push) Has been cancelled
Security Scanning / Secret Scanning (push) Has been cancelled
Security Scanning / Dependency Vulnerability Scan (push) Has been cancelled
Security Scanning / Go Security Scan (push) Has been cancelled
Security Scanning / Python Security Scan (push) Has been cancelled
Security Scanning / Node.js Security Scan (push) Has been cancelled
Security Scanning / Docker Image Security (push) Has been cancelled
Security Scanning / Security Summary (push) Has been cancelled
CI/CD Pipeline / Go Tests (push) Has been cancelled
CI/CD Pipeline / Python Tests (push) Has been cancelled
CI/CD Pipeline / Website Tests (push) Has been cancelled
CI/CD Pipeline / Linting (push) Has been cancelled
CI/CD Pipeline / Security Scan (push) Has been cancelled
CI/CD Pipeline / Docker Build & Push (push) Has been cancelled
CI/CD Pipeline / Integration Tests (push) Has been cancelled
CI/CD Pipeline / Deploy to Staging (push) Has been cancelled
CI/CD Pipeline / Deploy to Production (push) Has been cancelled
CI/CD Pipeline / CI Summary (push) Has been cancelled
ci/woodpecker/manual/build-ci-image Pipeline was successful
ci/woodpecker/manual/main Pipeline failed
All services: admin-v2, studio-v2, website, ai-compliance-sdk, consent-service, klausur-service, voice-service, and infrastructure. Large PDFs and compiled binaries excluded via .gitignore.
176 lines
5.9 KiB
Python
176 lines
5.9 KiB
Python
"""
|
|
Tests für LLM Gateway Config.
|
|
"""
|
|
|
|
import pytest
|
|
import os
|
|
from unittest.mock import patch
|
|
from llm_gateway.config import (
|
|
GatewayConfig,
|
|
LLMBackendConfig,
|
|
load_config,
|
|
get_config,
|
|
)
|
|
|
|
|
|
class TestGatewayConfig:
    """Tests for the GatewayConfig dataclass."""

    def test_default_values(self):
        """A bare GatewayConfig carries the documented defaults."""
        cfg = GatewayConfig()
        assert cfg.host == "0.0.0.0"
        assert cfg.port == 8002
        assert cfg.debug is False
        assert cfg.rate_limit_requests_per_minute == 60
        assert cfg.log_level == "INFO"

    def test_custom_values(self):
        """Explicit constructor arguments override every default."""
        cfg = GatewayConfig(
            host="127.0.0.1",
            port=9000,
            debug=True,
            rate_limit_requests_per_minute=100,
        )
        assert cfg.host == "127.0.0.1"
        assert cfg.port == 9000
        assert cfg.debug is True
        assert cfg.rate_limit_requests_per_minute == 100
|
class TestLLMBackendConfig:
    """Tests for LLMBackendConfig."""

    def test_minimal_config(self):
        """Only name and base_url are required; the rest falls back to defaults."""
        backend = LLMBackendConfig(
            name="test",
            base_url="http://localhost:8000",
        )
        assert backend.name == "test"
        assert backend.base_url == "http://localhost:8000"
        assert backend.api_key is None
        assert backend.enabled is True

    def test_full_config(self):
        """All optional fields are stored as given."""
        backend = LLMBackendConfig(
            name="vllm",
            base_url="http://gpu-server:8000",
            api_key="secret-key",
            default_model="llama-3.1-8b",
            timeout=180,
            enabled=True,
        )
        assert backend.api_key == "secret-key"
        assert backend.default_model == "llama-3.1-8b"
        assert backend.timeout == 180
|
class TestLoadConfig:
    """Tests for the load_config function."""

    def test_load_config_defaults(self):
        """An empty environment yields the built-in defaults."""
        with patch.dict(os.environ, {}, clear=True):
            cfg = load_config()
            assert cfg.host == "0.0.0.0"
            assert cfg.port == 8002
            assert cfg.debug is False

    def test_load_config_with_env_vars(self):
        """Gateway-level environment variables override each default."""
        overrides = {
            "LLM_GATEWAY_HOST": "127.0.0.1",
            "LLM_GATEWAY_PORT": "9000",
            "LLM_GATEWAY_DEBUG": "true",
            "LLM_RATE_LIMIT_RPM": "120",
            "LLM_LOG_LEVEL": "DEBUG",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.host == "127.0.0.1"
            assert cfg.port == 9000
            assert cfg.debug is True
            assert cfg.rate_limit_requests_per_minute == 120
            assert cfg.log_level == "DEBUG"

    def test_load_config_ollama_backend(self):
        """OLLAMA_* variables populate the ollama backend config."""
        overrides = {
            "OLLAMA_BASE_URL": "http://localhost:11434",
            "OLLAMA_DEFAULT_MODEL": "mistral:7b",
            "OLLAMA_TIMEOUT": "60",
            "OLLAMA_ENABLED": "true",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.ollama is not None
            assert cfg.ollama.base_url == "http://localhost:11434"
            assert cfg.ollama.default_model == "mistral:7b"
            assert cfg.ollama.timeout == 60
            assert cfg.ollama.enabled is True

    def test_load_config_vllm_backend(self):
        """VLLM_* variables populate the vllm backend config."""
        overrides = {
            "VLLM_BASE_URL": "http://gpu-server:8000",
            "VLLM_API_KEY": "secret-key",
            "VLLM_DEFAULT_MODEL": "meta-llama/Llama-3.1-8B-Instruct",
            "VLLM_ENABLED": "true",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.vllm is not None
            assert cfg.vllm.base_url == "http://gpu-server:8000"
            assert cfg.vllm.api_key == "secret-key"
            assert cfg.vllm.enabled is True

    def test_load_config_anthropic_backend(self):
        """ANTHROPIC_* variables populate the anthropic backend config."""
        overrides = {
            "ANTHROPIC_API_KEY": "sk-ant-xxx",
            "ANTHROPIC_DEFAULT_MODEL": "claude-3-5-sonnet-20241022",
            "ANTHROPIC_ENABLED": "true",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.anthropic is not None
            assert cfg.anthropic.api_key == "sk-ant-xxx"
            assert cfg.anthropic.default_model == "claude-3-5-sonnet-20241022"
            assert cfg.anthropic.enabled is True

    def test_load_config_no_anthropic_without_key(self):
        """Without an API key the anthropic backend stays unconfigured."""
        with patch.dict(os.environ, {}, clear=True):
            cfg = load_config()
            assert cfg.anthropic is None

    def test_load_config_backend_priority(self):
        """LLM_BACKEND_PRIORITY is split into an ordered list."""
        overrides = {
            "LLM_BACKEND_PRIORITY": "vllm,anthropic,ollama",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.backend_priority == ["vllm", "anthropic", "ollama"]

    def test_load_config_api_keys(self):
        """LLM_API_KEYS is split into a list of keys."""
        overrides = {
            "LLM_API_KEYS": "key1,key2,key3",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.api_keys == ["key1", "key2", "key3"]

    def test_load_config_jwt_secret(self):
        """JWT_SECRET is picked up verbatim."""
        overrides = {
            "JWT_SECRET": "my-secret-key",
        }
        with patch.dict(os.environ, overrides, clear=True):
            cfg = load_config()
            assert cfg.jwt_secret == "my-secret-key"