fix: LLM Provider erkennt COMPLIANCE_LLM_PROVIDER=ollama

Ollama als eigener Enum-Wert neben self_hosted, damit die docker-compose-Konfiguration (ollama) korrekt aufgelöst wird.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

This commit is contained in:
@@ -173,6 +173,7 @@ class LLMProviderType(str, Enum):
     """Supported LLM provider types."""
     ANTHROPIC = "anthropic"
     SELF_HOSTED = "self_hosted"
+    OLLAMA = "ollama"  # Alias for self_hosted (Ollama-specific)
     MOCK = "mock"  # For testing
 
 
@@ -549,7 +550,7 @@ def get_llm_config() -> LLMConfig:
             vault_path="breakpilot/api_keys/anthropic",
             env_var="ANTHROPIC_API_KEY"
         )
-    elif provider_type == LLMProviderType.SELF_HOSTED:
+    elif provider_type in (LLMProviderType.SELF_HOSTED, LLMProviderType.OLLAMA):
         api_key = get_secret_from_vault_or_env(
             vault_path="breakpilot/api_keys/self_hosted_llm",
             env_var="SELF_HOSTED_LLM_KEY"
@@ -558,7 +559,7 @@ def get_llm_config() -> LLMConfig:
     # Select model based on provider type
     if provider_type == LLMProviderType.ANTHROPIC:
         model = os.getenv("ANTHROPIC_MODEL", "claude-sonnet-4-20250514")
-    elif provider_type == LLMProviderType.SELF_HOSTED:
+    elif provider_type in (LLMProviderType.SELF_HOSTED, LLMProviderType.OLLAMA):
         model = os.getenv("SELF_HOSTED_LLM_MODEL", "qwen2.5:14b")
     else:
         model = "mock-model"
@@ -591,7 +592,7 @@ def get_llm_provider(config: Optional[LLMConfig] = None) -> LLMProvider:
             return MockProvider(config)
         return AnthropicProvider(config)
 
-    elif config.provider_type == LLMProviderType.SELF_HOSTED:
+    elif config.provider_type in (LLMProviderType.SELF_HOSTED, LLMProviderType.OLLAMA):
         if not config.base_url:
             logger.warning("No self-hosted LLM URL found, using mock provider")
             return MockProvider(config)
Reference in New Issue
Block a user