Compare commits
5 Commits
2efec74eca
...
feat/litel
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
8aa7c4c33c | ||
|
|
1a818ee5b9 | ||
|
|
0cb350e26e | ||
| 0deaaca848 | |||
| 1d7aebf37c |
12
.env.example
12
.env.example
@@ -34,10 +34,11 @@ MONGODB_DATABASE=certifai
|
|||||||
SEARXNG_URL=http://localhost:8888
|
SEARXNG_URL=http://localhost:8888
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# Ollama LLM instance [OPTIONAL - defaults shown]
|
# LiteLLM proxy [OPTIONAL - defaults shown]
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
OLLAMA_URL=http://localhost:11434
|
LITELLM_URL=http://localhost:4000
|
||||||
OLLAMA_MODEL=llama3.1:8b
|
LITELLM_MODEL=qwen3-32b
|
||||||
|
LITELLM_API_KEY=
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# LibreChat (external chat via SSO) [OPTIONAL - default: http://localhost:3080]
|
# LibreChat (external chat via SSO) [OPTIONAL - default: http://localhost:3080]
|
||||||
@@ -47,7 +48,7 @@ LIBRECHAT_URL=http://localhost:3080
|
|||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# LLM Providers (comma-separated list) [OPTIONAL]
|
# LLM Providers (comma-separated list) [OPTIONAL]
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
LLM_PROVIDERS=ollama
|
LLM_PROVIDERS=litellm
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# SMTP (transactional email) [OPTIONAL]
|
# SMTP (transactional email) [OPTIONAL]
|
||||||
@@ -66,10 +67,11 @@ STRIPE_WEBHOOK_SECRET=
|
|||||||
STRIPE_PUBLISHABLE_KEY=
|
STRIPE_PUBLISHABLE_KEY=
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# LangChain / LangGraph / Langfuse [OPTIONAL]
|
# LangChain / LangGraph / LangFlow / Langfuse [OPTIONAL]
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
LANGCHAIN_URL=
|
LANGCHAIN_URL=
|
||||||
LANGGRAPH_URL=
|
LANGGRAPH_URL=
|
||||||
|
LANGFLOW_URL=
|
||||||
LANGFUSE_URL=
|
LANGFUSE_URL=
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
|
|||||||
@@ -121,13 +121,13 @@ jobs:
|
|||||||
if: always()
|
if: always()
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
# Stage 2b: E2E tests (only on main / PRs to main, after quality checks)
|
# Stage 4: E2E tests (only on main, after deploy)
|
||||||
# ---------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------
|
||||||
e2e:
|
e2e:
|
||||||
name: E2E Tests
|
name: E2E Tests
|
||||||
runs-on: docker
|
runs-on: docker
|
||||||
needs: [fmt, clippy, audit]
|
needs: [deploy]
|
||||||
if: github.ref == 'refs/heads/main' || github.event_name == 'pull_request'
|
if: github.ref == 'refs/heads/main'
|
||||||
container:
|
container:
|
||||||
image: rust:1.89-bookworm
|
image: rust:1.89-bookworm
|
||||||
# MongoDB and SearXNG can start immediately (no repo files needed).
|
# MongoDB and SearXNG can start immediately (no repo files needed).
|
||||||
@@ -154,6 +154,9 @@ jobs:
|
|||||||
MONGODB_URI: mongodb://root:example@mongo:27017
|
MONGODB_URI: mongodb://root:example@mongo:27017
|
||||||
MONGODB_DATABASE: certifai
|
MONGODB_DATABASE: certifai
|
||||||
SEARXNG_URL: http://searxng:8080
|
SEARXNG_URL: http://searxng:8080
|
||||||
|
LANGGRAPH_URL: ""
|
||||||
|
LANGFLOW_URL: ""
|
||||||
|
LANGFUSE_URL: ""
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- name: Checkout
|
||||||
run: |
|
run: |
|
||||||
@@ -256,7 +259,7 @@ jobs:
|
|||||||
deploy:
|
deploy:
|
||||||
name: Deploy
|
name: Deploy
|
||||||
runs-on: docker
|
runs-on: docker
|
||||||
needs: [test, e2e]
|
needs: [test]
|
||||||
if: github.ref == 'refs/heads/main'
|
if: github.ref == 'refs/heads/main'
|
||||||
container:
|
container:
|
||||||
image: alpine:latest
|
image: alpine:latest
|
||||||
|
|||||||
1
Cargo.lock
generated
1
Cargo.lock
generated
@@ -773,6 +773,7 @@ dependencies = [
|
|||||||
"dioxus-sdk",
|
"dioxus-sdk",
|
||||||
"dotenvy",
|
"dotenvy",
|
||||||
"futures",
|
"futures",
|
||||||
|
"js-sys",
|
||||||
"maud",
|
"maud",
|
||||||
"mongodb",
|
"mongodb",
|
||||||
"petname",
|
"petname",
|
||||||
|
|||||||
@@ -61,6 +61,7 @@ secrecy = { version = "0.10", default-features = false, optional = true }
|
|||||||
serde_json = { version = "1.0.133", default-features = false }
|
serde_json = { version = "1.0.133", default-features = false }
|
||||||
maud = { version = "0.27", default-features = false }
|
maud = { version = "0.27", default-features = false }
|
||||||
url = { version = "2.5.4", default-features = false, optional = true }
|
url = { version = "2.5.4", default-features = false, optional = true }
|
||||||
|
js-sys = { version = "0.3", optional = true }
|
||||||
wasm-bindgen = { version = "0.2", optional = true }
|
wasm-bindgen = { version = "0.2", optional = true }
|
||||||
web-sys = { version = "0.3", optional = true, features = [
|
web-sys = { version = "0.3", optional = true, features = [
|
||||||
"Clipboard",
|
"Clipboard",
|
||||||
@@ -91,7 +92,7 @@ bytes = { version = "1", optional = true }
|
|||||||
|
|
||||||
[features]
|
[features]
|
||||||
# default = ["web"]
|
# default = ["web"]
|
||||||
web = ["dioxus/web", "dep:reqwest", "dep:web-sys", "dep:wasm-bindgen"]
|
web = ["dioxus/web", "dep:reqwest", "dep:web-sys", "dep:wasm-bindgen", "dep:js-sys"]
|
||||||
server = [
|
server = [
|
||||||
"dioxus/server",
|
"dioxus/server",
|
||||||
"dep:axum",
|
"dep:axum",
|
||||||
|
|||||||
@@ -58,15 +58,15 @@
|
|||||||
"title": "Dashboard",
|
"title": "Dashboard",
|
||||||
"subtitle": "KI-Nachrichten und Neuigkeiten",
|
"subtitle": "KI-Nachrichten und Neuigkeiten",
|
||||||
"topic_placeholder": "Themenname...",
|
"topic_placeholder": "Themenname...",
|
||||||
"ollama_settings": "Ollama-Einstellungen",
|
"litellm_settings": "LiteLLM-Einstellungen",
|
||||||
"settings_hint": "Leer lassen, um OLLAMA_URL / OLLAMA_MODEL aus .env zu verwenden",
|
"settings_hint": "Leer lassen, um LITELLM_URL / LITELLM_MODEL aus .env zu verwenden",
|
||||||
"ollama_url": "Ollama-URL",
|
"litellm_url": "LiteLLM-URL",
|
||||||
"ollama_url_placeholder": "Verwendet OLLAMA_URL aus .env",
|
"litellm_url_placeholder": "Verwendet LITELLM_URL aus .env",
|
||||||
"model": "Modell",
|
"model": "Modell",
|
||||||
"model_placeholder": "Verwendet OLLAMA_MODEL aus .env",
|
"model_placeholder": "Verwendet LITELLM_MODEL aus .env",
|
||||||
"searching": "Suche laeuft...",
|
"searching": "Suche laeuft...",
|
||||||
"search_failed": "Suche fehlgeschlagen: {e}",
|
"search_failed": "Suche fehlgeschlagen: {e}",
|
||||||
"ollama_status": "Ollama-Status",
|
"litellm_status": "LiteLLM-Status",
|
||||||
"trending": "Im Trend",
|
"trending": "Im Trend",
|
||||||
"recent_searches": "Letzte Suchen"
|
"recent_searches": "Letzte Suchen"
|
||||||
},
|
},
|
||||||
@@ -96,7 +96,38 @@
|
|||||||
"total_requests": "Anfragen gesamt",
|
"total_requests": "Anfragen gesamt",
|
||||||
"avg_latency": "Durchschn. Latenz",
|
"avg_latency": "Durchschn. Latenz",
|
||||||
"tokens_used": "Verbrauchte Token",
|
"tokens_used": "Verbrauchte Token",
|
||||||
"error_rate": "Fehlerrate"
|
"error_rate": "Fehlerrate",
|
||||||
|
"not_configured": "Nicht konfiguriert",
|
||||||
|
"open_new_tab": "In neuem Tab oeffnen",
|
||||||
|
"agents_status_connected": "Verbunden",
|
||||||
|
"agents_status_not_connected": "Nicht verbunden",
|
||||||
|
"agents_config_hint": "Setzen Sie LANGGRAPH_URL in .env, um eine Verbindung herzustellen",
|
||||||
|
"agents_quick_start": "Schnellstart",
|
||||||
|
"agents_docs": "Dokumentation",
|
||||||
|
"agents_docs_desc": "Offizielle LangGraph-Dokumentation und API-Anleitungen.",
|
||||||
|
"agents_getting_started": "Erste Schritte",
|
||||||
|
"agents_getting_started_desc": "Schritt-fuer-Schritt-Anleitung zum Erstellen Ihres ersten Agenten.",
|
||||||
|
"agents_github": "GitHub",
|
||||||
|
"agents_github_desc": "Quellcode, Issues und Community-Beitraege.",
|
||||||
|
"agents_examples": "Beispiele",
|
||||||
|
"agents_examples_desc": "Einsatzbereite Vorlagen und Beispielprojekte fuer Agenten.",
|
||||||
|
"agents_api_ref": "API-Referenz",
|
||||||
|
"agents_api_ref_desc": "Lokale Swagger-Dokumentation fuer Ihre LangGraph-Instanz.",
|
||||||
|
"agents_running_title": "Laufende Agenten",
|
||||||
|
"agents_none": "Keine Agenten registriert. Stellen Sie einen Assistenten in LangGraph bereit, um ihn hier zu sehen.",
|
||||||
|
"agents_col_name": "Name",
|
||||||
|
"agents_col_id": "ID",
|
||||||
|
"agents_col_description": "Beschreibung",
|
||||||
|
"agents_col_status": "Status",
|
||||||
|
"analytics_status_connected": "Verbunden",
|
||||||
|
"analytics_status_not_connected": "Nicht verbunden",
|
||||||
|
"analytics_config_hint": "Setzen Sie LANGFUSE_URL in .env, um eine Verbindung herzustellen",
|
||||||
|
"analytics_sso_hint": "Langfuse nutzt Keycloak-SSO. Sie werden automatisch mit Ihrem CERTifAI-Konto angemeldet.",
|
||||||
|
"analytics_quick_actions": "Schnellaktionen",
|
||||||
|
"analytics_traces": "Traces",
|
||||||
|
"analytics_traces_desc": "Alle LLM-Aufrufe, Latenzen und Token-Verbrauch anzeigen und filtern.",
|
||||||
|
"analytics_dashboard": "Dashboard",
|
||||||
|
"analytics_dashboard_desc": "Ueberblick ueber Kosten, Qualitaetsmetriken und Nutzungstrends."
|
||||||
},
|
},
|
||||||
"org": {
|
"org": {
|
||||||
"title": "Organisation",
|
"title": "Organisation",
|
||||||
@@ -113,6 +144,16 @@
|
|||||||
"email_address": "E-Mail-Adresse",
|
"email_address": "E-Mail-Adresse",
|
||||||
"email_placeholder": "kollege@firma.de",
|
"email_placeholder": "kollege@firma.de",
|
||||||
"send_invite": "Einladung senden",
|
"send_invite": "Einladung senden",
|
||||||
|
"total_spend": "Gesamtausgaben",
|
||||||
|
"total_tokens": "Tokens gesamt",
|
||||||
|
"model_usage": "Nutzung nach Modell",
|
||||||
|
"model": "Modell",
|
||||||
|
"tokens": "Tokens",
|
||||||
|
"spend": "Ausgaben",
|
||||||
|
"usage_unavailable": "Nutzungsdaten nicht verfuegbar",
|
||||||
|
"loading_usage": "Nutzungsdaten werden geladen...",
|
||||||
|
"prompt_tokens": "Prompt-Tokens",
|
||||||
|
"completion_tokens": "Antwort-Tokens",
|
||||||
"pricing_title": "Preise",
|
"pricing_title": "Preise",
|
||||||
"pricing_subtitle": "Waehlen Sie den passenden Plan fuer Ihre Organisation"
|
"pricing_subtitle": "Waehlen Sie den passenden Plan fuer Ihre Organisation"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -58,15 +58,15 @@
|
|||||||
"title": "Dashboard",
|
"title": "Dashboard",
|
||||||
"subtitle": "AI news and updates",
|
"subtitle": "AI news and updates",
|
||||||
"topic_placeholder": "Topic name...",
|
"topic_placeholder": "Topic name...",
|
||||||
"ollama_settings": "Ollama Settings",
|
"litellm_settings": "LiteLLM Settings",
|
||||||
"settings_hint": "Leave empty to use OLLAMA_URL / OLLAMA_MODEL from .env",
|
"settings_hint": "Leave empty to use LITELLM_URL / LITELLM_MODEL from .env",
|
||||||
"ollama_url": "Ollama URL",
|
"litellm_url": "LiteLLM URL",
|
||||||
"ollama_url_placeholder": "Uses OLLAMA_URL from .env",
|
"litellm_url_placeholder": "Uses LITELLM_URL from .env",
|
||||||
"model": "Model",
|
"model": "Model",
|
||||||
"model_placeholder": "Uses OLLAMA_MODEL from .env",
|
"model_placeholder": "Uses LITELLM_MODEL from .env",
|
||||||
"searching": "Searching...",
|
"searching": "Searching...",
|
||||||
"search_failed": "Search failed: {e}",
|
"search_failed": "Search failed: {e}",
|
||||||
"ollama_status": "Ollama Status",
|
"litellm_status": "LiteLLM Status",
|
||||||
"trending": "Trending",
|
"trending": "Trending",
|
||||||
"recent_searches": "Recent Searches"
|
"recent_searches": "Recent Searches"
|
||||||
},
|
},
|
||||||
@@ -96,7 +96,38 @@
|
|||||||
"total_requests": "Total Requests",
|
"total_requests": "Total Requests",
|
||||||
"avg_latency": "Avg Latency",
|
"avg_latency": "Avg Latency",
|
||||||
"tokens_used": "Tokens Used",
|
"tokens_used": "Tokens Used",
|
||||||
"error_rate": "Error Rate"
|
"error_rate": "Error Rate",
|
||||||
|
"not_configured": "Not Configured",
|
||||||
|
"open_new_tab": "Open in New Tab",
|
||||||
|
"agents_status_connected": "Connected",
|
||||||
|
"agents_status_not_connected": "Not Connected",
|
||||||
|
"agents_config_hint": "Set LANGGRAPH_URL in .env to connect",
|
||||||
|
"agents_quick_start": "Quick Start",
|
||||||
|
"agents_docs": "Documentation",
|
||||||
|
"agents_docs_desc": "Official LangGraph documentation and API guides.",
|
||||||
|
"agents_getting_started": "Getting Started",
|
||||||
|
"agents_getting_started_desc": "Step-by-step tutorial to build your first agent.",
|
||||||
|
"agents_github": "GitHub",
|
||||||
|
"agents_github_desc": "Source code, issues, and community contributions.",
|
||||||
|
"agents_examples": "Examples",
|
||||||
|
"agents_examples_desc": "Ready-to-use templates and example agent projects.",
|
||||||
|
"agents_api_ref": "API Reference",
|
||||||
|
"agents_api_ref_desc": "Local Swagger docs for your LangGraph instance.",
|
||||||
|
"agents_running_title": "Running Agents",
|
||||||
|
"agents_none": "No agents registered. Deploy an assistant to LangGraph to see it here.",
|
||||||
|
"agents_col_name": "Name",
|
||||||
|
"agents_col_id": "ID",
|
||||||
|
"agents_col_description": "Description",
|
||||||
|
"agents_col_status": "Status",
|
||||||
|
"analytics_status_connected": "Connected",
|
||||||
|
"analytics_status_not_connected": "Not Connected",
|
||||||
|
"analytics_config_hint": "Set LANGFUSE_URL in .env to connect",
|
||||||
|
"analytics_sso_hint": "Langfuse uses Keycloak SSO. You will be signed in automatically with your CERTifAI account.",
|
||||||
|
"analytics_quick_actions": "Quick Actions",
|
||||||
|
"analytics_traces": "Traces",
|
||||||
|
"analytics_traces_desc": "View and filter all LLM call traces, latencies, and token usage.",
|
||||||
|
"analytics_dashboard": "Dashboard",
|
||||||
|
"analytics_dashboard_desc": "Overview of costs, quality metrics, and usage trends."
|
||||||
},
|
},
|
||||||
"org": {
|
"org": {
|
||||||
"title": "Organization",
|
"title": "Organization",
|
||||||
@@ -113,6 +144,16 @@
|
|||||||
"email_address": "Email Address",
|
"email_address": "Email Address",
|
||||||
"email_placeholder": "colleague@company.com",
|
"email_placeholder": "colleague@company.com",
|
||||||
"send_invite": "Send Invite",
|
"send_invite": "Send Invite",
|
||||||
|
"total_spend": "Total Spend",
|
||||||
|
"total_tokens": "Total Tokens",
|
||||||
|
"model_usage": "Usage by Model",
|
||||||
|
"model": "Model",
|
||||||
|
"tokens": "Tokens",
|
||||||
|
"spend": "Spend",
|
||||||
|
"usage_unavailable": "Usage data unavailable",
|
||||||
|
"loading_usage": "Loading usage data...",
|
||||||
|
"prompt_tokens": "Prompt Tokens",
|
||||||
|
"completion_tokens": "Completion Tokens",
|
||||||
"pricing_title": "Pricing",
|
"pricing_title": "Pricing",
|
||||||
"pricing_subtitle": "Choose the plan that fits your organization"
|
"pricing_subtitle": "Choose the plan that fits your organization"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -58,15 +58,15 @@
|
|||||||
"title": "Panel de control",
|
"title": "Panel de control",
|
||||||
"subtitle": "Noticias y actualizaciones de IA",
|
"subtitle": "Noticias y actualizaciones de IA",
|
||||||
"topic_placeholder": "Nombre del tema...",
|
"topic_placeholder": "Nombre del tema...",
|
||||||
"ollama_settings": "Configuracion de Ollama",
|
"litellm_settings": "Configuracion de LiteLLM",
|
||||||
"settings_hint": "Dejar vacio para usar OLLAMA_URL / OLLAMA_MODEL del archivo .env",
|
"settings_hint": "Dejar vacio para usar LITELLM_URL / LITELLM_MODEL del archivo .env",
|
||||||
"ollama_url": "URL de Ollama",
|
"litellm_url": "URL de LiteLLM",
|
||||||
"ollama_url_placeholder": "Usa OLLAMA_URL del archivo .env",
|
"litellm_url_placeholder": "Usa LITELLM_URL del archivo .env",
|
||||||
"model": "Modelo",
|
"model": "Modelo",
|
||||||
"model_placeholder": "Usa OLLAMA_MODEL del archivo .env",
|
"model_placeholder": "Usa LITELLM_MODEL del archivo .env",
|
||||||
"searching": "Buscando...",
|
"searching": "Buscando...",
|
||||||
"search_failed": "La busqueda fallo: {e}",
|
"search_failed": "La busqueda fallo: {e}",
|
||||||
"ollama_status": "Estado de Ollama",
|
"litellm_status": "Estado de LiteLLM",
|
||||||
"trending": "Tendencias",
|
"trending": "Tendencias",
|
||||||
"recent_searches": "Busquedas recientes"
|
"recent_searches": "Busquedas recientes"
|
||||||
},
|
},
|
||||||
@@ -96,7 +96,38 @@
|
|||||||
"total_requests": "Total de solicitudes",
|
"total_requests": "Total de solicitudes",
|
||||||
"avg_latency": "Latencia promedio",
|
"avg_latency": "Latencia promedio",
|
||||||
"tokens_used": "Tokens utilizados",
|
"tokens_used": "Tokens utilizados",
|
||||||
"error_rate": "Tasa de errores"
|
"error_rate": "Tasa de errores",
|
||||||
|
"not_configured": "No configurado",
|
||||||
|
"open_new_tab": "Abrir en nueva pestana",
|
||||||
|
"agents_status_connected": "Conectado",
|
||||||
|
"agents_status_not_connected": "No conectado",
|
||||||
|
"agents_config_hint": "Configure LANGGRAPH_URL en .env para conectar",
|
||||||
|
"agents_quick_start": "Inicio rapido",
|
||||||
|
"agents_docs": "Documentacion",
|
||||||
|
"agents_docs_desc": "Documentacion oficial de LangGraph y guias de API.",
|
||||||
|
"agents_getting_started": "Primeros pasos",
|
||||||
|
"agents_getting_started_desc": "Tutorial paso a paso para crear su primer agente.",
|
||||||
|
"agents_github": "GitHub",
|
||||||
|
"agents_github_desc": "Codigo fuente, issues y contribuciones de la comunidad.",
|
||||||
|
"agents_examples": "Ejemplos",
|
||||||
|
"agents_examples_desc": "Plantillas y proyectos de agentes listos para usar.",
|
||||||
|
"agents_api_ref": "Referencia API",
|
||||||
|
"agents_api_ref_desc": "Documentacion Swagger local para su instancia de LangGraph.",
|
||||||
|
"agents_running_title": "Agentes en ejecucion",
|
||||||
|
"agents_none": "No hay agentes registrados. Despliegue un asistente en LangGraph para verlo aqui.",
|
||||||
|
"agents_col_name": "Nombre",
|
||||||
|
"agents_col_id": "ID",
|
||||||
|
"agents_col_description": "Descripcion",
|
||||||
|
"agents_col_status": "Estado",
|
||||||
|
"analytics_status_connected": "Conectado",
|
||||||
|
"analytics_status_not_connected": "No conectado",
|
||||||
|
"analytics_config_hint": "Configure LANGFUSE_URL en .env para conectar",
|
||||||
|
"analytics_sso_hint": "Langfuse utiliza SSO de Keycloak. Iniciara sesion automaticamente con su cuenta CERTifAI.",
|
||||||
|
"analytics_quick_actions": "Acciones rapidas",
|
||||||
|
"analytics_traces": "Trazas",
|
||||||
|
"analytics_traces_desc": "Ver y filtrar todas las llamadas LLM, latencias y uso de tokens.",
|
||||||
|
"analytics_dashboard": "Panel de control",
|
||||||
|
"analytics_dashboard_desc": "Resumen de costos, metricas de calidad y tendencias de uso."
|
||||||
},
|
},
|
||||||
"org": {
|
"org": {
|
||||||
"title": "Organizacion",
|
"title": "Organizacion",
|
||||||
@@ -113,6 +144,16 @@
|
|||||||
"email_address": "Direccion de correo electronico",
|
"email_address": "Direccion de correo electronico",
|
||||||
"email_placeholder": "colega@empresa.com",
|
"email_placeholder": "colega@empresa.com",
|
||||||
"send_invite": "Enviar invitacion",
|
"send_invite": "Enviar invitacion",
|
||||||
|
"total_spend": "Gasto total",
|
||||||
|
"total_tokens": "Tokens totales",
|
||||||
|
"model_usage": "Uso por modelo",
|
||||||
|
"model": "Modelo",
|
||||||
|
"tokens": "Tokens",
|
||||||
|
"spend": "Gasto",
|
||||||
|
"usage_unavailable": "Datos de uso no disponibles",
|
||||||
|
"loading_usage": "Cargando datos de uso...",
|
||||||
|
"prompt_tokens": "Tokens de entrada",
|
||||||
|
"completion_tokens": "Tokens de respuesta",
|
||||||
"pricing_title": "Precios",
|
"pricing_title": "Precios",
|
||||||
"pricing_subtitle": "Elija el plan que se adapte a su organizacion"
|
"pricing_subtitle": "Elija el plan que se adapte a su organizacion"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -58,15 +58,15 @@
|
|||||||
"title": "Tableau de bord",
|
"title": "Tableau de bord",
|
||||||
"subtitle": "Actualites et mises a jour IA",
|
"subtitle": "Actualites et mises a jour IA",
|
||||||
"topic_placeholder": "Nom du sujet...",
|
"topic_placeholder": "Nom du sujet...",
|
||||||
"ollama_settings": "Parametres Ollama",
|
"litellm_settings": "Parametres LiteLLM",
|
||||||
"settings_hint": "Laissez vide pour utiliser OLLAMA_URL / OLLAMA_MODEL du fichier .env",
|
"settings_hint": "Laissez vide pour utiliser LITELLM_URL / LITELLM_MODEL du fichier .env",
|
||||||
"ollama_url": "URL Ollama",
|
"litellm_url": "URL LiteLLM",
|
||||||
"ollama_url_placeholder": "Utilise OLLAMA_URL du fichier .env",
|
"litellm_url_placeholder": "Utilise LITELLM_URL du fichier .env",
|
||||||
"model": "Modele",
|
"model": "Modele",
|
||||||
"model_placeholder": "Utilise OLLAMA_MODEL du fichier .env",
|
"model_placeholder": "Utilise LITELLM_MODEL du fichier .env",
|
||||||
"searching": "Recherche en cours...",
|
"searching": "Recherche en cours...",
|
||||||
"search_failed": "Echec de la recherche : {e}",
|
"search_failed": "Echec de la recherche : {e}",
|
||||||
"ollama_status": "Statut Ollama",
|
"litellm_status": "Statut LiteLLM",
|
||||||
"trending": "Tendances",
|
"trending": "Tendances",
|
||||||
"recent_searches": "Recherches recentes"
|
"recent_searches": "Recherches recentes"
|
||||||
},
|
},
|
||||||
@@ -96,7 +96,38 @@
|
|||||||
"total_requests": "Requetes totales",
|
"total_requests": "Requetes totales",
|
||||||
"avg_latency": "Latence moyenne",
|
"avg_latency": "Latence moyenne",
|
||||||
"tokens_used": "Tokens utilises",
|
"tokens_used": "Tokens utilises",
|
||||||
"error_rate": "Taux d'erreur"
|
"error_rate": "Taux d'erreur",
|
||||||
|
"not_configured": "Non configure",
|
||||||
|
"open_new_tab": "Ouvrir dans un nouvel onglet",
|
||||||
|
"agents_status_connected": "Connecte",
|
||||||
|
"agents_status_not_connected": "Non connecte",
|
||||||
|
"agents_config_hint": "Definissez LANGGRAPH_URL dans .env pour vous connecter",
|
||||||
|
"agents_quick_start": "Demarrage rapide",
|
||||||
|
"agents_docs": "Documentation",
|
||||||
|
"agents_docs_desc": "Documentation officielle de LangGraph et guides API.",
|
||||||
|
"agents_getting_started": "Premiers pas",
|
||||||
|
"agents_getting_started_desc": "Tutoriel etape par etape pour creer votre premier agent.",
|
||||||
|
"agents_github": "GitHub",
|
||||||
|
"agents_github_desc": "Code source, issues et contributions de la communaute.",
|
||||||
|
"agents_examples": "Exemples",
|
||||||
|
"agents_examples_desc": "Modeles et projets d'agents prets a l'emploi.",
|
||||||
|
"agents_api_ref": "Reference API",
|
||||||
|
"agents_api_ref_desc": "Documentation Swagger locale pour votre instance LangGraph.",
|
||||||
|
"agents_running_title": "Agents en cours",
|
||||||
|
"agents_none": "Aucun agent enregistre. Deployez un assistant dans LangGraph pour le voir ici.",
|
||||||
|
"agents_col_name": "Nom",
|
||||||
|
"agents_col_id": "ID",
|
||||||
|
"agents_col_description": "Description",
|
||||||
|
"agents_col_status": "Statut",
|
||||||
|
"analytics_status_connected": "Connecte",
|
||||||
|
"analytics_status_not_connected": "Non connecte",
|
||||||
|
"analytics_config_hint": "Definissez LANGFUSE_URL dans .env pour vous connecter",
|
||||||
|
"analytics_sso_hint": "Langfuse utilise le SSO Keycloak. Vous serez connecte automatiquement avec votre compte CERTifAI.",
|
||||||
|
"analytics_quick_actions": "Actions rapides",
|
||||||
|
"analytics_traces": "Traces",
|
||||||
|
"analytics_traces_desc": "Afficher et filtrer tous les appels LLM, latences et consommation de tokens.",
|
||||||
|
"analytics_dashboard": "Tableau de bord",
|
||||||
|
"analytics_dashboard_desc": "Apercu des couts, metriques de qualite et tendances d'utilisation."
|
||||||
},
|
},
|
||||||
"org": {
|
"org": {
|
||||||
"title": "Organisation",
|
"title": "Organisation",
|
||||||
@@ -113,6 +144,16 @@
|
|||||||
"email_address": "Adresse e-mail",
|
"email_address": "Adresse e-mail",
|
||||||
"email_placeholder": "collegue@entreprise.com",
|
"email_placeholder": "collegue@entreprise.com",
|
||||||
"send_invite": "Envoyer l'invitation",
|
"send_invite": "Envoyer l'invitation",
|
||||||
|
"total_spend": "Depenses totales",
|
||||||
|
"total_tokens": "Tokens totaux",
|
||||||
|
"model_usage": "Utilisation par modele",
|
||||||
|
"model": "Modele",
|
||||||
|
"tokens": "Tokens",
|
||||||
|
"spend": "Depenses",
|
||||||
|
"usage_unavailable": "Donnees d'utilisation indisponibles",
|
||||||
|
"loading_usage": "Chargement des donnees d'utilisation...",
|
||||||
|
"prompt_tokens": "Tokens d'entree",
|
||||||
|
"completion_tokens": "Tokens de reponse",
|
||||||
"pricing_title": "Tarifs",
|
"pricing_title": "Tarifs",
|
||||||
"pricing_subtitle": "Choisissez le plan adapte a votre organisation"
|
"pricing_subtitle": "Choisissez le plan adapte a votre organisation"
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -58,15 +58,15 @@
|
|||||||
"title": "Painel",
|
"title": "Painel",
|
||||||
"subtitle": "Noticias e atualizacoes de IA",
|
"subtitle": "Noticias e atualizacoes de IA",
|
||||||
"topic_placeholder": "Nome do topico...",
|
"topic_placeholder": "Nome do topico...",
|
||||||
"ollama_settings": "Definicoes do Ollama",
|
"litellm_settings": "Definicoes do LiteLLM",
|
||||||
"settings_hint": "Deixe vazio para usar OLLAMA_URL / OLLAMA_MODEL do .env",
|
"settings_hint": "Deixe vazio para usar LITELLM_URL / LITELLM_MODEL do .env",
|
||||||
"ollama_url": "URL do Ollama",
|
"litellm_url": "URL do LiteLLM",
|
||||||
"ollama_url_placeholder": "Utiliza OLLAMA_URL do .env",
|
"litellm_url_placeholder": "Utiliza LITELLM_URL do .env",
|
||||||
"model": "Modelo",
|
"model": "Modelo",
|
||||||
"model_placeholder": "Utiliza OLLAMA_MODEL do .env",
|
"model_placeholder": "Utiliza LITELLM_MODEL do .env",
|
||||||
"searching": "A pesquisar...",
|
"searching": "A pesquisar...",
|
||||||
"search_failed": "A pesquisa falhou: {e}",
|
"search_failed": "A pesquisa falhou: {e}",
|
||||||
"ollama_status": "Estado do Ollama",
|
"litellm_status": "Estado do LiteLLM",
|
||||||
"trending": "Em destaque",
|
"trending": "Em destaque",
|
||||||
"recent_searches": "Pesquisas recentes"
|
"recent_searches": "Pesquisas recentes"
|
||||||
},
|
},
|
||||||
@@ -96,7 +96,38 @@
|
|||||||
"total_requests": "Total de Pedidos",
|
"total_requests": "Total de Pedidos",
|
||||||
"avg_latency": "Latencia Media",
|
"avg_latency": "Latencia Media",
|
||||||
"tokens_used": "Tokens Utilizados",
|
"tokens_used": "Tokens Utilizados",
|
||||||
"error_rate": "Taxa de Erros"
|
"error_rate": "Taxa de Erros",
|
||||||
|
"not_configured": "Nao configurado",
|
||||||
|
"open_new_tab": "Abrir em novo separador",
|
||||||
|
"agents_status_connected": "Conectado",
|
||||||
|
"agents_status_not_connected": "Nao conectado",
|
||||||
|
"agents_config_hint": "Defina LANGGRAPH_URL no .env para conectar",
|
||||||
|
"agents_quick_start": "Inicio rapido",
|
||||||
|
"agents_docs": "Documentacao",
|
||||||
|
"agents_docs_desc": "Documentacao oficial do LangGraph e guias de API.",
|
||||||
|
"agents_getting_started": "Primeiros passos",
|
||||||
|
"agents_getting_started_desc": "Tutorial passo a passo para criar o seu primeiro agente.",
|
||||||
|
"agents_github": "GitHub",
|
||||||
|
"agents_github_desc": "Codigo fonte, issues e contribuicoes da comunidade.",
|
||||||
|
"agents_examples": "Exemplos",
|
||||||
|
"agents_examples_desc": "Modelos e projetos de agentes prontos a usar.",
|
||||||
|
"agents_api_ref": "Referencia API",
|
||||||
|
"agents_api_ref_desc": "Documentacao Swagger local para a sua instancia LangGraph.",
|
||||||
|
"agents_running_title": "Agentes em execucao",
|
||||||
|
"agents_none": "Nenhum agente registado. Implemente um assistente no LangGraph para o ver aqui.",
|
||||||
|
"agents_col_name": "Nome",
|
||||||
|
"agents_col_id": "ID",
|
||||||
|
"agents_col_description": "Descricao",
|
||||||
|
"agents_col_status": "Estado",
|
||||||
|
"analytics_status_connected": "Conectado",
|
||||||
|
"analytics_status_not_connected": "Nao conectado",
|
||||||
|
"analytics_config_hint": "Defina LANGFUSE_URL no .env para conectar",
|
||||||
|
"analytics_sso_hint": "O Langfuse utiliza SSO do Keycloak. Sera autenticado automaticamente com a sua conta CERTifAI.",
|
||||||
|
"analytics_quick_actions": "Acoes rapidas",
|
||||||
|
"analytics_traces": "Traces",
|
||||||
|
"analytics_traces_desc": "Ver e filtrar todas as chamadas LLM, latencias e uso de tokens.",
|
||||||
|
"analytics_dashboard": "Painel",
|
||||||
|
"analytics_dashboard_desc": "Resumo de custos, metricas de qualidade e tendencias de uso."
|
||||||
},
|
},
|
||||||
"org": {
|
"org": {
|
||||||
"title": "Organizacao",
|
"title": "Organizacao",
|
||||||
@@ -113,6 +144,16 @@
|
|||||||
"email_address": "Endereco de Email",
|
"email_address": "Endereco de Email",
|
||||||
"email_placeholder": "colleague@company.com",
|
"email_placeholder": "colleague@company.com",
|
||||||
"send_invite": "Enviar Convite",
|
"send_invite": "Enviar Convite",
|
||||||
|
"total_spend": "Gasto total",
|
||||||
|
"total_tokens": "Tokens totais",
|
||||||
|
"model_usage": "Uso por modelo",
|
||||||
|
"model": "Modelo",
|
||||||
|
"tokens": "Tokens",
|
||||||
|
"spend": "Gasto",
|
||||||
|
"usage_unavailable": "Dados de uso indisponiveis",
|
||||||
|
"loading_usage": "Carregando dados de uso...",
|
||||||
|
"prompt_tokens": "Tokens de entrada",
|
||||||
|
"completion_tokens": "Tokens de resposta",
|
||||||
"pricing_title": "Precos",
|
"pricing_title": "Precos",
|
||||||
"pricing_subtitle": "Escolha o plano adequado a sua organizacao"
|
"pricing_subtitle": "Escolha o plano adequado a sua organizacao"
|
||||||
},
|
},
|
||||||
|
|||||||
390
assets/main.css
390
assets/main.css
@@ -2591,6 +2591,58 @@ h6 {
|
|||||||
border-radius: 20px;
|
border-radius: 20px;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ===== Tool Embed (iframe integration) ===== */
|
||||||
|
.tool-embed {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
flex: 1;
|
||||||
|
height: calc(100vh - 60px);
|
||||||
|
min-height: 400px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.tool-embed-toolbar {
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: space-between;
|
||||||
|
padding: 12px 20px;
|
||||||
|
background-color: var(--bg-card);
|
||||||
|
border-bottom: 1px solid var(--border-primary);
|
||||||
|
}
|
||||||
|
|
||||||
|
.tool-embed-title {
|
||||||
|
font-family: 'Space Grotesk', sans-serif;
|
||||||
|
font-size: 16px;
|
||||||
|
font-weight: 600;
|
||||||
|
color: var(--text-heading);
|
||||||
|
}
|
||||||
|
|
||||||
|
.tool-embed-popout-btn {
|
||||||
|
display: inline-flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 6px;
|
||||||
|
padding: 6px 14px;
|
||||||
|
font-size: 13px;
|
||||||
|
font-weight: 500;
|
||||||
|
color: var(--accent);
|
||||||
|
background-color: transparent;
|
||||||
|
border: 1px solid var(--accent);
|
||||||
|
border-radius: 6px;
|
||||||
|
text-decoration: none;
|
||||||
|
cursor: pointer;
|
||||||
|
transition: background-color 0.15s, color 0.15s;
|
||||||
|
}
|
||||||
|
|
||||||
|
.tool-embed-popout-btn:hover {
|
||||||
|
background-color: var(--accent);
|
||||||
|
color: var(--bg-body);
|
||||||
|
}
|
||||||
|
|
||||||
|
.tool-embed-iframe {
|
||||||
|
flex: 1;
|
||||||
|
width: 100%;
|
||||||
|
border: none;
|
||||||
|
}
|
||||||
|
|
||||||
/* ===== Analytics Stats Bar ===== */
|
/* ===== Analytics Stats Bar ===== */
|
||||||
.analytics-stats-bar {
|
.analytics-stats-bar {
|
||||||
display: flex;
|
display: flex;
|
||||||
@@ -3323,3 +3375,341 @@ h6 {
|
|||||||
padding: 20px 16px;
|
padding: 20px 16px;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* ===== Agents Page ===== */
|
||||||
|
.agents-page {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
padding: 32px;
|
||||||
|
gap: 32px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-hero {
|
||||||
|
max-width: 720px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
gap: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-hero-row {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: row;
|
||||||
|
align-items: center;
|
||||||
|
gap: 16px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-hero-icon {
|
||||||
|
width: 48px;
|
||||||
|
height: 48px;
|
||||||
|
min-width: 48px;
|
||||||
|
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
|
||||||
|
color: var(--avatar-text);
|
||||||
|
border-radius: 12px;
|
||||||
|
font-size: 24px;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
font-weight: 700;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-hero-title {
|
||||||
|
font-family: 'Space Grotesk', sans-serif;
|
||||||
|
font-size: 28px;
|
||||||
|
font-weight: 700;
|
||||||
|
color: var(--text-heading);
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-hero-desc {
|
||||||
|
font-size: 15px;
|
||||||
|
color: var(--text-muted);
|
||||||
|
line-height: 1.6;
|
||||||
|
max-width: 600px;
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-status {
|
||||||
|
display: inline-flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 8px;
|
||||||
|
font-size: 13px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-status-dot {
|
||||||
|
width: 8px;
|
||||||
|
height: 8px;
|
||||||
|
border-radius: 50%;
|
||||||
|
display: inline-block;
|
||||||
|
flex-shrink: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-status-dot--on {
|
||||||
|
background-color: #22c55e;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-status-dot--off {
|
||||||
|
background-color: var(--text-faint);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-status-url {
|
||||||
|
font-family: 'JetBrains Mono', 'Fira Code', monospace;
|
||||||
|
color: var(--accent);
|
||||||
|
font-size: 13px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-status-hint {
|
||||||
|
font-size: 13px;
|
||||||
|
color: var(--text-faint);
|
||||||
|
font-style: italic;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-section-title {
|
||||||
|
font-size: 18px;
|
||||||
|
font-weight: 600;
|
||||||
|
color: var(--text-heading);
|
||||||
|
margin: 0 0 12px 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-grid {
|
||||||
|
display: grid;
|
||||||
|
grid-template-columns: repeat(3, 1fr);
|
||||||
|
gap: 16px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-card {
|
||||||
|
display: block;
|
||||||
|
text-decoration: none;
|
||||||
|
background-color: var(--bg-card);
|
||||||
|
border: 1px solid var(--border-primary);
|
||||||
|
border-radius: 12px;
|
||||||
|
padding: 24px;
|
||||||
|
transition: border-color 0.2s, transform 0.2s;
|
||||||
|
cursor: pointer;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-card:hover {
|
||||||
|
border-color: var(--accent);
|
||||||
|
transform: translateY(-2px);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-card-icon {
|
||||||
|
width: 36px;
|
||||||
|
height: 36px;
|
||||||
|
min-width: 36px;
|
||||||
|
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
|
||||||
|
color: var(--avatar-text);
|
||||||
|
border-radius: 8px;
|
||||||
|
font-size: 18px;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
font-weight: 700;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-card-title {
|
||||||
|
font-size: 16px;
|
||||||
|
font-weight: 600;
|
||||||
|
color: var(--text-heading);
|
||||||
|
margin: 12px 0 4px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-card-desc {
|
||||||
|
font-size: 13px;
|
||||||
|
color: var(--text-muted);
|
||||||
|
line-height: 1.5;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-card--disabled {
|
||||||
|
opacity: 0.4;
|
||||||
|
pointer-events: none;
|
||||||
|
cursor: default;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* -- Agents table -- */
|
||||||
|
.agents-table-section {
|
||||||
|
max-width: 960px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-table-wrap {
|
||||||
|
overflow-x: auto;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-table {
|
||||||
|
width: 100%;
|
||||||
|
border-collapse: collapse;
|
||||||
|
font-size: 14px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-table thead th {
|
||||||
|
text-align: left;
|
||||||
|
font-size: 12px;
|
||||||
|
font-weight: 600;
|
||||||
|
color: var(--text-faint);
|
||||||
|
text-transform: uppercase;
|
||||||
|
letter-spacing: 0.04em;
|
||||||
|
padding: 8px 12px;
|
||||||
|
border-bottom: 1px solid var(--border-secondary);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-table tbody td {
|
||||||
|
padding: 10px 12px;
|
||||||
|
border-bottom: 1px solid var(--border-primary);
|
||||||
|
color: var(--text-primary);
|
||||||
|
vertical-align: middle;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-table tbody tr:hover {
|
||||||
|
background-color: var(--bg-surface);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-cell-name {
|
||||||
|
font-weight: 600;
|
||||||
|
color: var(--text-heading);
|
||||||
|
white-space: nowrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-cell-id {
|
||||||
|
font-family: 'JetBrains Mono', 'Fira Code', monospace;
|
||||||
|
font-size: 12px;
|
||||||
|
color: var(--text-muted);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-cell-desc {
|
||||||
|
max-width: 300px;
|
||||||
|
overflow: hidden;
|
||||||
|
text-overflow: ellipsis;
|
||||||
|
white-space: nowrap;
|
||||||
|
color: var(--text-muted);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-cell-none {
|
||||||
|
color: var(--text-faint);
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-badge {
|
||||||
|
display: inline-block;
|
||||||
|
font-size: 12px;
|
||||||
|
font-weight: 600;
|
||||||
|
padding: 2px 10px;
|
||||||
|
border-radius: 9999px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-badge--active {
|
||||||
|
background-color: rgba(34, 197, 94, 0.15);
|
||||||
|
color: #22c55e;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-table-loading,
|
||||||
|
.agents-table-empty {
|
||||||
|
font-size: 14px;
|
||||||
|
color: var(--text-faint);
|
||||||
|
font-style: italic;
|
||||||
|
padding: 16px 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 768px) {
|
||||||
|
.agents-grid {
|
||||||
|
grid-template-columns: repeat(2, 1fr);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 480px) {
|
||||||
|
.agents-page,
|
||||||
|
.analytics-page {
|
||||||
|
padding: 20px 16px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.agents-grid {
|
||||||
|
grid-template-columns: 1fr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* ===== Analytics Page ===== */
|
||||||
|
.analytics-page {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
padding: 32px;
|
||||||
|
gap: 32px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-hero {
|
||||||
|
max-width: 720px;
|
||||||
|
display: flex;
|
||||||
|
flex-direction: column;
|
||||||
|
gap: 12px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-hero-row {
|
||||||
|
display: flex;
|
||||||
|
flex-direction: row;
|
||||||
|
align-items: center;
|
||||||
|
gap: 16px;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-hero-icon {
|
||||||
|
width: 48px;
|
||||||
|
height: 48px;
|
||||||
|
min-width: 48px;
|
||||||
|
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
|
||||||
|
color: var(--avatar-text);
|
||||||
|
border-radius: 12px;
|
||||||
|
font-size: 24px;
|
||||||
|
display: flex;
|
||||||
|
align-items: center;
|
||||||
|
justify-content: center;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-hero-title {
|
||||||
|
font-family: 'Space Grotesk', sans-serif;
|
||||||
|
font-size: 28px;
|
||||||
|
font-weight: 700;
|
||||||
|
color: var(--text-heading);
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-hero-desc {
|
||||||
|
font-size: 15px;
|
||||||
|
color: var(--text-muted);
|
||||||
|
line-height: 1.6;
|
||||||
|
max-width: 600px;
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-sso-hint {
|
||||||
|
font-size: 13px;
|
||||||
|
color: var(--text-muted);
|
||||||
|
font-style: italic;
|
||||||
|
margin: 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-launch-btn {
|
||||||
|
display: inline-flex;
|
||||||
|
align-items: center;
|
||||||
|
gap: 8px;
|
||||||
|
padding: 10px 20px;
|
||||||
|
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
|
||||||
|
color: var(--avatar-text);
|
||||||
|
font-size: 14px;
|
||||||
|
font-weight: 600;
|
||||||
|
border-radius: 8px;
|
||||||
|
text-decoration: none;
|
||||||
|
transition: opacity 0.2s, transform 0.2s;
|
||||||
|
width: fit-content;
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-launch-btn:hover {
|
||||||
|
opacity: 0.9;
|
||||||
|
transform: translateY(-1px);
|
||||||
|
}
|
||||||
|
|
||||||
|
.analytics-stats-bar {
|
||||||
|
display: flex;
|
||||||
|
gap: 16px;
|
||||||
|
flex-wrap: wrap;
|
||||||
|
}
|
||||||
|
|
||||||
|
@media (max-width: 768px) {
|
||||||
|
.analytics-stats-bar {
|
||||||
|
flex-direction: column;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
/*! tailwindcss v4.2.0 | MIT License | https://tailwindcss.com */
|
/*! tailwindcss v4.2.1 | MIT License | https://tailwindcss.com */
|
||||||
@layer properties;
|
@layer properties;
|
||||||
@layer theme, base, components, utilities;
|
@layer theme, base, components, utilities;
|
||||||
@layer theme {
|
@layer theme {
|
||||||
@@ -162,59 +162,6 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
@layer utilities {
|
@layer utilities {
|
||||||
.diff {
|
|
||||||
@layer daisyui.l1.l2.l3 {
|
|
||||||
position: relative;
|
|
||||||
display: grid;
|
|
||||||
width: 100%;
|
|
||||||
overflow: hidden;
|
|
||||||
webkit-user-select: none;
|
|
||||||
user-select: none;
|
|
||||||
grid-template-rows: 1fr 1.8rem 1fr;
|
|
||||||
direction: ltr;
|
|
||||||
container-type: inline-size;
|
|
||||||
grid-template-columns: auto 1fr;
|
|
||||||
&:focus-visible, &:has(.diff-item-1:focus-visible) {
|
|
||||||
outline-style: var(--tw-outline-style);
|
|
||||||
outline-width: 2px;
|
|
||||||
outline-offset: 1px;
|
|
||||||
outline-color: var(--color-base-content);
|
|
||||||
}
|
|
||||||
&:focus-visible {
|
|
||||||
outline-style: var(--tw-outline-style);
|
|
||||||
outline-width: 2px;
|
|
||||||
outline-offset: 1px;
|
|
||||||
outline-color: var(--color-base-content);
|
|
||||||
.diff-resizer {
|
|
||||||
min-width: 95cqi;
|
|
||||||
max-width: 95cqi;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&:has(.diff-item-1:focus-visible) {
|
|
||||||
outline-style: var(--tw-outline-style);
|
|
||||||
outline-width: 2px;
|
|
||||||
outline-offset: 1px;
|
|
||||||
.diff-resizer {
|
|
||||||
min-width: 5cqi;
|
|
||||||
max-width: 5cqi;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@supports (-webkit-overflow-scrolling: touch) and (overflow: -webkit-paged-x) {
|
|
||||||
&:focus {
|
|
||||||
.diff-resizer {
|
|
||||||
min-width: 5cqi;
|
|
||||||
max-width: 5cqi;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&:has(.diff-item-1:focus) {
|
|
||||||
.diff-resizer {
|
|
||||||
min-width: 95cqi;
|
|
||||||
max-width: 95cqi;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.modal {
|
.modal {
|
||||||
@layer daisyui.l1.l2.l3 {
|
@layer daisyui.l1.l2.l3 {
|
||||||
pointer-events: none;
|
pointer-events: none;
|
||||||
@@ -1110,31 +1057,98 @@
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
.chat-bubble {
|
.range {
|
||||||
@layer daisyui.l1.l2.l3 {
|
@layer daisyui.l1.l2.l3 {
|
||||||
position: relative;
|
appearance: none;
|
||||||
display: block;
|
webkit-appearance: none;
|
||||||
width: fit-content;
|
--range-thumb: var(--color-base-100);
|
||||||
border-radius: var(--radius-field);
|
--range-thumb-size: calc(var(--size-selector, 0.25rem) * 6);
|
||||||
background-color: var(--color-base-300);
|
--range-progress: currentColor;
|
||||||
padding-inline: calc(0.25rem * 4);
|
--range-fill: 1;
|
||||||
padding-block: calc(0.25rem * 2);
|
--range-p: 0.25rem;
|
||||||
color: var(--color-base-content);
|
--range-bg: currentColor;
|
||||||
grid-row-end: 3;
|
@supports (color: color-mix(in lab, red, red)) {
|
||||||
min-height: 2rem;
|
--range-bg: color-mix(in oklab, currentColor 10%, #0000);
|
||||||
min-width: 2.5rem;
|
}
|
||||||
max-width: 90%;
|
cursor: pointer;
|
||||||
&:before {
|
overflow: hidden;
|
||||||
position: absolute;
|
background-color: transparent;
|
||||||
bottom: calc(0.25rem * 0);
|
vertical-align: middle;
|
||||||
height: calc(0.25rem * 3);
|
width: clamp(3rem, 20rem, 100%);
|
||||||
width: calc(0.25rem * 3);
|
--radius-selector-max: calc(
|
||||||
background-color: inherit;
|
var(--radius-selector) + var(--radius-selector) + var(--radius-selector)
|
||||||
content: "";
|
);
|
||||||
mask-repeat: no-repeat;
|
border-radius: calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)));
|
||||||
mask-image: var(--mask-chat);
|
border: none;
|
||||||
mask-position: 0px -1px;
|
height: var(--range-thumb-size);
|
||||||
mask-size: 0.8125rem;
|
[dir="rtl"] & {
|
||||||
|
--range-dir: -1;
|
||||||
|
}
|
||||||
|
&:focus {
|
||||||
|
outline: none;
|
||||||
|
}
|
||||||
|
&:focus-visible {
|
||||||
|
outline: 2px solid;
|
||||||
|
outline-offset: 2px;
|
||||||
|
}
|
||||||
|
&::-webkit-slider-runnable-track {
|
||||||
|
width: 100%;
|
||||||
|
background-color: var(--range-bg);
|
||||||
|
border-radius: var(--radius-selector);
|
||||||
|
height: calc(var(--range-thumb-size) * 0.5);
|
||||||
|
}
|
||||||
|
@media (forced-colors: active) {
|
||||||
|
&::-webkit-slider-runnable-track {
|
||||||
|
border: 1px solid;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@media (forced-colors: active) {
|
||||||
|
&::-moz-range-track {
|
||||||
|
border: 1px solid;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
&::-webkit-slider-thumb {
|
||||||
|
position: relative;
|
||||||
|
box-sizing: border-box;
|
||||||
|
border-radius: calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)));
|
||||||
|
background-color: var(--range-thumb);
|
||||||
|
height: var(--range-thumb-size);
|
||||||
|
width: var(--range-thumb-size);
|
||||||
|
border: var(--range-p) solid;
|
||||||
|
appearance: none;
|
||||||
|
webkit-appearance: none;
|
||||||
|
top: 50%;
|
||||||
|
color: var(--range-progress);
|
||||||
|
transform: translateY(-50%);
|
||||||
|
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px currentColor, 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
|
||||||
|
@supports (color: color-mix(in lab, red, red)) {
|
||||||
|
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000), 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
&::-moz-range-track {
|
||||||
|
width: 100%;
|
||||||
|
background-color: var(--range-bg);
|
||||||
|
border-radius: var(--radius-selector);
|
||||||
|
height: calc(var(--range-thumb-size) * 0.5);
|
||||||
|
}
|
||||||
|
&::-moz-range-thumb {
|
||||||
|
position: relative;
|
||||||
|
box-sizing: border-box;
|
||||||
|
border-radius: calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)));
|
||||||
|
background-color: currentColor;
|
||||||
|
height: var(--range-thumb-size);
|
||||||
|
width: var(--range-thumb-size);
|
||||||
|
border: var(--range-p) solid;
|
||||||
|
top: 50%;
|
||||||
|
color: var(--range-progress);
|
||||||
|
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px currentColor, 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
|
||||||
|
@supports (color: color-mix(in lab, red, red)) {
|
||||||
|
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000), 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
&:disabled {
|
||||||
|
cursor: not-allowed;
|
||||||
|
opacity: 30%;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1525,81 +1539,6 @@
|
|||||||
padding: calc(0.25rem * 4);
|
padding: calc(0.25rem * 4);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
.textarea {
|
|
||||||
@layer daisyui.l1.l2.l3 {
|
|
||||||
border: var(--border) solid #0000;
|
|
||||||
min-height: calc(0.25rem * 20);
|
|
||||||
flex-shrink: 1;
|
|
||||||
appearance: none;
|
|
||||||
border-radius: var(--radius-field);
|
|
||||||
background-color: var(--color-base-100);
|
|
||||||
padding-block: calc(0.25rem * 2);
|
|
||||||
vertical-align: middle;
|
|
||||||
width: clamp(3rem, 20rem, 100%);
|
|
||||||
padding-inline-start: 0.75rem;
|
|
||||||
padding-inline-end: 0.75rem;
|
|
||||||
font-size: max(var(--font-size, 0.875rem), 0.875rem);
|
|
||||||
touch-action: manipulation;
|
|
||||||
border-color: var(--input-color);
|
|
||||||
box-shadow: 0 1px var(--input-color) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset;
|
|
||||||
@supports (color: color-mix(in lab, red, red)) {
|
|
||||||
box-shadow: 0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset;
|
|
||||||
}
|
|
||||||
--input-color: var(--color-base-content);
|
|
||||||
@supports (color: color-mix(in lab, red, red)) {
|
|
||||||
--input-color: color-mix(in oklab, var(--color-base-content) 20%, #0000);
|
|
||||||
}
|
|
||||||
textarea {
|
|
||||||
appearance: none;
|
|
||||||
background-color: transparent;
|
|
||||||
border: none;
|
|
||||||
&:focus, &:focus-within {
|
|
||||||
--tw-outline-style: none;
|
|
||||||
outline-style: none;
|
|
||||||
@media (forced-colors: active) {
|
|
||||||
outline: 2px solid transparent;
|
|
||||||
outline-offset: 2px;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&:focus, &:focus-within {
|
|
||||||
--input-color: var(--color-base-content);
|
|
||||||
box-shadow: 0 1px var(--input-color);
|
|
||||||
@supports (color: color-mix(in lab, red, red)) {
|
|
||||||
box-shadow: 0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000);
|
|
||||||
}
|
|
||||||
outline: 2px solid var(--input-color);
|
|
||||||
outline-offset: 2px;
|
|
||||||
isolation: isolate;
|
|
||||||
}
|
|
||||||
@media (pointer: coarse) {
|
|
||||||
@supports (-webkit-touch-callout: none) {
|
|
||||||
&:focus, &:focus-within {
|
|
||||||
--font-size: 1rem;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
&:has(> textarea[disabled]), &:is(:disabled, [disabled]) {
|
|
||||||
cursor: not-allowed;
|
|
||||||
border-color: var(--color-base-200);
|
|
||||||
background-color: var(--color-base-200);
|
|
||||||
color: var(--color-base-content);
|
|
||||||
@supports (color: color-mix(in lab, red, red)) {
|
|
||||||
color: color-mix(in oklab, var(--color-base-content) 40%, transparent);
|
|
||||||
}
|
|
||||||
&::placeholder {
|
|
||||||
color: var(--color-base-content);
|
|
||||||
@supports (color: color-mix(in lab, red, red)) {
|
|
||||||
color: color-mix(in oklab, var(--color-base-content) 20%, transparent);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
box-shadow: none;
|
|
||||||
}
|
|
||||||
&:has(> textarea[disabled]) > textarea[disabled] {
|
|
||||||
cursor: not-allowed;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
.stack {
|
.stack {
|
||||||
@layer daisyui.l1.l2.l3 {
|
@layer daisyui.l1.l2.l3 {
|
||||||
display: inline-grid;
|
display: inline-grid;
|
||||||
|
|||||||
@@ -55,6 +55,8 @@ services:
|
|||||||
mongo:
|
mongo:
|
||||||
condition: service_started
|
condition: service_started
|
||||||
environment:
|
environment:
|
||||||
|
# LiteLLM API key (used by librechat.yaml endpoint config)
|
||||||
|
LITELLM_API_KEY: ${LITELLM_API_KEY:-}
|
||||||
# MongoDB (use localhost since we're on host network)
|
# MongoDB (use localhost since we're on host network)
|
||||||
MONGO_URI: mongodb://root:example@localhost:27017/librechat?authSource=admin
|
MONGO_URI: mongodb://root:example@localhost:27017/librechat?authSource=admin
|
||||||
DOMAIN_CLIENT: http://localhost:3080
|
DOMAIN_CLIENT: http://localhost:3080
|
||||||
@@ -70,7 +72,6 @@ services:
|
|||||||
OPENID_CALLBACK_URL: /oauth/openid/callback
|
OPENID_CALLBACK_URL: /oauth/openid/callback
|
||||||
OPENID_SCOPE: openid profile email
|
OPENID_SCOPE: openid profile email
|
||||||
OPENID_BUTTON_LABEL: Login with CERTifAI
|
OPENID_BUTTON_LABEL: Login with CERTifAI
|
||||||
OPENID_AUTH_EXTRA_PARAMS: prompt=none
|
|
||||||
# Disable local auth (SSO only)
|
# Disable local auth (SSO only)
|
||||||
ALLOW_EMAIL_LOGIN: "false"
|
ALLOW_EMAIL_LOGIN: "false"
|
||||||
ALLOW_REGISTRATION: "false"
|
ALLOW_REGISTRATION: "false"
|
||||||
@@ -94,5 +95,164 @@ services:
|
|||||||
- ./librechat/openidStrategy.js:/app/api/strategies/openidStrategy.js:ro
|
- ./librechat/openidStrategy.js:/app/api/strategies/openidStrategy.js:ro
|
||||||
- librechat-data:/app/data
|
- librechat-data:/app/data
|
||||||
|
|
||||||
|
langflow:
|
||||||
|
image: langflowai/langflow:latest
|
||||||
|
container_name: certifai-langflow
|
||||||
|
restart: unless-stopped
|
||||||
|
ports:
|
||||||
|
- "7860:7860"
|
||||||
|
environment:
|
||||||
|
LANGFLOW_AUTO_LOGIN: "true"
|
||||||
|
|
||||||
|
langgraph:
|
||||||
|
image: langchain/langgraph-trial:3.12
|
||||||
|
container_name: certifai-langgraph
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
langgraph-db:
|
||||||
|
condition: service_started
|
||||||
|
langgraph-redis:
|
||||||
|
condition: service_started
|
||||||
|
ports:
|
||||||
|
- "8123:8000"
|
||||||
|
environment:
|
||||||
|
DATABASE_URI: postgresql://langgraph:langgraph@langgraph-db:5432/langgraph
|
||||||
|
REDIS_URI: redis://langgraph-redis:6379
|
||||||
|
|
||||||
|
langgraph-db:
|
||||||
|
image: postgres:16
|
||||||
|
container_name: certifai-langgraph-db
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
POSTGRES_USER: langgraph
|
||||||
|
POSTGRES_PASSWORD: langgraph
|
||||||
|
POSTGRES_DB: langgraph
|
||||||
|
volumes:
|
||||||
|
- langgraph-db-data:/var/lib/postgresql/data
|
||||||
|
|
||||||
|
langgraph-redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: certifai-langgraph-redis
|
||||||
|
restart: unless-stopped
|
||||||
|
|
||||||
|
langfuse:
|
||||||
|
image: langfuse/langfuse:3
|
||||||
|
container_name: certifai-langfuse
|
||||||
|
restart: unless-stopped
|
||||||
|
depends_on:
|
||||||
|
keycloak:
|
||||||
|
condition: service_healthy
|
||||||
|
langfuse-db:
|
||||||
|
condition: service_healthy
|
||||||
|
langfuse-clickhouse:
|
||||||
|
condition: service_healthy
|
||||||
|
langfuse-redis:
|
||||||
|
condition: service_healthy
|
||||||
|
langfuse-minio:
|
||||||
|
condition: service_healthy
|
||||||
|
ports:
|
||||||
|
- "3000:3000"
|
||||||
|
environment:
|
||||||
|
DATABASE_URL: postgresql://langfuse:langfuse@langfuse-db:5432/langfuse
|
||||||
|
NEXTAUTH_URL: http://localhost:3000
|
||||||
|
NEXTAUTH_SECRET: certifai-langfuse-dev-secret
|
||||||
|
SALT: certifai-langfuse-dev-salt
|
||||||
|
ENCRYPTION_KEY: "0000000000000000000000000000000000000000000000000000000000000000"
|
||||||
|
# Keycloak OIDC SSO - shared realm with CERTifAI dashboard
|
||||||
|
AUTH_KEYCLOAK_CLIENT_ID: certifai-langfuse
|
||||||
|
AUTH_KEYCLOAK_CLIENT_SECRET: certifai-langfuse-secret
|
||||||
|
AUTH_KEYCLOAK_ISSUER: http://keycloak:8080/realms/certifai
|
||||||
|
AUTH_KEYCLOAK_ALLOW_ACCOUNT_LINKING: "true"
|
||||||
|
# Disable local email/password auth (SSO only)
|
||||||
|
AUTH_DISABLE_USERNAME_PASSWORD: "true"
|
||||||
|
CLICKHOUSE_URL: http://langfuse-clickhouse:8123
|
||||||
|
CLICKHOUSE_MIGRATION_URL: clickhouse://langfuse-clickhouse:9000
|
||||||
|
CLICKHOUSE_USER: clickhouse
|
||||||
|
CLICKHOUSE_PASSWORD: clickhouse
|
||||||
|
CLICKHOUSE_CLUSTER_ENABLED: "false"
|
||||||
|
REDIS_HOST: langfuse-redis
|
||||||
|
REDIS_PORT: "6379"
|
||||||
|
REDIS_AUTH: langfuse-dev-redis
|
||||||
|
LANGFUSE_S3_EVENT_UPLOAD_BUCKET: langfuse
|
||||||
|
LANGFUSE_S3_EVENT_UPLOAD_REGION: auto
|
||||||
|
LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: minio
|
||||||
|
LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: miniosecret
|
||||||
|
LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: http://langfuse-minio:9000
|
||||||
|
LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true"
|
||||||
|
LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: langfuse
|
||||||
|
LANGFUSE_S3_MEDIA_UPLOAD_REGION: auto
|
||||||
|
LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: minio
|
||||||
|
LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: miniosecret
|
||||||
|
LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: http://langfuse-minio:9000
|
||||||
|
LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true"
|
||||||
|
|
||||||
|
langfuse-db:
|
||||||
|
image: postgres:16
|
||||||
|
container_name: certifai-langfuse-db
|
||||||
|
restart: unless-stopped
|
||||||
|
environment:
|
||||||
|
POSTGRES_USER: langfuse
|
||||||
|
POSTGRES_PASSWORD: langfuse
|
||||||
|
POSTGRES_DB: langfuse
|
||||||
|
volumes:
|
||||||
|
- langfuse-db-data:/var/lib/postgresql/data
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "pg_isready -U langfuse"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 10
|
||||||
|
|
||||||
|
langfuse-clickhouse:
|
||||||
|
image: clickhouse/clickhouse-server:latest
|
||||||
|
container_name: certifai-langfuse-clickhouse
|
||||||
|
restart: unless-stopped
|
||||||
|
user: "101:101"
|
||||||
|
environment:
|
||||||
|
CLICKHOUSE_DB: default
|
||||||
|
CLICKHOUSE_USER: clickhouse
|
||||||
|
CLICKHOUSE_PASSWORD: clickhouse
|
||||||
|
ulimits:
|
||||||
|
nofile:
|
||||||
|
soft: 262144
|
||||||
|
hard: 262144
|
||||||
|
volumes:
|
||||||
|
- langfuse-clickhouse-data:/var/lib/clickhouse
|
||||||
|
- langfuse-clickhouse-logs:/var/log/clickhouse-server
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 10
|
||||||
|
|
||||||
|
langfuse-redis:
|
||||||
|
image: redis:7-alpine
|
||||||
|
container_name: certifai-langfuse-redis
|
||||||
|
restart: unless-stopped
|
||||||
|
command: redis-server --requirepass langfuse-dev-redis
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "redis-cli", "-a", "langfuse-dev-redis", "ping"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 10
|
||||||
|
|
||||||
|
langfuse-minio:
|
||||||
|
image: cgr.dev/chainguard/minio
|
||||||
|
container_name: certifai-langfuse-minio
|
||||||
|
restart: unless-stopped
|
||||||
|
entrypoint: sh
|
||||||
|
command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
|
||||||
|
environment:
|
||||||
|
MINIO_ROOT_USER: minio
|
||||||
|
MINIO_ROOT_PASSWORD: miniosecret
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD-SHELL", "mc ready local || exit 1"]
|
||||||
|
interval: 5s
|
||||||
|
timeout: 5s
|
||||||
|
retries: 10
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
librechat-data:
|
librechat-data:
|
||||||
|
langgraph-db-data:
|
||||||
|
langfuse-db-data:
|
||||||
|
langfuse-clickhouse-data:
|
||||||
|
langfuse-clickhouse-logs:
|
||||||
|
|||||||
@@ -11,23 +11,163 @@ test.describe("Developer section", () => {
|
|||||||
await expect(nav.locator("a", { hasText: "Analytics" })).toBeVisible();
|
await expect(nav.locator("a", { hasText: "Analytics" })).toBeVisible();
|
||||||
});
|
});
|
||||||
|
|
||||||
test("agents page shows Coming Soon badge", async ({ page }) => {
|
test("agents page renders informational landing", async ({ page }) => {
|
||||||
await page.goto("/developer/agents");
|
await page.goto("/developer/agents");
|
||||||
await page.waitForSelector(".placeholder-page", { timeout: 15_000 });
|
await page.waitForSelector(".agents-page", { timeout: 15_000 });
|
||||||
|
|
||||||
await expect(page.locator(".placeholder-badge")).toContainText(
|
// Hero section
|
||||||
"Coming Soon"
|
await expect(page.locator(".agents-hero-title")).toContainText(
|
||||||
|
"Agent Builder"
|
||||||
);
|
);
|
||||||
await expect(page.locator("h2")).toContainText("Agent Builder");
|
await expect(page.locator(".agents-hero-desc")).toBeVisible();
|
||||||
|
|
||||||
|
// Connection status indicator is present
|
||||||
|
await expect(page.locator(".agents-status")).toBeVisible();
|
||||||
});
|
});
|
||||||
|
|
||||||
test("analytics page loads via sub-nav", async ({ page }) => {
|
test("agents page shows Not Connected when URL is empty", async ({
|
||||||
await page.goto("/developer/analytics");
|
page,
|
||||||
await page.waitForSelector(".placeholder-page", { timeout: 15_000 });
|
}) => {
|
||||||
|
await page.goto("/developer/agents");
|
||||||
|
await page.waitForSelector(".agents-page", { timeout: 15_000 });
|
||||||
|
|
||||||
await expect(page.locator("h2")).toContainText("Analytics");
|
await expect(page.locator(".agents-status")).toContainText(
|
||||||
await expect(page.locator(".placeholder-badge")).toContainText(
|
"Not Connected"
|
||||||
"Coming Soon"
|
|
||||||
);
|
);
|
||||||
|
await expect(page.locator(".agents-status-dot--off")).toBeVisible();
|
||||||
|
await expect(page.locator(".agents-status-hint")).toBeVisible();
|
||||||
|
});
|
||||||
|
|
||||||
|
test("agents page shows quick start cards", async ({ page }) => {
|
||||||
|
await page.goto("/developer/agents");
|
||||||
|
await page.waitForSelector(".agents-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
const grid = page.locator(".agents-grid");
|
||||||
|
const cards = grid.locator(".agents-card");
|
||||||
|
await expect(cards).toHaveCount(5);
|
||||||
|
|
||||||
|
// Verify card titles are rendered
|
||||||
|
await expect(
|
||||||
|
grid.locator(".agents-card-title", { hasText: "Documentation" })
|
||||||
|
).toBeVisible();
|
||||||
|
await expect(
|
||||||
|
grid.locator(".agents-card-title", { hasText: "Getting Started" })
|
||||||
|
).toBeVisible();
|
||||||
|
await expect(
|
||||||
|
grid.locator(".agents-card-title", { hasText: "GitHub" })
|
||||||
|
).toBeVisible();
|
||||||
|
await expect(
|
||||||
|
grid.locator(".agents-card-title", { hasText: "Examples" })
|
||||||
|
).toBeVisible();
|
||||||
|
await expect(
|
||||||
|
grid.locator(".agents-card-title", { hasText: "API Reference" })
|
||||||
|
).toBeVisible();
|
||||||
|
});
|
||||||
|
|
||||||
|
test("agents page disables API Reference card when not connected", async ({
|
||||||
|
page,
|
||||||
|
}) => {
|
||||||
|
await page.goto("/developer/agents");
|
||||||
|
await page.waitForSelector(".agents-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
// When LANGGRAPH_URL is empty, the API Reference card should be disabled
|
||||||
|
const statusHint = page.locator(".agents-status-hint");
|
||||||
|
if (await statusHint.isVisible()) {
|
||||||
|
const apiCard = page.locator(".agents-card--disabled");
|
||||||
|
await expect(apiCard).toBeVisible();
|
||||||
|
await expect(
|
||||||
|
apiCard.locator(".agents-card-title")
|
||||||
|
).toContainText("API Reference");
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
test("agents page shows running agents section", async ({ page }) => {
|
||||||
|
await page.goto("/developer/agents");
|
||||||
|
await page.waitForSelector(".agents-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
// The running agents section title should always be visible
|
||||||
|
await expect(
|
||||||
|
page.locator(".agents-section-title", { hasText: "Running Agents" })
|
||||||
|
).toBeVisible();
|
||||||
|
|
||||||
|
// Either the table, loading state, or empty message should appear
|
||||||
|
await page.waitForTimeout(3000);
|
||||||
|
const table = page.locator(".agents-table");
|
||||||
|
const empty = page.locator(".agents-table-empty");
|
||||||
|
|
||||||
|
const hasTable = await table.isVisible();
|
||||||
|
const hasEmpty = await empty.isVisible();
|
||||||
|
expect(hasTable || hasEmpty).toBeTruthy();
|
||||||
|
});
|
||||||
|
|
||||||
|
test("agents page shows connected status when URL is configured", async ({
|
||||||
|
page,
|
||||||
|
}) => {
|
||||||
|
// This test only passes when LANGGRAPH_URL is set in the environment.
|
||||||
|
await page.goto("/developer/agents");
|
||||||
|
await page.waitForSelector(".agents-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
const connectedDot = page.locator(".agents-status-dot--on");
|
||||||
|
const disconnectedDot = page.locator(".agents-status-dot--off");
|
||||||
|
|
||||||
|
if (await connectedDot.isVisible()) {
|
||||||
|
await expect(page.locator(".agents-status")).toContainText("Connected");
|
||||||
|
await expect(page.locator(".agents-status-url")).toBeVisible();
|
||||||
|
// API Reference card should NOT be disabled
|
||||||
|
await expect(page.locator(".agents-card--disabled")).toHaveCount(0);
|
||||||
|
} else {
|
||||||
|
await expect(disconnectedDot).toBeVisible();
|
||||||
|
await expect(page.locator(".agents-status")).toContainText(
|
||||||
|
"Not Connected"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
test("analytics page renders informational landing", async ({ page }) => {
|
||||||
|
await page.goto("/developer/analytics");
|
||||||
|
await page.waitForSelector(".analytics-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
// Hero section
|
||||||
|
await expect(page.locator(".analytics-hero-title")).toBeVisible();
|
||||||
|
await expect(page.locator(".analytics-hero-desc")).toBeVisible();
|
||||||
|
|
||||||
|
// Connection status indicator
|
||||||
|
await expect(page.locator(".agents-status")).toBeVisible();
|
||||||
|
|
||||||
|
// Metrics bar
|
||||||
|
await expect(page.locator(".analytics-stats-bar")).toBeVisible();
|
||||||
|
});
|
||||||
|
|
||||||
|
test("analytics page shows Not Connected when URL is empty", async ({
|
||||||
|
page,
|
||||||
|
}) => {
|
||||||
|
await page.goto("/developer/analytics");
|
||||||
|
await page.waitForSelector(".analytics-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
await expect(page.locator(".agents-status")).toContainText(
|
||||||
|
"Not Connected"
|
||||||
|
);
|
||||||
|
await expect(page.locator(".agents-status-dot--off")).toBeVisible();
|
||||||
|
});
|
||||||
|
|
||||||
|
test("analytics page shows quick action cards", async ({ page }) => {
|
||||||
|
await page.goto("/developer/analytics");
|
||||||
|
await page.waitForSelector(".analytics-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
const grid = page.locator(".agents-grid");
|
||||||
|
const cards = grid.locator(".agents-card, .agents-card--disabled");
|
||||||
|
await expect(cards).toHaveCount(2);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("analytics page shows SSO hint when connected", async ({ page }) => {
|
||||||
|
// Only meaningful when LANGFUSE_URL is configured.
|
||||||
|
await page.goto("/developer/analytics");
|
||||||
|
await page.waitForSelector(".analytics-page", { timeout: 15_000 });
|
||||||
|
|
||||||
|
const connectedDot = page.locator(".agents-status-dot--on");
|
||||||
|
if (await connectedDot.isVisible()) {
|
||||||
|
await expect(page.locator(".analytics-sso-hint")).toBeVisible();
|
||||||
|
await expect(page.locator(".analytics-launch-btn")).toBeVisible();
|
||||||
|
}
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -79,6 +79,39 @@
|
|||||||
"offline_access"
|
"offline_access"
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"clientId": "certifai-langfuse",
|
||||||
|
"name": "CERTifAI Langfuse",
|
||||||
|
"description": "Langfuse OIDC client for CERTifAI",
|
||||||
|
"enabled": true,
|
||||||
|
"publicClient": false,
|
||||||
|
"directAccessGrantsEnabled": false,
|
||||||
|
"standardFlowEnabled": true,
|
||||||
|
"implicitFlowEnabled": false,
|
||||||
|
"serviceAccountsEnabled": false,
|
||||||
|
"protocol": "openid-connect",
|
||||||
|
"secret": "certifai-langfuse-secret",
|
||||||
|
"rootUrl": "http://localhost:3000",
|
||||||
|
"baseUrl": "http://localhost:3000",
|
||||||
|
"redirectUris": [
|
||||||
|
"http://localhost:3000/*"
|
||||||
|
],
|
||||||
|
"webOrigins": [
|
||||||
|
"http://localhost:3000",
|
||||||
|
"http://localhost:8000"
|
||||||
|
],
|
||||||
|
"attributes": {
|
||||||
|
"post.logout.redirect.uris": "http://localhost:3000"
|
||||||
|
},
|
||||||
|
"defaultClientScopes": [
|
||||||
|
"openid",
|
||||||
|
"profile",
|
||||||
|
"email"
|
||||||
|
],
|
||||||
|
"optionalClientScopes": [
|
||||||
|
"offline_access"
|
||||||
|
]
|
||||||
|
},
|
||||||
{
|
{
|
||||||
"clientId": "certifai-librechat",
|
"clientId": "certifai-librechat",
|
||||||
"name": "CERTifAI Chat",
|
"name": "CERTifAI Chat",
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
# CERTifAI LibreChat Configuration
|
# CERTifAI LibreChat Configuration
|
||||||
# Ollama backend for self-hosted LLM inference.
|
# LiteLLM proxy for unified multi-provider LLM access.
|
||||||
version: 1.2.8
|
version: 1.2.8
|
||||||
|
|
||||||
cache: true
|
cache: true
|
||||||
@@ -19,22 +19,16 @@ interface:
|
|||||||
|
|
||||||
endpoints:
|
endpoints:
|
||||||
custom:
|
custom:
|
||||||
- name: "Ollama"
|
- name: "LiteLLM"
|
||||||
apiKey: "ollama"
|
apiKey: "${LITELLM_API_KEY}"
|
||||||
baseURL: "https://mac-mini-von-benjamin-2:11434/v1/"
|
baseURL: "https://llm-dev.meghsakha.com/v1/"
|
||||||
models:
|
models:
|
||||||
default:
|
default:
|
||||||
- "llama3.1:8b"
|
- "Qwen3-Coder-30B-A3B-Instruct"
|
||||||
- "qwen3:30b-a3b"
|
|
||||||
fetch: true
|
fetch: true
|
||||||
titleConvo: true
|
titleConvo: true
|
||||||
titleModel: "current_model"
|
titleModel: "current_model"
|
||||||
summarize: false
|
summarize: false
|
||||||
summaryModel: "current_model"
|
summaryModel: "current_model"
|
||||||
forcePrompt: false
|
forcePrompt: false
|
||||||
modelDisplayLabel: "CERTifAI Ollama"
|
modelDisplayLabel: "CERTifAI LiteLLM"
|
||||||
dropParams:
|
|
||||||
- stop
|
|
||||||
- user
|
|
||||||
- frequency_penalty
|
|
||||||
- presence_penalty
|
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use dioxus_free_icons::Icon;
|
|||||||
use crate::components::sidebar::Sidebar;
|
use crate::components::sidebar::Sidebar;
|
||||||
use crate::i18n::{t, tw, Locale};
|
use crate::i18n::{t, tw, Locale};
|
||||||
use crate::infrastructure::auth_check::check_auth;
|
use crate::infrastructure::auth_check::check_auth;
|
||||||
use crate::models::AuthInfo;
|
use crate::models::{AuthInfo, ServiceUrlsContext};
|
||||||
use crate::Route;
|
use crate::Route;
|
||||||
|
|
||||||
/// Application shell layout that wraps all authenticated pages.
|
/// Application shell layout that wraps all authenticated pages.
|
||||||
@@ -29,6 +29,16 @@ pub fn AppShell() -> Element {
|
|||||||
|
|
||||||
match auth_snapshot {
|
match auth_snapshot {
|
||||||
Some(Ok(info)) if info.authenticated => {
|
Some(Ok(info)) if info.authenticated => {
|
||||||
|
// Provide developer tool URLs as context so child pages
|
||||||
|
// can read them without prop-drilling through layouts.
|
||||||
|
use_context_provider(|| {
|
||||||
|
Signal::new(ServiceUrlsContext {
|
||||||
|
langgraph_url: info.langgraph_url.clone(),
|
||||||
|
langflow_url: info.langflow_url.clone(),
|
||||||
|
langfuse_url: info.langfuse_url.clone(),
|
||||||
|
})
|
||||||
|
});
|
||||||
|
|
||||||
let menu_open = *mobile_menu_open.read();
|
let menu_open = *mobile_menu_open.read();
|
||||||
let sidebar_cls = if menu_open {
|
let sidebar_cls = if menu_open {
|
||||||
"sidebar sidebar--open"
|
"sidebar sidebar--open"
|
||||||
|
|||||||
@@ -1,9 +1,9 @@
|
|||||||
use dioxus::prelude::*;
|
use dioxus::prelude::*;
|
||||||
|
|
||||||
use crate::i18n::{t, Locale};
|
use crate::i18n::{t, Locale};
|
||||||
use crate::infrastructure::ollama::{get_ollama_status, OllamaStatus};
|
use crate::infrastructure::litellm::{get_litellm_status, LitellmStatus};
|
||||||
|
|
||||||
/// Right sidebar for the dashboard, showing Ollama status, trending topics,
|
/// Right sidebar for the dashboard, showing LiteLLM status, trending topics,
|
||||||
/// and recent search history.
|
/// and recent search history.
|
||||||
///
|
///
|
||||||
/// Appears when no article card is selected. Disappears when the user opens
|
/// Appears when no article card is selected. Disappears when the user opens
|
||||||
@@ -11,13 +11,13 @@ use crate::infrastructure::ollama::{get_ollama_status, OllamaStatus};
|
|||||||
///
|
///
|
||||||
/// # Props
|
/// # Props
|
||||||
///
|
///
|
||||||
/// * `ollama_url` - Ollama instance URL for status polling
|
/// * `litellm_url` - LiteLLM proxy URL for status polling
|
||||||
/// * `trending` - Trending topic keywords extracted from recent news headlines
|
/// * `trending` - Trending topic keywords extracted from recent news headlines
|
||||||
/// * `recent_searches` - Recent search topics stored in localStorage
|
/// * `recent_searches` - Recent search topics stored in localStorage
|
||||||
/// * `on_topic_click` - Fires when a trending or recent topic is clicked
|
/// * `on_topic_click` - Fires when a trending or recent topic is clicked
|
||||||
#[component]
|
#[component]
|
||||||
pub fn DashboardSidebar(
|
pub fn DashboardSidebar(
|
||||||
ollama_url: String,
|
litellm_url: String,
|
||||||
trending: Vec<String>,
|
trending: Vec<String>,
|
||||||
recent_searches: Vec<String>,
|
recent_searches: Vec<String>,
|
||||||
on_topic_click: EventHandler<String>,
|
on_topic_click: EventHandler<String>,
|
||||||
@@ -25,26 +25,26 @@ pub fn DashboardSidebar(
|
|||||||
let locale = use_context::<Signal<Locale>>();
|
let locale = use_context::<Signal<Locale>>();
|
||||||
let l = *locale.read();
|
let l = *locale.read();
|
||||||
|
|
||||||
// Fetch Ollama status once on mount.
|
// Fetch LiteLLM status once on mount.
|
||||||
// use_resource with no signal dependencies runs exactly once and
|
// use_resource with no signal dependencies runs exactly once and
|
||||||
// won't re-fire on parent re-renders (unlike use_effect).
|
// won't re-fire on parent re-renders (unlike use_effect).
|
||||||
let url = ollama_url.clone();
|
let url = litellm_url.clone();
|
||||||
let status_resource = use_resource(move || {
|
let status_resource = use_resource(move || {
|
||||||
let u = url.clone();
|
let u = url.clone();
|
||||||
async move {
|
async move {
|
||||||
get_ollama_status(u).await.unwrap_or(OllamaStatus {
|
get_litellm_status(u).await.unwrap_or(LitellmStatus {
|
||||||
online: false,
|
online: false,
|
||||||
models: Vec::new(),
|
models: Vec::new(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
let current_status: OllamaStatus =
|
let current_status: LitellmStatus =
|
||||||
status_resource
|
status_resource
|
||||||
.read()
|
.read()
|
||||||
.as_ref()
|
.as_ref()
|
||||||
.cloned()
|
.cloned()
|
||||||
.unwrap_or(OllamaStatus {
|
.unwrap_or(LitellmStatus {
|
||||||
online: false,
|
online: false,
|
||||||
models: Vec::new(),
|
models: Vec::new(),
|
||||||
});
|
});
|
||||||
@@ -52,9 +52,9 @@ pub fn DashboardSidebar(
|
|||||||
rsx! {
|
rsx! {
|
||||||
aside { class: "dashboard-sidebar",
|
aside { class: "dashboard-sidebar",
|
||||||
|
|
||||||
// -- Ollama Status Section --
|
// -- LiteLLM Status Section --
|
||||||
div { class: "sidebar-section",
|
div { class: "sidebar-section",
|
||||||
h4 { class: "sidebar-section-title", "{t(l, \"dashboard.ollama_status\")}" }
|
h4 { class: "sidebar-section-title", "{t(l, \"dashboard.litellm_status\")}" }
|
||||||
div { class: "sidebar-status-row",
|
div { class: "sidebar-status-row",
|
||||||
span { class: if current_status.online { "sidebar-status-dot sidebar-status-dot--online" } else { "sidebar-status-dot sidebar-status-dot--offline" } }
|
span { class: if current_status.online { "sidebar-status-dot sidebar-status-dot--online" } else { "sidebar-status-dot sidebar-status-dot--offline" } }
|
||||||
span { class: "sidebar-status-label",
|
span { class: "sidebar-status-label",
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ mod page_header;
|
|||||||
mod pricing_card;
|
mod pricing_card;
|
||||||
pub mod sidebar;
|
pub mod sidebar;
|
||||||
pub mod sub_nav;
|
pub mod sub_nav;
|
||||||
|
mod tool_embed;
|
||||||
|
|
||||||
pub use app_shell::*;
|
pub use app_shell::*;
|
||||||
pub use article_detail::*;
|
pub use article_detail::*;
|
||||||
@@ -20,3 +21,4 @@ pub use news_card::*;
|
|||||||
pub use page_header::*;
|
pub use page_header::*;
|
||||||
pub use pricing_card::*;
|
pub use pricing_card::*;
|
||||||
pub use sub_nav::*;
|
pub use sub_nav::*;
|
||||||
|
pub use tool_embed::*;
|
||||||
|
|||||||
@@ -112,12 +112,12 @@ pub fn mock_news() -> Vec<NewsCardModel> {
|
|||||||
published_at: "2026-02-16".into(),
|
published_at: "2026-02-16".into(),
|
||||||
},
|
},
|
||||||
NewsCardModel {
|
NewsCardModel {
|
||||||
title: "Ollama Adds Multi-GPU Scheduling".into(),
|
title: "LiteLLM Adds Multi-Provider Routing".into(),
|
||||||
source: "Ollama".into(),
|
source: "LiteLLM".into(),
|
||||||
summary: "Run large models across multiple GPUs with automatic sharding.".into(),
|
summary: "Route requests across multiple LLM providers with automatic fallback.".into(),
|
||||||
content: "Ollama now supports multi-GPU scheduling with automatic \
|
content: "LiteLLM now supports multi-provider routing with automatic \
|
||||||
model sharding. Users can run models across multiple GPUs \
|
fallback. Users can route requests across multiple providers \
|
||||||
for improved inference performance."
|
for improved reliability and cost optimization."
|
||||||
.into(),
|
.into(),
|
||||||
category: "Infrastructure".into(),
|
category: "Infrastructure".into(),
|
||||||
url: "#".into(),
|
url: "#".into(),
|
||||||
|
|||||||
81
src/components/tool_embed.rs
Normal file
81
src/components/tool_embed.rs
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
use dioxus::prelude::*;
|
||||||
|
|
||||||
|
use crate::i18n::{t, Locale};
|
||||||
|
|
||||||
|
/// Properties for the [`ToolEmbed`] component.
|
||||||
|
///
|
||||||
|
/// # Fields
|
||||||
|
///
|
||||||
|
/// * `url` - Service URL; when empty, a "Not Configured" placeholder is shown
|
||||||
|
/// * `title` - Display title for the tool (e.g. "Agent Builder")
|
||||||
|
/// * `description` - Description text shown in the placeholder card
|
||||||
|
/// * `icon` - Single-character icon for the placeholder card
|
||||||
|
/// * `launch_label` - Label for the disabled button in the placeholder
|
||||||
|
#[derive(Props, Clone, PartialEq)]
|
||||||
|
pub struct ToolEmbedProps {
|
||||||
|
/// Service URL. Empty string means "not configured".
|
||||||
|
pub url: String,
|
||||||
|
/// Display title shown in the toolbar / placeholder heading.
|
||||||
|
pub title: String,
|
||||||
|
/// Description shown in the "not configured" placeholder.
|
||||||
|
pub description: String,
|
||||||
|
/// Single-character icon for the placeholder card.
|
||||||
|
pub icon: &'static str,
|
||||||
|
/// Label for the disabled launch button in placeholder mode.
|
||||||
|
pub launch_label: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Hybrid iframe / placeholder component for developer tool pages.
|
||||||
|
///
|
||||||
|
/// When `url` is non-empty, renders a toolbar (title + pop-out button)
|
||||||
|
/// above a full-height iframe embedding the service. When `url` is
|
||||||
|
/// empty, renders the existing placeholder card with a "Not Configured"
|
||||||
|
/// badge instead of "Coming Soon".
|
||||||
|
#[component]
|
||||||
|
pub fn ToolEmbed(props: ToolEmbedProps) -> Element {
|
||||||
|
let locale = use_context::<Signal<Locale>>();
|
||||||
|
let l = *locale.read();
|
||||||
|
|
||||||
|
if props.url.is_empty() {
|
||||||
|
// Not configured -- show placeholder card
|
||||||
|
rsx! {
|
||||||
|
section { class: "placeholder-page",
|
||||||
|
div { class: "placeholder-card",
|
||||||
|
div { class: "placeholder-icon", "{props.icon}" }
|
||||||
|
h2 { "{props.title}" }
|
||||||
|
p { class: "placeholder-desc", "{props.description}" }
|
||||||
|
button {
|
||||||
|
class: "btn-primary",
|
||||||
|
disabled: true,
|
||||||
|
"{props.launch_label}"
|
||||||
|
}
|
||||||
|
span { class: "placeholder-badge",
|
||||||
|
"{t(l, \"developer.not_configured\")}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// URL is set -- render toolbar + iframe
|
||||||
|
let pop_out_url = props.url.clone();
|
||||||
|
rsx! {
|
||||||
|
div { class: "tool-embed",
|
||||||
|
div { class: "tool-embed-toolbar",
|
||||||
|
span { class: "tool-embed-title", "{props.title}" }
|
||||||
|
a {
|
||||||
|
class: "tool-embed-popout-btn",
|
||||||
|
href: "{pop_out_url}",
|
||||||
|
target: "_blank",
|
||||||
|
rel: "noopener noreferrer",
|
||||||
|
"{t(l, \"developer.open_new_tab\")}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iframe {
|
||||||
|
class: "tool-embed-iframe",
|
||||||
|
src: "{props.url}",
|
||||||
|
title: "{props.title}",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -27,6 +27,15 @@ pub async fn check_auth() -> Result<AuthInfo, ServerFnError> {
|
|||||||
Some(u) => {
|
Some(u) => {
|
||||||
let librechat_url =
|
let librechat_url =
|
||||||
std::env::var("LIBRECHAT_URL").unwrap_or_else(|_| "http://localhost:3080".into());
|
std::env::var("LIBRECHAT_URL").unwrap_or_else(|_| "http://localhost:3080".into());
|
||||||
|
|
||||||
|
// Extract service URLs from server state so the frontend can
|
||||||
|
// embed developer tools (LangGraph, LangFlow, Langfuse).
|
||||||
|
let state: crate::infrastructure::server_state::ServerState =
|
||||||
|
FullstackContext::extract().await?;
|
||||||
|
let langgraph_url = state.services.langgraph_url.clone();
|
||||||
|
let langflow_url = state.services.langflow_url.clone();
|
||||||
|
let langfuse_url = state.services.langfuse_url.clone();
|
||||||
|
|
||||||
Ok(AuthInfo {
|
Ok(AuthInfo {
|
||||||
authenticated: true,
|
authenticated: true,
|
||||||
sub: u.sub,
|
sub: u.sub,
|
||||||
@@ -34,6 +43,9 @@ pub async fn check_auth() -> Result<AuthInfo, ServerFnError> {
|
|||||||
name: u.user.name,
|
name: u.user.name,
|
||||||
avatar_url: u.user.avatar_url,
|
avatar_url: u.user.avatar_url,
|
||||||
librechat_url,
|
librechat_url,
|
||||||
|
langgraph_url,
|
||||||
|
langflow_url,
|
||||||
|
langfuse_url,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
None => Ok(AuthInfo::default()),
|
None => Ok(AuthInfo::default()),
|
||||||
|
|||||||
@@ -134,7 +134,7 @@ pub async fn list_chat_sessions() -> Result<Vec<ChatSession>, ServerFnError> {
|
|||||||
///
|
///
|
||||||
/// * `title` - Display title for the session
|
/// * `title` - Display title for the session
|
||||||
/// * `namespace` - Namespace string: `"General"` or `"News"`
|
/// * `namespace` - Namespace string: `"General"` or `"News"`
|
||||||
/// * `provider` - LLM provider name (e.g. "ollama")
|
/// * `provider` - LLM provider name (e.g. "litellm")
|
||||||
/// * `model` - Model ID (e.g. "llama3.1:8b")
|
/// * `model` - Model ID (e.g. "llama3.1:8b")
|
||||||
/// * `article_url` - Source article URL (only for `News` namespace, empty if none)
|
/// * `article_url` - Source article URL (only for `News` namespace, empty if none)
|
||||||
///
|
///
|
||||||
@@ -441,8 +441,8 @@ pub async fn chat_complete(
|
|||||||
|
|
||||||
// Resolve provider URL and model
|
// Resolve provider URL and model
|
||||||
let (base_url, model) = resolve_provider_url(
|
let (base_url, model) = resolve_provider_url(
|
||||||
&state.services.ollama_url,
|
&state.services.litellm_url,
|
||||||
&state.services.ollama_model,
|
&state.services.litellm_model,
|
||||||
&session.provider,
|
&session.provider,
|
||||||
&session.model,
|
&session.model,
|
||||||
);
|
);
|
||||||
@@ -485,22 +485,22 @@ pub async fn chat_complete(
|
|||||||
.ok_or_else(|| ServerFnError::new("empty LLM response"))
|
.ok_or_else(|| ServerFnError::new("empty LLM response"))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Resolve the base URL for a provider, falling back to Ollama defaults.
|
/// Resolve the base URL for a provider, falling back to LiteLLM defaults.
|
||||||
///
|
///
|
||||||
/// # Arguments
|
/// # Arguments
|
||||||
///
|
///
|
||||||
/// * `ollama_url` - Default Ollama base URL from config
|
/// * `litellm_url` - Default LiteLLM base URL from config
|
||||||
/// * `ollama_model` - Default Ollama model from config
|
/// * `litellm_model` - Default LiteLLM model from config
|
||||||
/// * `provider` - Provider name (e.g. "openai", "anthropic", "huggingface")
|
/// * `provider` - Provider name (e.g. "openai", "anthropic", "huggingface")
|
||||||
/// * `model` - Model ID (may be empty for Ollama default)
|
/// * `model` - Model ID (may be empty for LiteLLM default)
|
||||||
///
|
///
|
||||||
/// # Returns
|
/// # Returns
|
||||||
///
|
///
|
||||||
/// A `(base_url, model)` tuple resolved for the given provider.
|
/// A `(base_url, model)` tuple resolved for the given provider.
|
||||||
#[cfg(feature = "server")]
|
#[cfg(feature = "server")]
|
||||||
pub(crate) fn resolve_provider_url(
|
pub(crate) fn resolve_provider_url(
|
||||||
ollama_url: &str,
|
litellm_url: &str,
|
||||||
ollama_model: &str,
|
litellm_model: &str,
|
||||||
provider: &str,
|
provider: &str,
|
||||||
model: &str,
|
model: &str,
|
||||||
) -> (String, String) {
|
) -> (String, String) {
|
||||||
@@ -511,11 +511,11 @@ pub(crate) fn resolve_provider_url(
|
|||||||
format!("https://api-inference.huggingface.co/models/{}", model),
|
format!("https://api-inference.huggingface.co/models/{}", model),
|
||||||
model.to_string(),
|
model.to_string(),
|
||||||
),
|
),
|
||||||
// Default to Ollama
|
// Default to LiteLLM
|
||||||
_ => (
|
_ => (
|
||||||
ollama_url.to_string(),
|
litellm_url.to_string(),
|
||||||
if model.is_empty() {
|
if model.is_empty() {
|
||||||
ollama_model.to_string()
|
litellm_model.to_string()
|
||||||
} else {
|
} else {
|
||||||
model.to_string()
|
model.to_string()
|
||||||
},
|
},
|
||||||
@@ -595,7 +595,7 @@ mod tests {
|
|||||||
"_id": oid,
|
"_id": oid,
|
||||||
"user_sub": "u",
|
"user_sub": "u",
|
||||||
"title": "t",
|
"title": "t",
|
||||||
"provider": "ollama",
|
"provider": "litellm",
|
||||||
"model": "m",
|
"model": "m",
|
||||||
"created_at": "c",
|
"created_at": "c",
|
||||||
"updated_at": "u",
|
"updated_at": "u",
|
||||||
@@ -612,7 +612,7 @@ mod tests {
|
|||||||
"user_sub": "u",
|
"user_sub": "u",
|
||||||
"title": "t",
|
"title": "t",
|
||||||
"namespace": "News",
|
"namespace": "News",
|
||||||
"provider": "ollama",
|
"provider": "litellm",
|
||||||
"model": "m",
|
"model": "m",
|
||||||
"created_at": "c",
|
"created_at": "c",
|
||||||
"updated_at": "u",
|
"updated_at": "u",
|
||||||
@@ -684,13 +684,13 @@ mod tests {
|
|||||||
|
|
||||||
// -- resolve_provider_url --
|
// -- resolve_provider_url --
|
||||||
|
|
||||||
const TEST_OLLAMA_URL: &str = "http://localhost:11434";
|
const TEST_LITELLM_URL: &str = "http://localhost:4000";
|
||||||
const TEST_OLLAMA_MODEL: &str = "llama3.1:8b";
|
const TEST_LITELLM_MODEL: &str = "qwen3-32b";
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn resolve_openai_returns_api_openai() {
|
fn resolve_openai_returns_api_openai() {
|
||||||
let (url, model) =
|
let (url, model) =
|
||||||
resolve_provider_url(TEST_OLLAMA_URL, TEST_OLLAMA_MODEL, "openai", "gpt-4o");
|
resolve_provider_url(TEST_LITELLM_URL, TEST_LITELLM_MODEL, "openai", "gpt-4o");
|
||||||
assert_eq!(url, "https://api.openai.com");
|
assert_eq!(url, "https://api.openai.com");
|
||||||
assert_eq!(model, "gpt-4o");
|
assert_eq!(model, "gpt-4o");
|
||||||
}
|
}
|
||||||
@@ -698,8 +698,8 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn resolve_anthropic_returns_api_anthropic() {
|
fn resolve_anthropic_returns_api_anthropic() {
|
||||||
let (url, model) = resolve_provider_url(
|
let (url, model) = resolve_provider_url(
|
||||||
TEST_OLLAMA_URL,
|
TEST_LITELLM_URL,
|
||||||
TEST_OLLAMA_MODEL,
|
TEST_LITELLM_MODEL,
|
||||||
"anthropic",
|
"anthropic",
|
||||||
"claude-3-opus",
|
"claude-3-opus",
|
||||||
);
|
);
|
||||||
@@ -710,8 +710,8 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn resolve_huggingface_returns_model_url() {
|
fn resolve_huggingface_returns_model_url() {
|
||||||
let (url, model) = resolve_provider_url(
|
let (url, model) = resolve_provider_url(
|
||||||
TEST_OLLAMA_URL,
|
TEST_LITELLM_URL,
|
||||||
TEST_OLLAMA_MODEL,
|
TEST_LITELLM_MODEL,
|
||||||
"huggingface",
|
"huggingface",
|
||||||
"meta-llama/Llama-2-7b",
|
"meta-llama/Llama-2-7b",
|
||||||
);
|
);
|
||||||
@@ -723,19 +723,19 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn resolve_unknown_defaults_to_ollama() {
|
fn resolve_unknown_defaults_to_litellm() {
|
||||||
let (url, model) =
|
let (url, model) =
|
||||||
resolve_provider_url(TEST_OLLAMA_URL, TEST_OLLAMA_MODEL, "ollama", "mistral:7b");
|
resolve_provider_url(TEST_LITELLM_URL, TEST_LITELLM_MODEL, "litellm", "qwen3-32b");
|
||||||
assert_eq!(url, TEST_OLLAMA_URL);
|
assert_eq!(url, TEST_LITELLM_URL);
|
||||||
assert_eq!(model, "mistral:7b");
|
assert_eq!(model, "qwen3-32b");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn resolve_empty_model_falls_back_to_server_default() {
|
fn resolve_empty_model_falls_back_to_server_default() {
|
||||||
let (url, model) =
|
let (url, model) =
|
||||||
resolve_provider_url(TEST_OLLAMA_URL, TEST_OLLAMA_MODEL, "ollama", "");
|
resolve_provider_url(TEST_LITELLM_URL, TEST_LITELLM_MODEL, "litellm", "");
|
||||||
assert_eq!(url, TEST_OLLAMA_URL);
|
assert_eq!(url, TEST_LITELLM_URL);
|
||||||
assert_eq!(model, TEST_OLLAMA_MODEL);
|
assert_eq!(model, TEST_LITELLM_MODEL);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -141,19 +141,23 @@ impl SmtpConfig {
|
|||||||
// ServiceUrls
|
// ServiceUrls
|
||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
|
|
||||||
/// URLs and credentials for external services (Ollama, SearXNG, S3, etc.).
|
/// URLs and credentials for external services (LiteLLM, SearXNG, S3, etc.).
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct ServiceUrls {
|
pub struct ServiceUrls {
|
||||||
/// Ollama LLM instance base URL.
|
/// LiteLLM proxy base URL.
|
||||||
pub ollama_url: String,
|
pub litellm_url: String,
|
||||||
/// Default Ollama model to use.
|
/// Default LiteLLM model to use.
|
||||||
pub ollama_model: String,
|
pub litellm_model: String,
|
||||||
|
/// LiteLLM API key for authenticated requests.
|
||||||
|
pub litellm_api_key: String,
|
||||||
/// SearXNG meta-search engine base URL.
|
/// SearXNG meta-search engine base URL.
|
||||||
pub searxng_url: String,
|
pub searxng_url: String,
|
||||||
/// LangChain service URL.
|
/// LangChain service URL.
|
||||||
pub langchain_url: String,
|
pub langchain_url: String,
|
||||||
/// LangGraph service URL.
|
/// LangGraph service URL.
|
||||||
pub langgraph_url: String,
|
pub langgraph_url: String,
|
||||||
|
/// LangFlow visual workflow builder URL.
|
||||||
|
pub langflow_url: String,
|
||||||
/// Langfuse observability URL.
|
/// Langfuse observability URL.
|
||||||
pub langfuse_url: String,
|
pub langfuse_url: String,
|
||||||
/// Vector database URL.
|
/// Vector database URL.
|
||||||
@@ -176,13 +180,15 @@ impl ServiceUrls {
|
|||||||
/// Currently infallible but returns `Result` for consistency.
|
/// Currently infallible but returns `Result` for consistency.
|
||||||
pub fn from_env() -> Result<Self, Error> {
|
pub fn from_env() -> Result<Self, Error> {
|
||||||
Ok(Self {
|
Ok(Self {
|
||||||
ollama_url: std::env::var("OLLAMA_URL")
|
litellm_url: std::env::var("LITELLM_URL")
|
||||||
.unwrap_or_else(|_| "http://localhost:11434".into()),
|
.unwrap_or_else(|_| "http://localhost:4000".into()),
|
||||||
ollama_model: std::env::var("OLLAMA_MODEL").unwrap_or_else(|_| "llama3.1:8b".into()),
|
litellm_model: std::env::var("LITELLM_MODEL").unwrap_or_else(|_| "qwen3-32b".into()),
|
||||||
|
litellm_api_key: optional_env("LITELLM_API_KEY"),
|
||||||
searxng_url: std::env::var("SEARXNG_URL")
|
searxng_url: std::env::var("SEARXNG_URL")
|
||||||
.unwrap_or_else(|_| "http://localhost:8888".into()),
|
.unwrap_or_else(|_| "http://localhost:8888".into()),
|
||||||
langchain_url: optional_env("LANGCHAIN_URL"),
|
langchain_url: optional_env("LANGCHAIN_URL"),
|
||||||
langgraph_url: optional_env("LANGGRAPH_URL"),
|
langgraph_url: optional_env("LANGGRAPH_URL"),
|
||||||
|
langflow_url: optional_env("LANGFLOW_URL"),
|
||||||
langfuse_url: optional_env("LANGFUSE_URL"),
|
langfuse_url: optional_env("LANGFUSE_URL"),
|
||||||
vectordb_url: optional_env("VECTORDB_URL"),
|
vectordb_url: optional_env("VECTORDB_URL"),
|
||||||
s3_url: optional_env("S3_URL"),
|
s3_url: optional_env("S3_URL"),
|
||||||
@@ -228,7 +234,7 @@ impl StripeConfig {
|
|||||||
|
|
||||||
/// Comma-separated list of enabled LLM provider identifiers.
|
/// Comma-separated list of enabled LLM provider identifiers.
|
||||||
///
|
///
|
||||||
/// For example: `LLM_PROVIDERS=ollama,openai,anthropic`
|
/// For example: `LLM_PROVIDERS=litellm,openai,anthropic`
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct LlmProvidersConfig {
|
pub struct LlmProvidersConfig {
|
||||||
/// Parsed provider names.
|
/// Parsed provider names.
|
||||||
@@ -328,36 +334,36 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn llm_providers_single() {
|
fn llm_providers_single() {
|
||||||
std::env::set_var("LLM_PROVIDERS", "ollama");
|
std::env::set_var("LLM_PROVIDERS", "litellm");
|
||||||
let cfg = LlmProvidersConfig::from_env().unwrap();
|
let cfg = LlmProvidersConfig::from_env().unwrap();
|
||||||
assert_eq!(cfg.providers, vec!["ollama"]);
|
assert_eq!(cfg.providers, vec!["litellm"]);
|
||||||
std::env::remove_var("LLM_PROVIDERS");
|
std::env::remove_var("LLM_PROVIDERS");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn llm_providers_multiple() {
|
fn llm_providers_multiple() {
|
||||||
std::env::set_var("LLM_PROVIDERS", "ollama,openai,anthropic");
|
std::env::set_var("LLM_PROVIDERS", "litellm,openai,anthropic");
|
||||||
let cfg = LlmProvidersConfig::from_env().unwrap();
|
let cfg = LlmProvidersConfig::from_env().unwrap();
|
||||||
assert_eq!(cfg.providers, vec!["ollama", "openai", "anthropic"]);
|
assert_eq!(cfg.providers, vec!["litellm", "openai", "anthropic"]);
|
||||||
std::env::remove_var("LLM_PROVIDERS");
|
std::env::remove_var("LLM_PROVIDERS");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn llm_providers_trims_whitespace() {
|
fn llm_providers_trims_whitespace() {
|
||||||
std::env::set_var("LLM_PROVIDERS", " ollama , openai ");
|
std::env::set_var("LLM_PROVIDERS", " litellm , openai ");
|
||||||
let cfg = LlmProvidersConfig::from_env().unwrap();
|
let cfg = LlmProvidersConfig::from_env().unwrap();
|
||||||
assert_eq!(cfg.providers, vec!["ollama", "openai"]);
|
assert_eq!(cfg.providers, vec!["litellm", "openai"]);
|
||||||
std::env::remove_var("LLM_PROVIDERS");
|
std::env::remove_var("LLM_PROVIDERS");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn llm_providers_filters_empty_entries() {
|
fn llm_providers_filters_empty_entries() {
|
||||||
std::env::set_var("LLM_PROVIDERS", "ollama,,openai,");
|
std::env::set_var("LLM_PROVIDERS", "litellm,,openai,");
|
||||||
let cfg = LlmProvidersConfig::from_env().unwrap();
|
let cfg = LlmProvidersConfig::from_env().unwrap();
|
||||||
assert_eq!(cfg.providers, vec!["ollama", "openai"]);
|
assert_eq!(cfg.providers, vec!["litellm", "openai"]);
|
||||||
std::env::remove_var("LLM_PROVIDERS");
|
std::env::remove_var("LLM_PROVIDERS");
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -367,18 +373,18 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn service_urls_default_ollama_url() {
|
fn service_urls_default_litellm_url() {
|
||||||
std::env::remove_var("OLLAMA_URL");
|
std::env::remove_var("LITELLM_URL");
|
||||||
let svc = ServiceUrls::from_env().unwrap();
|
let svc = ServiceUrls::from_env().unwrap();
|
||||||
assert_eq!(svc.ollama_url, "http://localhost:11434");
|
assert_eq!(svc.litellm_url, "http://localhost:4000");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn service_urls_default_ollama_model() {
|
fn service_urls_default_litellm_model() {
|
||||||
std::env::remove_var("OLLAMA_MODEL");
|
std::env::remove_var("LITELLM_MODEL");
|
||||||
let svc = ServiceUrls::from_env().unwrap();
|
let svc = ServiceUrls::from_env().unwrap();
|
||||||
assert_eq!(svc.ollama_model, "llama3.1:8b");
|
assert_eq!(svc.litellm_model, "qwen3-32b");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@@ -391,11 +397,11 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
#[serial]
|
#[serial]
|
||||||
fn service_urls_custom_ollama_url() {
|
fn service_urls_custom_litellm_url() {
|
||||||
std::env::set_var("OLLAMA_URL", "http://gpu-host:11434");
|
std::env::set_var("LITELLM_URL", "http://litellm-host:4000");
|
||||||
let svc = ServiceUrls::from_env().unwrap();
|
let svc = ServiceUrls::from_env().unwrap();
|
||||||
assert_eq!(svc.ollama_url, "http://gpu-host:11434");
|
assert_eq!(svc.litellm_url, "http://litellm-host:4000");
|
||||||
std::env::remove_var("OLLAMA_URL");
|
std::env::remove_var("LITELLM_URL");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|||||||
108
src/infrastructure/langgraph.rs
Normal file
108
src/infrastructure/langgraph.rs
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
use dioxus::prelude::*;
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
use crate::models::AgentEntry;
|
||||||
|
|
||||||
|
/// Raw assistant object returned by the LangGraph `POST /assistants/search`
|
||||||
|
/// endpoint. Only the fields we display are deserialized; unknown keys are
|
||||||
|
/// silently ignored thanks to serde defaults.
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct LangGraphAssistant {
|
||||||
|
assistant_id: String,
|
||||||
|
#[serde(default)]
|
||||||
|
name: String,
|
||||||
|
#[serde(default)]
|
||||||
|
graph_id: String,
|
||||||
|
#[serde(default)]
|
||||||
|
metadata: serde_json::Value,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch the list of assistants (agents) from a LangGraph instance.
|
||||||
|
///
|
||||||
|
/// Calls `POST <langgraph_url>/assistants/search` with an empty body to
|
||||||
|
/// retrieve every registered assistant. Each result is mapped to the
|
||||||
|
/// frontend-friendly `AgentEntry` model.
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A vector of `AgentEntry` structs. Returns an empty vector when the
|
||||||
|
/// LangGraph URL is not configured or the service is unreachable.
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns `ServerFnError` on network or deserialization failures that
|
||||||
|
/// indicate a misconfigured (but present) LangGraph instance.
|
||||||
|
#[server(endpoint = "list-langgraph-agents")]
|
||||||
|
pub async fn list_langgraph_agents() -> Result<Vec<AgentEntry>, ServerFnError> {
|
||||||
|
let state: crate::infrastructure::ServerState =
|
||||||
|
dioxus_fullstack::FullstackContext::extract().await?;
|
||||||
|
|
||||||
|
let base_url = state.services.langgraph_url.clone();
|
||||||
|
if base_url.is_empty() {
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let url = format!("{}/assistants/search", base_url.trim_end_matches('/'));
|
||||||
|
|
||||||
|
let client = reqwest::Client::builder()
|
||||||
|
.timeout(std::time::Duration::from_secs(5))
|
||||||
|
.build()
|
||||||
|
.map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
|
||||||
|
|
||||||
|
// LangGraph expects a POST with a JSON body (empty object = no filters).
|
||||||
|
let resp = match client
|
||||||
|
.post(&url)
|
||||||
|
.header("content-type", "application/json")
|
||||||
|
.body("{}")
|
||||||
|
.send()
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(r) if r.status().is_success() => r,
|
||||||
|
Ok(r) => {
|
||||||
|
let status = r.status();
|
||||||
|
let body = r.text().await.unwrap_or_default();
|
||||||
|
tracing::error!("LangGraph returned {status}: {body}");
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::error!("LangGraph request failed: {e}");
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let assistants: Vec<LangGraphAssistant> = resp
|
||||||
|
.json()
|
||||||
|
.await
|
||||||
|
.map_err(|e| ServerFnError::new(format!("Failed to parse LangGraph response: {e}")))?;
|
||||||
|
|
||||||
|
let entries = assistants
|
||||||
|
.into_iter()
|
||||||
|
.map(|a| {
|
||||||
|
// Use the assistant name if present, otherwise fall back to graph_id.
|
||||||
|
let name = if a.name.is_empty() {
|
||||||
|
a.graph_id.clone()
|
||||||
|
} else {
|
||||||
|
a.name
|
||||||
|
};
|
||||||
|
|
||||||
|
// Extract a description from metadata if available.
|
||||||
|
let description = a
|
||||||
|
.metadata
|
||||||
|
.get("description")
|
||||||
|
.and_then(|v| v.as_str())
|
||||||
|
.unwrap_or("")
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
AgentEntry {
|
||||||
|
id: a.assistant_id,
|
||||||
|
name,
|
||||||
|
description,
|
||||||
|
status: "active".to_string(),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
Ok(entries)
|
||||||
|
}
|
||||||
403
src/infrastructure/litellm.rs
Normal file
403
src/infrastructure/litellm.rs
Normal file
@@ -0,0 +1,403 @@
|
|||||||
|
#[cfg(feature = "server")]
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use dioxus::prelude::*;
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
|
||||||
|
use crate::models::LitellmUsageStats;
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
use crate::models::ModelUsage;
|
||||||
|
|
||||||
|
/// Status of a LiteLLM proxy instance, including connectivity and available models.
|
||||||
|
///
|
||||||
|
/// # Fields
|
||||||
|
///
|
||||||
|
/// * `online` - Whether the LiteLLM API responded successfully
|
||||||
|
/// * `models` - List of model IDs available through the proxy
|
||||||
|
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||||
|
pub struct LitellmStatus {
|
||||||
|
pub online: bool,
|
||||||
|
pub models: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Response from LiteLLM's `GET /v1/models` endpoint (OpenAI-compatible).
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct ModelsResponse {
|
||||||
|
data: Vec<ModelObject>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A single model entry from the OpenAI-compatible models list.
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct ModelObject {
|
||||||
|
id: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check the status of a LiteLLM proxy by querying its models endpoint.
|
||||||
|
///
|
||||||
|
/// Calls `GET <litellm_url>/v1/models` to list available models and determine
|
||||||
|
/// whether the instance is reachable. Sends the API key as a Bearer token
|
||||||
|
/// if configured.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `litellm_url` - Base URL of the LiteLLM proxy (e.g. "http://localhost:4000")
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A `LitellmStatus` with `online: true` and model IDs if reachable,
|
||||||
|
/// or `online: false` with an empty model list on failure
|
||||||
|
///
|
||||||
|
/// # Errors
|
||||||
|
///
|
||||||
|
/// Returns `ServerFnError` only on serialization issues; network failures
|
||||||
|
/// are caught and returned as `online: false`
|
||||||
|
#[post("/api/litellm-status")]
|
||||||
|
pub async fn get_litellm_status(litellm_url: String) -> Result<LitellmStatus, ServerFnError> {
|
||||||
|
let state: crate::infrastructure::ServerState =
|
||||||
|
dioxus_fullstack::FullstackContext::extract().await?;
|
||||||
|
|
||||||
|
let base_url = if litellm_url.is_empty() {
|
||||||
|
state.services.litellm_url.clone()
|
||||||
|
} else {
|
||||||
|
litellm_url
|
||||||
|
};
|
||||||
|
|
||||||
|
let api_key = state.services.litellm_api_key.clone();
|
||||||
|
let url = format!("{}/v1/models", base_url.trim_end_matches('/'));
|
||||||
|
|
||||||
|
let client = reqwest::Client::builder()
|
||||||
|
.timeout(std::time::Duration::from_secs(5))
|
||||||
|
.build()
|
||||||
|
.map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
|
||||||
|
|
||||||
|
let mut request = client.get(&url);
|
||||||
|
if !api_key.is_empty() {
|
||||||
|
request = request.header("Authorization", format!("Bearer {api_key}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = match request.send().await {
|
||||||
|
Ok(r) if r.status().is_success() => r,
|
||||||
|
_ => {
|
||||||
|
return Ok(LitellmStatus {
|
||||||
|
online: false,
|
||||||
|
models: Vec::new(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let body: ModelsResponse = match resp.json().await {
|
||||||
|
Ok(b) => b,
|
||||||
|
Err(_) => {
|
||||||
|
return Ok(LitellmStatus {
|
||||||
|
online: true,
|
||||||
|
models: Vec::new(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let models = body.data.into_iter().map(|m| m.id).collect();
|
||||||
|
|
||||||
|
Ok(LitellmStatus {
|
||||||
|
online: true,
|
||||||
|
models,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Response from LiteLLM's `GET /global/activity` endpoint.
|
||||||
|
///
|
||||||
|
/// Returns aggregate token counts and API request totals for a date range.
|
||||||
|
/// Available on the free tier (no Enterprise license needed).
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
struct ActivityResponse {
|
||||||
|
/// Total tokens across all models in the date range
|
||||||
|
#[serde(default)]
|
||||||
|
sum_total_tokens: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Per-model entry from `GET /global/activity/model`.
|
||||||
|
///
|
||||||
|
/// Each entry contains a model name and its aggregated token total.
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
struct ActivityModelEntry {
|
||||||
|
/// Model identifier (may be empty for unattributed traffic)
|
||||||
|
#[serde(default)]
|
||||||
|
model: String,
|
||||||
|
/// Sum of tokens used by this model in the date range
|
||||||
|
#[serde(default)]
|
||||||
|
sum_total_tokens: u64,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Per-model spend entry from `GET /global/spend/models`.
|
||||||
|
///
|
||||||
|
/// Each entry maps a model name to its total spend in USD.
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
#[derive(Debug, Deserialize)]
|
||||||
|
struct SpendModelEntry {
|
||||||
|
/// Model identifier
|
||||||
|
#[serde(default)]
|
||||||
|
model: String,
|
||||||
|
/// Total spend in USD
|
||||||
|
#[serde(default)]
|
||||||
|
total_spend: f64,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Merge per-model token counts and spend data into `ModelUsage` entries.
|
||||||
|
///
|
||||||
|
/// Joins `activity_models` (tokens) and `spend_models` (spend) by model
|
||||||
|
/// name using a HashMap for O(n + m) merge. Entries with empty model
|
||||||
|
/// names are skipped.
|
||||||
|
///
|
||||||
|
/// # Arguments
|
||||||
|
///
|
||||||
|
/// * `activity_models` - Per-model token data from `/global/activity/model`
|
||||||
|
/// * `spend_models` - Per-model spend data from `/global/spend/models`
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// Merged list sorted by total tokens descending
|
||||||
|
#[cfg(feature = "server")]
|
||||||
|
fn merge_model_data(
|
||||||
|
activity_models: Vec<ActivityModelEntry>,
|
||||||
|
spend_models: Vec<SpendModelEntry>,
|
||||||
|
) -> Vec<ModelUsage> {
|
||||||
|
let mut model_map: HashMap<String, ModelUsage> = HashMap::new();
|
||||||
|
|
||||||
|
for entry in activity_models {
|
||||||
|
if entry.model.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
model_map
|
||||||
|
.entry(entry.model.clone())
|
||||||
|
.or_insert_with(|| ModelUsage {
|
||||||
|
model: entry.model,
|
||||||
|
..Default::default()
|
||||||
|
})
|
||||||
|
.total_tokens = entry.sum_total_tokens;
|
||||||
|
}
|
||||||
|
|
||||||
|
for entry in spend_models {
|
||||||
|
if entry.model.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
model_map
|
||||||
|
.entry(entry.model.clone())
|
||||||
|
.or_insert_with(|| ModelUsage {
|
||||||
|
model: entry.model,
|
||||||
|
..Default::default()
|
||||||
|
})
|
||||||
|
.spend = entry.total_spend;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut result: Vec<ModelUsage> = model_map.into_values().collect();
|
||||||
|
result.sort_by(|a, b| b.total_tokens.cmp(&a.total_tokens));
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch aggregated usage statistics from LiteLLM's free-tier APIs.
|
||||||
|
///
/// Combines three endpoints to build a complete usage picture:
/// - `GET /global/activity` - total token counts
/// - `GET /global/activity/model` - per-model token breakdown
/// - `GET /global/spend/models` - per-model spend in USD
///
/// # Arguments
///
/// * `start_date` - Start of the reporting period in `YYYY-MM-DD` format
/// * `end_date` - End of the reporting period in `YYYY-MM-DD` format
///
/// # Returns
///
/// Aggregated usage stats; returns default (zeroed) stats on network
/// failure or permission errors
///
/// # Errors
///
/// Returns `ServerFnError` only on HTTP client construction failure
#[post("/api/litellm-usage")]
pub async fn get_litellm_usage(
    start_date: String,
    end_date: String,
) -> Result<LitellmUsageStats, ServerFnError> {
    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;

    let base_url = &state.services.litellm_url;
    let api_key = &state.services.litellm_api_key;

    if base_url.is_empty() {
        return Ok(LitellmUsageStats::default());
    }

    let base = base_url.trim_end_matches('/');
    let date_params = format!("start_date={start_date}&end_date={end_date}");

    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(10))
        .build()
        .map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;

    // Helper closure to build an authenticated GET request
    let auth_get = |url: String| {
        let mut req = client.get(url);
        if !api_key.is_empty() {
            req = req.header("Authorization", format!("Bearer {api_key}"));
        }
        req
    };

    // Fire all three requests concurrently to minimise latency
    let (activity_res, model_activity_res, model_spend_res) = tokio::join!(
        auth_get(format!("{base}/global/activity?{date_params}")).send(),
        auth_get(format!("{base}/global/activity/model?{date_params}")).send(),
        auth_get(format!("{base}/global/spend/models?{date_params}")).send(),
    );

    // Parse total token count from /global/activity
    let total_tokens = match activity_res {
        Ok(r) if r.status().is_success() => r
            .json::<ActivityResponse>()
            .await
            .map(|a| a.sum_total_tokens)
            .unwrap_or(0),
        _ => 0,
    };

    // Parse per-model token breakdown from /global/activity/model
    let activity_models: Vec<ActivityModelEntry> = match model_activity_res {
        Ok(r) if r.status().is_success() => r.json().await.unwrap_or_default(),
        _ => Vec::new(),
    };

    // Parse per-model spend from /global/spend/models
    let spend_models: Vec<SpendModelEntry> = match model_spend_res {
        Ok(r) if r.status().is_success() => r.json().await.unwrap_or_default(),
        _ => Vec::new(),
    };

    let total_spend: f64 = spend_models.iter().map(|m| m.total_spend).sum();
    let model_breakdown = merge_model_data(activity_models, spend_models);

    Ok(LitellmUsageStats {
        total_spend,
        // Free-tier endpoints don't provide prompt/completion split;
        // total_tokens comes from /global/activity.
        total_prompt_tokens: 0,
        total_completion_tokens: 0,
        total_tokens,
        model_breakdown,
    })
}

#[cfg(all(test, feature = "server"))]
mod tests {
    use super::*;

    #[test]
    fn merge_empty_inputs() {
        let result = merge_model_data(Vec::new(), Vec::new());
        assert!(result.is_empty());
    }

    #[test]
    fn merge_activity_only() {
        let activity = vec![ActivityModelEntry {
            model: "gpt-4".into(),
            sum_total_tokens: 1500,
        }];
        let result = merge_model_data(activity, Vec::new());
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].model, "gpt-4");
        assert_eq!(result[0].total_tokens, 1500);
        assert_eq!(result[0].spend, 0.0);
    }

    #[test]
    fn merge_spend_only() {
        let spend = vec![SpendModelEntry {
            model: "gpt-4".into(),
            total_spend: 2.5,
        }];
        let result = merge_model_data(Vec::new(), spend);
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].model, "gpt-4");
        assert_eq!(result[0].spend, 2.5);
        assert_eq!(result[0].total_tokens, 0);
    }

    #[test]
    fn merge_joins_by_model_name() {
        let activity = vec![
            ActivityModelEntry {
                model: "gpt-4".into(),
                sum_total_tokens: 5000,
            },
            ActivityModelEntry {
                model: "claude-3".into(),
                sum_total_tokens: 3000,
            },
        ];
        let spend = vec![
            SpendModelEntry {
                model: "gpt-4".into(),
                total_spend: 1.0,
            },
            SpendModelEntry {
                model: "claude-3".into(),
                total_spend: 0.5,
            },
        ];
        let result = merge_model_data(activity, spend);
        assert_eq!(result.len(), 2);
        // Sorted by tokens descending: gpt-4 (5000) before claude-3 (3000)
        assert_eq!(result[0].model, "gpt-4");
        assert_eq!(result[0].total_tokens, 5000);
        assert_eq!(result[0].spend, 1.0);
        assert_eq!(result[1].model, "claude-3");
        assert_eq!(result[1].total_tokens, 3000);
        assert_eq!(result[1].spend, 0.5);
    }

    #[test]
    fn merge_skips_empty_model_names() {
        let activity = vec![
            ActivityModelEntry {
                model: "".into(),
                sum_total_tokens: 100,
            },
            ActivityModelEntry {
                model: "gpt-4".into(),
                sum_total_tokens: 500,
            },
        ];
        let spend = vec![SpendModelEntry {
            model: "".into(),
            total_spend: 0.01,
        }];
        let result = merge_model_data(activity, spend);
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].model, "gpt-4");
    }

    #[test]
    fn merge_unmatched_models_appear_in_both_directions() {
        let activity = vec![ActivityModelEntry {
            model: "tokens-only".into(),
            sum_total_tokens: 1000,
        }];
        let spend = vec![SpendModelEntry {
            model: "spend-only".into(),
            total_spend: 0.5,
        }];
        let result = merge_model_data(activity, spend);
        assert_eq!(result.len(), 2);
        // tokens-only has 1000 tokens, spend-only has 0 tokens
        assert_eq!(result[0].model, "tokens-only");
        assert_eq!(result[0].total_tokens, 1000);
        assert_eq!(result[1].model, "spend-only");
        assert_eq!(result[1].spend, 0.5);
    }
}
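The tests above pin down what `merge_model_data` must do: an outer join of activity and spend entries keyed on model name, dropping empty model names and sorting by total tokens descending. The helper itself is defined earlier in the file and is not part of this hunk; the following is a minimal sketch consistent with the tests, with field types assumed from how they are used above.

```rust
use std::collections::BTreeMap;

/// Minimal sketch consistent with the tests above; the real `merge_model_data`
/// lives earlier in this file and may differ in detail.
fn merge_model_data_sketch(
    activity: Vec<ActivityModelEntry>,
    spend: Vec<SpendModelEntry>,
) -> Vec<ModelUsage> {
    let mut by_model: BTreeMap<String, ModelUsage> = BTreeMap::new();

    // Token counts from /global/activity/model, skipping unnamed entries.
    for a in activity.into_iter().filter(|a| !a.model.is_empty()) {
        let entry = by_model.entry(a.model.clone()).or_default();
        entry.model = a.model;
        entry.total_tokens = a.sum_total_tokens;
    }
    // Spend from /global/spend/models, outer-joined on model name.
    for s in spend.into_iter().filter(|s| !s.model.is_empty()) {
        let entry = by_model.entry(s.model.clone()).or_default();
        entry.model = s.model;
        entry.spend = s.total_spend;
    }

    // Sort by total tokens, descending, as the tests expect.
    let mut merged: Vec<ModelUsage> = by_model.into_values().collect();
    merged.sort_by(|a, b| b.total_tokens.cmp(&a.total_tokens));
    merged
}
```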
@@ -4,23 +4,23 @@ use dioxus::prelude::*;
mod inner {
    use serde::{Deserialize, Serialize};

    /// A single message in the OpenAI-compatible chat format used by Ollama.
    /// A single message in the OpenAI-compatible chat format used by LiteLLM.
    #[derive(Serialize)]
    pub(super) struct ChatMessage {
        pub role: String,
        pub content: String,
    }

    /// Request body for Ollama's OpenAI-compatible chat completions endpoint.
    /// Request body for the OpenAI-compatible chat completions endpoint.
    #[derive(Serialize)]
    pub(super) struct OllamaChatRequest {
    pub(super) struct ChatCompletionRequest {
        pub model: String,
        pub messages: Vec<ChatMessage>,
        /// Disable streaming so we get a single JSON response.
        pub stream: bool,
    }

    /// A single choice in the Ollama chat completions response.
    /// A single choice in the chat completions response.
    #[derive(Deserialize)]
    pub(super) struct ChatChoice {
        pub message: ChatResponseMessage,
@@ -32,9 +32,9 @@ mod inner {
        pub content: String,
    }

    /// Top-level response from Ollama's `/v1/chat/completions` endpoint.
    /// Top-level response from the `/v1/chat/completions` endpoint.
    #[derive(Deserialize)]
    pub(super) struct OllamaChatResponse {
    pub(super) struct ChatCompletionResponse {
        pub choices: Vec<ChatChoice>,
    }

@@ -157,7 +157,7 @@ mod inner {
    }
}

/// Summarize an article using a local Ollama instance.
/// Summarize an article using a LiteLLM proxy.
///
/// First attempts to fetch the full article text from the provided URL.
/// If that fails (paywall, timeout, etc.), falls back to the search snippet.
@@ -167,8 +167,8 @@ mod inner {
///
/// * `snippet` - The search result snippet (fallback content)
/// * `article_url` - The original article URL to fetch full text from
/// * `ollama_url` - Base URL of the Ollama instance (e.g. "http://localhost:11434")
/// * `litellm_url` - Base URL of the LiteLLM proxy (e.g. "http://localhost:4000")
/// * `model` - The Ollama model ID to use (e.g. "llama3.1:8b")
/// * `model` - The model ID to use (e.g. "qwen3-32b")
///
/// # Returns
///
@@ -176,36 +176,38 @@ mod inner {
///
/// # Errors
///
/// Returns `ServerFnError` if the Ollama request fails or response parsing fails
/// Returns `ServerFnError` if the LiteLLM request fails or response parsing fails
#[post("/api/summarize")]
pub async fn summarize_article(
    snippet: String,
    article_url: String,
    ollama_url: String,
    litellm_url: String,
    model: String,
) -> Result<String, ServerFnError> {
    use inner::{fetch_article_text, ChatMessage, OllamaChatRequest, OllamaChatResponse};
    use inner::{fetch_article_text, ChatCompletionRequest, ChatCompletionResponse, ChatMessage};

    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;

    // Use caller-provided values or fall back to ServerState config
    let base_url = if ollama_url.is_empty() {
    let base_url = if litellm_url.is_empty() {
        state.services.ollama_url.clone()
        state.services.litellm_url.clone()
    } else {
        ollama_url
        litellm_url
    };

    let model = if model.is_empty() {
        state.services.ollama_model.clone()
        state.services.litellm_model.clone()
    } else {
        model
    };

    let api_key = state.services.litellm_api_key.clone();

    // Try to fetch the full article; fall back to the search snippet
    let article_text = fetch_article_text(&article_url).await.unwrap_or(snippet);

    let request_body = OllamaChatRequest {
    let request_body = ChatCompletionRequest {
        model,
        stream: false,
        messages: vec![ChatMessage {
@@ -223,42 +225,48 @@ pub async fn summarize_article(

    let url = format!("{}/v1/chat/completions", base_url.trim_end_matches('/'));
    let client = reqwest::Client::new();
    let resp = client
    let mut request = client
        .post(&url)
        .header("content-type", "application/json")
        .json(&request_body)
        .json(&request_body);

    if !api_key.is_empty() {
        request = request.header("Authorization", format!("Bearer {api_key}"));
    }

    let resp = request
        .send()
        .await
        .map_err(|e| ServerFnError::new(format!("Ollama request failed: {e}")))?;
        .map_err(|e| ServerFnError::new(format!("LiteLLM request failed: {e}")))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let body = resp.text().await.unwrap_or_default();
        return Err(ServerFnError::new(format!(
            "Ollama returned {status}: {body}"
            "LiteLLM returned {status}: {body}"
        )));
    }

    let body: OllamaChatResponse = resp
    let body: ChatCompletionResponse = resp
        .json()
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to parse Ollama response: {e}")))?;
        .map_err(|e| ServerFnError::new(format!("Failed to parse LiteLLM response: {e}")))?;

    body.choices
        .first()
        .map(|choice| choice.message.content.clone())
        .ok_or_else(|| ServerFnError::new("Empty response from Ollama"))
        .ok_or_else(|| ServerFnError::new("Empty response from LiteLLM"))
}

/// A lightweight chat message for the follow-up conversation.
/// Uses simple String role ("system"/"user"/"assistant") for Ollama compatibility.
/// Uses simple String role ("system"/"user"/"assistant") for OpenAI compatibility.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct FollowUpMessage {
    pub role: String,
    pub content: String,
}

/// Send a follow-up question about an article using a local Ollama instance.
/// Send a follow-up question about an article using a LiteLLM proxy.
///
/// Accepts the full conversation history (system context + prior turns) and
/// returns the assistant's next response. The system message should contain
@@ -267,8 +275,8 @@ pub struct FollowUpMessage {
/// # Arguments
///
/// * `messages` - The conversation history including system context
/// * `ollama_url` - Base URL of the Ollama instance
/// * `litellm_url` - Base URL of the LiteLLM proxy
/// * `model` - The Ollama model ID to use
/// * `model` - The model ID to use
///
/// # Returns
///
@@ -276,30 +284,32 @@ pub struct FollowUpMessage {
///
/// # Errors
///
/// Returns `ServerFnError` if the Ollama request fails or response parsing fails
/// Returns `ServerFnError` if the LiteLLM request fails or response parsing fails
#[post("/api/chat")]
pub async fn chat_followup(
    messages: Vec<FollowUpMessage>,
    ollama_url: String,
    litellm_url: String,
    model: String,
) -> Result<String, ServerFnError> {
    use inner::{ChatMessage, OllamaChatRequest, OllamaChatResponse};
    use inner::{ChatCompletionRequest, ChatCompletionResponse, ChatMessage};

    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;

    let base_url = if ollama_url.is_empty() {
    let base_url = if litellm_url.is_empty() {
        state.services.ollama_url.clone()
        state.services.litellm_url.clone()
    } else {
        ollama_url
        litellm_url
    };

    let model = if model.is_empty() {
        state.services.ollama_model.clone()
        state.services.litellm_model.clone()
    } else {
        model
    };

    let api_key = state.services.litellm_api_key.clone();

    // Convert FollowUpMessage to inner ChatMessage for the request
    let chat_messages: Vec<ChatMessage> = messages
        .into_iter()
@@ -309,7 +319,7 @@ pub async fn chat_followup(
        })
        .collect();

    let request_body = OllamaChatRequest {
    let request_body = ChatCompletionRequest {
        model,
        stream: false,
        messages: chat_messages,
@@ -317,31 +327,37 @@ pub async fn chat_followup(

    let url = format!("{}/v1/chat/completions", base_url.trim_end_matches('/'));
    let client = reqwest::Client::new();
    let resp = client
    let mut request = client
        .post(&url)
        .header("content-type", "application/json")
        .json(&request_body)
        .json(&request_body);

    if !api_key.is_empty() {
        request = request.header("Authorization", format!("Bearer {api_key}"));
    }

    let resp = request
        .send()
        .await
        .map_err(|e| ServerFnError::new(format!("Ollama request failed: {e}")))?;
        .map_err(|e| ServerFnError::new(format!("LiteLLM request failed: {e}")))?;

    if !resp.status().is_success() {
        let status = resp.status();
        let body = resp.text().await.unwrap_or_default();
        return Err(ServerFnError::new(format!(
            "Ollama returned {status}: {body}"
            "LiteLLM returned {status}: {body}"
        )));
    }

    let body: OllamaChatResponse = resp
    let body: ChatCompletionResponse = resp
        .json()
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to parse Ollama response: {e}")))?;
        .map_err(|e| ServerFnError::new(format!("Failed to parse LiteLLM response: {e}")))?;

    body.choices
        .first()
        .map(|choice| choice.message.content.clone())
        .ok_or_else(|| ServerFnError::new("Empty response from Ollama"))
        .ok_or_else(|| ServerFnError::new("Empty response from LiteLLM"))
}

#[cfg(test)]
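The rewritten server functions keep their call shape: pass the conversation history plus optional URL/model overrides, where empty strings mean "use the server-configured LiteLLM defaults". A hedged usage sketch (the history contents and the helper name are illustrative, not part of the diff):

```rust
/// Illustrative caller for the follow-up endpoint; empty URL/model strings
/// make the server fall back to LITELLM_URL / LITELLM_MODEL from config.
async fn ask_followup_example() -> Result<String, ServerFnError> {
    let history = vec![
        FollowUpMessage {
            role: "system".into(),
            content: "You are a helpful assistant. Article context: ...".into(),
        },
        FollowUpMessage {
            role: "user".into(),
            content: "Summarise the key claim in one sentence.".into(),
        },
    ];
    chat_followup(history, String::new(), String::new()).await
}
```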
@@ -2,8 +2,9 @@
// the #[server] macro generates client stubs for the web target)
pub mod auth_check;
pub mod chat;
pub mod langgraph;
pub mod litellm;
pub mod llm;
pub mod ollama;
pub mod searxng;

// Server-only modules (Axum handlers, state, configs, DB, etc.)
@@ -1,92 +0,0 @@
|
|||||||
use dioxus::prelude::*;
|
|
||||||
use serde::{Deserialize, Serialize};
|
|
||||||
|
|
||||||
/// Status of a local Ollama instance, including connectivity and loaded models.
|
|
||||||
///
|
|
||||||
/// # Fields
|
|
||||||
///
|
|
||||||
/// * `online` - Whether the Ollama API responded successfully
|
|
||||||
/// * `models` - List of model names currently available on the instance
|
|
||||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
|
||||||
pub struct OllamaStatus {
|
|
||||||
pub online: bool,
|
|
||||||
pub models: Vec<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Response from Ollama's `GET /api/tags` endpoint.
|
|
||||||
#[cfg(feature = "server")]
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
struct OllamaTagsResponse {
|
|
||||||
models: Vec<OllamaModel>,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A single model entry from Ollama's tags API.
|
|
||||||
#[cfg(feature = "server")]
|
|
||||||
#[derive(Deserialize)]
|
|
||||||
struct OllamaModel {
|
|
||||||
name: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Check the status of a local Ollama instance by querying its tags endpoint.
|
|
||||||
///
|
|
||||||
/// Calls `GET <ollama_url>/api/tags` to list available models and determine
|
|
||||||
/// whether the instance is reachable.
|
|
||||||
///
|
|
||||||
/// # Arguments
|
|
||||||
///
|
|
||||||
/// * `ollama_url` - Base URL of the Ollama instance (e.g. "http://localhost:11434")
|
|
||||||
///
|
|
||||||
/// # Returns
|
|
||||||
///
|
|
||||||
/// An `OllamaStatus` with `online: true` and model names if reachable,
|
|
||||||
/// or `online: false` with an empty model list on failure
|
|
||||||
///
|
|
||||||
/// # Errors
|
|
||||||
///
|
|
||||||
/// Returns `ServerFnError` only on serialization issues; network failures
|
|
||||||
/// are caught and returned as `online: false`
|
|
||||||
#[post("/api/ollama-status")]
|
|
||||||
pub async fn get_ollama_status(ollama_url: String) -> Result<OllamaStatus, ServerFnError> {
|
|
||||||
let state: crate::infrastructure::ServerState =
|
|
||||||
dioxus_fullstack::FullstackContext::extract().await?;
|
|
||||||
|
|
||||||
let base_url = if ollama_url.is_empty() {
|
|
||||||
state.services.ollama_url.clone()
|
|
||||||
} else {
|
|
||||||
ollama_url
|
|
||||||
};
|
|
||||||
|
|
||||||
let url = format!("{}/api/tags", base_url.trim_end_matches('/'));
|
|
||||||
|
|
||||||
let client = reqwest::Client::builder()
|
|
||||||
.timeout(std::time::Duration::from_secs(5))
|
|
||||||
.build()
|
|
||||||
.map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
|
|
||||||
|
|
||||||
let resp = match client.get(&url).send().await {
|
|
||||||
Ok(r) if r.status().is_success() => r,
|
|
||||||
_ => {
|
|
||||||
return Ok(OllamaStatus {
|
|
||||||
online: false,
|
|
||||||
models: Vec::new(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let body: OllamaTagsResponse = match resp.json().await {
|
|
||||||
Ok(b) => b,
|
|
||||||
Err(_) => {
|
|
||||||
return Ok(OllamaStatus {
|
|
||||||
online: true,
|
|
||||||
models: Vec::new(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let models = body.models.into_iter().map(|m| m.name).collect();
|
|
||||||
|
|
||||||
Ok(OllamaStatus {
|
|
||||||
online: true,
|
|
||||||
models,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,6 +1,6 @@
//! Unified LLM provider dispatch.
//!
//! Routes chat completion requests to Ollama, OpenAI, Anthropic, or
//! Routes chat completion requests to LiteLLM, OpenAI, Anthropic, or
//! HuggingFace based on the session's provider setting. All providers
//! except Anthropic use the OpenAI-compatible chat completions format.

@@ -20,11 +20,11 @@ pub struct ProviderMessage {
///
/// # Arguments
///
/// * `state` - Server state (for default Ollama URL/model)
/// * `state` - Server state (for default LiteLLM URL/model)
/// * `provider` - Provider name (`"ollama"`, `"openai"`, `"anthropic"`, `"huggingface"`)
/// * `provider` - Provider name (`"litellm"`, `"openai"`, `"anthropic"`, `"huggingface"`)
/// * `model` - Model ID
/// * `messages` - Conversation history
/// * `api_key` - API key (required for non-Ollama providers)
/// * `api_key` - API key (required for non-LiteLLM providers; LiteLLM uses server config)
/// * `stream` - Whether to request streaming
///
/// # Returns
@@ -123,11 +123,11 @@ pub async fn send_chat_request(
                .send()
                .await
        }
        // Default: Ollama (OpenAI-compatible endpoint)
        // Default: LiteLLM proxy (OpenAI-compatible endpoint)
        _ => {
            let base_url = &state.services.ollama_url;
            let base_url = &state.services.litellm_url;
            let resolved_model = if model.is_empty() {
                &state.services.ollama_model
                &state.services.litellm_model
            } else {
                model
            };
@@ -137,12 +137,15 @@ pub async fn send_chat_request(
                "messages": messages,
                "stream": stream,
            });
            client
                .post(&url)
                .header("content-type", "application/json")
                .json(&body)
                .send()
                .await
            let litellm_key = &state.services.litellm_api_key;
            let mut request = client
                .post(&url)
                .header("content-type", "application/json")
                .json(&body);
            if !litellm_key.is_empty() {
                request = request.header("Authorization", format!("Bearer {litellm_key}"));
            }
            request.send().await
        }
    }
}
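All three LiteLLM code paths in this change (summarize, follow-up chat, and the dispatcher's default arm) apply the same fallback rules: trim the configured base URL, substitute the configured model when the caller passes an empty one, and attach a Bearer header only when a key is configured. A standalone sketch of those rules, illustrative only; the `ServiceUrls` field names follow the code above and are assumed to be plain `String`s:

```rust
/// Illustrative helper, not part of the diff: the shared LiteLLM fallback rules.
fn resolve_litellm_target<'a>(
    services: &'a ServiceUrls,
    model: &'a str,
) -> (&'a str, &'a str, Option<String>) {
    // Normalise the base URL so "/v1/chat/completions" can be appended safely.
    let base_url = services.litellm_url.trim_end_matches('/');
    // A caller-supplied model wins; empty means "use the configured default".
    let resolved_model = if model.is_empty() {
        services.litellm_model.as_str()
    } else {
        model
    };
    // Only attach Authorization when a key is configured.
    let auth_header = if services.litellm_api_key.is_empty() {
        None
    } else {
        Some(format!("Bearer {}", services.litellm_api_key))
    };
    (base_url, resolved_model, auth_header)
}
```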
@@ -45,7 +45,7 @@ pub struct ServerStateInner {
    pub keycloak: &'static KeycloakConfig,
    /// Outbound email settings.
    pub smtp: &'static SmtpConfig,
    /// URLs for Ollama, SearXNG, LangChain, S3, etc.
    /// URLs for LiteLLM, SearXNG, LangChain, S3, etc.
    pub services: &'static ServiceUrls,
    /// Stripe billing keys.
    pub stripe: &'static StripeConfig,
@@ -60,8 +60,8 @@ pub struct Attachment {
/// * `user_sub` - Keycloak subject ID (session owner)
/// * `title` - Display title (auto-generated or user-renamed)
/// * `namespace` - Grouping for sidebar sections
/// * `provider` - LLM provider used (e.g. "ollama", "openai")
/// * `provider` - LLM provider used (e.g. "litellm", "openai")
/// * `model` - Model ID used (e.g. "llama3.1:8b")
/// * `model` - Model ID used (e.g. "qwen3-32b")
/// * `created_at` - ISO 8601 creation timestamp
/// * `updated_at` - ISO 8601 last-activity timestamp
/// * `article_url` - Source article URL (for News namespace sessions)
@@ -171,8 +171,8 @@ mod tests {
            user_sub: "user-1".into(),
            title: "Test Chat".into(),
            namespace: ChatNamespace::General,
            provider: "ollama".into(),
            provider: "litellm".into(),
            model: "llama3.1:8b".into(),
            model: "qwen3-32b".into(),
            created_at: "2025-01-01T00:00:00Z".into(),
            updated_at: "2025-01-01T01:00:00Z".into(),
            article_url: None,
@@ -189,7 +189,7 @@ mod tests {
            "_id": "mongo-id",
            "user_sub": "u1",
            "title": "t",
            "provider": "ollama",
            "provider": "litellm",
            "model": "m",
            "created_at": "2025-01-01",
            "updated_at": "2025-01-01"
@@ -205,7 +205,7 @@ mod tests {
            user_sub: "u1".into(),
            title: "t".into(),
            namespace: ChatNamespace::default(),
            provider: "ollama".into(),
            provider: "litellm".into(),
            model: "m".into(),
            created_at: "2025-01-01".into(),
            updated_at: "2025-01-01".into(),
@@ -223,7 +223,7 @@ mod tests {
            user_sub: "u1".into(),
            title: "t".into(),
            namespace: ChatNamespace::default(),
            provider: "ollama".into(),
            provider: "litellm".into(),
            model: "m".into(),
            created_at: "2025-01-01".into(),
            updated_at: "2025-01-01".into(),
@@ -3,6 +3,7 @@ mod developer;
mod news;
mod organization;
mod provider;
mod services;
mod user;

pub use chat::*;
@@ -10,4 +11,5 @@ pub use developer::*;
pub use news::*;
pub use organization::*;
pub use provider::*;
pub use services::*;
pub use user::*;
@@ -83,6 +83,42 @@ pub struct BillingUsage {
    pub billing_cycle_end: String,
}

/// Aggregated token usage statistics from LiteLLM's spend tracking API.
///
/// # Fields
///
/// * `total_spend` - Total cost in USD across all models
/// * `total_prompt_tokens` - Sum of prompt (input) tokens
/// * `total_completion_tokens` - Sum of completion (output) tokens
/// * `total_tokens` - Sum of all tokens (prompt + completion)
/// * `model_breakdown` - Per-model usage breakdown
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct LitellmUsageStats {
    pub total_spend: f64,
    pub total_prompt_tokens: u64,
    pub total_completion_tokens: u64,
    pub total_tokens: u64,
    pub model_breakdown: Vec<ModelUsage>,
}

/// Token and spend usage for a single LLM model.
///
/// # Fields
///
/// * `model` - Model identifier (e.g. "gpt-4", "claude-3-opus")
/// * `spend` - Cost in USD for this model
/// * `prompt_tokens` - Prompt (input) tokens consumed
/// * `completion_tokens` - Completion (output) tokens generated
/// * `total_tokens` - Total tokens (prompt + completion)
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ModelUsage {
    pub model: String,
    pub spend: f64,
    pub prompt_tokens: u64,
    pub completion_tokens: u64,
    pub total_tokens: u64,
}

/// Organisation-level settings stored in MongoDB.
///
/// These complement Keycloak's Organizations feature with
@@ -234,4 +270,82 @@ mod tests {
        assert_eq!(record.seats_used, 0);
        assert_eq!(record.tokens_used, 0);
    }

    #[test]
    fn litellm_usage_stats_default() {
        let stats = LitellmUsageStats::default();
        assert_eq!(stats.total_spend, 0.0);
        assert_eq!(stats.total_prompt_tokens, 0);
        assert_eq!(stats.total_completion_tokens, 0);
        assert_eq!(stats.total_tokens, 0);
        assert!(stats.model_breakdown.is_empty());
    }

    #[test]
    fn litellm_usage_stats_serde_round_trip() {
        let stats = LitellmUsageStats {
            total_spend: 12.34,
            total_prompt_tokens: 50_000,
            total_completion_tokens: 25_000,
            total_tokens: 75_000,
            model_breakdown: vec![
                ModelUsage {
                    model: "gpt-4".into(),
                    spend: 10.0,
                    prompt_tokens: 40_000,
                    completion_tokens: 20_000,
                    total_tokens: 60_000,
                },
                ModelUsage {
                    model: "claude-3-opus".into(),
                    spend: 2.34,
                    prompt_tokens: 10_000,
                    completion_tokens: 5_000,
                    total_tokens: 15_000,
                },
            ],
        };
        let json = serde_json::to_string(&stats).expect("serialize LitellmUsageStats");
        let back: LitellmUsageStats =
            serde_json::from_str(&json).expect("deserialize LitellmUsageStats");
        assert_eq!(stats, back);
    }

    #[test]
    fn model_usage_default() {
        let usage = ModelUsage::default();
        assert_eq!(usage.model, "");
        assert_eq!(usage.spend, 0.0);
        assert_eq!(usage.prompt_tokens, 0);
        assert_eq!(usage.completion_tokens, 0);
        assert_eq!(usage.total_tokens, 0);
    }

    #[test]
    fn model_usage_serde_round_trip() {
        let usage = ModelUsage {
            model: "gpt-4-turbo".into(),
            spend: 5.67,
            prompt_tokens: 30_000,
            completion_tokens: 15_000,
            total_tokens: 45_000,
        };
        let json = serde_json::to_string(&usage).expect("serialize ModelUsage");
        let back: ModelUsage = serde_json::from_str(&json).expect("deserialize ModelUsage");
        assert_eq!(usage, back);
    }

    #[test]
    fn litellm_usage_stats_empty_breakdown_round_trip() {
        let stats = LitellmUsageStats {
            total_spend: 0.0,
            total_prompt_tokens: 0,
            total_completion_tokens: 0,
            total_tokens: 0,
            model_breakdown: Vec::new(),
        };
        let json = serde_json::to_string(&stats).expect("serialize empty stats");
        let back: LitellmUsageStats = serde_json::from_str(&json).expect("deserialize empty stats");
        assert_eq!(stats, back);
    }
}
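A small illustrative helper (not part of the diff) showing how the per-model breakdown might be turned into display rows; it assumes only the field semantics documented above (spend in USD, token counts summed over the reporting window):

```rust
/// Illustrative only: flatten the per-model breakdown into display strings.
fn usage_rows(stats: &LitellmUsageStats) -> Vec<String> {
    stats
        .model_breakdown
        .iter()
        .map(|m| {
            // Cost per 1K tokens, guarding against a zero token count.
            let usd_per_1k = if m.total_tokens > 0 {
                m.spend / (m.total_tokens as f64 / 1000.0)
            } else {
                0.0
            };
            format!(
                "{}: {} tokens, ${:.2} total (${:.4} per 1K tokens)",
                m.model, m.total_tokens, m.spend, usd_per_1k
            )
        })
        .collect()
}
```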
@@ -3,8 +3,8 @@ use serde::{Deserialize, Serialize};
/// Supported LLM provider backends.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum LlmProvider {
    /// Self-hosted models via Ollama
    /// LiteLLM proxy for unified model access
    Ollama,
    LiteLlm,
    /// Hugging Face Inference API
    HuggingFace,
    /// OpenAI-compatible endpoints
@@ -17,7 +17,7 @@ impl LlmProvider {
    /// Returns the display name for a provider.
    pub fn label(&self) -> &'static str {
        match self {
            Self::Ollama => "Ollama",
            Self::LiteLlm => "LiteLLM",
            Self::HuggingFace => "Hugging Face",
            Self::OpenAi => "OpenAI",
            Self::Anthropic => "Anthropic",
@@ -29,7 +29,7 @@ impl LlmProvider {
///
/// # Fields
///
/// * `id` - Unique model identifier (e.g. "llama3.1:8b")
/// * `id` - Unique model identifier (e.g. "qwen3-32b")
/// * `name` - Human-readable display name
/// * `provider` - Which provider hosts this model
/// * `context_window` - Maximum context length in tokens
@@ -79,8 +79,8 @@ mod tests {
    use pretty_assertions::assert_eq;

    #[test]
    fn llm_provider_label_ollama() {
    fn llm_provider_label_litellm() {
        assert_eq!(LlmProvider::Ollama.label(), "Ollama");
        assert_eq!(LlmProvider::LiteLlm.label(), "LiteLLM");
    }

    #[test]
@@ -101,7 +101,7 @@ mod tests {
    #[test]
    fn llm_provider_serde_round_trip() {
        for variant in [
            LlmProvider::Ollama,
            LlmProvider::LiteLlm,
            LlmProvider::HuggingFace,
            LlmProvider::OpenAi,
            LlmProvider::Anthropic,
@@ -117,10 +117,10 @@ mod tests {
    #[test]
    fn model_entry_serde_round_trip() {
        let entry = ModelEntry {
            id: "llama3.1:8b".into(),
            id: "qwen3-32b".into(),
            name: "Llama 3.1 8B".into(),
            name: "Qwen3 32B".into(),
            provider: LlmProvider::Ollama,
            provider: LlmProvider::LiteLlm,
            context_window: 8192,
            context_window: 32,
        };
        let json = serde_json::to_string(&entry).expect("serialize ModelEntry");
        let back: ModelEntry = serde_json::from_str(&json).expect("deserialize ModelEntry");
43
src/models/services.rs
Normal file
@@ -0,0 +1,43 @@
use serde::{Deserialize, Serialize};

/// Frontend-facing URLs for developer tool services.
///
/// Provided as a context signal in `AppShell` so that developer pages
/// can read the configured URLs without threading props through layouts.
/// An empty string indicates the service is not configured.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ServiceUrlsContext {
    /// LangGraph agent builder URL (empty if not configured)
    pub langgraph_url: String,
    /// LangFlow visual workflow builder URL (empty if not configured)
    pub langflow_url: String,
    /// Langfuse observability URL (empty if not configured)
    pub langfuse_url: String,
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    #[test]
    fn default_urls_are_empty() {
        let ctx = ServiceUrlsContext::default();
        assert_eq!(ctx.langgraph_url, "");
        assert_eq!(ctx.langflow_url, "");
        assert_eq!(ctx.langfuse_url, "");
    }

    #[test]
    fn serde_round_trip() {
        let ctx = ServiceUrlsContext {
            langgraph_url: "http://localhost:8123".into(),
            langflow_url: "http://localhost:7860".into(),
            langfuse_url: "http://localhost:3000".into(),
        };
        let json = serde_json::to_string(&ctx).expect("serialize ServiceUrlsContext");
        let back: ServiceUrlsContext =
            serde_json::from_str(&json).expect("deserialize ServiceUrlsContext");
        assert_eq!(ctx, back);
    }
}
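The struct is consumed via `use_context::<Signal<ServiceUrlsContext>>()` on the Agents page later in this change. How it is provided is not shown in this diff; the sketch below is a hedged guess at the expected wiring in `AppShell`, with the component body and URLs purely illustrative:

```rust
use dioxus::prelude::*;

use crate::models::ServiceUrlsContext;

/// Hypothetical wiring sketch, not part of the diff: provide the context once
/// near the top of the component tree so developer pages can read it.
#[component]
fn AppShellSketch() -> Element {
    use_context_provider(|| {
        Signal::new(ServiceUrlsContext {
            langgraph_url: "http://localhost:8123".into(),
            langflow_url: "http://localhost:7860".into(),
            langfuse_url: "http://localhost:3000".into(),
        })
    });

    rsx! {
        // Child pages then read it with:
        //   let svc = use_context::<Signal<ServiceUrlsContext>>();
        //   let langgraph = svc.read().langgraph_url.clone();
        div { "developer tool pages render here" }
    }
}
```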
@@ -24,17 +24,23 @@ pub struct AuthInfo {
    pub avatar_url: String,
    /// LibreChat instance URL for the sidebar chat link
    pub librechat_url: String,
    /// LangGraph agent builder URL (empty if not configured)
    pub langgraph_url: String,
    /// LangFlow visual workflow builder URL (empty if not configured)
    pub langflow_url: String,
    /// Langfuse observability URL (empty if not configured)
    pub langfuse_url: String,
}

/// Per-user LLM provider configuration stored in MongoDB.
///
/// Controls which provider and model the user's chat sessions default
/// to, and stores API keys for non-Ollama providers.
/// to, and stores API keys for non-LiteLLM providers.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct UserProviderConfig {
    /// Default provider name (e.g. "ollama", "openai")
    /// Default provider name (e.g. "litellm", "openai")
    pub default_provider: String,
    /// Default model ID (e.g. "llama3.1:8b", "gpt-4o")
    /// Default model ID (e.g. "qwen3-32b", "gpt-4o")
    pub default_model: String,
    /// OpenAI API key (empty if not configured)
    #[serde(default, skip_serializing_if = "Option::is_none")]
@@ -45,8 +51,8 @@ pub struct UserProviderConfig {
    /// HuggingFace API key
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub huggingface_api_key: Option<String>,
    /// Custom Ollama URL override (empty = use server default)
    /// Custom LiteLLM URL override (empty = use server default)
    pub ollama_url_override: String,
    pub litellm_url_override: String,
}

/// Per-user preferences stored in MongoDB.
@@ -60,10 +66,10 @@ pub struct UserPreferences {
    pub org_id: String,
    /// User-selected news/search topics
    pub custom_topics: Vec<String>,
    /// Per-user Ollama URL override (empty = use server default)
    /// Per-user LiteLLM URL override (empty = use server default)
    pub ollama_url_override: String,
    pub litellm_url_override: String,
    /// Per-user Ollama model override (empty = use server default)
    /// Per-user LiteLLM model override (empty = use server default)
    pub ollama_model_override: String,
    pub litellm_model_override: String,
    /// Recently searched queries for quick access
    pub recent_searches: Vec<String>,
    /// LLM provider configuration
@@ -91,6 +97,9 @@ mod tests {
        assert_eq!(info.name, "");
        assert_eq!(info.avatar_url, "");
        assert_eq!(info.librechat_url, "");
        assert_eq!(info.langgraph_url, "");
        assert_eq!(info.langflow_url, "");
        assert_eq!(info.langfuse_url, "");
    }

    #[test]
@@ -102,6 +111,9 @@ mod tests {
            name: "Test User".into(),
            avatar_url: "https://example.com/avatar.png".into(),
            librechat_url: "https://chat.example.com".into(),
            langgraph_url: "http://localhost:8123".into(),
            langflow_url: "http://localhost:7860".into(),
            langfuse_url: "http://localhost:3000".into(),
        };
        let json = serde_json::to_string(&info).expect("serialize AuthInfo");
        let back: AuthInfo = serde_json::from_str(&json).expect("deserialize AuthInfo");
@@ -120,12 +132,12 @@ mod tests {
    #[test]
    fn user_provider_config_optional_keys_skip_none() {
        let cfg = UserProviderConfig {
            default_provider: "ollama".into(),
            default_provider: "litellm".into(),
            default_model: "llama3.1:8b".into(),
            default_model: "qwen3-32b".into(),
            openai_api_key: None,
            anthropic_api_key: None,
            huggingface_api_key: None,
            ollama_url_override: String::new(),
            litellm_url_override: String::new(),
        };
        let json = serde_json::to_string(&cfg).expect("serialize UserProviderConfig");
        assert!(!json.contains("openai_api_key"));
@@ -141,7 +153,7 @@ mod tests {
            openai_api_key: Some("sk-test".into()),
            anthropic_api_key: Some("ak-test".into()),
            huggingface_api_key: None,
            ollama_url_override: "http://custom:11434".into(),
            litellm_url_override: "http://custom:4000".into(),
        };
        let json = serde_json::to_string(&cfg).expect("serialize");
        let back: UserProviderConfig = serde_json::from_str(&json).expect("deserialize");
@@ -25,8 +25,8 @@ const DEFAULT_TOPICS: &[&str] = &[
///
/// State is persisted across sessions using localStorage:
/// - `certifai_topics`: custom user-defined search topics
/// - `certifai_ollama_url`: Ollama instance URL for summarization
/// - `certifai_litellm_url`: LiteLLM proxy URL for summarization
/// - `certifai_ollama_model`: Ollama model ID for summarization
/// - `certifai_litellm_model`: LiteLLM model ID for summarization
#[component]
pub fn DashboardPage() -> Element {
    let locale = use_context::<Signal<Locale>>();
@@ -34,11 +34,11 @@ pub fn DashboardPage() -> Element {

    // Persistent state stored in localStorage
    let mut custom_topics = use_persistent("certifai_topics".to_string(), Vec::<String>::new);
    // Default to empty so the server functions use OLLAMA_URL / OLLAMA_MODEL
    // Default to empty so the server functions use LITELLM_URL / LITELLM_MODEL
    // from .env. Only stores a non-empty value when the user explicitly saves
    // an override via the Settings panel.
    let mut ollama_url = use_persistent("certifai_ollama_url".to_string(), String::new);
    let mut litellm_url = use_persistent("certifai_litellm_url".to_string(), String::new);
    let mut ollama_model = use_persistent("certifai_ollama_model".to_string(), String::new);
    let mut litellm_model = use_persistent("certifai_litellm_model".to_string(), String::new);

    // Reactive signals for UI state
    let mut active_topic = use_signal(|| "AI".to_string());
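The comment above describes the override semantics: an empty persisted value means "no override", so the server functions fall back to `LITELLM_URL` / `LITELLM_MODEL` from `.env`. A tiny illustrative helper (not part of the diff) capturing that rule:

```rust
/// Illustrative only: resolve the effective LiteLLM URL from a persisted override.
/// An empty override means "use the server-side default from .env".
fn effective_litellm_url(persisted_override: &str, server_default: &str) -> String {
    let trimmed = persisted_override.trim();
    if trimmed.is_empty() {
        server_default.to_string()
    } else {
        trimmed.to_string()
    }
}

// effective_litellm_url("", "http://localhost:4000")
//     == "http://localhost:4000"
// effective_litellm_url("http://litellm.internal:4000", "http://localhost:4000")
//     == "http://litellm.internal:4000"
```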
@@ -235,8 +235,8 @@ pub fn DashboardPage() -> Element {
    onclick: move |_| {
        let currently_shown = *show_settings.read();
        if !currently_shown {
            settings_url.set(ollama_url.read().clone());
            settings_url.set(litellm_url.read().clone());
            settings_model.set(ollama_model.read().clone());
            settings_model.set(litellm_model.read().clone());
        }
        show_settings.set(!currently_shown);
    },
@@ -247,16 +247,16 @@ pub fn DashboardPage() -> Element {
    // Settings panel (collapsible)
    if *show_settings.read() {
        div { class: "settings-panel",
            h4 { class: "settings-panel-title", "{t(l, \"dashboard.ollama_settings\")}" }
            h4 { class: "settings-panel-title", "{t(l, \"dashboard.litellm_settings\")}" }
            p { class: "settings-hint",
                "{t(l, \"dashboard.settings_hint\")}"
            }
            div { class: "settings-field",
                label { "{t(l, \"dashboard.ollama_url\")}" }
                label { "{t(l, \"dashboard.litellm_url\")}" }
                input {
                    class: "settings-input",
                    r#type: "text",
                    placeholder: "{t(l, \"dashboard.ollama_url_placeholder\")}",
                    placeholder: "{t(l, \"dashboard.litellm_url_placeholder\")}",
                    value: "{settings_url}",
                    oninput: move |e| settings_url.set(e.value()),
                }
@@ -274,8 +274,8 @@ pub fn DashboardPage() -> Element {
    button {
        class: "btn btn-primary",
        onclick: move |_| {
            *ollama_url.write() = settings_url.read().trim().to_string();
            *litellm_url.write() = settings_url.read().trim().to_string();
            *ollama_model.write() = settings_model.read().trim().to_string();
            *litellm_model.write() = settings_model.read().trim().to_string();
            show_settings.set(false);
        },
        "{t(l, \"common.save\")}"
@@ -320,14 +320,14 @@ pub fn DashboardPage() -> Element {
    news_session_id.set(None);

    let oll_url = ollama_url.read().clone();
    let ll_url = litellm_url.read().clone();
    let mdl = ollama_model.read().clone();
    let mdl = litellm_model.read().clone();
    spawn(async move {
        is_summarizing.set(true);
        match crate::infrastructure::llm::summarize_article(
            snippet.clone(),
            article_url,
            oll_url,
            ll_url,
            mdl,
        )
        .await
@@ -373,8 +373,8 @@ pub fn DashboardPage() -> Element {
    chat_messages: chat_messages.read().clone(),
    is_chatting: *is_chatting.read(),
    on_chat_send: move |question: String| {
        let oll_url = ollama_url.read().clone();
        let ll_url = litellm_url.read().clone();
        let mdl = ollama_model.read().clone();
        let mdl = litellm_model.read().clone();
        let ctx = article_context.read().clone();
        // Capture article info for News session creation
        let card_title = selected_card
@@ -394,7 +394,7 @@ pub fn DashboardPage() -> Element {
            content: question.clone(),
        });

        // Build full message history for Ollama
        // Build full message history for LiteLLM
        let system_msg = format!(
            "You are a helpful assistant. The user is reading \
            a news article. Use the following context to answer \
@@ -422,7 +422,7 @@ pub fn DashboardPage() -> Element {
        match create_chat_session(
            card_title,
            "News".to_string(),
            "ollama".to_string(),
            "litellm".to_string(),
            mdl.clone(),
            card_url,
        )
@@ -458,7 +458,7 @@ pub fn DashboardPage() -> Element {
        }

        match crate::infrastructure::llm::chat_followup(
            msgs, oll_url, mdl,
            msgs, ll_url, mdl,
        )
        .await
        {
@@ -495,7 +495,7 @@ pub fn DashboardPage() -> Element {
    // Right: sidebar (when no card selected)
    if !has_selection {
        DashboardSidebar {
            ollama_url: ollama_url.read().clone(),
            litellm_url: litellm_url.read().clone(),
            trending: trending_topics.clone(),
            recent_searches: recent_searches.read().clone(),
            on_topic_click: move |topic: String| {
@@ -1,26 +1,239 @@
 use dioxus::prelude::*;
+use dioxus_free_icons::icons::bs_icons::{
+    BsBook, BsBoxArrowUpRight, BsCodeSquare, BsCpu, BsGithub, BsLightningCharge,
+};
+use dioxus_free_icons::Icon;

 use crate::i18n::{t, Locale};
+use crate::models::ServiceUrlsContext;

-/// Agents page placeholder for the LangGraph agent builder.
+/// Agents informational landing page for LangGraph.
 ///
-/// Shows a "Coming Soon" card with a disabled launch button.
-/// Will eventually integrate with the LangGraph framework.
+/// Since LangGraph is API-only (no web UI), this page displays a hero section
+/// explaining its role, a connection status indicator, a card grid linking
+/// to documentation, and a live table of registered agents fetched from the
+/// LangGraph assistants API.
 #[component]
 pub fn AgentsPage() -> Element {
     let locale = use_context::<Signal<Locale>>();
+    let svc = use_context::<Signal<ServiceUrlsContext>>();
     let l = *locale.read();
+    let url = svc.read().langgraph_url.clone();
+
+    // Derive whether a LangGraph URL is configured
+    let connected = !url.is_empty();
+    // Build the API reference URL from the configured base, falling back to "#"
+    let api_ref_href = if connected {
+        format!("{}/docs", url)
+    } else {
+        "#".to_string()
+    };
+
+    // Fetch agents from LangGraph when connected
+    let agents_resource = use_resource(move || async move {
+        match crate::infrastructure::langgraph::list_langgraph_agents().await {
+            Ok(agents) => agents,
+            Err(e) => {
+                tracing::error!("Failed to fetch agents: {e}");
+                Vec::new()
+            }
+        }
+    });

     rsx! {
-        section { class: "placeholder-page",
-            div { class: "placeholder-card",
-                div { class: "placeholder-icon", "A" }
-                h2 { "{t(l, \"developer.agents_title\")}" }
-                p { class: "placeholder-desc",
-                    "{t(l, \"developer.agents_desc\")}"
+        div { class: "agents-page",
+            // -- Hero section --
+            div { class: "agents-hero",
+                div { class: "agents-hero-row",
+                    div { class: "agents-hero-icon",
+                        Icon { icon: BsCpu, width: 24, height: 24 }
+                    }
+                    h2 { class: "agents-hero-title",
+                        {t(l, "developer.agents_title")}
+                    }
+                }
+                p { class: "agents-hero-desc",
+                    {t(l, "developer.agents_desc")}
+                }
+
+                // -- Connection status --
+                if connected {
+                    div { class: "agents-status",
+                        span {
+                            class: "agents-status-dot agents-status-dot--on",
+                        }
+                        span { {t(l, "developer.agents_status_connected")} }
+                        code { class: "agents-status-url", {url.clone()} }
+                    }
+                } else {
+                    div { class: "agents-status",
+                        span {
+                            class: "agents-status-dot agents-status-dot--off",
+                        }
+                        span { {t(l, "developer.agents_status_not_connected")} }
+                        span { class: "agents-status-hint",
+                            {t(l, "developer.agents_config_hint")}
+                        }
+                    }
+                }
+            }
+
+            // -- Running Agents table --
+            div { class: "agents-table-section",
+                h3 { class: "agents-section-title",
+                    {t(l, "developer.agents_running_title")}
+                }
+
+                match agents_resource.read().as_ref() {
+                    None => {
+                        rsx! {
+                            p { class: "agents-table-loading",
+                                {t(l, "common.loading")}
+                            }
+                        }
+                    }
+                    Some(agents) if agents.is_empty() => {
+                        rsx! {
+                            p { class: "agents-table-empty",
+                                {t(l, "developer.agents_none")}
+                            }
+                        }
+                    }
+                    Some(agents) => {
+                        rsx! {
+                            div { class: "agents-table-wrap",
+                                table { class: "agents-table",
+                                    thead {
+                                        tr {
+                                            th { {t(l, "developer.agents_col_name")} }
+                                            th { {t(l, "developer.agents_col_id")} }
+                                            th { {t(l, "developer.agents_col_description")} }
+                                            th { {t(l, "developer.agents_col_status")} }
+                                        }
+                                    }
+                                    tbody {
+                                        for agent in agents.iter() {
+                                            tr { key: "{agent.id}",
+                                                td { class: "agents-cell-name",
+                                                    {agent.name.clone()}
+                                                }
+                                                td {
+                                                    code { class: "agents-cell-id",
+                                                        {agent.id.clone()}
+                                                    }
+                                                }
+                                                td { class: "agents-cell-desc",
+                                                    if agent.description.is_empty() {
+                                                        span { class: "agents-cell-none", "--" }
+                                                    } else {
+                                                        {agent.description.clone()}
+                                                    }
+                                                }
+                                                td {
+                                                    span { class: "agents-badge agents-badge--active",
+                                                        {agent.status.clone()}
+                                                    }
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+
+            // -- Quick Start card grid --
+            h3 { class: "agents-section-title",
+                {t(l, "developer.agents_quick_start")}
+            }
+
+            div { class: "agents-grid",
+                // Documentation
+                a {
+                    class: "agents-card",
+                    href: "https://langchain-ai.github.io/langgraph/",
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsBook, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.agents_docs")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.agents_docs_desc")}
+                    }
+                }
+
+                // Getting Started
+                a {
+                    class: "agents-card",
+                    href: "https://langchain-ai.github.io/langgraph/tutorials/introduction/",
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsLightningCharge, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.agents_getting_started")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.agents_getting_started_desc")}
+                    }
+                }
+
+                // GitHub
+                a {
+                    class: "agents-card",
+                    href: "https://github.com/langchain-ai/langgraph",
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsGithub, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.agents_github")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.agents_github_desc")}
+                    }
+                }
+
+                // Examples
+                a {
+                    class: "agents-card",
+                    href: "https://github.com/langchain-ai/langgraph/tree/main/examples",
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsCodeSquare, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.agents_examples")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.agents_examples_desc")}
+                    }
+                }
+
+                // API Reference (disabled when URL is empty)
+                a {
+                    class: if connected { "agents-card" } else { "agents-card agents-card--disabled" },
+                    href: "{api_ref_href}",
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsBoxArrowUpRight, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.agents_api_ref")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.agents_api_ref_desc")}
+                    }
                 }
-                button { class: "btn-primary", disabled: true, "{t(l, \"developer.launch_agents\")}" }
-                span { class: "placeholder-badge", "{t(l, \"common.coming_soon\")}" }
             }
         }
     }
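
The `list_langgraph_agents` helper referenced above is a server-side call into the LangGraph API. A minimal sketch of such a helper is shown below, assuming LangGraph's assistants search endpoint (`POST {base}/assistants/search`). In the repository the function is a server function that takes no arguments and reads the configured LANGGRAPH_URL itself; the `description`/`status` mapping here is purely illustrative, since the assistants API does not return those fields directly.

```rust
use serde::Deserialize;

/// Row shape the Agents page consumes; field names mirror what the rsx! reads.
#[derive(Clone, Deserialize)]
pub struct AgentInfo {
    pub id: String,
    pub name: String,
    pub description: String,
    pub status: String,
}

// Subset of the assistant object returned by LangGraph's assistants API.
#[derive(Deserialize)]
struct Assistant {
    assistant_id: String,
    graph_id: String,
    name: Option<String>,
}

/// Hypothetical helper: query LangGraph's assistants search endpoint and map
/// the result into the page's row type. Error handling is simplified.
pub async fn list_langgraph_agents(base_url: &str) -> Result<Vec<AgentInfo>, reqwest::Error> {
    let assistants: Vec<Assistant> = reqwest::Client::new()
        .post(format!("{base_url}/assistants/search"))
        .json(&serde_json::json!({ "limit": 50, "offset": 0 }))
        .send()
        .await?
        .json()
        .await?;

    Ok(assistants
        .into_iter()
        .map(|a| AgentInfo {
            name: a.name.unwrap_or(a.graph_id),
            id: a.assistant_id,
            description: String::new(), // not provided by the API; left empty here
            status: "active".to_string(), // illustrative; real status may be derived
        })
        .collect())
}
```
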
@@ -1,40 +1,142 @@
 use dioxus::prelude::*;
+use dioxus_free_icons::icons::bs_icons::{
+    BsBarChartLine, BsBoxArrowUpRight, BsGraphUp, BsSpeedometer,
+};
+use dioxus_free_icons::Icon;

 use crate::i18n::{t, Locale};
-use crate::models::AnalyticsMetric;
+use crate::models::{AnalyticsMetric, ServiceUrlsContext};

-/// Analytics page placeholder for LangFuse integration.
+/// Analytics & Observability page for Langfuse.
 ///
-/// Shows a "Coming Soon" card with a disabled launch button,
-/// plus a mock stats bar showing sample metrics.
+/// Langfuse is configured with Keycloak SSO (shared realm with CERTifAI).
+/// When users open Langfuse, the existing Keycloak session auto-authenticates
+/// them transparently. This page shows a metrics bar, connection status,
+/// and a prominent button to open Langfuse in a new tab.
 #[component]
 pub fn AnalyticsPage() -> Element {
     let locale = use_context::<Signal<Locale>>();
+    let svc = use_context::<Signal<ServiceUrlsContext>>();
     let l = *locale.read();
+    let url = svc.read().langfuse_url.clone();
+
+    let connected = !url.is_empty();
     let metrics = mock_metrics(l);

     rsx! {
-        section { class: "placeholder-page",
+        div { class: "analytics-page",
+            // -- Hero section --
+            div { class: "analytics-hero",
+                div { class: "analytics-hero-row",
+                    div { class: "analytics-hero-icon",
+                        Icon { icon: BsGraphUp, width: 24, height: 24 }
+                    }
+                    h2 { class: "analytics-hero-title",
+                        {t(l, "developer.analytics_title")}
+                    }
+                }
+                p { class: "analytics-hero-desc",
+                    {t(l, "developer.analytics_desc")}
+                }
+
+                // -- Connection status --
+                if connected {
+                    div { class: "agents-status",
+                        span {
+                            class: "agents-status-dot agents-status-dot--on",
+                        }
+                        span { {t(l, "developer.analytics_status_connected")} }
+                        code { class: "agents-status-url", {url.clone()} }
+                    }
+                } else {
+                    div { class: "agents-status",
+                        span {
+                            class: "agents-status-dot agents-status-dot--off",
+                        }
+                        span { {t(l, "developer.analytics_status_not_connected")} }
+                        span { class: "agents-status-hint",
+                            {t(l, "developer.analytics_config_hint")}
+                        }
+                    }
+                }
+
+                // -- SSO info --
+                if connected {
+                    p { class: "analytics-sso-hint",
+                        {t(l, "developer.analytics_sso_hint")}
+                    }
+                }
+            }
+
+            // -- Metrics bar --
             div { class: "analytics-stats-bar",
                 for metric in &metrics {
                     div { class: "analytics-stat",
                         span { class: "analytics-stat-value", "{metric.value}" }
                         span { class: "analytics-stat-label", "{metric.label}" }
-                        span { class: if metric.change_pct >= 0.0 { "analytics-stat-change analytics-stat-change--up" } else { "analytics-stat-change analytics-stat-change--down" },
+                        span {
+                            class: if metric.change_pct >= 0.0 {
+                                "analytics-stat-change analytics-stat-change--up"
+                            } else {
+                                "analytics-stat-change analytics-stat-change--down"
+                            },
                             "{metric.change_pct:+.1}%"
                         }
                     }
                 }
             }
-            div { class: "placeholder-card",
-                div { class: "placeholder-icon", "L" }
-                h2 { "{t(l, \"developer.analytics_title\")}" }
-                p { class: "placeholder-desc",
-                    "{t(l, \"developer.analytics_desc\")}"
+
+            // -- Open Langfuse button --
+            if connected {
+                a {
+                    class: "analytics-launch-btn",
+                    href: "{url}",
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    Icon { icon: BsBoxArrowUpRight, width: 16, height: 16 }
+                    span { {t(l, "developer.launch_analytics")} }
+                }
+            }
+
+            // -- Quick actions --
+            h3 { class: "agents-section-title",
+                {t(l, "developer.analytics_quick_actions")}
+            }
+
+            div { class: "agents-grid",
+                // Traces
+                a {
+                    class: if connected { "agents-card" } else { "agents-card agents-card--disabled" },
+                    href: if connected { format!("{url}/project") } else { "#".to_string() },
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsBarChartLine, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.analytics_traces")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.analytics_traces_desc")}
+                    }
+                }
+
+                // Dashboard
+                a {
+                    class: if connected { "agents-card" } else { "agents-card agents-card--disabled" },
+                    href: if connected { format!("{url}/project") } else { "#".to_string() },
+                    target: "_blank",
+                    rel: "noopener noreferrer",
+                    div { class: "agents-card-icon",
+                        Icon { icon: BsSpeedometer, width: 18, height: 18 }
+                    }
+                    div { class: "agents-card-title",
+                        {t(l, "developer.analytics_dashboard")}
+                    }
+                    div { class: "agents-card-desc",
+                        {t(l, "developer.analytics_dashboard_desc")}
+                    }
                 }
-                button { class: "btn-primary", disabled: true, "{t(l, \"developer.launch_analytics\")}" }
-                span { class: "placeholder-badge", "{t(l, \"common.coming_soon\")}" }
             }
         }
     }
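
Several of these pages resolve their target URL through a shared `ServiceUrlsContext` signal (`langgraph_url` above, `langfuse_url` here, `langflow_url` in the page that follows). A rough sketch of what that context model might look like is below; the real struct in `crate::models` may carry additional fields and is presumably populated from the corresponding environment variables (LANGGRAPH_URL, LANGFUSE_URL, LANGFLOW_URL) at application start.

```rust
/// Hypothetical shape of the shared service-URL context read via
/// `use_context::<Signal<ServiceUrlsContext>>()`; empty strings mean
/// "not configured", which is how the pages derive their `connected` flag.
#[derive(Clone, Default, PartialEq)]
pub struct ServiceUrlsContext {
    pub langgraph_url: String,
    pub langfuse_url: String,
    pub langflow_url: String,
    // ...the actual struct may expose further service URLs.
}

// Provided once near the app root (e.g. with use_context_provider) so every
// page can read it without prop drilling; how the values are loaded is not
// shown in this diff.
```
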
@@ -1,27 +1,27 @@
 use dioxus::prelude::*;

+use crate::components::ToolEmbed;
 use crate::i18n::{t, Locale};
+use crate::models::ServiceUrlsContext;

-/// Flow page placeholder for the LangFlow visual workflow builder.
+/// Flow page embedding the LangFlow visual workflow builder.
 ///
-/// Shows a "Coming Soon" card with a disabled launch button.
-/// Will eventually integrate with LangFlow for visual flow design.
+/// When `langflow_url` is configured, embeds the service in an iframe
+/// with a pop-out button. Otherwise shows a "Not Configured" placeholder.
 #[component]
 pub fn FlowPage() -> Element {
     let locale = use_context::<Signal<Locale>>();
+    let svc = use_context::<Signal<ServiceUrlsContext>>();
     let l = *locale.read();
+    let url = svc.read().langflow_url.clone();

     rsx! {
-        section { class: "placeholder-page",
-            div { class: "placeholder-card",
-                div { class: "placeholder-icon", "F" }
-                h2 { "{t(l, \"developer.flow_title\")}" }
-                p { class: "placeholder-desc",
-                    "{t(l, \"developer.flow_desc\")}"
-                }
-                button { class: "btn-primary", disabled: true, "{t(l, \"developer.launch_flow\")}" }
-                span { class: "placeholder-badge", "{t(l, \"common.coming_soon\")}" }
-            }
+        ToolEmbed {
+            url,
+            title: t(l, "developer.flow_title"),
+            description: t(l, "developer.flow_desc"),
+            icon: "F",
+            launch_label: t(l, "developer.launch_flow"),
         }
     }
 }
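
`ToolEmbed` is the shared wrapper component this page now delegates to. Based on the props passed here and the doc comment, a component of that shape could look roughly like the sketch below; the markup and class names are assumptions, and the real component in `crate::components` may differ.

```rust
use dioxus::prelude::*;

/// Rough sketch of a ToolEmbed-style component with the props used above.
/// Class names ("tool-embed", etc.) are placeholders, not the repo's CSS.
#[component]
pub fn ToolEmbed(
    url: String,
    title: String,
    description: String,
    icon: String,
    launch_label: String,
) -> Element {
    rsx! {
        if url.is_empty() {
            // Not configured: fall back to a placeholder card.
            section { class: "placeholder-page",
                div { class: "placeholder-card",
                    div { class: "placeholder-icon", "{icon}" }
                    h2 { "{title}" }
                    p { class: "placeholder-desc", "{description}" }
                }
            }
        } else {
            // Configured: embed the tool and offer a pop-out link.
            section { class: "tool-embed",
                div { class: "tool-embed-header",
                    h2 { "{title}" }
                    a { href: "{url}", target: "_blank", rel: "noopener noreferrer",
                        "{launch_label}"
                    }
                }
                iframe { src: "{url}", class: "tool-embed-frame", title: "{title}" }
            }
        }
    }
}
```
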
@@ -2,12 +2,14 @@ use dioxus::prelude::*;

 use crate::components::{MemberRow, PageHeader};
 use crate::i18n::{t, tw, Locale};
-use crate::models::{BillingUsage, MemberRole, OrgMember};
+use crate::infrastructure::litellm::get_litellm_usage;
+use crate::models::{BillingUsage, LitellmUsageStats, MemberRole, OrgMember};

 /// Organization dashboard with billing stats, member table, and invite modal.
 ///
-/// Shows current billing usage, a table of organization members
-/// with role management, and a button to invite new members.
+/// Shows current billing usage (fetched from LiteLLM), a per-model
+/// breakdown table, a table of organization members with role
+/// management, and a button to invite new members.
 #[component]
 pub fn OrgDashboardPage() -> Element {
     let locale = use_context::<Signal<Locale>>();
@@ -20,6 +22,20 @@ pub fn OrgDashboardPage() -> Element {

     let members_list = members.read().clone();

+    // Compute date range: 1st of current month to today
+    let (start_date, end_date) = current_month_range();
+
+    // Fetch real usage stats from LiteLLM via server function.
+    // use_resource memoises and won't re-fire on parent re-renders.
+    let usage_resource = use_resource(move || {
+        let start = start_date.clone();
+        let end = end_date.clone();
+        async move { get_litellm_usage(start, end).await }
+    });
+
+    // Clone out of Signal to avoid holding the borrow across rsx!
+    let usage_snapshot = usage_resource.read().clone();
+
     // Format token counts for display
     let tokens_display = format_tokens(usage.tokens_used);
     let tokens_limit_display = format_tokens(usage.tokens_limit);
@@ -30,26 +46,39 @@ pub fn OrgDashboardPage() -> Element {
             title: t(l, "org.title"),
             subtitle: t(l, "org.subtitle"),
             actions: rsx! {
-                button { class: "btn-primary", onclick: move |_| show_invite.set(true), {t(l, "org.invite_member")} }
+                button {
+                    class: "btn-primary",
+                    onclick: move |_| show_invite.set(true),
+                    {t(l, "org.invite_member")}
+                }
             },
         }

         // Stats bar
         div { class: "org-stats-bar",
             div { class: "org-stat",
-                span { class: "org-stat-value", "{usage.seats_used}/{usage.seats_total}" }
+                span { class: "org-stat-value",
+                    "{usage.seats_used}/{usage.seats_total}"
+                }
                 span { class: "org-stat-label", {t(l, "org.seats_used")} }
             }
             div { class: "org-stat",
                 span { class: "org-stat-value", "{tokens_display}" }
-                span { class: "org-stat-label", {tw(l, "org.of_tokens", &[("limit", &tokens_limit_display)])} }
+                span { class: "org-stat-label",
+                    {tw(l, "org.of_tokens", &[("limit", &tokens_limit_display)])}
+                }
             }
             div { class: "org-stat",
-                span { class: "org-stat-value", "{usage.billing_cycle_end}" }
+                span { class: "org-stat-value",
+                    "{usage.billing_cycle_end}"
+                }
                 span { class: "org-stat-label", {t(l, "org.cycle_ends")} }
             }
         }

+        // LiteLLM usage stats section
+        {render_usage_section(l, &usage_snapshot)}
+
         // Members table
         div { class: "org-table-wrapper",
             table { class: "org-table",
@@ -114,6 +143,144 @@ pub fn OrgDashboardPage() -> Element {
     }
 }

+/// Render the LiteLLM usage stats section: totals bar + per-model table.
+///
+/// Shows a loading state while the resource is pending, an error/empty
+/// message on failure, and the full breakdown on success.
+fn render_usage_section(
+    l: Locale,
+    snapshot: &Option<Result<LitellmUsageStats, ServerFnError>>,
+) -> Element {
+    match snapshot {
+        None => rsx! {
+            div { class: "org-usage-loading",
+                span { {t(l, "org.loading_usage")} }
+            }
+        },
+        Some(Err(_)) => rsx! {
+            div { class: "org-usage-unavailable",
+                span { {t(l, "org.usage_unavailable")} }
+            }
+        },
+        Some(Ok(stats)) if stats.total_tokens == 0 && stats.model_breakdown.is_empty() => {
+            rsx! {
+                div { class: "org-usage-unavailable",
+                    span { {t(l, "org.usage_unavailable")} }
+                }
+            }
+        }
+        Some(Ok(stats)) => {
+            let spend_display = format!("${:.2}", stats.total_spend);
+            let total_display = format_tokens(stats.total_tokens);
+            // Free-tier LiteLLM doesn't provide prompt/completion split
+            let has_token_split =
+                stats.total_prompt_tokens > 0 || stats.total_completion_tokens > 0;
+
+            rsx! {
+                // Usage totals bar
+                div { class: "org-stats-bar",
+                    div { class: "org-stat",
+                        span { class: "org-stat-value", "{spend_display}" }
+                        span { class: "org-stat-label",
+                            {t(l, "org.total_spend")}
+                        }
+                    }
+                    div { class: "org-stat",
+                        span { class: "org-stat-value",
+                            "{total_display}"
+                        }
+                        span { class: "org-stat-label",
+                            {t(l, "org.total_tokens")}
+                        }
+                    }
+                    // Only show prompt/completion split when available
+                    if has_token_split {
+                        div { class: "org-stat",
+                            span { class: "org-stat-value",
+                                {format_tokens(stats.total_prompt_tokens)}
+                            }
+                            span { class: "org-stat-label",
+                                {t(l, "org.prompt_tokens")}
+                            }
+                        }
+                        div { class: "org-stat",
+                            span { class: "org-stat-value",
+                                {format_tokens(stats.total_completion_tokens)}
+                            }
+                            span { class: "org-stat-label",
+                                {t(l, "org.completion_tokens")}
+                            }
+                        }
+                    }
+                }
+
+                // Per-model breakdown table
+                if !stats.model_breakdown.is_empty() {
+                    h3 { class: "org-section-title",
+                        {t(l, "org.model_usage")}
+                    }
+                    div { class: "org-table-wrapper",
+                        table { class: "org-table",
+                            thead {
+                                tr {
+                                    th { {t(l, "org.model")} }
+                                    th { {t(l, "org.tokens")} }
+                                    th { {t(l, "org.spend")} }
+                                }
+                            }
+                            tbody {
+                                for model in &stats.model_breakdown {
+                                    tr { key: "{model.model}",
+                                        td { "{model.model}" }
+                                        td {
+                                            {format_tokens(model.total_tokens)}
+                                        }
+                                        td {
+                                            {format!(
+                                                "${:.2}", model.spend
+                                            )}
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
+
+/// Compute the date range for the current billing month.
+///
+/// Returns `(start_date, end_date)` as `YYYY-MM-DD` strings where
+/// start_date is the 1st of the current month and end_date is today.
+///
+/// On the web target this uses `js_sys::Date` to read the browser clock.
+/// On the server target (SSR) it falls back to `chrono::Utc::now()`.
+fn current_month_range() -> (String, String) {
+    #[cfg(feature = "web")]
+    {
+        // js_sys::Date accesses the browser's local clock in WASM.
+        let now = js_sys::Date::new_0();
+        let year = now.get_full_year();
+        // JS months are 0-indexed, so add 1 for calendar month
+        let month = now.get_month() + 1;
+        let day = now.get_date();
+        let start = format!("{year:04}-{month:02}-01");
+        let end = format!("{year:04}-{month:02}-{day:02}");
+        (start, end)
+    }
+    #[cfg(not(feature = "web"))]
+    {
+        use chrono::Datelike;
+        let today = chrono::Utc::now().date_naive();
+        let start = format!("{:04}-{:02}-01", today.year(), today.month());
+        let end = today.format("%Y-%m-%d").to_string();
+        (start, end)
+    }
+}
+
 /// Formats a token count into a human-readable string (e.g. "1.2M").
 fn format_tokens(count: u64) -> String {
     const M: u64 = 1_000_000;
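
For reference, the `LitellmUsageStats` model consumed above can be inferred from the fields the page reads; a plausible sketch follows, with a stand-in name for the per-model row type. The actual definitions live in `crate::models`, and `get_litellm_usage` is a server function whose exact LiteLLM endpoint and aggregation logic are not shown in this diff.

```rust
use serde::{Deserialize, Serialize};

/// Hypothetical shape of the usage stats returned by `get_litellm_usage`,
/// inferred from the fields read in `render_usage_section`.
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct LitellmUsageStats {
    pub total_spend: f64,
    pub total_tokens: u64,
    pub total_prompt_tokens: u64,
    pub total_completion_tokens: u64,
    pub model_breakdown: Vec<LitellmModelUsage>,
}

/// One row of the per-model breakdown table (stand-in name).
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
pub struct LitellmModelUsage {
    pub model: String,
    pub total_tokens: u64,
    pub spend: f64,
}

// The server function consumed by the page would then look roughly like:
//
// #[server]
// pub async fn get_litellm_usage(
//     start_date: String, // "YYYY-MM-DD", from current_month_range()
//     end_date: String,
// ) -> Result<LitellmUsageStats, ServerFnError> {
//     // Query the LiteLLM proxy's spend-reporting API for the date range and
//     // aggregate per-model totals; endpoint details vary by LiteLLM version.
//     todo!()
// }
```
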
@@ -13,8 +13,8 @@ pub fn ProvidersPage() -> Element {
     let locale = use_context::<Signal<Locale>>();
     let l = *locale.read();

-    let mut selected_provider = use_signal(|| LlmProvider::Ollama);
-    let mut selected_model = use_signal(|| "llama3.1:8b".to_string());
+    let mut selected_provider = use_signal(|| LlmProvider::LiteLlm);
+    let mut selected_model = use_signal(|| "qwen3-32b".to_string());
     let mut selected_embedding = use_signal(|| "nomic-embed-text".to_string());
     let mut api_key = use_signal(String::new);
     let mut saved = use_signal(|| false);
@@ -59,12 +59,12 @@ pub fn ProvidersPage() -> Element {
                         "Hugging Face" => LlmProvider::HuggingFace,
                         "OpenAI" => LlmProvider::OpenAi,
                         "Anthropic" => LlmProvider::Anthropic,
-                        _ => LlmProvider::Ollama,
+                        _ => LlmProvider::LiteLlm,
                     };
                     selected_provider.set(prov);
                     saved.set(false);
                 },
-                option { value: "Ollama", "Ollama" }
+                option { value: "LiteLLM", "LiteLLM" }
                 option { value: "Hugging Face", "Hugging Face" }
                 option { value: "OpenAI", "OpenAI" }
                 option { value: "Anthropic", "Anthropic" }
@@ -156,23 +156,29 @@ pub fn ProvidersPage() -> Element {
 fn mock_models() -> Vec<ModelEntry> {
     vec![
         ModelEntry {
-            id: "llama3.1:8b".into(),
-            name: "Llama 3.1 8B".into(),
-            provider: LlmProvider::Ollama,
-            context_window: 128,
-        },
-        ModelEntry {
-            id: "llama3.1:70b".into(),
-            name: "Llama 3.1 70B".into(),
-            provider: LlmProvider::Ollama,
-            context_window: 128,
-        },
-        ModelEntry {
-            id: "mistral:7b".into(),
-            name: "Mistral 7B".into(),
-            provider: LlmProvider::Ollama,
+            id: "qwen3-32b".into(),
+            name: "Qwen3 32B".into(),
+            provider: LlmProvider::LiteLlm,
             context_window: 32,
         },
+        ModelEntry {
+            id: "llama-3.3-70b".into(),
+            name: "Llama 3.3 70B".into(),
+            provider: LlmProvider::LiteLlm,
+            context_window: 128,
+        },
+        ModelEntry {
+            id: "mistral-small-24b".into(),
+            name: "Mistral Small 24B".into(),
+            provider: LlmProvider::LiteLlm,
+            context_window: 32,
+        },
+        ModelEntry {
+            id: "deepseek-r1-70b".into(),
+            name: "DeepSeek R1 70B".into(),
+            provider: LlmProvider::LiteLlm,
+            context_window: 64,
+        },
         ModelEntry {
             id: "meta-llama/Llama-3.1-8B".into(),
             name: "Llama 3.1 8B".into(),
@@ -200,7 +206,7 @@ fn mock_embeddings() -> Vec<EmbeddingEntry> {
         EmbeddingEntry {
             id: "nomic-embed-text".into(),
             name: "Nomic Embed Text".into(),
-            provider: LlmProvider::Ollama,
+            provider: LlmProvider::LiteLlm,
             dimensions: 768,
         },
         EmbeddingEntry {