7 Commits

Author SHA1 Message Date
Sharang Parnerkar
8aa7c4c33c fix(litellm): gate tests on server feature to fix bare cargo test
All checks were successful
CI / Format (push) Successful in 3s
CI / Clippy (push) Successful in 2m55s
CI / Security Audit (push) Has been skipped
CI / Tests (push) Has been skipped
CI / Deploy (push) Has been skipped
CI / E2E Tests (push) Has been skipped
The litellm test module uses server-only structs (ActivityModelEntry,
SpendModelEntry) that are behind #[cfg(feature = "server")]. Gate the
test module with #[cfg(all(test, feature = "server"))] so bare
cargo test (without --features server) compiles.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 19:25:57 +01:00
Sharang Parnerkar
1a818ee5b9 fix(i18n): rename Ollama keys to LiteLLM in all language files
All checks were successful
CI / E2E Tests (pull_request) Has been skipped
CI / Format (push) Successful in 28s
CI / Clippy (pull_request) Successful in 2m53s
CI / Security Audit (pull_request) Has been skipped
CI / Tests (pull_request) Has been skipped
CI / Deploy (push) Has been skipped
CI / Deploy (pull_request) Has been skipped
CI / E2E Tests (push) Has been skipped
CI / Clippy (push) Successful in 3m1s
CI / Security Audit (push) Has been skipped
CI / Tests (push) Has been skipped
CI / Format (pull_request) Successful in 2s
The dashboard code references dashboard.litellm_settings,
dashboard.litellm_url, dashboard.litellm_url_placeholder, and
dashboard.litellm_status but the i18n JSON files still had the
old Ollama key names. Also updates hint/placeholder text to
reference LITELLM_URL / LITELLM_MODEL env vars.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 18:43:00 +01:00
Sharang Parnerkar
0cb350e26e feat(org): add LiteLLM usage stats to organization dashboard
Replace mock token usage with real data from LiteLLM free-tier APIs
(global/activity, global/activity/model, global/spend/models). Adds
per-model breakdown table, loading/error states, usage data models
with serde tests, and i18n keys for all five languages.

Also includes: replace Ollama with LiteLLM proxy, update config,
docker-compose, and provider infrastructure.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-26 18:23:46 +01:00
0deaaca848 feat: added langflow, langfuse and langgraph integrations (#17)
Some checks failed
CI / Format (push) Successful in 3s
CI / Clippy (push) Successful in 2m48s
CI / Security Audit (push) Successful in 1m41s
CI / Tests (push) Successful in 4m8s
CI / Deploy (push) Successful in 5s
CI / E2E Tests (push) Failing after 19s
Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com>
Reviewed-on: #17
2026-02-25 20:08:48 +00:00
1d7aebf37c test: added more tests (#16)
Some checks failed
CI / Format (push) Successful in 3s
CI / Clippy (push) Successful in 2m47s
CI / Security Audit (push) Successful in 1m35s
CI / Tests (push) Successful in 3m54s
CI / E2E Tests (push) Failing after 16s
CI / Deploy (push) Has been skipped
Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com>
Reviewed-on: #16
2026-02-25 10:01:56 +00:00
9085da9fae hotfix: hard-coded URL for chat in dashboard (#15)
All checks were successful
CI / Format (push) Successful in 3s
CI / Clippy (push) Successful in 2m53s
CI / Security Audit (push) Successful in 1m42s
CI / Tests (push) Successful in 4m11s
CI / Deploy (push) Successful in 3s
Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com>
Reviewed-on: #15
2026-02-24 11:52:59 +00:00
208450e618 feat: use librechat instead of own chat (#14)
All checks were successful
CI / Format (push) Successful in 2s
CI / Clippy (push) Successful in 2m48s
CI / Security Audit (push) Successful in 1m44s
CI / Tests (push) Successful in 4m11s
CI / Deploy (push) Successful in 4s
Co-authored-by: Sharang Parnerkar <parnerkarsharang@gmail.com>
Reviewed-on: #14
2026-02-24 10:45:41 +00:00
80 changed files with 5774 additions and 2623 deletions

View File

@@ -34,15 +34,21 @@ MONGODB_DATABASE=certifai
SEARXNG_URL=http://localhost:8888
# ---------------------------------------------------------------------------
# Ollama LLM instance [OPTIONAL - defaults shown]
# LiteLLM proxy [OPTIONAL - defaults shown]
# ---------------------------------------------------------------------------
OLLAMA_URL=http://localhost:11434
OLLAMA_MODEL=llama3.1:8b
LITELLM_URL=http://localhost:4000
LITELLM_MODEL=qwen3-32b
LITELLM_API_KEY=
# ---------------------------------------------------------------------------
# LibreChat (external chat via SSO) [OPTIONAL - default: http://localhost:3080]
# ---------------------------------------------------------------------------
LIBRECHAT_URL=http://localhost:3080
# ---------------------------------------------------------------------------
# LLM Providers (comma-separated list) [OPTIONAL]
# ---------------------------------------------------------------------------
LLM_PROVIDERS=ollama
LLM_PROVIDERS=litellm
# ---------------------------------------------------------------------------
# SMTP (transactional email) [OPTIONAL]
@@ -61,10 +67,11 @@ STRIPE_WEBHOOK_SECRET=
STRIPE_PUBLISHABLE_KEY=
# ---------------------------------------------------------------------------
# LangChain / LangGraph / Langfuse [OPTIONAL]
# LangChain / LangGraph / LangFlow / Langfuse [OPTIONAL]
# ---------------------------------------------------------------------------
LANGCHAIN_URL=
LANGGRAPH_URL=
LANGFLOW_URL=
LANGFUSE_URL=
# ---------------------------------------------------------------------------

View File

@@ -120,6 +120,139 @@ jobs:
run: sccache --show-stats
if: always()
# ---------------------------------------------------------------------------
# Stage 4: E2E tests (only on main, after deploy)
# ---------------------------------------------------------------------------
e2e:
name: E2E Tests
runs-on: docker
needs: [deploy]
if: github.ref == 'refs/heads/main'
container:
image: rust:1.89-bookworm
# MongoDB and SearXNG can start immediately (no repo files needed).
# Keycloak requires realm-export.json from the repo, so it is started
# manually after checkout via docker CLI.
services:
mongo:
image: mongo:latest
env:
MONGO_INITDB_ROOT_USERNAME: root
MONGO_INITDB_ROOT_PASSWORD: example
ports:
- 27017:27017
searxng:
image: searxng/searxng:latest
env:
SEARXNG_BASE_URL: http://localhost:8888
ports:
- 8888:8080
env:
KEYCLOAK_URL: http://localhost:8080
KEYCLOAK_REALM: certifai
KEYCLOAK_CLIENT_ID: certifai-dashboard
MONGODB_URI: mongodb://root:example@mongo:27017
MONGODB_DATABASE: certifai
SEARXNG_URL: http://searxng:8080
LANGGRAPH_URL: ""
LANGFLOW_URL: ""
LANGFUSE_URL: ""
steps:
- name: Checkout
run: |
git init
git remote add origin "${GITHUB_SERVER_URL}/${GITHUB_REPOSITORY}.git"
git fetch --depth=1 origin "${GITHUB_SHA}"
git checkout FETCH_HEAD
- name: Install system dependencies
run: |
apt-get update -qq
apt-get install -y -qq --no-install-recommends \
unzip curl docker.io \
libglib2.0-0 libnss3 libnspr4 libdbus-1-3 libatk1.0-0 \
libatk-bridge2.0-0 libcups2 libdrm2 libxkbcommon0 libxcomposite1 \
libxdamage1 libxfixes3 libxrandr2 libgbm1 libpango-1.0-0 \
libcairo2 libasound2 libatspi2.0-0 libxshmfence1
- name: Start Keycloak
run: |
docker run -d --name ci-keycloak --network host \
-e KC_BOOTSTRAP_ADMIN_USERNAME=admin \
-e KC_BOOTSTRAP_ADMIN_PASSWORD=admin \
-e KC_DB=dev-mem \
-e KC_HEALTH_ENABLED=true \
-v "$PWD/keycloak/realm-export.json:/opt/keycloak/data/import/realm-export.json:ro" \
-v "$PWD/keycloak/themes/certifai:/opt/keycloak/themes/certifai:ro" \
quay.io/keycloak/keycloak:26.0 start-dev --import-realm
echo "Waiting for Keycloak..."
for i in $(seq 1 60); do
if curl -sf http://localhost:8080/realms/certifai > /dev/null 2>&1; then
echo "Keycloak is ready"
break
fi
if [ "$i" -eq 60 ]; then
echo "Keycloak failed to start within 60s"
docker logs ci-keycloak
exit 1
fi
sleep 2
done
- name: Install sccache
run: |
curl -fsSL https://github.com/mozilla/sccache/releases/download/v0.9.1/sccache-v0.9.1-x86_64-unknown-linux-musl.tar.gz \
| tar xz --strip-components=1 -C /usr/local/bin/ sccache-v0.9.1-x86_64-unknown-linux-musl/sccache
chmod +x /usr/local/bin/sccache
- name: Install dioxus-cli
run: cargo install dioxus-cli --locked
- name: Install bun
run: |
curl -fsSL https://bun.sh/install | bash
echo "$HOME/.bun/bin" >> "$GITHUB_PATH"
- name: Install Playwright
run: |
export PATH="$HOME/.bun/bin:$PATH"
bun install
bunx playwright install chromium
- name: Build app
run: dx build --release
- name: Start app and run E2E tests
run: |
export PATH="$HOME/.bun/bin:$PATH"
# Start the app in the background
dx serve --release --port 8000 &
APP_PID=$!
# Wait for the app to be ready
echo "Waiting for app to start..."
for i in $(seq 1 60); do
if curl -sf http://localhost:8000 > /dev/null 2>&1; then
echo "App is ready"
break
fi
if [ "$i" -eq 60 ]; then
echo "App failed to start within 60s"
exit 1
fi
sleep 1
done
BASE_URL=http://localhost:8000 bunx playwright test --reporter=list
kill "$APP_PID" 2>/dev/null || true
- name: Upload test report
if: always()
uses: actions/upload-artifact@v4
with:
name: playwright-report
path: playwright-report/
retention-days: 7
- name: Cleanup Keycloak
if: always()
run: docker rm -f ci-keycloak 2>/dev/null || true
- name: Show sccache stats
run: sccache --show-stats
if: always()
# ---------------------------------------------------------------------------
# Stage 3: Deploy (only after tests pass, only on main)
# ---------------------------------------------------------------------------

5
.gitignore vendored
View File

@@ -22,3 +22,8 @@ keycloak/*
node_modules/
searxng/
# Playwright
e2e/.auth/
playwright-report/
test-results/

66
Cargo.lock generated
View File

@@ -773,9 +773,11 @@ dependencies = [
"dioxus-sdk",
"dotenvy",
"futures",
"js-sys",
"maud",
"mongodb",
"petname",
"pretty_assertions",
"pulldown-cmark",
"rand 0.10.0",
"reqwest 0.13.2",
@@ -783,6 +785,7 @@ dependencies = [
"secrecy",
"serde",
"serde_json",
"serial_test",
"sha2",
"thiserror 2.0.18",
"time",
@@ -882,6 +885,12 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "diff"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
[[package]]
name = "digest"
version = "0.10.7"
@@ -3246,6 +3255,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
[[package]]
name = "pretty_assertions"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d"
dependencies = [
"diff",
"yansi",
]
[[package]]
name = "prettyplease"
version = "0.2.37"
@@ -3823,6 +3842,15 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "scc"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46e6f046b7fef48e2660c57ed794263155d713de679057f2d0c169bfc6e756cc"
dependencies = [
"sdd",
]
[[package]]
name = "schannel"
version = "0.1.28"
@@ -3862,6 +3890,12 @@ dependencies = [
"untrusted",
]
[[package]]
name = "sdd"
version = "3.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "490dcfcbfef26be6800d11870ff2df8774fa6e86d047e3e8c8a76b25655e41ca"
[[package]]
name = "secrecy"
version = "0.10.3"
@@ -4082,6 +4116,32 @@ dependencies = [
"syn 2.0.116",
]
[[package]]
name = "serial_test"
version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "911bd979bf1070a3f3aa7b691a3b3e9968f339ceeec89e08c280a8a22207a32f"
dependencies = [
"futures-executor",
"futures-util",
"log",
"once_cell",
"parking_lot",
"scc",
"serial_test_derive",
]
[[package]]
name = "serial_test_derive"
version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a7d91949b85b0d2fb687445e448b40d322b6b3e4af6b44a29b21d9a5f33e6d9"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.116",
]
[[package]]
name = "servo_arc"
version = "0.4.3"
@@ -5683,6 +5743,12 @@ version = "0.8.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fdd20c5420375476fbd4394763288da7eb0cc0b8c11deed431a91562af7335d3"
[[package]]
name = "yansi"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049"
[[package]]
name = "yazi"
version = "0.1.6"

View File

@@ -61,6 +61,7 @@ secrecy = { version = "0.10", default-features = false, optional = true }
serde_json = { version = "1.0.133", default-features = false }
maud = { version = "0.27", default-features = false }
url = { version = "2.5.4", default-features = false, optional = true }
js-sys = { version = "0.3", optional = true }
wasm-bindgen = { version = "0.2", optional = true }
web-sys = { version = "0.3", optional = true, features = [
"Clipboard",
@@ -91,7 +92,7 @@ bytes = { version = "1", optional = true }
[features]
# default = ["web"]
web = ["dioxus/web", "dep:reqwest", "dep:web-sys", "dep:wasm-bindgen"]
web = ["dioxus/web", "dep:reqwest", "dep:web-sys", "dep:wasm-bindgen", "dep:js-sys"]
server = [
"dioxus/server",
"dep:axum",
@@ -112,6 +113,10 @@ server = [
"dep:bytes",
]
[dev-dependencies]
pretty_assertions = "1.4"
serial_test = "3.2"
[[bin]]
name = "dashboard"
path = "bin/main.rs"

View File

@@ -38,8 +38,6 @@
"dashboard": "Dashboard",
"providers": "Provider",
"chat": "Chat",
"tools": "Werkzeuge",
"knowledge_base": "Wissensdatenbank",
"developer": "Entwickler",
"organization": "Organisation",
"switch_light": "Zum hellen Modus wechseln",
@@ -60,40 +58,18 @@
"title": "Dashboard",
"subtitle": "KI-Nachrichten und Neuigkeiten",
"topic_placeholder": "Themenname...",
"ollama_settings": "Ollama-Einstellungen",
"settings_hint": "Leer lassen, um OLLAMA_URL / OLLAMA_MODEL aus .env zu verwenden",
"ollama_url": "Ollama-URL",
"ollama_url_placeholder": "Verwendet OLLAMA_URL aus .env",
"litellm_settings": "LiteLLM-Einstellungen",
"settings_hint": "Leer lassen, um LITELLM_URL / LITELLM_MODEL aus .env zu verwenden",
"litellm_url": "LiteLLM-URL",
"litellm_url_placeholder": "Verwendet LITELLM_URL aus .env",
"model": "Modell",
"model_placeholder": "Verwendet OLLAMA_MODEL aus .env",
"model_placeholder": "Verwendet LITELLM_MODEL aus .env",
"searching": "Suche laeuft...",
"search_failed": "Suche fehlgeschlagen: {e}",
"ollama_status": "Ollama-Status",
"litellm_status": "LiteLLM-Status",
"trending": "Im Trend",
"recent_searches": "Letzte Suchen"
},
"chat": {
"new_chat": "Neuer Chat",
"general": "Allgemein",
"conversations": "Unterhaltungen",
"news_chats": "Nachrichten-Chats",
"all_chats": "Alle Chats",
"no_conversations": "Noch keine Unterhaltungen",
"type_message": "Nachricht eingeben...",
"model_label": "Modell:",
"no_models": "Keine Modelle verfuegbar",
"send_to_start": "Senden Sie eine Nachricht, um die Unterhaltung zu starten.",
"you": "Sie",
"assistant": "Assistent",
"thinking": "Denkt nach...",
"copy_response": "Letzte Antwort kopieren",
"copy_conversation": "Unterhaltung kopieren",
"edit_last": "Letzte Nachricht bearbeiten",
"just_now": "gerade eben",
"minutes_ago": "vor {n} Min.",
"hours_ago": "vor {n} Std.",
"days_ago": "vor {n} T."
},
"providers": {
"title": "Provider",
"subtitle": "Konfigurieren Sie Ihre LLM- und Embedding-Backends",
@@ -107,37 +83,6 @@
"active_config": "Aktive Konfiguration",
"embedding": "Embedding"
},
"tools": {
"title": "Werkzeuge",
"subtitle": "MCP-Server und Werkzeugintegrationen verwalten",
"calculator": "Taschenrechner",
"calculator_desc": "Mathematische Berechnungen und Einheitenumrechnung",
"tavily": "Tavily-Suche",
"tavily_desc": "KI-optimierte Websuche-API fuer Echtzeitinformationen",
"searxng": "SearXNG",
"searxng_desc": "Datenschutzfreundliche Metasuchmaschine",
"file_reader": "Dateileser",
"file_reader_desc": "Lokale Dateien in verschiedenen Formaten lesen und analysieren",
"code_executor": "Code-Ausfuehrer",
"code_executor_desc": "Isolierte Codeausfuehrung fuer Python und JavaScript",
"web_scraper": "Web-Scraper",
"web_scraper_desc": "Strukturierte Daten aus Webseiten extrahieren",
"email_sender": "E-Mail-Versand",
"email_sender_desc": "E-Mails ueber konfigurierten SMTP-Server versenden",
"git_ops": "Git-Operationen",
"git_ops_desc": "Mit Git-Repositories fuer Versionskontrolle interagieren"
},
"knowledge": {
"title": "Wissensdatenbank",
"subtitle": "Dokumente fuer RAG-Abfragen verwalten",
"search_placeholder": "Dateien suchen...",
"name": "Name",
"type": "Typ",
"size": "Groesse",
"chunks": "Abschnitte",
"uploaded": "Hochgeladen",
"actions": "Aktionen"
},
"developer": {
"agents_title": "Agent Builder",
"agents_desc": "Erstellen und verwalten Sie KI-Agenten mit LangGraph. Erstellen Sie mehrstufige Schlussfolgerungspipelines, werkzeugnutzende Agenten und autonome Workflows.",
@@ -151,7 +96,38 @@
"total_requests": "Anfragen gesamt",
"avg_latency": "Durchschn. Latenz",
"tokens_used": "Verbrauchte Token",
"error_rate": "Fehlerrate"
"error_rate": "Fehlerrate",
"not_configured": "Nicht konfiguriert",
"open_new_tab": "In neuem Tab oeffnen",
"agents_status_connected": "Verbunden",
"agents_status_not_connected": "Nicht verbunden",
"agents_config_hint": "Setzen Sie LANGGRAPH_URL in .env, um eine Verbindung herzustellen",
"agents_quick_start": "Schnellstart",
"agents_docs": "Dokumentation",
"agents_docs_desc": "Offizielle LangGraph-Dokumentation und API-Anleitungen.",
"agents_getting_started": "Erste Schritte",
"agents_getting_started_desc": "Schritt-fuer-Schritt-Anleitung zum Erstellen Ihres ersten Agenten.",
"agents_github": "GitHub",
"agents_github_desc": "Quellcode, Issues und Community-Beitraege.",
"agents_examples": "Beispiele",
"agents_examples_desc": "Einsatzbereite Vorlagen und Beispielprojekte fuer Agenten.",
"agents_api_ref": "API-Referenz",
"agents_api_ref_desc": "Lokale Swagger-Dokumentation fuer Ihre LangGraph-Instanz.",
"agents_running_title": "Laufende Agenten",
"agents_none": "Keine Agenten registriert. Stellen Sie einen Assistenten in LangGraph bereit, um ihn hier zu sehen.",
"agents_col_name": "Name",
"agents_col_id": "ID",
"agents_col_description": "Beschreibung",
"agents_col_status": "Status",
"analytics_status_connected": "Verbunden",
"analytics_status_not_connected": "Nicht verbunden",
"analytics_config_hint": "Setzen Sie LANGFUSE_URL in .env, um eine Verbindung herzustellen",
"analytics_sso_hint": "Langfuse nutzt Keycloak-SSO. Sie werden automatisch mit Ihrem CERTifAI-Konto angemeldet.",
"analytics_quick_actions": "Schnellaktionen",
"analytics_traces": "Traces",
"analytics_traces_desc": "Alle LLM-Aufrufe, Latenzen und Token-Verbrauch anzeigen und filtern.",
"analytics_dashboard": "Dashboard",
"analytics_dashboard_desc": "Ueberblick ueber Kosten, Qualitaetsmetriken und Nutzungstrends."
},
"org": {
"title": "Organisation",
@@ -168,6 +144,16 @@
"email_address": "E-Mail-Adresse",
"email_placeholder": "kollege@firma.de",
"send_invite": "Einladung senden",
"total_spend": "Gesamtausgaben",
"total_tokens": "Tokens gesamt",
"model_usage": "Nutzung nach Modell",
"model": "Modell",
"tokens": "Tokens",
"spend": "Ausgaben",
"usage_unavailable": "Nutzungsdaten nicht verfuegbar",
"loading_usage": "Nutzungsdaten werden geladen...",
"prompt_tokens": "Prompt-Tokens",
"completion_tokens": "Antwort-Tokens",
"pricing_title": "Preise",
"pricing_subtitle": "Waehlen Sie den passenden Plan fuer Ihre Organisation"
},

View File

@@ -38,8 +38,6 @@
"dashboard": "Dashboard",
"providers": "Providers",
"chat": "Chat",
"tools": "Tools",
"knowledge_base": "Knowledge Base",
"developer": "Developer",
"organization": "Organization",
"switch_light": "Switch to light mode",
@@ -60,40 +58,18 @@
"title": "Dashboard",
"subtitle": "AI news and updates",
"topic_placeholder": "Topic name...",
"ollama_settings": "Ollama Settings",
"settings_hint": "Leave empty to use OLLAMA_URL / OLLAMA_MODEL from .env",
"ollama_url": "Ollama URL",
"ollama_url_placeholder": "Uses OLLAMA_URL from .env",
"litellm_settings": "LiteLLM Settings",
"settings_hint": "Leave empty to use LITELLM_URL / LITELLM_MODEL from .env",
"litellm_url": "LiteLLM URL",
"litellm_url_placeholder": "Uses LITELLM_URL from .env",
"model": "Model",
"model_placeholder": "Uses OLLAMA_MODEL from .env",
"model_placeholder": "Uses LITELLM_MODEL from .env",
"searching": "Searching...",
"search_failed": "Search failed: {e}",
"ollama_status": "Ollama Status",
"litellm_status": "LiteLLM Status",
"trending": "Trending",
"recent_searches": "Recent Searches"
},
"chat": {
"new_chat": "New Chat",
"general": "General",
"conversations": "Conversations",
"news_chats": "News Chats",
"all_chats": "All Chats",
"no_conversations": "No conversations yet",
"type_message": "Type a message...",
"model_label": "Model:",
"no_models": "No models available",
"send_to_start": "Send a message to start the conversation.",
"you": "You",
"assistant": "Assistant",
"thinking": "Thinking...",
"copy_response": "Copy last response",
"copy_conversation": "Copy conversation",
"edit_last": "Edit last message",
"just_now": "just now",
"minutes_ago": "{n}m ago",
"hours_ago": "{n}h ago",
"days_ago": "{n}d ago"
},
"providers": {
"title": "Providers",
"subtitle": "Configure your LLM and embedding backends",
@@ -107,37 +83,6 @@
"active_config": "Active Configuration",
"embedding": "Embedding"
},
"tools": {
"title": "Tools",
"subtitle": "Manage MCP servers and tool integrations",
"calculator": "Calculator",
"calculator_desc": "Mathematical computation and unit conversion",
"tavily": "Tavily Search",
"tavily_desc": "AI-optimized web search API for real-time information",
"searxng": "SearXNG",
"searxng_desc": "Privacy-respecting metasearch engine",
"file_reader": "File Reader",
"file_reader_desc": "Read and parse local files in various formats",
"code_executor": "Code Executor",
"code_executor_desc": "Sandboxed code execution for Python and JavaScript",
"web_scraper": "Web Scraper",
"web_scraper_desc": "Extract structured data from web pages",
"email_sender": "Email Sender",
"email_sender_desc": "Send emails via configured SMTP server",
"git_ops": "Git Operations",
"git_ops_desc": "Interact with Git repositories for version control"
},
"knowledge": {
"title": "Knowledge Base",
"subtitle": "Manage documents for RAG retrieval",
"search_placeholder": "Search files...",
"name": "Name",
"type": "Type",
"size": "Size",
"chunks": "Chunks",
"uploaded": "Uploaded",
"actions": "Actions"
},
"developer": {
"agents_title": "Agent Builder",
"agents_desc": "Build and manage AI agents with LangGraph. Create multi-step reasoning pipelines, tool-using agents, and autonomous workflows.",
@@ -151,7 +96,38 @@
"total_requests": "Total Requests",
"avg_latency": "Avg Latency",
"tokens_used": "Tokens Used",
"error_rate": "Error Rate"
"error_rate": "Error Rate",
"not_configured": "Not Configured",
"open_new_tab": "Open in New Tab",
"agents_status_connected": "Connected",
"agents_status_not_connected": "Not Connected",
"agents_config_hint": "Set LANGGRAPH_URL in .env to connect",
"agents_quick_start": "Quick Start",
"agents_docs": "Documentation",
"agents_docs_desc": "Official LangGraph documentation and API guides.",
"agents_getting_started": "Getting Started",
"agents_getting_started_desc": "Step-by-step tutorial to build your first agent.",
"agents_github": "GitHub",
"agents_github_desc": "Source code, issues, and community contributions.",
"agents_examples": "Examples",
"agents_examples_desc": "Ready-to-use templates and example agent projects.",
"agents_api_ref": "API Reference",
"agents_api_ref_desc": "Local Swagger docs for your LangGraph instance.",
"agents_running_title": "Running Agents",
"agents_none": "No agents registered. Deploy an assistant to LangGraph to see it here.",
"agents_col_name": "Name",
"agents_col_id": "ID",
"agents_col_description": "Description",
"agents_col_status": "Status",
"analytics_status_connected": "Connected",
"analytics_status_not_connected": "Not Connected",
"analytics_config_hint": "Set LANGFUSE_URL in .env to connect",
"analytics_sso_hint": "Langfuse uses Keycloak SSO. You will be signed in automatically with your CERTifAI account.",
"analytics_quick_actions": "Quick Actions",
"analytics_traces": "Traces",
"analytics_traces_desc": "View and filter all LLM call traces, latencies, and token usage.",
"analytics_dashboard": "Dashboard",
"analytics_dashboard_desc": "Overview of costs, quality metrics, and usage trends."
},
"org": {
"title": "Organization",
@@ -168,6 +144,16 @@
"email_address": "Email Address",
"email_placeholder": "colleague@company.com",
"send_invite": "Send Invite",
"total_spend": "Total Spend",
"total_tokens": "Total Tokens",
"model_usage": "Usage by Model",
"model": "Model",
"tokens": "Tokens",
"spend": "Spend",
"usage_unavailable": "Usage data unavailable",
"loading_usage": "Loading usage data...",
"prompt_tokens": "Prompt Tokens",
"completion_tokens": "Completion Tokens",
"pricing_title": "Pricing",
"pricing_subtitle": "Choose the plan that fits your organization"
},

View File

@@ -38,8 +38,6 @@
"dashboard": "Panel de control",
"providers": "Proveedores",
"chat": "Chat",
"tools": "Herramientas",
"knowledge_base": "Base de conocimiento",
"developer": "Desarrollador",
"organization": "Organizacion",
"switch_light": "Cambiar a modo claro",
@@ -60,40 +58,18 @@
"title": "Panel de control",
"subtitle": "Noticias y actualizaciones de IA",
"topic_placeholder": "Nombre del tema...",
"ollama_settings": "Configuracion de Ollama",
"settings_hint": "Dejar vacio para usar OLLAMA_URL / OLLAMA_MODEL del archivo .env",
"ollama_url": "URL de Ollama",
"ollama_url_placeholder": "Usa OLLAMA_URL del archivo .env",
"litellm_settings": "Configuracion de LiteLLM",
"settings_hint": "Dejar vacio para usar LITELLM_URL / LITELLM_MODEL del archivo .env",
"litellm_url": "URL de LiteLLM",
"litellm_url_placeholder": "Usa LITELLM_URL del archivo .env",
"model": "Modelo",
"model_placeholder": "Usa OLLAMA_MODEL del archivo .env",
"model_placeholder": "Usa LITELLM_MODEL del archivo .env",
"searching": "Buscando...",
"search_failed": "La busqueda fallo: {e}",
"ollama_status": "Estado de Ollama",
"litellm_status": "Estado de LiteLLM",
"trending": "Tendencias",
"recent_searches": "Busquedas recientes"
},
"chat": {
"new_chat": "Nuevo chat",
"general": "General",
"conversations": "Conversaciones",
"news_chats": "Chats de noticias",
"all_chats": "Todos los chats",
"no_conversations": "Aun no hay conversaciones",
"type_message": "Escriba un mensaje...",
"model_label": "Modelo:",
"no_models": "No hay modelos disponibles",
"send_to_start": "Envie un mensaje para iniciar la conversacion.",
"you": "Usted",
"assistant": "Asistente",
"thinking": "Pensando...",
"copy_response": "Copiar ultima respuesta",
"copy_conversation": "Copiar conversacion",
"edit_last": "Editar ultimo mensaje",
"just_now": "justo ahora",
"minutes_ago": "hace {n}m",
"hours_ago": "hace {n}h",
"days_ago": "hace {n}d"
},
"providers": {
"title": "Proveedores",
"subtitle": "Configure sus backends de LLM y embeddings",
@@ -107,37 +83,6 @@
"active_config": "Configuracion activa",
"embedding": "Embedding"
},
"tools": {
"title": "Herramientas",
"subtitle": "Gestione servidores MCP e integraciones de herramientas",
"calculator": "Calculadora",
"calculator_desc": "Calculo matematico y conversion de unidades",
"tavily": "Tavily Search",
"tavily_desc": "API de busqueda web optimizada con IA para informacion en tiempo real",
"searxng": "SearXNG",
"searxng_desc": "Motor de metabusqueda que respeta la privacidad",
"file_reader": "Lector de archivos",
"file_reader_desc": "Leer y analizar archivos locales en varios formatos",
"code_executor": "Ejecutor de codigo",
"code_executor_desc": "Ejecucion de codigo en entorno aislado para Python y JavaScript",
"web_scraper": "Web Scraper",
"web_scraper_desc": "Extraer datos estructurados de paginas web",
"email_sender": "Envio de correo",
"email_sender_desc": "Enviar correos electronicos a traves del servidor SMTP configurado",
"git_ops": "Operaciones Git",
"git_ops_desc": "Interactuar con repositorios Git para control de versiones"
},
"knowledge": {
"title": "Base de conocimiento",
"subtitle": "Gestione documentos para recuperacion RAG",
"search_placeholder": "Buscar archivos...",
"name": "Nombre",
"type": "Tipo",
"size": "Tamano",
"chunks": "Fragmentos",
"uploaded": "Subido",
"actions": "Acciones"
},
"developer": {
"agents_title": "Constructor de agentes",
"agents_desc": "Construya y gestione agentes de IA con LangGraph. Cree pipelines de razonamiento de varios pasos, agentes que utilizan herramientas y flujos de trabajo autonomos.",
@@ -151,7 +96,38 @@
"total_requests": "Total de solicitudes",
"avg_latency": "Latencia promedio",
"tokens_used": "Tokens utilizados",
"error_rate": "Tasa de errores"
"error_rate": "Tasa de errores",
"not_configured": "No configurado",
"open_new_tab": "Abrir en nueva pestana",
"agents_status_connected": "Conectado",
"agents_status_not_connected": "No conectado",
"agents_config_hint": "Configure LANGGRAPH_URL en .env para conectar",
"agents_quick_start": "Inicio rapido",
"agents_docs": "Documentacion",
"agents_docs_desc": "Documentacion oficial de LangGraph y guias de API.",
"agents_getting_started": "Primeros pasos",
"agents_getting_started_desc": "Tutorial paso a paso para crear su primer agente.",
"agents_github": "GitHub",
"agents_github_desc": "Codigo fuente, issues y contribuciones de la comunidad.",
"agents_examples": "Ejemplos",
"agents_examples_desc": "Plantillas y proyectos de agentes listos para usar.",
"agents_api_ref": "Referencia API",
"agents_api_ref_desc": "Documentacion Swagger local para su instancia de LangGraph.",
"agents_running_title": "Agentes en ejecucion",
"agents_none": "No hay agentes registrados. Despliegue un asistente en LangGraph para verlo aqui.",
"agents_col_name": "Nombre",
"agents_col_id": "ID",
"agents_col_description": "Descripcion",
"agents_col_status": "Estado",
"analytics_status_connected": "Conectado",
"analytics_status_not_connected": "No conectado",
"analytics_config_hint": "Configure LANGFUSE_URL en .env para conectar",
"analytics_sso_hint": "Langfuse utiliza SSO de Keycloak. Iniciara sesion automaticamente con su cuenta CERTifAI.",
"analytics_quick_actions": "Acciones rapidas",
"analytics_traces": "Trazas",
"analytics_traces_desc": "Ver y filtrar todas las llamadas LLM, latencias y uso de tokens.",
"analytics_dashboard": "Panel de control",
"analytics_dashboard_desc": "Resumen de costos, metricas de calidad y tendencias de uso."
},
"org": {
"title": "Organizacion",
@@ -168,6 +144,16 @@
"email_address": "Direccion de correo electronico",
"email_placeholder": "colega@empresa.com",
"send_invite": "Enviar invitacion",
"total_spend": "Gasto total",
"total_tokens": "Tokens totales",
"model_usage": "Uso por modelo",
"model": "Modelo",
"tokens": "Tokens",
"spend": "Gasto",
"usage_unavailable": "Datos de uso no disponibles",
"loading_usage": "Cargando datos de uso...",
"prompt_tokens": "Tokens de entrada",
"completion_tokens": "Tokens de respuesta",
"pricing_title": "Precios",
"pricing_subtitle": "Elija el plan que se adapte a su organizacion"
},

View File

@@ -38,8 +38,6 @@
"dashboard": "Tableau de bord",
"providers": "Fournisseurs",
"chat": "Chat",
"tools": "Outils",
"knowledge_base": "Base de connaissances",
"developer": "Developpeur",
"organization": "Organisation",
"switch_light": "Passer en mode clair",
@@ -60,40 +58,18 @@
"title": "Tableau de bord",
"subtitle": "Actualites et mises a jour IA",
"topic_placeholder": "Nom du sujet...",
"ollama_settings": "Parametres Ollama",
"settings_hint": "Laissez vide pour utiliser OLLAMA_URL / OLLAMA_MODEL du fichier .env",
"ollama_url": "URL Ollama",
"ollama_url_placeholder": "Utilise OLLAMA_URL du fichier .env",
"litellm_settings": "Parametres LiteLLM",
"settings_hint": "Laissez vide pour utiliser LITELLM_URL / LITELLM_MODEL du fichier .env",
"litellm_url": "URL LiteLLM",
"litellm_url_placeholder": "Utilise LITELLM_URL du fichier .env",
"model": "Modele",
"model_placeholder": "Utilise OLLAMA_MODEL du fichier .env",
"model_placeholder": "Utilise LITELLM_MODEL du fichier .env",
"searching": "Recherche en cours...",
"search_failed": "Echec de la recherche : {e}",
"ollama_status": "Statut Ollama",
"litellm_status": "Statut LiteLLM",
"trending": "Tendances",
"recent_searches": "Recherches recentes"
},
"chat": {
"new_chat": "Nouvelle conversation",
"general": "General",
"conversations": "Conversations",
"news_chats": "Conversations actualites",
"all_chats": "Toutes les conversations",
"no_conversations": "Aucune conversation pour le moment",
"type_message": "Saisissez un message...",
"model_label": "Modele :",
"no_models": "Aucun modele disponible",
"send_to_start": "Envoyez un message pour demarrer la conversation.",
"you": "Vous",
"assistant": "Assistant",
"thinking": "Reflexion en cours...",
"copy_response": "Copier la derniere reponse",
"copy_conversation": "Copier la conversation",
"edit_last": "Modifier le dernier message",
"just_now": "a l'instant",
"minutes_ago": "il y a {n} min",
"hours_ago": "il y a {n} h",
"days_ago": "il y a {n} j"
},
"providers": {
"title": "Fournisseurs",
"subtitle": "Configurez vos backends LLM et d'embeddings",
@@ -107,37 +83,6 @@
"active_config": "Configuration active",
"embedding": "Embedding"
},
"tools": {
"title": "Outils",
"subtitle": "Gerez les serveurs MCP et les integrations d'outils",
"calculator": "Calculatrice",
"calculator_desc": "Calcul mathematique et conversion d'unites",
"tavily": "Recherche Tavily",
"tavily_desc": "API de recherche web optimisee par IA pour des informations en temps reel",
"searxng": "SearXNG",
"searxng_desc": "Metamoteur de recherche respectueux de la vie privee",
"file_reader": "Lecteur de fichiers",
"file_reader_desc": "Lire et analyser des fichiers locaux dans divers formats",
"code_executor": "Executeur de code",
"code_executor_desc": "Execution de code en bac a sable pour Python et JavaScript",
"web_scraper": "Extracteur web",
"web_scraper_desc": "Extraire des donnees structurees a partir de pages web",
"email_sender": "Envoi d'e-mails",
"email_sender_desc": "Envoyer des e-mails via le serveur SMTP configure",
"git_ops": "Operations Git",
"git_ops_desc": "Interagir avec les depots Git pour le controle de version"
},
"knowledge": {
"title": "Base de connaissances",
"subtitle": "Gerez les documents pour la recuperation RAG",
"search_placeholder": "Rechercher des fichiers...",
"name": "Nom",
"type": "Type",
"size": "Taille",
"chunks": "Segments",
"uploaded": "Importe",
"actions": "Actions"
},
"developer": {
"agents_title": "Constructeur d'agents",
"agents_desc": "Construisez et gerez des agents IA avec LangGraph. Creez des pipelines de raisonnement multi-etapes, des agents utilisant des outils et des flux de travail autonomes.",
@@ -151,7 +96,38 @@
"total_requests": "Requetes totales",
"avg_latency": "Latence moyenne",
"tokens_used": "Tokens utilises",
"error_rate": "Taux d'erreur"
"error_rate": "Taux d'erreur",
"not_configured": "Non configure",
"open_new_tab": "Ouvrir dans un nouvel onglet",
"agents_status_connected": "Connecte",
"agents_status_not_connected": "Non connecte",
"agents_config_hint": "Definissez LANGGRAPH_URL dans .env pour vous connecter",
"agents_quick_start": "Demarrage rapide",
"agents_docs": "Documentation",
"agents_docs_desc": "Documentation officielle de LangGraph et guides API.",
"agents_getting_started": "Premiers pas",
"agents_getting_started_desc": "Tutoriel etape par etape pour creer votre premier agent.",
"agents_github": "GitHub",
"agents_github_desc": "Code source, issues et contributions de la communaute.",
"agents_examples": "Exemples",
"agents_examples_desc": "Modeles et projets d'agents prets a l'emploi.",
"agents_api_ref": "Reference API",
"agents_api_ref_desc": "Documentation Swagger locale pour votre instance LangGraph.",
"agents_running_title": "Agents en cours",
"agents_none": "Aucun agent enregistre. Deployez un assistant dans LangGraph pour le voir ici.",
"agents_col_name": "Nom",
"agents_col_id": "ID",
"agents_col_description": "Description",
"agents_col_status": "Statut",
"analytics_status_connected": "Connecte",
"analytics_status_not_connected": "Non connecte",
"analytics_config_hint": "Definissez LANGFUSE_URL dans .env pour vous connecter",
"analytics_sso_hint": "Langfuse utilise le SSO Keycloak. Vous serez connecte automatiquement avec votre compte CERTifAI.",
"analytics_quick_actions": "Actions rapides",
"analytics_traces": "Traces",
"analytics_traces_desc": "Afficher et filtrer tous les appels LLM, latences et consommation de tokens.",
"analytics_dashboard": "Tableau de bord",
"analytics_dashboard_desc": "Apercu des couts, metriques de qualite et tendances d'utilisation."
},
"org": {
"title": "Organisation",
@@ -168,6 +144,16 @@
"email_address": "Adresse e-mail",
"email_placeholder": "collegue@entreprise.com",
"send_invite": "Envoyer l'invitation",
"total_spend": "Depenses totales",
"total_tokens": "Tokens totaux",
"model_usage": "Utilisation par modele",
"model": "Modele",
"tokens": "Tokens",
"spend": "Depenses",
"usage_unavailable": "Donnees d'utilisation indisponibles",
"loading_usage": "Chargement des donnees d'utilisation...",
"prompt_tokens": "Tokens d'entree",
"completion_tokens": "Tokens de reponse",
"pricing_title": "Tarifs",
"pricing_subtitle": "Choisissez le plan adapte a votre organisation"
},

View File

@@ -38,8 +38,6 @@
"dashboard": "Painel",
"providers": "Fornecedores",
"chat": "Chat",
"tools": "Ferramentas",
"knowledge_base": "Base de Conhecimento",
"developer": "Programador",
"organization": "Organizacao",
"switch_light": "Mudar para modo claro",
@@ -60,40 +58,18 @@
"title": "Painel",
"subtitle": "Noticias e atualizacoes de IA",
"topic_placeholder": "Nome do topico...",
"ollama_settings": "Definicoes do Ollama",
"settings_hint": "Deixe vazio para usar OLLAMA_URL / OLLAMA_MODEL do .env",
"ollama_url": "URL do Ollama",
"ollama_url_placeholder": "Utiliza OLLAMA_URL do .env",
"litellm_settings": "Definicoes do LiteLLM",
"settings_hint": "Deixe vazio para usar LITELLM_URL / LITELLM_MODEL do .env",
"litellm_url": "URL do LiteLLM",
"litellm_url_placeholder": "Utiliza LITELLM_URL do .env",
"model": "Modelo",
"model_placeholder": "Utiliza OLLAMA_MODEL do .env",
"model_placeholder": "Utiliza LITELLM_MODEL do .env",
"searching": "A pesquisar...",
"search_failed": "A pesquisa falhou: {e}",
"ollama_status": "Estado do Ollama",
"litellm_status": "Estado do LiteLLM",
"trending": "Em destaque",
"recent_searches": "Pesquisas recentes"
},
"chat": {
"new_chat": "Nova conversa",
"general": "Geral",
"conversations": "Conversas",
"news_chats": "Conversas de noticias",
"all_chats": "Todas as conversas",
"no_conversations": "Ainda sem conversas",
"type_message": "Escreva uma mensagem...",
"model_label": "Modelo:",
"no_models": "Nenhum modelo disponivel",
"send_to_start": "Envie uma mensagem para iniciar a conversa.",
"you": "Voce",
"assistant": "Assistente",
"thinking": "A pensar...",
"copy_response": "Copiar ultima resposta",
"copy_conversation": "Copiar conversa",
"edit_last": "Editar ultima mensagem",
"just_now": "agora mesmo",
"minutes_ago": "ha {n}m",
"hours_ago": "ha {n}h",
"days_ago": "ha {n}d"
},
"providers": {
"title": "Fornecedores",
"subtitle": "Configure os seus backends de LLM e embeddings",
@@ -107,37 +83,6 @@
"active_config": "Configuracao Ativa",
"embedding": "Embedding"
},
"tools": {
"title": "Ferramentas",
"subtitle": "Gerir servidores MCP e integracoes de ferramentas",
"calculator": "Calculadora",
"calculator_desc": "Calculo matematico e conversao de unidades",
"tavily": "Pesquisa Tavily",
"tavily_desc": "API de pesquisa web otimizada por IA para informacao em tempo real",
"searxng": "SearXNG",
"searxng_desc": "Motor de metapesquisa que respeita a privacidade",
"file_reader": "Leitor de Ficheiros",
"file_reader_desc": "Ler e analisar ficheiros locais em varios formatos",
"code_executor": "Executor de Codigo",
"code_executor_desc": "Execucao de codigo em sandbox para Python e JavaScript",
"web_scraper": "Web Scraper",
"web_scraper_desc": "Extrair dados estruturados de paginas web",
"email_sender": "Envio de Email",
"email_sender_desc": "Enviar emails atraves do servidor SMTP configurado",
"git_ops": "Operacoes Git",
"git_ops_desc": "Interagir com repositorios Git para controlo de versoes"
},
"knowledge": {
"title": "Base de Conhecimento",
"subtitle": "Gerir documentos para recuperacao RAG",
"search_placeholder": "Pesquisar ficheiros...",
"name": "Nome",
"type": "Tipo",
"size": "Tamanho",
"chunks": "Fragmentos",
"uploaded": "Carregado",
"actions": "Acoes"
},
"developer": {
"agents_title": "Construtor de Agentes",
"agents_desc": "Construa e gira agentes de IA com LangGraph. Crie pipelines de raciocinio multi-etapa, agentes com ferramentas e fluxos de trabalho autonomos.",
@@ -151,7 +96,38 @@
"total_requests": "Total de Pedidos",
"avg_latency": "Latencia Media",
"tokens_used": "Tokens Utilizados",
"error_rate": "Taxa de Erros"
"error_rate": "Taxa de Erros",
"not_configured": "Nao configurado",
"open_new_tab": "Abrir em novo separador",
"agents_status_connected": "Conectado",
"agents_status_not_connected": "Nao conectado",
"agents_config_hint": "Defina LANGGRAPH_URL no .env para conectar",
"agents_quick_start": "Inicio rapido",
"agents_docs": "Documentacao",
"agents_docs_desc": "Documentacao oficial do LangGraph e guias de API.",
"agents_getting_started": "Primeiros passos",
"agents_getting_started_desc": "Tutorial passo a passo para criar o seu primeiro agente.",
"agents_github": "GitHub",
"agents_github_desc": "Codigo fonte, issues e contribuicoes da comunidade.",
"agents_examples": "Exemplos",
"agents_examples_desc": "Modelos e projetos de agentes prontos a usar.",
"agents_api_ref": "Referencia API",
"agents_api_ref_desc": "Documentacao Swagger local para a sua instancia LangGraph.",
"agents_running_title": "Agentes em execucao",
"agents_none": "Nenhum agente registado. Implemente um assistente no LangGraph para o ver aqui.",
"agents_col_name": "Nome",
"agents_col_id": "ID",
"agents_col_description": "Descricao",
"agents_col_status": "Estado",
"analytics_status_connected": "Conectado",
"analytics_status_not_connected": "Nao conectado",
"analytics_config_hint": "Defina LANGFUSE_URL no .env para conectar",
"analytics_sso_hint": "O Langfuse utiliza SSO do Keycloak. Sera autenticado automaticamente com a sua conta CERTifAI.",
"analytics_quick_actions": "Acoes rapidas",
"analytics_traces": "Traces",
"analytics_traces_desc": "Ver e filtrar todas as chamadas LLM, latencias e uso de tokens.",
"analytics_dashboard": "Painel",
"analytics_dashboard_desc": "Resumo de custos, metricas de qualidade e tendencias de uso."
},
"org": {
"title": "Organizacao",
@@ -168,6 +144,16 @@
"email_address": "Endereco de Email",
"email_placeholder": "colega@empresa.com",
"send_invite": "Enviar Convite",
"total_spend": "Gasto total",
"total_tokens": "Tokens totais",
"model_usage": "Uso por modelo",
"model": "Modelo",
"tokens": "Tokens",
"spend": "Gasto",
"usage_unavailable": "Dados de uso indisponiveis",
"loading_usage": "A carregar dados de uso...",
"prompt_tokens": "Tokens de entrada",
"completion_tokens": "Tokens de resposta",
"pricing_title": "Precos",
"pricing_subtitle": "Escolha o plano adequado a sua organizacao"
},

View File

@@ -2591,6 +2591,58 @@ h6 {
border-radius: 20px;
}
/* ===== Tool Embed (iframe integration) ===== */
.tool-embed {
display: flex;
flex-direction: column;
flex: 1;
height: calc(100vh - 60px);
min-height: 400px;
}
.tool-embed-toolbar {
display: flex;
align-items: center;
justify-content: space-between;
padding: 12px 20px;
background-color: var(--bg-card);
border-bottom: 1px solid var(--border-primary);
}
.tool-embed-title {
font-family: 'Space Grotesk', sans-serif;
font-size: 16px;
font-weight: 600;
color: var(--text-heading);
}
.tool-embed-popout-btn {
display: inline-flex;
align-items: center;
gap: 6px;
padding: 6px 14px;
font-size: 13px;
font-weight: 500;
color: var(--accent);
background-color: transparent;
border: 1px solid var(--accent);
border-radius: 6px;
text-decoration: none;
cursor: pointer;
transition: background-color 0.15s, color 0.15s;
}
.tool-embed-popout-btn:hover {
background-color: var(--accent);
color: var(--bg-body);
}
.tool-embed-iframe {
flex: 1;
width: 100%;
border: none;
}
/* ===== Analytics Stats Bar ===== */
.analytics-stats-bar {
display: flex;
@@ -3322,4 +3374,342 @@ h6 {
.feature-card {
padding: 20px 16px;
}
}
/* ===== Agents Page ===== */
.agents-page {
display: flex;
flex-direction: column;
padding: 32px;
gap: 32px;
}
.agents-hero {
max-width: 720px;
display: flex;
flex-direction: column;
gap: 12px;
}
.agents-hero-row {
display: flex;
flex-direction: row;
align-items: center;
gap: 16px;
}
.agents-hero-icon {
width: 48px;
height: 48px;
min-width: 48px;
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
color: var(--avatar-text);
border-radius: 12px;
font-size: 24px;
display: flex;
align-items: center;
justify-content: center;
font-weight: 700;
}
.agents-hero-title {
font-family: 'Space Grotesk', sans-serif;
font-size: 28px;
font-weight: 700;
color: var(--text-heading);
margin: 0;
}
.agents-hero-desc {
font-size: 15px;
color: var(--text-muted);
line-height: 1.6;
max-width: 600px;
margin: 0;
}
.agents-status {
display: inline-flex;
align-items: center;
gap: 8px;
font-size: 13px;
}
.agents-status-dot {
width: 8px;
height: 8px;
border-radius: 50%;
display: inline-block;
flex-shrink: 0;
}
.agents-status-dot--on {
background-color: #22c55e;
}
.agents-status-dot--off {
background-color: var(--text-faint);
}
.agents-status-url {
font-family: 'JetBrains Mono', 'Fira Code', monospace;
color: var(--accent);
font-size: 13px;
}
.agents-status-hint {
font-size: 13px;
color: var(--text-faint);
font-style: italic;
}
.agents-section-title {
font-size: 18px;
font-weight: 600;
color: var(--text-heading);
margin: 0 0 12px 0;
}
.agents-grid {
display: grid;
grid-template-columns: repeat(3, 1fr);
gap: 16px;
}
.agents-card {
display: block;
text-decoration: none;
background-color: var(--bg-card);
border: 1px solid var(--border-primary);
border-radius: 12px;
padding: 24px;
transition: border-color 0.2s, transform 0.2s;
cursor: pointer;
}
.agents-card:hover {
border-color: var(--accent);
transform: translateY(-2px);
}
.agents-card-icon {
width: 36px;
height: 36px;
min-width: 36px;
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
color: var(--avatar-text);
border-radius: 8px;
font-size: 18px;
display: flex;
align-items: center;
justify-content: center;
font-weight: 700;
}
.agents-card-title {
font-size: 16px;
font-weight: 600;
color: var(--text-heading);
margin: 12px 0 4px;
}
.agents-card-desc {
font-size: 13px;
color: var(--text-muted);
line-height: 1.5;
}
.agents-card--disabled {
opacity: 0.4;
pointer-events: none;
cursor: default;
}
/* -- Agents table -- */
.agents-table-section {
max-width: 960px;
}
.agents-table-wrap {
overflow-x: auto;
}
.agents-table {
width: 100%;
border-collapse: collapse;
font-size: 14px;
}
.agents-table thead th {
text-align: left;
font-size: 12px;
font-weight: 600;
color: var(--text-faint);
text-transform: uppercase;
letter-spacing: 0.04em;
padding: 8px 12px;
border-bottom: 1px solid var(--border-secondary);
}
.agents-table tbody td {
padding: 10px 12px;
border-bottom: 1px solid var(--border-primary);
color: var(--text-primary);
vertical-align: middle;
}
.agents-table tbody tr:hover {
background-color: var(--bg-surface);
}
.agents-cell-name {
font-weight: 600;
color: var(--text-heading);
white-space: nowrap;
}
.agents-cell-id {
font-family: 'JetBrains Mono', 'Fira Code', monospace;
font-size: 12px;
color: var(--text-muted);
}
.agents-cell-desc {
max-width: 300px;
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap;
color: var(--text-muted);
}
.agents-cell-none {
color: var(--text-faint);
}
.agents-badge {
display: inline-block;
font-size: 12px;
font-weight: 600;
padding: 2px 10px;
border-radius: 9999px;
}
.agents-badge--active {
background-color: rgba(34, 197, 94, 0.15);
color: #22c55e;
}
.agents-table-loading,
.agents-table-empty {
font-size: 14px;
color: var(--text-faint);
font-style: italic;
padding: 16px 0;
}
@media (max-width: 768px) {
.agents-grid {
grid-template-columns: repeat(2, 1fr);
}
}
@media (max-width: 480px) {
.agents-page,
.analytics-page {
padding: 20px 16px;
}
.agents-grid {
grid-template-columns: 1fr;
}
}
/* ===== Analytics Page ===== */
.analytics-page {
display: flex;
flex-direction: column;
padding: 32px;
gap: 32px;
}
.analytics-hero {
max-width: 720px;
display: flex;
flex-direction: column;
gap: 12px;
}
.analytics-hero-row {
display: flex;
flex-direction: row;
align-items: center;
gap: 16px;
}
.analytics-hero-icon {
width: 48px;
height: 48px;
min-width: 48px;
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
color: var(--avatar-text);
border-radius: 12px;
font-size: 24px;
display: flex;
align-items: center;
justify-content: center;
}
.analytics-hero-title {
font-family: 'Space Grotesk', sans-serif;
font-size: 28px;
font-weight: 700;
color: var(--text-heading);
margin: 0;
}
.analytics-hero-desc {
font-size: 15px;
color: var(--text-muted);
line-height: 1.6;
max-width: 600px;
margin: 0;
}
.analytics-sso-hint {
font-size: 13px;
color: var(--text-muted);
font-style: italic;
margin: 0;
}
.analytics-launch-btn {
display: inline-flex;
align-items: center;
gap: 8px;
padding: 10px 20px;
background: linear-gradient(135deg, var(--accent), var(--accent-secondary));
color: var(--avatar-text);
font-size: 14px;
font-weight: 600;
border-radius: 8px;
text-decoration: none;
transition: opacity 0.2s, transform 0.2s;
width: fit-content;
}
.analytics-launch-btn:hover {
opacity: 0.9;
transform: translateY(-1px);
}
.analytics-stats-bar {
display: flex;
gap: 16px;
flex-wrap: wrap;
}
@media (max-width: 768px) {
.analytics-stats-bar {
flex-direction: column;
}
}

View File

@@ -1,4 +1,4 @@
/*! tailwindcss v4.2.0 | MIT License | https://tailwindcss.com */
/*! tailwindcss v4.2.1 | MIT License | https://tailwindcss.com */
@layer properties;
@layer theme, base, components, utilities;
@layer theme {
@@ -162,59 +162,6 @@
}
}
@layer utilities {
.diff {
@layer daisyui.l1.l2.l3 {
position: relative;
display: grid;
width: 100%;
overflow: hidden;
webkit-user-select: none;
user-select: none;
grid-template-rows: 1fr 1.8rem 1fr;
direction: ltr;
container-type: inline-size;
grid-template-columns: auto 1fr;
&:focus-visible, &:has(.diff-item-1:focus-visible) {
outline-style: var(--tw-outline-style);
outline-width: 2px;
outline-offset: 1px;
outline-color: var(--color-base-content);
}
&:focus-visible {
outline-style: var(--tw-outline-style);
outline-width: 2px;
outline-offset: 1px;
outline-color: var(--color-base-content);
.diff-resizer {
min-width: 95cqi;
max-width: 95cqi;
}
}
&:has(.diff-item-1:focus-visible) {
outline-style: var(--tw-outline-style);
outline-width: 2px;
outline-offset: 1px;
.diff-resizer {
min-width: 5cqi;
max-width: 5cqi;
}
}
@supports (-webkit-overflow-scrolling: touch) and (overflow: -webkit-paged-x) {
&:focus {
.diff-resizer {
min-width: 5cqi;
max-width: 5cqi;
}
}
&:has(.diff-item-1:focus) {
.diff-resizer {
min-width: 95cqi;
max-width: 95cqi;
}
}
}
}
}
.modal {
@layer daisyui.l1.l2.l3 {
pointer-events: none;
@@ -1110,31 +1057,98 @@
}
}
}
.chat-bubble {
.range {
@layer daisyui.l1.l2.l3 {
position: relative;
display: block;
width: fit-content;
border-radius: var(--radius-field);
background-color: var(--color-base-300);
padding-inline: calc(0.25rem * 4);
padding-block: calc(0.25rem * 2);
color: var(--color-base-content);
grid-row-end: 3;
min-height: 2rem;
min-width: 2.5rem;
max-width: 90%;
&:before {
position: absolute;
bottom: calc(0.25rem * 0);
height: calc(0.25rem * 3);
width: calc(0.25rem * 3);
background-color: inherit;
content: "";
mask-repeat: no-repeat;
mask-image: var(--mask-chat);
mask-position: 0px -1px;
mask-size: 0.8125rem;
appearance: none;
-webkit-appearance: none;
--range-thumb: var(--color-base-100);
--range-thumb-size: calc(var(--size-selector, 0.25rem) * 6);
--range-progress: currentColor;
--range-fill: 1;
--range-p: 0.25rem;
--range-bg: currentColor;
@supports (color: color-mix(in lab, red, red)) {
--range-bg: color-mix(in oklab, currentColor 10%, #0000);
}
cursor: pointer;
overflow: hidden;
background-color: transparent;
vertical-align: middle;
width: clamp(3rem, 20rem, 100%);
--radius-selector-max: calc(
var(--radius-selector) + var(--radius-selector) + var(--radius-selector)
);
border-radius: calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)));
border: none;
height: var(--range-thumb-size);
[dir="rtl"] & {
--range-dir: -1;
}
&:focus {
outline: none;
}
&:focus-visible {
outline: 2px solid;
outline-offset: 2px;
}
&::-webkit-slider-runnable-track {
width: 100%;
background-color: var(--range-bg);
border-radius: var(--radius-selector);
height: calc(var(--range-thumb-size) * 0.5);
}
@media (forced-colors: active) {
&::-webkit-slider-runnable-track {
border: 1px solid;
}
}
@media (forced-colors: active) {
&::-moz-range-track {
border: 1px solid;
}
}
&::-webkit-slider-thumb {
position: relative;
box-sizing: border-box;
border-radius: calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)));
background-color: var(--range-thumb);
height: var(--range-thumb-size);
width: var(--range-thumb-size);
border: var(--range-p) solid;
appearance: none;
-webkit-appearance: none;
top: 50%;
color: var(--range-progress);
transform: translateY(-50%);
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px currentColor, 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
@supports (color: color-mix(in lab, red, red)) {
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000), 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
}
}
&::-moz-range-track {
width: 100%;
background-color: var(--range-bg);
border-radius: var(--radius-selector);
height: calc(var(--range-thumb-size) * 0.5);
}
&::-moz-range-thumb {
position: relative;
box-sizing: border-box;
border-radius: calc(var(--radius-selector) + min(var(--range-p), var(--radius-selector-max)));
background-color: currentColor;
height: var(--range-thumb-size);
width: var(--range-thumb-size);
border: var(--range-p) solid;
top: 50%;
color: var(--range-progress);
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px currentColor, 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
@supports (color: color-mix(in lab, red, red)) {
box-shadow: 0 -1px oklch(0% 0 0 / calc(var(--depth) * 0.1)) inset, 0 8px 0 -4px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset, 0 1px color-mix(in oklab, currentColor calc(var(--depth) * 10%), #0000), 0 0 0 2rem var(--range-thumb) inset, calc((var(--range-dir, 1) * -100cqw) - (var(--range-dir, 1) * var(--range-thumb-size) / 2)) 0 0 calc(100cqw * var(--range-fill));
}
}
&:disabled {
cursor: not-allowed;
opacity: 30%;
}
}
}
@@ -1525,81 +1539,6 @@
padding: calc(0.25rem * 4);
}
}
.textarea {
@layer daisyui.l1.l2.l3 {
border: var(--border) solid #0000;
min-height: calc(0.25rem * 20);
flex-shrink: 1;
appearance: none;
border-radius: var(--radius-field);
background-color: var(--color-base-100);
padding-block: calc(0.25rem * 2);
vertical-align: middle;
width: clamp(3rem, 20rem, 100%);
padding-inline-start: 0.75rem;
padding-inline-end: 0.75rem;
font-size: max(var(--font-size, 0.875rem), 0.875rem);
touch-action: manipulation;
border-color: var(--input-color);
box-shadow: 0 1px var(--input-color) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset;
@supports (color: color-mix(in lab, red, red)) {
box-shadow: 0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000) inset, 0 -1px oklch(100% 0 0 / calc(var(--depth) * 0.1)) inset;
}
--input-color: var(--color-base-content);
@supports (color: color-mix(in lab, red, red)) {
--input-color: color-mix(in oklab, var(--color-base-content) 20%, #0000);
}
textarea {
appearance: none;
background-color: transparent;
border: none;
&:focus, &:focus-within {
--tw-outline-style: none;
outline-style: none;
@media (forced-colors: active) {
outline: 2px solid transparent;
outline-offset: 2px;
}
}
}
&:focus, &:focus-within {
--input-color: var(--color-base-content);
box-shadow: 0 1px var(--input-color);
@supports (color: color-mix(in lab, red, red)) {
box-shadow: 0 1px color-mix(in oklab, var(--input-color) calc(var(--depth) * 10%), #0000);
}
outline: 2px solid var(--input-color);
outline-offset: 2px;
isolation: isolate;
}
@media (pointer: coarse) {
@supports (-webkit-touch-callout: none) {
&:focus, &:focus-within {
--font-size: 1rem;
}
}
}
&:has(> textarea[disabled]), &:is(:disabled, [disabled]) {
cursor: not-allowed;
border-color: var(--color-base-200);
background-color: var(--color-base-200);
color: var(--color-base-content);
@supports (color: color-mix(in lab, red, red)) {
color: color-mix(in oklab, var(--color-base-content) 40%, transparent);
}
&::placeholder {
color: var(--color-base-content);
@supports (color: color-mix(in lab, red, red)) {
color: color-mix(in oklab, var(--color-base-content) 20%, transparent);
}
}
box-shadow: none;
}
&:has(> textarea[disabled]) > textarea[disabled] {
cursor: not-allowed;
}
}
}
.stack {
@layer daisyui.l1.l2.l3 {
display: inline-grid;

View File

@@ -8,6 +8,7 @@
"tailwindcss": "^4.1.18",
},
"devDependencies": {
"@playwright/test": "^1.52.0",
"@types/bun": "latest",
},
"peerDependencies": {
@@ -16,6 +17,8 @@
},
},
"packages": {
"@playwright/test": ["@playwright/test@1.58.2", "", { "dependencies": { "playwright": "1.58.2" }, "bin": { "playwright": "cli.js" } }, "sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA=="],
"@types/bun": ["@types/bun@1.3.9", "", { "dependencies": { "bun-types": "1.3.9" } }, "sha512-KQ571yULOdWJiMH+RIWIOZ7B2RXQGpL1YQrBtLIV3FqDcCu6FsbFUBwhdKUlCKUpS3PJDsHlJ1QKlpxoVR+xtw=="],
"@types/node": ["@types/node@25.2.3", "", { "dependencies": { "undici-types": "~7.16.0" } }, "sha512-m0jEgYlYz+mDJZ2+F4v8D1AyQb+QzsNqRuI7xg1VQX/KlKS0qT9r1Mo16yo5F/MtifXFgaofIFsdFMox2SxIbQ=="],
@@ -24,6 +27,12 @@
"daisyui": ["daisyui@5.5.18", "", {}, "sha512-VVzjpOitMGB6DWIBeRSapbjdOevFqyzpk9u5Um6a4tyId3JFrU5pbtF0vgjXDth76mJZbueN/j9Ok03SPrh/og=="],
"fsevents": ["fsevents@2.3.2", "", { "os": "darwin" }, "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA=="],
"playwright": ["playwright@1.58.2", "", { "dependencies": { "playwright-core": "1.58.2" }, "optionalDependencies": { "fsevents": "2.3.2" }, "bin": { "playwright": "cli.js" } }, "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A=="],
"playwright-core": ["playwright-core@1.58.2", "", { "bin": { "playwright-core": "cli.js" } }, "sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg=="],
"tailwindcss": ["tailwindcss@4.1.18", "", {}, "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw=="],
"typescript": ["typescript@5.9.3", "", { "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" } }, "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw=="],

View File

@@ -1,13 +1,12 @@
version: '3.8'
services:
keycloak:
image: quay.io/keycloak/keycloak:26.0
container_name: certifai-keycloak
environment:
KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: admin
KC_BOOTSTRAP_ADMIN_USERNAME: admin
KC_BOOTSTRAP_ADMIN_PASSWORD: admin
KC_DB: dev-mem
KC_HEALTH_ENABLED: "true"
ports:
- "8080:8080"
command:
@@ -17,10 +16,11 @@ services:
- ./keycloak/realm-export.json:/opt/keycloak/data/import/realm-export.json:ro
- ./keycloak/themes/certifai:/opt/keycloak/themes/certifai:ro
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health/ready"]
test: ["CMD-SHELL", "exec 3<>/dev/tcp/localhost/8080 && echo -e 'GET /realms/master HTTP/1.1\\r\\nHost: localhost\\r\\nConnection: close\\r\\n\\r\\n' >&3 && head -1 <&3 | grep -q '200 OK'"]
interval: 10s
timeout: 5s
retries: 5
retries: 10
start_period: 30s
mongo:
image: mongo:latest
@@ -40,4 +40,219 @@ services:
environment:
- SEARXNG_BASE_URL=http://localhost:8888
volumes:
- ./searxng:/etc/searxng:rw
- ./searxng:/etc/searxng:rw
librechat:
image: ghcr.io/danny-avila/librechat:latest
container_name: certifai-librechat
restart: unless-stopped
# Use host networking so localhost:8080 (Keycloak) is reachable for
# OIDC discovery, and the browser redirect URLs match the issuer.
network_mode: host
depends_on:
keycloak:
condition: service_healthy
mongo:
condition: service_started
environment:
# LiteLLM API key (used by librechat.yaml endpoint config)
LITELLM_API_KEY: ${LITELLM_API_KEY:-}
# MongoDB (use localhost since we're on host network)
MONGO_URI: mongodb://root:example@localhost:27017/librechat?authSource=admin
DOMAIN_CLIENT: http://localhost:3080
DOMAIN_SERVER: http://localhost:3080
# Allow HTTP for local dev OIDC (Keycloak on localhost without TLS)
NODE_TLS_REJECT_UNAUTHORIZED: "0"
NODE_ENV: development
# Keycloak OIDC SSO
OPENID_ISSUER: http://localhost:8080/realms/certifai
OPENID_CLIENT_ID: certifai-librechat
OPENID_CLIENT_SECRET: certifai-librechat-secret
OPENID_SESSION_SECRET: "a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6a1b2c3d4e5f6"
OPENID_CALLBACK_URL: /oauth/openid/callback
OPENID_SCOPE: openid profile email
OPENID_BUTTON_LABEL: Login with CERTifAI
# Disable local auth (SSO only)
ALLOW_EMAIL_LOGIN: "false"
ALLOW_REGISTRATION: "false"
ALLOW_SOCIAL_LOGIN: "true"
ALLOW_SOCIAL_REGISTRATION: "true"
# JWT / encryption secrets (required by LibreChat)
CREDS_KEY: "97e95d72cdda06774a264f9fb7768097a6815dc1e930898d2e39c9a3a253b157"
CREDS_IV: "2ea456ab25279089b0ff9e7aca1df6e6"
JWT_SECRET: "767b962176666eab56e180e6f2d3fe95145dc6b978e37d4eb8d1da5421c5fb26"
JWT_REFRESH_SECRET: "51a43a1fca4b7b501b37e226a638645d962066e0686b82248921f3160e96501e"
# App settings
APP_TITLE: CERTifAI Chat
CUSTOM_FOOTER: CERTifAI - Sovereign GenAI Infrastructure
HOST: 0.0.0.0
PORT: "3080"
NO_INDEX: "true"
volumes:
- ./librechat/librechat.yaml:/app/librechat.yaml:ro
- ./librechat/logo.svg:/app/client/public/assets/logo.svg:ro
# Patch: allow HTTP issuer for local dev (openid-client v6 enforces HTTPS)
- ./librechat/openidStrategy.js:/app/api/strategies/openidStrategy.js:ro
- librechat-data:/app/data
langflow:
image: langflowai/langflow:latest
container_name: certifai-langflow
restart: unless-stopped
ports:
- "7860:7860"
environment:
LANGFLOW_AUTO_LOGIN: "true"
langgraph:
image: langchain/langgraph-trial:3.12
container_name: certifai-langgraph
restart: unless-stopped
depends_on:
langgraph-db:
condition: service_started
langgraph-redis:
condition: service_started
ports:
- "8123:8000"
environment:
DATABASE_URI: postgresql://langgraph:langgraph@langgraph-db:5432/langgraph
REDIS_URI: redis://langgraph-redis:6379
langgraph-db:
image: postgres:16
container_name: certifai-langgraph-db
restart: unless-stopped
environment:
POSTGRES_USER: langgraph
POSTGRES_PASSWORD: langgraph
POSTGRES_DB: langgraph
volumes:
- langgraph-db-data:/var/lib/postgresql/data
langgraph-redis:
image: redis:7-alpine
container_name: certifai-langgraph-redis
restart: unless-stopped
langfuse:
image: langfuse/langfuse:3
container_name: certifai-langfuse
restart: unless-stopped
depends_on:
keycloak:
condition: service_healthy
langfuse-db:
condition: service_healthy
langfuse-clickhouse:
condition: service_healthy
langfuse-redis:
condition: service_healthy
langfuse-minio:
condition: service_healthy
ports:
- "3000:3000"
environment:
DATABASE_URL: postgresql://langfuse:langfuse@langfuse-db:5432/langfuse
NEXTAUTH_URL: http://localhost:3000
NEXTAUTH_SECRET: certifai-langfuse-dev-secret
SALT: certifai-langfuse-dev-salt
ENCRYPTION_KEY: "0000000000000000000000000000000000000000000000000000000000000000"
# Keycloak OIDC SSO - shared realm with CERTifAI dashboard
AUTH_KEYCLOAK_CLIENT_ID: certifai-langfuse
AUTH_KEYCLOAK_CLIENT_SECRET: certifai-langfuse-secret
AUTH_KEYCLOAK_ISSUER: http://keycloak:8080/realms/certifai
AUTH_KEYCLOAK_ALLOW_ACCOUNT_LINKING: "true"
# Disable local email/password auth (SSO only)
AUTH_DISABLE_USERNAME_PASSWORD: "true"
CLICKHOUSE_URL: http://langfuse-clickhouse:8123
CLICKHOUSE_MIGRATION_URL: clickhouse://langfuse-clickhouse:9000
CLICKHOUSE_USER: clickhouse
CLICKHOUSE_PASSWORD: clickhouse
CLICKHOUSE_CLUSTER_ENABLED: "false"
REDIS_HOST: langfuse-redis
REDIS_PORT: "6379"
REDIS_AUTH: langfuse-dev-redis
LANGFUSE_S3_EVENT_UPLOAD_BUCKET: langfuse
LANGFUSE_S3_EVENT_UPLOAD_REGION: auto
LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: minio
LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: miniosecret
LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: http://langfuse-minio:9000
LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: "true"
LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: langfuse
LANGFUSE_S3_MEDIA_UPLOAD_REGION: auto
LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: minio
LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: miniosecret
LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: http://langfuse-minio:9000
LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: "true"
langfuse-db:
image: postgres:16
container_name: certifai-langfuse-db
restart: unless-stopped
environment:
POSTGRES_USER: langfuse
POSTGRES_PASSWORD: langfuse
POSTGRES_DB: langfuse
volumes:
- langfuse-db-data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U langfuse"]
interval: 5s
timeout: 5s
retries: 10
langfuse-clickhouse:
image: clickhouse/clickhouse-server:latest
container_name: certifai-langfuse-clickhouse
restart: unless-stopped
user: "101:101"
environment:
CLICKHOUSE_DB: default
CLICKHOUSE_USER: clickhouse
CLICKHOUSE_PASSWORD: clickhouse
ulimits:
nofile:
soft: 262144
hard: 262144
volumes:
- langfuse-clickhouse-data:/var/lib/clickhouse
- langfuse-clickhouse-logs:/var/log/clickhouse-server
healthcheck:
test: ["CMD-SHELL", "wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1"]
interval: 5s
timeout: 5s
retries: 10
langfuse-redis:
image: redis:7-alpine
container_name: certifai-langfuse-redis
restart: unless-stopped
command: redis-server --requirepass langfuse-dev-redis
healthcheck:
test: ["CMD", "redis-cli", "-a", "langfuse-dev-redis", "ping"]
interval: 5s
timeout: 5s
retries: 10
langfuse-minio:
image: cgr.dev/chainguard/minio
container_name: certifai-langfuse-minio
restart: unless-stopped
entrypoint: sh
command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
environment:
MINIO_ROOT_USER: minio
MINIO_ROOT_PASSWORD: miniosecret
healthcheck:
test: ["CMD-SHELL", "mc ready local || exit 1"]
interval: 5s
timeout: 5s
retries: 10
volumes:
librechat-data:
langgraph-db-data:
langfuse-db-data:
langfuse-clickhouse-data:
langfuse-clickhouse-logs:

24
e2e/auth.setup.ts Normal file
View File

@@ -0,0 +1,24 @@
import { test as setup, expect } from "@playwright/test";

// Path where the authenticated browser state (cookies + localStorage) is saved
// for reuse by the other spec projects.
const storagePath = "e2e/.auth/user.json";

setup("authenticate via Keycloak", async ({ page }) => {
  const username = process.env.TEST_USER ?? "admin@certifai.local";
  const password = process.env.TEST_PASSWORD ?? "admin";

  // Hitting a protected route triggers the redirect chain:
  // /dashboard -> /auth (Axum) -> Keycloak login page.
  await page.goto("/dashboard");
  await page.waitForSelector("#username", { timeout: 15_000 });

  // Submit the Keycloak login form.
  await page.fill("#username", username);
  await page.fill("#password", password);
  await page.click("#kc-login");

  // The app should redirect back to the dashboard once a session exists.
  await page.waitForURL("**/dashboard", { timeout: 15_000 });
  await expect(page.locator(".sidebar")).toBeVisible();

  // Persist the authenticated state for the rest of the suite.
  await page.context().storageState({ path: storagePath });
});

72
e2e/auth.spec.ts Normal file
View File

@@ -0,0 +1,72 @@
import { test, expect, type Page } from "@playwright/test";

// These tests use a fresh browser context (no saved auth state)
test.use({ storageState: { cookies: [], origins: [] } });

/**
 * Drive the Keycloak login form with the test credentials and wait for the
 * redirect back to the app dashboard. The same sequence was previously
 * duplicated verbatim in three tests below.
 */
async function loginViaKeycloak(page: Page): Promise<void> {
  await page.goto("/dashboard");
  await page.waitForSelector("#username", { timeout: 15_000 });
  await page.fill("#username", process.env.TEST_USER ?? "admin@certifai.local");
  await page.fill("#password", process.env.TEST_PASSWORD ?? "admin");
  await page.click("#kc-login");
  await page.waitForURL("**/dashboard", { timeout: 15_000 });
}

test.describe("Authentication flow", () => {
  test("unauthenticated visit to /dashboard redirects to Keycloak", async ({
    page,
  }) => {
    await page.goto("/dashboard");
    // Should end up on Keycloak login page
    await page.waitForSelector("#username", { timeout: 15_000 });
    await expect(page.locator("#kc-login")).toBeVisible();
  });

  test("valid credentials log in and redirect to dashboard", async ({
    page,
  }) => {
    await loginViaKeycloak(page);
    await expect(page.locator(".dashboard-page")).toBeVisible();
  });

  test("dashboard shows sidebar with user info after login", async ({
    page,
  }) => {
    await loginViaKeycloak(page);
    await expect(page.locator(".sidebar-name")).toBeVisible();
    await expect(page.locator(".sidebar-email")).toBeVisible();
  });

  test("logout redirects away from dashboard", async ({ page }) => {
    // First log in
    await loginViaKeycloak(page);
    // Click logout
    await page.locator('a.logout-btn, a[href="/logout"]').click();
    // Should no longer be on the dashboard
    await expect(page).not.toHaveURL(/\/dashboard/);
  });
});

75
e2e/dashboard.spec.ts Normal file
View File

@@ -0,0 +1,75 @@
import { test, expect } from "@playwright/test";

test.describe("Dashboard", () => {
  test.beforeEach(async ({ page }) => {
    await page.goto("/dashboard");
    // WASM hydration and the auth check must finish before assertions run.
    await page.waitForSelector(".dashboard-page", { timeout: 15_000 });
  });

  test("dashboard page loads with page header", async ({ page }) => {
    await expect(page.locator(".page-header")).toContainText("Dashboard");
  });

  test("default topic chips are visible", async ({ page }) => {
    const topics = ["AI", "Technology", "Science", "Finance", "Writing", "Research"];
    for (const label of topics) {
      await expect(page.locator(".filter-tab", { hasText: label })).toBeVisible();
    }
  });

  test("clicking a topic chip triggers search", async ({ page }) => {
    await page.locator(".filter-tab", { hasText: "AI" }).click();
    // Either a loading state, a results grid, or an empty state should appear.
    const outcome = page.locator(".dashboard-loading, .news-grid, .dashboard-empty");
    await expect(outcome.first()).toBeVisible({ timeout: 10_000 });
  });

  test("news cards render after search completes", async ({ page }) => {
    // Trigger a search via a topic chip.
    await page.locator(".filter-tab", { hasText: "Technology" }).click();
    // Wait for the spinner to vanish; it may already be gone.
    await page
      .waitForSelector(".dashboard-loading", { state: "hidden", timeout: 15_000 })
      .catch(() => {});
    // Either news cards or an empty-state message should be visible.
    const content = page.locator(".news-grid .news-card, .dashboard-empty");
    await expect(content.first()).toBeVisible({ timeout: 10_000 });
  });

  test("clicking a news card opens article detail panel", async ({ page }) => {
    // Trigger a search and let it settle.
    await page.locator(".filter-tab", { hasText: "AI" }).click();
    await page
      .waitForSelector(".dashboard-loading", { state: "hidden", timeout: 15_000 })
      .catch(() => {});
    const firstCard = page.locator(".news-card").first();
    // Search results depend on live data, so only assert when a card rendered.
    if (await firstCard.isVisible().catch(() => false)) {
      await firstCard.click();
      await expect(page.locator(".dashboard-right, .dashboard-split")).toBeVisible();
    }
  });

  test("settings toggle opens settings panel", async ({ page }) => {
    await page.locator(".settings-toggle").click();
    await expect(page.locator(".settings-panel")).toBeVisible();
    await expect(page.locator(".settings-panel-title")).toBeVisible();
  });
});

173
e2e/developer.spec.ts Normal file
View File

@@ -0,0 +1,173 @@
import { test, expect, type Page } from "@playwright/test";

/** Open the agents page and wait for the view to hydrate. */
async function openAgentsPage(page: Page): Promise<void> {
  await page.goto("/developer/agents");
  await page.waitForSelector(".agents-page", { timeout: 15_000 });
}

/** Open the analytics page and wait for the view to hydrate. */
async function openAnalyticsPage(page: Page): Promise<void> {
  await page.goto("/developer/analytics");
  await page.waitForSelector(".analytics-page", { timeout: 15_000 });
}

test.describe("Developer section", () => {
  test("agents page loads with sub-nav tabs", async ({ page }) => {
    await page.goto("/developer/agents");
    await page.waitForSelector(".developer-shell", { timeout: 15_000 });
    const nav = page.locator(".sub-nav");
    await expect(nav.locator("a", { hasText: "Agents" })).toBeVisible();
    await expect(nav.locator("a", { hasText: "Flow" })).toBeVisible();
    await expect(nav.locator("a", { hasText: "Analytics" })).toBeVisible();
  });

  test("agents page renders informational landing", async ({ page }) => {
    await openAgentsPage(page);
    // Hero section
    await expect(page.locator(".agents-hero-title")).toContainText(
      "Agent Builder"
    );
    await expect(page.locator(".agents-hero-desc")).toBeVisible();
    // Connection status indicator is present
    await expect(page.locator(".agents-status")).toBeVisible();
  });

  test("agents page shows Not Connected when URL is empty", async ({
    page,
  }) => {
    await openAgentsPage(page);
    await expect(page.locator(".agents-status")).toContainText(
      "Not Connected"
    );
    await expect(page.locator(".agents-status-dot--off")).toBeVisible();
    await expect(page.locator(".agents-status-hint")).toBeVisible();
  });

  test("agents page shows quick start cards", async ({ page }) => {
    await openAgentsPage(page);
    const grid = page.locator(".agents-grid");
    await expect(grid.locator(".agents-card")).toHaveCount(5);
    // Verify every expected card title is rendered
    const titles = [
      "Documentation",
      "Getting Started",
      "GitHub",
      "Examples",
      "API Reference",
    ];
    for (const title of titles) {
      await expect(
        grid.locator(".agents-card-title", { hasText: title })
      ).toBeVisible();
    }
  });

  test("agents page disables API Reference card when not connected", async ({
    page,
  }) => {
    await openAgentsPage(page);
    // When LANGGRAPH_URL is empty, the API Reference card should be disabled
    const statusHint = page.locator(".agents-status-hint");
    if (await statusHint.isVisible()) {
      const apiCard = page.locator(".agents-card--disabled");
      await expect(apiCard).toBeVisible();
      await expect(
        apiCard.locator(".agents-card-title")
      ).toContainText("API Reference");
    }
  });

  test("agents page shows running agents section", async ({ page }) => {
    await openAgentsPage(page);
    // The running agents section title should always be visible
    await expect(
      page.locator(".agents-section-title", { hasText: "Running Agents" })
    ).toBeVisible();
    // Either the table or the empty message must eventually render; use a
    // polling visibility assertion instead of a fixed 3 s sleep (flaky under
    // load, and always slow when the data arrives quickly).
    const table = page.locator(".agents-table");
    const empty = page.locator(".agents-table-empty");
    await expect(table.or(empty).first()).toBeVisible({ timeout: 10_000 });
  });

  test("agents page shows connected status when URL is configured", async ({
    page,
  }) => {
    // This test only passes when LANGGRAPH_URL is set in the environment.
    await openAgentsPage(page);
    const connectedDot = page.locator(".agents-status-dot--on");
    const disconnectedDot = page.locator(".agents-status-dot--off");
    if (await connectedDot.isVisible()) {
      await expect(page.locator(".agents-status")).toContainText("Connected");
      await expect(page.locator(".agents-status-url")).toBeVisible();
      // API Reference card should NOT be disabled
      await expect(page.locator(".agents-card--disabled")).toHaveCount(0);
    } else {
      await expect(disconnectedDot).toBeVisible();
      await expect(page.locator(".agents-status")).toContainText(
        "Not Connected"
      );
    }
  });

  test("analytics page renders informational landing", async ({ page }) => {
    await openAnalyticsPage(page);
    // Hero section
    await expect(page.locator(".analytics-hero-title")).toBeVisible();
    await expect(page.locator(".analytics-hero-desc")).toBeVisible();
    // Connection status indicator
    await expect(page.locator(".agents-status")).toBeVisible();
    // Metrics bar
    await expect(page.locator(".analytics-stats-bar")).toBeVisible();
  });

  test("analytics page shows Not Connected when URL is empty", async ({
    page,
  }) => {
    await openAnalyticsPage(page);
    await expect(page.locator(".agents-status")).toContainText(
      "Not Connected"
    );
    await expect(page.locator(".agents-status-dot--off")).toBeVisible();
  });

  test("analytics page shows quick action cards", async ({ page }) => {
    await openAnalyticsPage(page);
    const grid = page.locator(".agents-grid");
    const cards = grid.locator(".agents-card, .agents-card--disabled");
    await expect(cards).toHaveCount(2);
  });

  test("analytics page shows SSO hint when connected", async ({ page }) => {
    // Only meaningful when LANGFUSE_URL is configured.
    await openAnalyticsPage(page);
    const connectedDot = page.locator(".agents-status-dot--on");
    if (await connectedDot.isVisible()) {
      await expect(page.locator(".analytics-sso-hint")).toBeVisible();
      await expect(page.locator(".analytics-launch-btn")).toBeVisible();
    }
  });
});

52
e2e/navigation.spec.ts Normal file
View File

@@ -0,0 +1,52 @@
import { test, expect } from "@playwright/test";

test.describe("Sidebar navigation", () => {
  test.beforeEach(async ({ page }) => {
    await page.goto("/dashboard");
    await page.waitForSelector(".sidebar", { timeout: 15_000 });
  });

  test("sidebar links route to correct pages", async ({ page }) => {
    const routes = [
      { label: "Providers", url: /\/providers/ },
      { label: "Developer", url: /\/developer\/agents/ },
      { label: "Organization", url: /\/organization\/pricing/ },
      { label: "Dashboard", url: /\/dashboard/ },
    ];
    for (const route of routes) {
      await page.locator(".sidebar-link", { hasText: route.label }).click();
      await expect(page).toHaveURL(route.url, { timeout: 10_000 });
    }
  });

  test("browser back/forward navigation works", async ({ page }) => {
    // Build some history: Providers, then Developer.
    await page.locator(".sidebar-link", { hasText: "Providers" }).click();
    await expect(page).toHaveURL(/\/providers/);
    await page.locator(".sidebar-link", { hasText: "Developer" }).click();
    await expect(page).toHaveURL(/\/developer/);
    // History should replay the same pair in both directions.
    await page.goBack();
    await expect(page).toHaveURL(/\/providers/);
    await page.goForward();
    await expect(page).toHaveURL(/\/developer/);
  });

  test("logo link navigates to dashboard", async ({ page }) => {
    // Leave the dashboard first so the logo click has an observable effect.
    await page.locator(".sidebar-link", { hasText: "Providers" }).click();
    await expect(page).toHaveURL(/\/providers/);
    // Click the logo/brand in the sidebar header.
    const brand = page.locator(".sidebar-brand, .sidebar-logo, .sidebar a").first();
    await brand.click();
    await expect(page).toHaveURL(/\/dashboard/);
  });
});

41
e2e/organization.spec.ts Normal file
View File

@@ -0,0 +1,41 @@
import { test, expect } from "@playwright/test";

test.describe("Organization section", () => {
  test("pricing page loads with three pricing cards", async ({ page }) => {
    await page.goto("/organization/pricing");
    await page.waitForSelector(".org-shell", { timeout: 15_000 });
    await expect(page.locator(".pricing-card")).toHaveCount(3);
  });

  test("pricing cards show Starter, Team, Enterprise tiers", async ({
    page,
  }) => {
    await page.goto("/organization/pricing");
    await page.waitForSelector(".org-shell", { timeout: 15_000 });
    for (const tier of ["Starter", "Team", "Enterprise"]) {
      await expect(page.locator(".pricing-card", { hasText: tier })).toBeVisible();
    }
  });

  test("organization dashboard loads with billing stats", async ({ page }) => {
    await page.goto("/organization/dashboard");
    await page.waitForSelector(".org-dashboard-page", { timeout: 15_000 });
    await expect(page.locator(".page-header")).toContainText("Organization");
    await expect(page.locator(".org-stats-bar")).toBeVisible();
    await expect(page.locator(".org-stat").first()).toBeVisible();
  });

  test("member table is visible on org dashboard", async ({ page }) => {
    await page.goto("/organization/dashboard");
    await page.waitForSelector(".org-dashboard-page", { timeout: 15_000 });
    await expect(page.locator(".org-table")).toBeVisible();
    const header = page.locator(".org-table thead");
    for (const column of ["Name", "Email", "Role"]) {
      await expect(header).toContainText(column);
    }
  });
});

55
e2e/providers.spec.ts Normal file
View File

@@ -0,0 +1,55 @@
import { test, expect } from "@playwright/test";

test.describe("Providers page", () => {
  test.beforeEach(async ({ page }) => {
    await page.goto("/providers");
    await page.waitForSelector(".providers-page", { timeout: 15_000 });
  });

  test("providers page loads with header", async ({ page }) => {
    await expect(page.locator(".page-header")).toContainText("Providers");
  });

  test("provider dropdown has Ollama selected by default", async ({
    page,
  }) => {
    const providerSelect = page
      .locator(".form-group")
      .filter({ hasText: "Provider" })
      .locator("select");
    await expect(providerSelect).toHaveValue(/ollama/i);
  });

  test("changing provider updates the model dropdown", async ({ page }) => {
    const providerSelect = page
      .locator(".form-group")
      .filter({ hasText: "Provider" })
      .locator("select");
    const modelSelect = page
      .locator(".form-group")
      .filter({ hasText: /^Model/ })
      .locator("select");
    // Get current model options
    const initialOptions = await modelSelect.locator("option").allTextContents();
    // Change to a different provider
    await providerSelect.selectOption({ label: "OpenAI" });
    // Poll until the model list re-renders instead of sleeping a fixed 500 ms:
    // a blind waitForTimeout races the WASM update and is flaky under load.
    await expect
      .poll(() => modelSelect.locator("option").allTextContents(), {
        timeout: 5_000,
      })
      .not.toEqual(initialOptions);
  });

  test("save button shows confirmation feedback", async ({ page }) => {
    const saveBtn = page.locator("button", { hasText: "Save Configuration" });
    await saveBtn.click();
    await expect(page.locator(".form-success")).toBeVisible({ timeout: 5_000 });
    await expect(page.locator(".form-success")).toContainText("saved");
  });
});

60
e2e/public.spec.ts Normal file
View File

@@ -0,0 +1,60 @@
import { test, expect } from "@playwright/test";

test.describe("Public pages", () => {
  test("landing page loads with heading and nav links", async ({ page }) => {
    await page.goto("/");
    await expect(page.locator(".landing-logo").first()).toHaveText("CERTifAI");
    await expect(page.locator(".landing-nav-links")).toBeVisible();
    // All three section anchors must be present in the nav.
    for (const anchor of ["#features", "#how-it-works", "#pricing"]) {
      await expect(page.locator(`a[href="${anchor}"]`)).toBeVisible();
    }
  });

  test("landing page Log In link navigates to login route", async ({
    page,
  }) => {
    await page.goto("/");
    const loginLink = page
      .locator(".landing-nav-actions a, .landing-nav-actions Link")
      .filter({ hasText: "Log In" });
    await loginLink.click();
    await expect(page).toHaveURL(/\/login/);
  });

  test("impressum page loads with legal content", async ({ page }) => {
    await page.goto("/impressum");
    await expect(page.locator("h1")).toHaveText("Impressum");
    const sectionHeading = page.locator("h2", {
      hasText: "Information according to",
    });
    await expect(sectionHeading).toBeVisible();
    await expect(page.locator(".legal-content")).toContainText(
      "CERTifAI GmbH"
    );
  });

  test("privacy page loads with privacy content", async ({ page }) => {
    await page.goto("/privacy");
    await expect(page.locator("h1")).toHaveText("Privacy Policy");
    for (const heading of ["Introduction", "Your Rights"]) {
      await expect(page.locator("h2", { hasText: heading })).toBeVisible();
    }
  });

  test("footer links are present on landing page", async ({ page }) => {
    await page.goto("/");
    const footer = page.locator(".landing-footer");
    await expect(footer.locator('a:has-text("Impressum")')).toBeVisible();
    await expect(footer.locator('a:has-text("Privacy Policy")')).toBeVisible();
  });
});

View File

@@ -78,6 +78,72 @@
"optionalClientScopes": [
"offline_access"
]
},
{
"clientId": "certifai-langfuse",
"name": "CERTifAI Langfuse",
"description": "Langfuse OIDC client for CERTifAI",
"enabled": true,
"publicClient": false,
"directAccessGrantsEnabled": false,
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"serviceAccountsEnabled": false,
"protocol": "openid-connect",
"secret": "certifai-langfuse-secret",
"rootUrl": "http://localhost:3000",
"baseUrl": "http://localhost:3000",
"redirectUris": [
"http://localhost:3000/*"
],
"webOrigins": [
"http://localhost:3000",
"http://localhost:8000"
],
"attributes": {
"post.logout.redirect.uris": "http://localhost:3000"
},
"defaultClientScopes": [
"openid",
"profile",
"email"
],
"optionalClientScopes": [
"offline_access"
]
},
{
"clientId": "certifai-librechat",
"name": "CERTifAI Chat",
"description": "LibreChat OIDC client for CERTifAI",
"enabled": true,
"publicClient": false,
"directAccessGrantsEnabled": false,
"standardFlowEnabled": true,
"implicitFlowEnabled": false,
"serviceAccountsEnabled": false,
"protocol": "openid-connect",
"secret": "certifai-librechat-secret",
"rootUrl": "http://localhost:3080",
"baseUrl": "http://localhost:3080",
"redirectUris": [
"http://localhost:3080/*"
],
"webOrigins": [
"http://localhost:3080",
"http://localhost:8000"
],
"attributes": {
"post.logout.redirect.uris": "http://localhost:3080"
},
"defaultClientScopes": [
"openid",
"profile",
"email"
],
"optionalClientScopes": [
"offline_access"
]
}
],
"clientScopes": [

34
librechat/librechat.yaml Normal file
View File

@@ -0,0 +1,34 @@
# CERTifAI LibreChat Configuration
# LiteLLM proxy for unified multi-provider LLM access.
version: 1.2.8
cache: true
registration:
socialLogins:
- openid
interface:
privacyPolicy:
externalUrl: https://dash-dev.meghsakha.com/privacy
termsOfService:
externalUrl: https://dash-dev.meghsakha.com/impressum
endpointsMenu: true
modelSelect: true
parameters: true
endpoints:
custom:
- name: "LiteLLM"
apiKey: "${LITELLM_API_KEY}"
baseURL: "https://llm-dev.meghsakha.com/v1/"
models:
default:
- "Qwen3-Coder-30B-A3B-Instruct"
fetch: true
titleConvo: true
titleModel: "current_model"
summarize: false
summaryModel: "current_model"
forcePrompt: false
modelDisplayLabel: "CERTifAI LiteLLM"

25
librechat/logo.svg Normal file
View File

@@ -0,0 +1,25 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 64 64" fill="none">
<!-- Shield body -->
<path d="M32 4L8 16v16c0 14.4 10.24 27.2 24 32 13.76-4.8 24-17.6 24-32V16L32 4z"
fill="#4B3FE0" fill-opacity="0.12" stroke="#4B3FE0" stroke-width="2"
stroke-linejoin="round"/>
<!-- Inner shield highlight -->
<path d="M32 10L14 19v11c0 11.6 7.68 22 18 26 10.32-4 18-14.4 18-26V19L32 10z"
fill="none" stroke="#4B3FE0" stroke-width="1" stroke-opacity="0.3"
stroke-linejoin="round"/>
<!-- Neural network nodes -->
<circle cx="32" cy="24" r="3.5" fill="#38B2AC"/>
<circle cx="22" cy="36" r="3" fill="#38B2AC"/>
<circle cx="42" cy="36" r="3" fill="#38B2AC"/>
<circle cx="27" cy="48" r="2.5" fill="#38B2AC" fill-opacity="0.7"/>
<circle cx="37" cy="48" r="2.5" fill="#38B2AC" fill-opacity="0.7"/>
<!-- Neural network edges -->
<line x1="32" y1="24" x2="22" y2="36" stroke="#38B2AC" stroke-width="1.2" stroke-opacity="0.6"/>
<line x1="32" y1="24" x2="42" y2="36" stroke="#38B2AC" stroke-width="1.2" stroke-opacity="0.6"/>
<line x1="22" y1="36" x2="27" y2="48" stroke="#38B2AC" stroke-width="1" stroke-opacity="0.4"/>
<line x1="22" y1="36" x2="37" y2="48" stroke="#38B2AC" stroke-width="1" stroke-opacity="0.4"/>
<line x1="42" y1="36" x2="27" y2="48" stroke="#38B2AC" stroke-width="1" stroke-opacity="0.4"/>
<line x1="42" y1="36" x2="37" y2="48" stroke="#38B2AC" stroke-width="1" stroke-opacity="0.4"/>
<!-- Cross edge for connectivity -->
<line x1="22" y1="36" x2="42" y2="36" stroke="#38B2AC" stroke-width="0.8" stroke-opacity="0.3"/>
</svg>

After

Width:  |  Height:  |  Size: 1.6 KiB

743
librechat/openidStrategy.js Normal file
View File

@@ -0,0 +1,743 @@
const undici = require('undici');
const { get } = require('lodash');
const fetch = require('node-fetch');
const passport = require('passport');
const client = require('openid-client');
const jwtDecode = require('jsonwebtoken/decode');
const { HttpsProxyAgent } = require('https-proxy-agent');
const { hashToken, logger } = require('@librechat/data-schemas');
const { Strategy: OpenIDStrategy } = require('openid-client/passport');
const { CacheKeys, ErrorTypes, SystemRoles } = require('librechat-data-provider');
const {
isEnabled,
logHeaders,
safeStringify,
findOpenIDUser,
getBalanceConfig,
isEmailDomainAllowed,
} = require('@librechat/api');
const { getStrategyFunctions } = require('~/server/services/Files/strategies');
const { findUser, createUser, updateUser } = require('~/models');
const { getAppConfig } = require('~/server/services/Config');
const getLogStores = require('~/cache/getLogStores');
/**
* @typedef {import('openid-client').ClientMetadata} ClientMetadata
* @typedef {import('openid-client').Configuration} Configuration
**/
/**
 * Custom fetch passed to openid-client so OIDC HTTP traffic can be logged,
 * optionally routed through an egress proxy, and sanitized.
 *
 * What it does (in order):
 *  - logs every request URL; when DEBUG_OPENID_REQUESTS is enabled, also logs
 *    method, headers, and the body (URLSearchParams / string / other, the
 *    last via safeStringify);
 *  - when PROXY is set, dispatches the request through undici.ProxyAgent;
 *  - if a 200 response carries a WWW-Authenticate header (non-standard on
 *    success, violates RFC 7235), rebuilds the Response without that header
 *    so strict OAuth clients don't reject it;
 *  - logs and re-throws any fetch error.
 *
 * @param {string} url
 * @param {client.CustomFetchOptions} options
 * @returns {Promise<Response>} the response, possibly rebuilt without the
 *   offending WWW-Authenticate header
 */
async function customFetch(url, options) {
  const urlStr = url.toString();
  logger.debug(`[openidStrategy] Request to: ${urlStr}`);
  const debugOpenId = isEnabled(process.env.DEBUG_OPENID_REQUESTS);
  if (debugOpenId) {
    logger.debug(`[openidStrategy] Request method: ${options.method || 'GET'}`);
    logger.debug(`[openidStrategy] Request headers: ${logHeaders(options.headers)}`);
    if (options.body) {
      // Serialize the body for logging; shape depends on what openid-client
      // handed us (URLSearchParams for token requests, string, or object).
      let bodyForLogging = '';
      if (options.body instanceof URLSearchParams) {
        bodyForLogging = options.body.toString();
      } else if (typeof options.body === 'string') {
        bodyForLogging = options.body;
      } else {
        bodyForLogging = safeStringify(options.body);
      }
      logger.debug(`[openidStrategy] Request body: ${bodyForLogging}`);
    }
  }
  try {
    /** @type {undici.RequestInit} */
    let fetchOptions = options;
    if (process.env.PROXY) {
      // Route through the configured HTTP(S) proxy via undici's dispatcher.
      logger.info(`[openidStrategy] proxy agent configured: ${process.env.PROXY}`);
      fetchOptions = {
        ...options,
        dispatcher: new undici.ProxyAgent(process.env.PROXY),
      };
    }
    const response = await undici.fetch(url, fetchOptions);
    if (debugOpenId) {
      logger.debug(`[openidStrategy] Response status: ${response.status} ${response.statusText}`);
      logger.debug(`[openidStrategy] Response headers: ${logHeaders(response.headers)}`);
    }
    if (response.status === 200 && response.headers.has('www-authenticate')) {
      const wwwAuth = response.headers.get('www-authenticate');
      logger.warn(`[openidStrategy] Non-standard WWW-Authenticate header found in successful response (200 OK): ${wwwAuth}.
      This violates RFC 7235 and may cause issues with strict OAuth clients. Removing header for compatibility.`);
      /** Cloned response without the WWW-Authenticate header */
      const responseBody = await response.arrayBuffer();
      const newHeaders = new Headers();
      for (const [key, value] of response.headers.entries()) {
        if (key.toLowerCase() !== 'www-authenticate') {
          newHeaders.append(key, value);
        }
      }
      return new Response(responseBody, {
        status: response.status,
        statusText: response.statusText,
        headers: newHeaders,
      });
    }
    return response;
  } catch (error) {
    // Log before propagating so proxy/network failures show up in app logs.
    logger.error(`[openidStrategy] Fetch error: ${error.message}`);
    throw error;
  }
}
/** @typedef {Configuration | null} */
// Module-level cache of the discovered OpenID client configuration.
// Populated by setupOpenId(); consumed by processOpenIDAuth() and getOpenIdConfig().
let openidConfig = null;
/**
 * Custom OpenID Strategy.
 *
 * Note: Originally overrode currentUrl() to work around Express 4's req.host not including port.
 * With Express 5, req.host now includes the port by default, but we continue to use DOMAIN_SERVER
 * for consistency and explicit configuration control.
 * More info: https://github.com/panva/openid-client/pull/713
 */
class CustomOpenIDStrategy extends OpenIDStrategy {
  /** Build the absolute URL of the current request from DOMAIN_SERVER. */
  currentUrl(req) {
    return new URL(`${process.env.DOMAIN_SERVER}${req.originalUrl ?? req.url}`);
  }

  /**
   * Extend the authorization request with state, audience, configured
   * extra parameters, and (optionally) a generated nonce.
   */
  authorizationRequestParams(req, options) {
    const params = super.authorizationRequestParams(req, options);
    if (options?.state && !params.has('state')) {
      params.set('state', options.state);
    }
    const audience = process.env.OPENID_AUDIENCE;
    if (audience) {
      params.set('audience', audience);
      logger.debug(
        `[openidStrategy] Adding audience to authorization request: ${audience}`,
      );
    }
    // Parse OPENID_AUTH_EXTRA_PARAMS (format: "key=value" or "key1=value1,key2=value2")
    const extraParams = process.env.OPENID_AUTH_EXTRA_PARAMS;
    if (extraParams) {
      for (const pair of extraParams.split(',')) {
        const [rawKey, ...valueParts] = pair.trim().split('=');
        if (!rawKey || valueParts.length === 0) {
          continue;
        }
        // Re-join on '=' so values containing '=' survive intact.
        const name = rawKey.trim();
        const value = valueParts.join('=').trim();
        params.set(name, value);
        logger.debug(`[openidStrategy] Adding extra auth param: ${name}=${value}`);
      }
    }
    /** Generate nonce for federated providers that require it */
    if (isEnabled(process.env.OPENID_GENERATE_NONCE) && !params.has('nonce') && this._sessionKey) {
      const crypto = require('crypto');
      const nonce = crypto.randomBytes(16).toString('hex');
      params.set('nonce', nonce);
      logger.debug('[openidStrategy] Generated nonce for federated provider:', nonce);
    }
    return params;
  }
}
/**
 * Exchange the access token for a new access token using the on-behalf-of flow if required.
 *
 * When OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED is disabled, the original
 * token is returned untouched.
 *
 * @param {Configuration} config
 * @param {string} accessToken access token to be exchanged if necessary
 * @param {string} sub - The subject identifier of the user. usually found as "sub" in the claims of the token
 * @param {boolean} fromCache - Indicates whether to use cached tokens.
 * @returns {Promise<string>} The new access token if exchanged, otherwise the original access token.
 */
const exchangeAccessTokenIfNeeded = async (config, accessToken, sub, fromCache = false) => {
  const tokensCache = getLogStores(CacheKeys.OPENID_EXCHANGED_TOKENS);
  if (!isEnabled(process.env.OPENID_ON_BEHALF_FLOW_FOR_USERINFO_REQUIRED)) {
    return accessToken;
  }
  if (fromCache) {
    const cached = await tokensCache.get(sub);
    if (cached) {
      return cached.access_token;
    }
  }
  // jwt-bearer grant: trade the user's token for one scoped to userinfo access.
  const grant = await client.genericGrantRequest(
    config,
    'urn:ietf:params:oauth:grant-type:jwt-bearer',
    {
      scope: process.env.OPENID_ON_BEHALF_FLOW_USERINFO_SCOPE || 'user.read',
      assertion: accessToken,
      requested_token_use: 'on_behalf_of',
    },
  );
  // Cache keyed by subject; TTL mirrors the token's lifetime (seconds -> ms).
  await tokensCache.set(sub, { access_token: grant.access_token }, grant.expires_in * 1000);
  return grant.access_token;
};
/**
 * get user info from openid provider
 *
 * Exchanges the token first when the on-behalf-of flow is required; any
 * failure is logged and swallowed so login can proceed on claims alone.
 *
 * @param {Configuration} config
 * @param {string} accessToken access token
 * @param {string} sub - The subject identifier of the user. usually found as "sub" in the claims of the token
 * @returns {Promise<Object|null>}
 */
const getUserInfo = async (config, accessToken, sub) => {
  try {
    const effectiveToken = await exchangeAccessTokenIfNeeded(config, accessToken, sub);
    const userinfo = await client.fetchUserInfo(config, effectiveToken, sub);
    return userinfo;
  } catch (error) {
    logger.error('[openidStrategy] getUserInfo: Error fetching user info:', error);
    return null;
  }
};
/**
 * Downloads an image from a URL using an access token.
 *
 * @param {string} url
 * @param {Configuration} config
 * @param {string} accessToken access token
 * @param {string} sub - The subject identifier of the user. usually found as "sub" in the claims of the token
 * @returns {Promise<Buffer | string>} The image buffer or an empty string if the download fails.
 */
const downloadImage = async (url, config, accessToken, sub) => {
  // Bail out before the on-behalf-of exchange: exchanging first wastes a
  // potential network round-trip (and a cache write) when there is no URL.
  if (!url) {
    return '';
  }
  const exchangedAccessToken = await exchangeAccessTokenIfNeeded(config, accessToken, sub, true);
  try {
    const options = {
      method: 'GET',
      headers: {
        Authorization: `Bearer ${exchangedAccessToken}`,
      },
    };
    if (process.env.PROXY) {
      options.agent = new HttpsProxyAgent(process.env.PROXY);
    }
    const response = await fetch(url, options);
    if (response.ok) {
      // node-fetch style: buffer() resolves the body as a Buffer.
      const buffer = await response.buffer();
      return buffer;
    } else {
      throw new Error(`${response.statusText} (HTTP ${response.status})`);
    }
  } catch (error) {
    // Avatar download is best-effort; an empty string means "no avatar".
    logger.error(
      `[openidStrategy] downloadImage: Error downloading image at URL "${url}": ${error}`,
    );
    return '';
  }
};
/**
 * Determines the full name of a user based on OpenID userinfo and environment configuration.
 *
 * Resolution order: OPENID_NAME_CLAIM override, then "given family",
 * then either name alone, then username, then email.
 *
 * @param {Object} userinfo - The user information object from OpenID Connect
 * @param {string} [userinfo.given_name] - The user's first name
 * @param {string} [userinfo.family_name] - The user's last name
 * @param {string} [userinfo.username] - The user's username
 * @param {string} [userinfo.email] - The user's email address
 * @returns {string} The determined full name of the user
 */
function getFullName(userinfo) {
  const claimOverride = process.env.OPENID_NAME_CLAIM;
  if (claimOverride) {
    return userinfo[claimOverride];
  }
  const { given_name: first, family_name: last } = userinfo;
  if (first && last) {
    return `${first} ${last}`;
  }
  return first || last || userinfo.username || userinfo.email;
}
/**
 * Converts an input into a string suitable for a username.
 * If the input is a string, it will be returned as is.
 * If the input is an array, elements will be joined with underscores.
 * In case of undefined or other falsy values, a default value will be returned.
 *
 * @param {string | string[] | undefined} input - The input value to be converted into a username.
 * @param {string} [defaultValue=''] - The default value to return if the input is falsy.
 * @returns {string} The processed input as a string suitable for a username.
 */
function convertToUsername(input, defaultValue = '') {
  if (Array.isArray(input)) {
    return input.join('_');
  }
  return typeof input === 'string' ? input : defaultValue;
}
/**
 * Resolve Azure AD groups when group overage is in effect (groups moved to _claim_names/_claim_sources).
 *
 * NOTE: Microsoft recommends treating _claim_names/_claim_sources as a signal only and using Microsoft Graph
 * to resolve group membership instead of calling the endpoint in _claim_sources directly.
 *
 * @param {string} accessToken - Access token with Microsoft Graph permissions
 * @returns {Promise<string[] | null>} Resolved group IDs or null on failure
 * @see https://learn.microsoft.com/en-us/entra/identity-platform/access-token-claims-reference#groups-overage-claim
 * @see https://learn.microsoft.com/en-us/graph/api/directoryobject-getmemberobjects
 */
async function resolveGroupsFromOverage(accessToken) {
  if (!accessToken) {
    logger.error('[openidStrategy] Access token missing; cannot resolve group overage');
    return null;
  }
  // Use /me/getMemberObjects so least-privileged delegated permission User.Read is sufficient
  // when resolving the signed-in user's group membership.
  const url = 'https://graph.microsoft.com/v1.0/me/getMemberObjects';
  try {
    logger.debug(
      `[openidStrategy] Detected group overage, resolving groups via Microsoft Graph getMemberObjects: ${url}`,
    );
    /** @type {undici.RequestInit} */
    const requestInit = {
      method: 'POST',
      headers: {
        Authorization: `Bearer ${accessToken}`,
        'Content-Type': 'application/json',
      },
      // securityEnabledOnly=false: include distribution groups, not just security groups.
      body: JSON.stringify({ securityEnabledOnly: false }),
    };
    if (process.env.PROXY) {
      const { ProxyAgent } = undici;
      requestInit.dispatcher = new ProxyAgent(process.env.PROXY);
    }
    const response = await undici.fetch(url, requestInit);
    if (!response.ok) {
      logger.error(
        `[openidStrategy] Failed to resolve groups via Microsoft Graph getMemberObjects: HTTP ${response.status} ${response.statusText}`,
      );
      return null;
    }
    const payload = await response.json();
    if (!Array.isArray(payload?.value)) {
      logger.error(
        '[openidStrategy] Unexpected response format when resolving groups via Microsoft Graph getMemberObjects',
      );
      return null;
    }
    // Graph may return non-string directory object ids; keep only strings.
    const groupIds = payload.value.filter((id) => typeof id === 'string');
    logger.debug(
      `[openidStrategy] Successfully resolved ${groupIds.length} groups via Microsoft Graph getMemberObjects`,
    );
    return groupIds;
  } catch (err) {
    logger.error(
      '[openidStrategy] Error resolving groups via Microsoft Graph getMemberObjects:',
      err,
    );
    return null;
  }
}
/**
 * Process OpenID authentication tokenset and userinfo.
 * This is the core logic extracted from the passport strategy callback.
 * Can be reused by both the passport strategy and proxy authentication.
 *
 * Steps, in order:
 *  1. Merge token claims with the provider's userinfo endpoint response.
 *  2. Enforce the allowed email-domain list.
 *  3. Look up (or create/update) the local user record.
 *  4. Enforce OPENID_REQUIRED_ROLE and apply OPENID_ADMIN_ROLE mapping.
 *  5. Sync the user's avatar from the provider unless it was set manually.
 *
 * @param {Object} tokenset - The OpenID tokenset containing access_token, id_token, etc.
 * @param {boolean} existingUsersOnly - If true, only existing users will be processed
 * @returns {Promise<Object>} The authenticated user object with tokenset
 * @throws {Error} On blocked email domain, lookup failure, missing required role,
 *   or (when existingUsersOnly) a non-existent user.
 */
async function processOpenIDAuth(tokenset, existingUsersOnly = false) {
  const claims = tokenset.claims ? tokenset.claims() : tokenset;
  const userinfo = {
    ...claims,
  };
  if (tokenset.access_token) {
    // Userinfo endpoint values take precedence over bare token claims.
    const providerUserinfo = await getUserInfo(openidConfig, tokenset.access_token, claims.sub);
    Object.assign(userinfo, providerUserinfo);
  }
  const appConfig = await getAppConfig();
  /** Azure AD sometimes doesn't return email, use preferred_username as fallback */
  const email = userinfo.email || userinfo.preferred_username || userinfo.upn;
  if (!isEmailDomainAllowed(email, appConfig?.registration?.allowedDomains)) {
    // Log the address that was actually validated: `email` includes the
    // preferred_username/upn fallback, while userinfo.email may be undefined here.
    logger.error(
      `[OpenID Strategy] Authentication blocked - email domain not allowed [Email: ${email}]`,
    );
    throw new Error('Email domain not allowed');
  }
  const result = await findOpenIDUser({
    findUser,
    email: email,
    openidId: claims.sub || userinfo.sub,
    idOnTheSource: claims.oid || userinfo.oid,
    strategyName: 'openidStrategy',
  });
  let user = result.user;
  const error = result.error;
  if (error) {
    throw new Error(ErrorTypes.AUTH_FAILED);
  }
  const fullName = getFullName(userinfo);
  const requiredRole = process.env.OPENID_REQUIRED_ROLE;
  if (requiredRole) {
    // Comma-separated list; a user needs at least one of these roles.
    const requiredRoles = requiredRole
      .split(',')
      .map((role) => role.trim())
      .filter(Boolean);
    const requiredRoleParameterPath = process.env.OPENID_REQUIRED_ROLE_PARAMETER_PATH;
    const requiredRoleTokenKind = process.env.OPENID_REQUIRED_ROLE_TOKEN_KIND;
    let decodedToken = '';
    if (requiredRoleTokenKind === 'access' && tokenset.access_token) {
      decodedToken = jwtDecode(tokenset.access_token);
    } else if (requiredRoleTokenKind === 'id' && tokenset.id_token) {
      decodedToken = jwtDecode(tokenset.id_token);
    }
    let roles = get(decodedToken, requiredRoleParameterPath);
    // Handle Azure AD group overage for ID token groups: when hasgroups or _claim_* indicate overage,
    // resolve groups via Microsoft Graph instead of relying on token group values.
    if (
      !Array.isArray(roles) &&
      typeof roles !== 'string' &&
      requiredRoleTokenKind === 'id' &&
      requiredRoleParameterPath === 'groups' &&
      decodedToken &&
      (decodedToken.hasgroups ||
        (decodedToken._claim_names?.groups &&
          decodedToken._claim_sources?.[decodedToken._claim_names.groups]))
    ) {
      const overageGroups = await resolveGroupsFromOverage(tokenset.access_token);
      if (overageGroups) {
        roles = overageGroups;
      }
    }
    if (!roles || (!Array.isArray(roles) && typeof roles !== 'string')) {
      logger.error(
        `[openidStrategy] Key '${requiredRoleParameterPath}' not found in ${requiredRoleTokenKind} token!`,
      );
      const rolesList =
        requiredRoles.length === 1
          ? `"${requiredRoles[0]}"`
          : `one of: ${requiredRoles.map((r) => `"${r}"`).join(', ')}`;
      throw new Error(`You must have ${rolesList} role to log in.`);
    }
    const roleValues = Array.isArray(roles) ? roles : [roles];
    if (!requiredRoles.some((role) => roleValues.includes(role))) {
      const rolesList =
        requiredRoles.length === 1
          ? `"${requiredRoles[0]}"`
          : `one of: ${requiredRoles.map((r) => `"${r}"`).join(', ')}`;
      throw new Error(`You must have ${rolesList} role to log in.`);
    }
  }
  let username = '';
  if (process.env.OPENID_USERNAME_CLAIM) {
    username = userinfo[process.env.OPENID_USERNAME_CLAIM];
  } else {
    username = convertToUsername(
      userinfo.preferred_username || userinfo.username || userinfo.email,
    );
  }
  if (existingUsersOnly && !user) {
    throw new Error('User does not exist');
  }
  if (!user) {
    // First-time login: create the user record (with initial balance config).
    user = {
      provider: 'openid',
      openidId: userinfo.sub,
      username,
      email: email || '',
      emailVerified: userinfo.email_verified || false,
      name: fullName,
      idOnTheSource: userinfo.oid,
    };
    const balanceConfig = getBalanceConfig(appConfig);
    user = await createUser(user, balanceConfig, true, true);
  } else {
    // Returning user: refresh profile fields from the provider.
    user.provider = 'openid';
    user.openidId = userinfo.sub;
    user.username = username;
    user.name = fullName;
    user.idOnTheSource = userinfo.oid;
    if (email && email !== user.email) {
      user.email = email;
      user.emailVerified = userinfo.email_verified || false;
    }
  }
  const adminRole = process.env.OPENID_ADMIN_ROLE;
  const adminRoleParameterPath = process.env.OPENID_ADMIN_ROLE_PARAMETER_PATH;
  const adminRoleTokenKind = process.env.OPENID_ADMIN_ROLE_TOKEN_KIND;
  if (adminRole && adminRoleParameterPath && adminRoleTokenKind) {
    let adminRoleObject;
    switch (adminRoleTokenKind) {
      case 'access':
        adminRoleObject = jwtDecode(tokenset.access_token);
        break;
      case 'id':
        adminRoleObject = jwtDecode(tokenset.id_token);
        break;
      case 'userinfo':
        adminRoleObject = userinfo;
        break;
      default:
        logger.error(
          `[openidStrategy] Invalid admin role token kind: ${adminRoleTokenKind}. Must be one of 'access', 'id', or 'userinfo'.`,
        );
        throw new Error('Invalid admin role token kind');
    }
    const adminRoles = get(adminRoleObject, adminRoleParameterPath);
    if (
      adminRoles &&
      (adminRoles === true ||
        adminRoles === adminRole ||
        (Array.isArray(adminRoles) && adminRoles.includes(adminRole)))
    ) {
      user.role = SystemRoles.ADMIN;
      logger.info(`[openidStrategy] User ${username} is an admin based on role: ${adminRole}`);
    } else if (user.role === SystemRoles.ADMIN) {
      // Demote when the admin claim disappeared from the token.
      user.role = SystemRoles.USER;
      logger.info(
        `[openidStrategy] User ${username} demoted from admin - role no longer present in token`,
      );
    }
  }
  // Avatar sync: skip when the user uploaded an avatar manually ('manual=true').
  if (!!userinfo && userinfo.picture && !user.avatar?.includes('manual=true')) {
    /** @type {string | undefined} */
    const imageUrl = userinfo.picture;
    let fileName;
    if (crypto) {
      fileName = (await hashToken(userinfo.sub)) + '.png';
    } else {
      fileName = userinfo.sub + '.png';
    }
    const imageBuffer = await downloadImage(
      imageUrl,
      openidConfig,
      tokenset.access_token,
      userinfo.sub,
    );
    if (imageBuffer) {
      const { saveBuffer } = getStrategyFunctions(
        appConfig?.fileStrategy ?? process.env.CDN_PROVIDER,
      );
      const imagePath = await saveBuffer({
        fileName,
        userId: user._id.toString(),
        buffer: imageBuffer,
      });
      user.avatar = imagePath ?? '';
    }
  }
  user = await updateUser(user._id, user);
  logger.info(
    `[openidStrategy] login success openidId: ${user.openidId} | email: ${user.email} | username: ${user.username} `,
    {
      user: {
        openidId: user.openidId,
        username: user.username,
        email: user.email,
        name: user.name,
      },
    },
  );
  // Expose the raw tokens so downstream consumers can act on behalf of the user.
  return {
    ...user,
    tokenset,
    federatedTokens: {
      access_token: tokenset.access_token,
      id_token: tokenset.id_token,
      refresh_token: tokenset.refresh_token,
      expires_at: tokenset.expires_at,
    },
  };
}
/**
 * Build the passport verify callback for the OpenID strategy.
 *
 * Expected auth failures (blocked domain, lookup failure, missing role) are
 * reported as soft failures via `done(null, false, ...)`; anything else is an
 * actual error.
 *
 * @param {boolean | undefined} [existingUsersOnly]
 */
function createOpenIDCallback(existingUsersOnly) {
  return async (tokenset, done) => {
    try {
      const user = await processOpenIDAuth(tokenset, existingUsersOnly);
      done(null, user);
    } catch (err) {
      const isExpectedFailure =
        err.message === 'Email domain not allowed' ||
        err.message === ErrorTypes.AUTH_FAILED ||
        (err.message && err.message.includes('role to log in'));
      if (isExpectedFailure) {
        return done(null, false, { message: err.message });
      }
      logger.error('[openidStrategy] login failed', err);
      done(err);
    }
  };
}
/**
 * Sets up the OpenID strategy specifically for admin authentication.
 *
 * Registered as 'openidAdmin'; only existing users may log in through it
 * (createOpenIDCallback(true)).
 *
 * @param {Configuration} config - Discovered OpenID client configuration
 */
const setupOpenIdAdmin = (config) => {
  try {
    if (!config) {
      throw new Error('OpenID configuration not initialized');
    }
    const adminStrategy = new CustomOpenIDStrategy(
      {
        config,
        scope: process.env.OPENID_SCOPE,
        usePKCE: isEnabled(process.env.OPENID_USE_PKCE),
        clockTolerance: process.env.OPENID_CLOCK_TOLERANCE || 300,
        callbackURL: process.env.DOMAIN_SERVER + '/api/admin/oauth/openid/callback',
      },
      createOpenIDCallback(true),
    );
    passport.use('openidAdmin', adminStrategy);
  } catch (err) {
    // Admin SSO is optional; never let its setup break the main strategy.
    logger.error('[openidStrategy] setupOpenIdAdmin', err);
  }
};
/**
 * Sets up the OpenID strategy for authentication.
 * This function configures the OpenID client, handles proxy settings,
 * and defines the OpenID strategy for Passport.js.
 *
 * @async
 * @function setupOpenId
 * @returns {Promise<Configuration | null>} A promise that resolves when the OpenID strategy is set up and returns the openid client config object.
 * @throws {Error} If an error occurs during the setup process.
 */
async function setupOpenId() {
  try {
    const nonceEnabled = isEnabled(process.env.OPENID_GENERATE_NONCE);
    /** @type {ClientMetadata} */
    const clientMetadata = {
      client_id: process.env.OPENID_CLIENT_ID,
      client_secret: process.env.OPENID_CLIENT_SECRET,
    };
    if (nonceEnabled) {
      // Federated providers needing a nonce also need explicit metadata.
      Object.assign(clientMetadata, {
        response_types: ['code'],
        grant_types: ['authorization_code'],
        token_endpoint_auth_method: 'client_secret_post',
      });
    }
    /** @type {Configuration} */
    openidConfig = await client.discovery(
      new URL(process.env.OPENID_ISSUER),
      process.env.OPENID_CLIENT_ID,
      clientMetadata,
      undefined,
      {
        [client.customFetch]: customFetch,
        execute: [client.allowInsecureRequests],
      },
    );
    logger.info(`[openidStrategy] OpenID authentication configuration`, {
      generateNonce: nonceEnabled,
      reason: nonceEnabled
        ? 'OPENID_GENERATE_NONCE=true - Will generate nonce and use explicit metadata for federated providers'
        : 'OPENID_GENERATE_NONCE=false - Standard flow without explicit nonce or metadata',
    });
    const userStrategy = new CustomOpenIDStrategy(
      {
        config: openidConfig,
        scope: process.env.OPENID_SCOPE,
        callbackURL: process.env.DOMAIN_SERVER + process.env.OPENID_CALLBACK_URL,
        clockTolerance: process.env.OPENID_CLOCK_TOLERANCE || 300,
        usePKCE: isEnabled(process.env.OPENID_USE_PKCE),
      },
      createOpenIDCallback(),
    );
    passport.use('openid', userStrategy);
    setupOpenIdAdmin(openidConfig);
    return openidConfig;
  } catch (err) {
    logger.error('[openidStrategy]', err);
    return null;
  }
}
/**
 * @function getOpenIdConfig
 * @description Returns the OpenID client instance.
 * @throws {Error} If the OpenID client is not initialized.
 * @returns {Configuration}
 */
function getOpenIdConfig() {
  if (openidConfig) {
    return openidConfig;
  }
  throw new Error('OpenID client is not initialized. Please call setupOpenId first.');
}
// Public API: strategy registration (setupOpenId) and access to the
// discovered client configuration (getOpenIdConfig).
module.exports = {
  setupOpenId,
  getOpenIdConfig,
};

View File

@@ -4,6 +4,7 @@
"type": "module",
"private": true,
"devDependencies": {
"@playwright/test": "^1.52.0",
"@types/bun": "latest"
},
"peerDependencies": {

40
playwright.config.ts Normal file
View File

@@ -0,0 +1,40 @@
// Playwright end-to-end test configuration.
import { defineConfig, devices } from "@playwright/test";
export default defineConfig({
  testDir: "./e2e",
  fullyParallel: true,
  // Fail the build if a spec was committed with .only in CI.
  forbidOnly: !!process.env.CI,
  retries: process.env.CI ? 2 : 0,
  // CI runs single-worker for stability; locally Playwright picks a default.
  workers: process.env.CI ? 1 : undefined,
  reporter: [["html"], ["list"]],
  timeout: 30_000,
  use: {
    // Target server; override with BASE_URL for non-local environments.
    baseURL: process.env.BASE_URL ?? "http://localhost:8000",
    actionTimeout: 10_000,
    trace: "on-first-retry",
    screenshot: "only-on-failure",
  },
  projects: [
    // "setup" logs in once and persists the session for authenticated specs.
    {
      name: "setup",
      testMatch: /auth\.setup\.ts/,
    },
    // Unauthenticated specs only.
    {
      name: "public",
      testMatch: /public\.spec\.ts/,
      use: { ...devices["Desktop Chrome"] },
    },
    // Everything else runs with the stored login state from "setup".
    {
      name: "authenticated",
      testMatch: /\.spec\.ts$/,
      testIgnore: /public\.spec\.ts$/,
      dependencies: ["setup"],
      use: {
        ...devices["Desktop Chrome"],
        storageState: "e2e/.auth/user.json",
      },
    },
  ],
});

View File

@@ -22,12 +22,6 @@ pub enum Route {
DashboardPage {},
#[route("/providers")]
ProvidersPage {},
#[route("/chat")]
ChatPage {},
#[route("/tools")]
ToolsPage {},
#[route("/knowledge")]
KnowledgePage {},
#[layout(DeveloperShell)]
#[route("/developer/agents")]

View File

@@ -5,7 +5,7 @@ use dioxus_free_icons::Icon;
use crate::components::sidebar::Sidebar;
use crate::i18n::{t, tw, Locale};
use crate::infrastructure::auth_check::check_auth;
use crate::models::AuthInfo;
use crate::models::{AuthInfo, ServiceUrlsContext};
use crate::Route;
/// Application shell layout that wraps all authenticated pages.
@@ -29,6 +29,16 @@ pub fn AppShell() -> Element {
match auth_snapshot {
Some(Ok(info)) if info.authenticated => {
// Provide developer tool URLs as context so child pages
// can read them without prop-drilling through layouts.
use_context_provider(|| {
Signal::new(ServiceUrlsContext {
langgraph_url: info.langgraph_url.clone(),
langflow_url: info.langflow_url.clone(),
langfuse_url: info.langfuse_url.clone(),
})
});
let menu_open = *mobile_menu_open.read();
let sidebar_cls = if menu_open {
"sidebar sidebar--open"
@@ -65,6 +75,7 @@ pub fn AppShell() -> Element {
email: info.email,
name: info.name,
avatar_url: info.avatar_url,
librechat_url: info.librechat_url,
class: sidebar_cls,
on_nav: move |_| mobile_menu_open.set(false),
}

View File

@@ -1,69 +0,0 @@
use crate::i18n::{t, Locale};
use dioxus::prelude::*;
use dioxus_free_icons::icons::fa_solid_icons::{FaCopy, FaPenToSquare, FaShareNodes};
/// Action bar displayed above the chat input with copy, share, and edit buttons.
///
/// Only visible when there is at least one message in the conversation.
///
/// # Arguments
///
/// * `on_copy` - Copies the last assistant response to the clipboard
/// * `on_share` - Copies the full conversation as text to the clipboard
/// * `on_edit` - Places the last user message back in the input for editing
/// * `has_messages` - Whether any messages exist (hides the bar when empty)
/// * `has_assistant_message` - Whether an assistant message exists (disables copy if not)
/// * `has_user_message` - Whether a user message exists (disables edit if not)
#[component]
pub fn ChatActionBar(
on_copy: EventHandler<()>,
on_share: EventHandler<()>,
on_edit: EventHandler<()>,
has_messages: bool,
has_assistant_message: bool,
has_user_message: bool,
) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
if !has_messages {
return rsx! {};
}
rsx! {
div { class: "chat-action-bar",
button {
class: "chat-action-btn",
disabled: !has_assistant_message,
title: "{t(l, \"chat.copy_response\")}",
onclick: move |_| on_copy.call(()),
dioxus_free_icons::Icon {
icon: FaCopy,
width: 14, height: 14,
}
span { class: "chat-action-label", "{t(l, \"common.copy\")}" }
}
button {
class: "chat-action-btn",
title: "{t(l, \"chat.copy_conversation\")}",
onclick: move |_| on_share.call(()),
dioxus_free_icons::Icon {
icon: FaShareNodes,
width: 14, height: 14,
}
span { class: "chat-action-label", "{t(l, \"common.share\")}" }
}
button {
class: "chat-action-btn",
disabled: !has_user_message,
title: "{t(l, \"chat.edit_last\")}",
onclick: move |_| on_edit.call(()),
dioxus_free_icons::Icon {
icon: FaPenToSquare,
width: 14, height: 14,
}
span { class: "chat-action-label", "{t(l, \"common.edit\")}" }
}
}
}
}

View File

@@ -1,142 +0,0 @@
use crate::i18n::{t, Locale};
use crate::models::{ChatMessage, ChatRole};
use dioxus::prelude::*;
/// Render markdown content to HTML using `pulldown-cmark`.
///
/// # Arguments
///
/// * `md` - Raw markdown string
///
/// # Returns
///
/// HTML string suitable for `dangerous_inner_html`
fn markdown_to_html(md: &str) -> String {
use pulldown_cmark::{Options, Parser};
let mut opts = Options::empty();
opts.insert(Options::ENABLE_TABLES);
opts.insert(Options::ENABLE_STRIKETHROUGH);
opts.insert(Options::ENABLE_TASKLISTS);
let parser = Parser::new_ext(md, opts);
let mut html = String::with_capacity(md.len() * 2);
pulldown_cmark::html::push_html(&mut html, parser);
html
}
/// Renders a single chat message bubble with role-based styling.
///
/// User messages are displayed as plain text, right-aligned.
/// Assistant messages are rendered as markdown with `pulldown-cmark`.
/// System messages are hidden from the UI.
///
/// # Arguments
///
/// * `message` - The chat message to render
#[component]
pub fn ChatBubble(message: ChatMessage) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
// System messages are not rendered in the UI
if message.role == ChatRole::System {
return rsx! {};
}
let bubble_class = match message.role {
ChatRole::User => "chat-bubble chat-bubble--user",
ChatRole::Assistant => "chat-bubble chat-bubble--assistant",
ChatRole::System => unreachable!(),
};
let role_label = match message.role {
ChatRole::User => t(l, "chat.you"),
ChatRole::Assistant => t(l, "chat.assistant"),
ChatRole::System => unreachable!(),
};
// Format timestamp for display (show time only if today)
let display_time = if message.timestamp.len() >= 16 {
// Extract HH:MM from ISO 8601
message.timestamp[11..16].to_string()
} else {
message.timestamp.clone()
};
let is_assistant = message.role == ChatRole::Assistant;
rsx! {
div { class: "{bubble_class}",
div { class: "chat-bubble-header",
span { class: "chat-bubble-role", "{role_label}" }
span { class: "chat-bubble-time", "{display_time}" }
}
if is_assistant {
// Render markdown for assistant messages
div {
class: "chat-bubble-content chat-prose",
dangerous_inner_html: "{markdown_to_html(&message.content)}",
}
} else {
div { class: "chat-bubble-content", "{message.content}" }
}
if !message.attachments.is_empty() {
div { class: "chat-bubble-attachments",
for att in &message.attachments {
span { class: "chat-attachment", "{att.name}" }
}
}
}
}
}
}
/// Renders a streaming assistant message bubble.
///
/// While waiting for tokens, shows a "Thinking..." indicator with
/// a pulsing dot animation. Once tokens arrive, renders them as
/// markdown with a blinking cursor.
///
/// # Arguments
///
/// * `content` - The accumulated streaming content so far
#[component]
pub fn StreamingBubble(content: String) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
if content.is_empty() {
// Thinking state -- no tokens yet
rsx! {
div { class: "chat-bubble chat-bubble--assistant chat-bubble--thinking",
div { class: "chat-thinking",
span { class: "chat-thinking-dots",
span { class: "chat-dot" }
span { class: "chat-dot" }
span { class: "chat-dot" }
}
span { class: "chat-thinking-text",
"{t(l, \"chat.thinking\")}"
}
}
}
}
} else {
let html = markdown_to_html(&content);
rsx! {
div { class: "chat-bubble chat-bubble--assistant chat-bubble--streaming",
div { class: "chat-bubble-header",
span { class: "chat-bubble-role",
"{t(l, \"chat.assistant\")}"
}
}
div {
class: "chat-bubble-content chat-prose",
dangerous_inner_html: "{html}",
}
span { class: "chat-streaming-cursor" }
}
}
}
}

View File

@@ -1,73 +0,0 @@
use crate::i18n::{t, Locale};
use dioxus::prelude::*;
/// Chat input bar with a textarea and send button.
///
/// Enter sends the message; Shift+Enter inserts a newline.
/// The input is disabled during streaming.
///
/// # Arguments
///
/// * `input_text` - Two-way bound input text signal
/// * `on_send` - Callback fired with the message text when sent
/// * `is_streaming` - Whether to disable the input (streaming in progress)
#[component]
pub fn ChatInputBar(
input_text: Signal<String>,
on_send: EventHandler<String>,
is_streaming: bool,
) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
let mut input = input_text;
rsx! {
div { class: "chat-input-bar",
textarea {
class: "chat-input",
placeholder: "{t(l, \"chat.type_message\")}",
disabled: is_streaming,
rows: "1",
value: "{input}",
oninput: move |e: Event<FormData>| {
input.set(e.value());
},
onkeypress: move |e: Event<KeyboardData>| {
// Enter sends, Shift+Enter adds newline
if e.key() == Key::Enter && !e.modifiers().shift() {
e.prevent_default();
let text = input.read().trim().to_string();
if !text.is_empty() {
on_send.call(text);
input.set(String::new());
}
}
},
}
button {
class: "btn-primary chat-send-btn",
disabled: is_streaming || input.read().trim().is_empty(),
onclick: move |_| {
let text = input.read().trim().to_string();
if !text.is_empty() {
on_send.call(text);
input.set(String::new());
}
},
if is_streaming {
// Stop icon during streaming
dioxus_free_icons::Icon {
icon: dioxus_free_icons::icons::fa_solid_icons::FaStop,
width: 16, height: 16,
}
} else {
dioxus_free_icons::Icon {
icon: dioxus_free_icons::icons::fa_solid_icons::FaPaperPlane,
width: 16, height: 16,
}
}
}
}
}
}

View File

@@ -1,42 +0,0 @@
use crate::components::{ChatBubble, StreamingBubble};
use crate::i18n::{t, Locale};
use crate::models::ChatMessage;
use dioxus::prelude::*;
/// Scrollable message list that renders all messages in a chat session.
///
/// Auto-scrolls to the bottom when new messages arrive or during streaming.
/// Shows a streaming bubble with a blinking cursor when `is_streaming` is true.
///
/// # Arguments
///
/// * `messages` - All loaded messages for the current session
/// * `streaming_content` - Accumulated content from the SSE stream
/// * `is_streaming` - Whether a response is currently streaming
#[component]
pub fn ChatMessageList(
messages: Vec<ChatMessage>,
streaming_content: String,
is_streaming: bool,
) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
rsx! {
div {
class: "chat-message-list",
id: "chat-message-list",
if messages.is_empty() && !is_streaming {
div { class: "chat-empty",
p { "{t(l, \"chat.send_to_start\")}" }
}
}
for msg in &messages {
ChatBubble { key: "{msg.id}", message: msg.clone() }
}
if is_streaming {
StreamingBubble { content: streaming_content }
}
}
}
}

View File

@@ -1,50 +0,0 @@
use crate::i18n::{t, Locale};
use dioxus::prelude::*;
/// Dropdown bar for selecting the LLM model for the current chat session.
///
/// Displays the currently selected model and a list of available models
/// from the Ollama instance. Fires `on_change` when the user selects
/// a different model.
///
/// # Arguments
///
/// * `selected_model` - The currently active model ID
/// * `available_models` - List of model names from Ollama
/// * `on_change` - Callback fired with the new model name
#[component]
pub fn ChatModelSelector(
selected_model: String,
available_models: Vec<String>,
on_change: EventHandler<String>,
) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
rsx! {
div { class: "chat-model-bar",
label { class: "chat-model-label",
"{t(l, \"chat.model_label\")}"
}
select {
class: "chat-model-select",
value: "{selected_model}",
onchange: move |e: Event<FormData>| {
on_change.call(e.value());
},
for model in &available_models {
option {
value: "{model}",
selected: *model == selected_model,
"{model}"
}
}
if available_models.is_empty() {
option { disabled: true,
"{t(l, \"chat.no_models\")}"
}
}
}
}
}
}

View File

@@ -1,258 +0,0 @@
use crate::i18n::{t, tw, Locale};
use crate::models::{ChatNamespace, ChatSession};
use dioxus::prelude::*;
/// Chat sidebar displaying grouped session list with actions.
///
/// Sessions are split into "News Chats" and "General" sections.
/// Each session item shows the title and relative date, with
/// rename and delete actions on hover.
///
/// # Arguments
///
/// * `sessions` - All chat sessions for the user
/// * `active_session_id` - Currently selected session ID (highlighted)
/// * `on_select` - Callback when a session is clicked
/// * `on_new` - Callback to create a new chat session
/// * `on_rename` - Callback with `(session_id, new_title)`
/// * `on_delete` - Callback with `session_id`
#[component]
pub fn ChatSidebar(
sessions: Vec<ChatSession>,
active_session_id: Option<String>,
on_select: EventHandler<String>,
on_new: EventHandler<()>,
on_rename: EventHandler<(String, String)>,
on_delete: EventHandler<String>,
) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
// Split sessions by namespace
let news_sessions: Vec<&ChatSession> = sessions
.iter()
.filter(|s| s.namespace == ChatNamespace::News)
.collect();
let general_sessions: Vec<&ChatSession> = sessions
.iter()
.filter(|s| s.namespace == ChatNamespace::General)
.collect();
// Signal for inline rename state: Option<(session_id, current_value)>
// Shared by every SessionItem so at most one item is in rename mode
// at any time; setting a new id implicitly cancels the previous edit.
let rename_state: Signal<Option<(String, String)>> = use_signal(|| None);
rsx! {
div { class: "chat-sidebar-panel",
div { class: "chat-sidebar-header",
h3 { "{t(l, \"chat.conversations\")}" }
button {
class: "btn-icon",
title: "{t(l, \"chat.new_chat\")}",
onclick: move |_| on_new.call(()),
"+"
}
}
div { class: "chat-session-list",
// News Chats section (omitted entirely when there are none)
if !news_sessions.is_empty() {
div { class: "chat-namespace-header",
"{t(l, \"chat.news_chats\")}"
}
for session in &news_sessions {
SessionItem {
session: (*session).clone(),
is_active: active_session_id.as_deref() == Some(&session.id),
rename_state: rename_state,
on_select: on_select,
on_rename: on_rename,
on_delete: on_delete,
}
}
}
// General section. The header label depends on whether a News
// section is shown above: "All Chats" when it is the only
// section, "General" otherwise.
div { class: "chat-namespace-header",
if news_sessions.is_empty() {
"{t(l, \"chat.all_chats\")}"
} else {
"{t(l, \"chat.general\")}"
}
}
if general_sessions.is_empty() {
p { class: "chat-empty-hint",
"{t(l, \"chat.no_conversations\")}"
}
}
for session in &general_sessions {
SessionItem {
session: (*session).clone(),
is_active: active_session_id.as_deref() == Some(&session.id),
rename_state: rename_state,
on_select: on_select,
on_rename: on_rename,
on_delete: on_delete,
}
}
}
}
}
}
/// Individual session item component. Handles rename inline editing.
///
/// Rename state lives in the parent-owned `rename_state` signal rather
/// than locally, so only one item across the whole sidebar can be in
/// rename mode at a time. Commit paths: Enter or losing focus commits a
/// non-empty trimmed title via `on_rename`; Escape cancels.
#[component]
fn SessionItem(
session: ChatSession,
is_active: bool,
rename_state: Signal<Option<(String, String)>>,
on_select: EventHandler<String>,
on_rename: EventHandler<(String, String)>,
on_delete: EventHandler<String>,
) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
// Rebind mutably: Signal is Copy, so each closure below gets its own copy.
let mut rename_sig = rename_state;
let item_class = if is_active {
"chat-session-item chat-session-item--active"
} else {
"chat-session-item"
};
// This item is in rename mode iff the shared state holds *our* id.
let is_renaming = rename_sig
.read()
.as_ref()
.is_some_and(|(id, _)| id == &session.id);
let session_id = session.id.clone();
let session_title = session.title.clone();
let date_display = format_relative_date(&session.updated_at, l);
if is_renaming {
let rename_value = rename_sig
.read()
.as_ref()
.map(|(_, v)| v.clone())
.unwrap_or_default();
let sid = session_id.clone();
rsx! {
div { class: "{item_class}",
input {
class: "chat-session-rename-input",
r#type: "text",
value: "{rename_value}",
autofocus: true,
// Mirror every keystroke back into the shared state so the
// controlled input stays in sync.
oninput: move |e: Event<FormData>| {
let val = e.value();
let id = sid.clone();
rename_sig.set(Some((id, val)));
},
onkeypress: move |e: Event<KeyboardData>| {
if e.key() == Key::Enter {
// Clone out of the read guard before mutating the
// signal below; blank titles are discarded.
if let Some((id, val)) = rename_sig.read().clone() {
if !val.trim().is_empty() {
on_rename.call((id, val));
}
}
rename_sig.set(None);
} else if e.key() == Key::Escape {
rename_sig.set(None);
}
},
// Clicking away also commits. After an Enter commit the
// state is already None, so this cannot double-fire.
onfocusout: move |_| {
if let Some((ref id, ref val)) = *rename_sig.read() {
if !val.trim().is_empty() {
on_rename.call((id.clone(), val.clone()));
}
}
rename_sig.set(None);
},
}
}
}
} else {
// One owned id per closure, since each `move` closure consumes its copy.
let sid_select = session_id.clone();
let sid_delete = session_id.clone();
let sid_rename = session_id.clone();
let title_for_rename = session_title.clone();
rsx! {
div {
class: "{item_class}",
onclick: move |_| on_select.call(sid_select.clone()),
div { class: "chat-session-info",
span { class: "chat-session-title", "{session_title}" }
span { class: "chat-session-date", "{date_display}" }
}
div { class: "chat-session-actions",
button {
class: "btn-icon-sm",
title: "{t(l, \"common.rename\")}",
// stop_propagation: the row's onclick would otherwise
// also select the session.
onclick: move |e: Event<MouseData>| {
e.stop_propagation();
// Seed the edit box with the current title.
rename_sig.set(Some((
sid_rename.clone(),
title_for_rename.clone(),
)));
},
dioxus_free_icons::Icon {
icon: dioxus_free_icons::icons::fa_solid_icons::FaPen,
width: 12, height: 12,
}
}
button {
class: "btn-icon-sm btn-icon-danger",
title: "{t(l, \"common.delete\")}",
onclick: move |e: Event<MouseData>| {
e.stop_propagation();
on_delete.call(sid_delete.clone());
},
dioxus_free_icons::Icon {
icon: dioxus_free_icons::icons::fa_solid_icons::FaTrash,
width: 12, height: 12,
}
}
}
}
}
}
}
/// Format an ISO 8601 timestamp as a relative date string.
///
/// Buckets the elapsed time since `iso` into "just now" (< 1 min),
/// minutes, hours (< 24 h), days (< 7 d), and otherwise an absolute
/// "%b %d" date. Timestamps that fail to parse are returned verbatim.
///
/// # Arguments
///
/// * `iso` - ISO 8601 timestamp string
/// * `locale` - The locale to use for translated time labels
fn format_relative_date(iso: &str, locale: Locale) -> String {
    // Guard clause: unparseable input is shown as-is rather than hidden.
    let Ok(dt) = chrono::DateTime::parse_from_rfc3339(iso) else {
        return iso.to_string();
    };
    let elapsed = chrono::Utc::now().signed_duration_since(dt);
    let (minutes, hours, days) = (
        elapsed.num_minutes(),
        elapsed.num_hours(),
        elapsed.num_days(),
    );
    if minutes < 1 {
        t(locale, "chat.just_now")
    } else if hours < 1 {
        tw(locale, "chat.minutes_ago", &[("n", &minutes.to_string())])
    } else if hours < 24 {
        tw(locale, "chat.hours_ago", &[("n", &hours.to_string())])
    } else if days < 7 {
        tw(locale, "chat.days_ago", &[("n", &days.to_string())])
    } else {
        // Older than a week: show an absolute month/day instead.
        dt.format("%b %d").to_string()
    }
}

View File

@@ -1,9 +1,9 @@
use dioxus::prelude::*;
use crate::i18n::{t, Locale};
use crate::infrastructure::ollama::{get_ollama_status, OllamaStatus};
use crate::infrastructure::litellm::{get_litellm_status, LitellmStatus};
/// Right sidebar for the dashboard, showing Ollama status, trending topics,
/// Right sidebar for the dashboard, showing LiteLLM status, trending topics,
/// and recent search history.
///
/// Appears when no article card is selected. Disappears when the user opens
@@ -11,13 +11,13 @@ use crate::infrastructure::ollama::{get_ollama_status, OllamaStatus};
///
/// # Props
///
/// * `ollama_url` - Ollama instance URL for status polling
/// * `litellm_url` - LiteLLM proxy URL for status polling
/// * `trending` - Trending topic keywords extracted from recent news headlines
/// * `recent_searches` - Recent search topics stored in localStorage
/// * `on_topic_click` - Fires when a trending or recent topic is clicked
#[component]
pub fn DashboardSidebar(
ollama_url: String,
litellm_url: String,
trending: Vec<String>,
recent_searches: Vec<String>,
on_topic_click: EventHandler<String>,
@@ -25,26 +25,26 @@ pub fn DashboardSidebar(
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
// Fetch Ollama status once on mount.
// Fetch LiteLLM status once on mount.
// use_resource with no signal dependencies runs exactly once and
// won't re-fire on parent re-renders (unlike use_effect).
let url = ollama_url.clone();
let url = litellm_url.clone();
let status_resource = use_resource(move || {
let u = url.clone();
async move {
get_ollama_status(u).await.unwrap_or(OllamaStatus {
get_litellm_status(u).await.unwrap_or(LitellmStatus {
online: false,
models: Vec::new(),
})
}
});
let current_status: OllamaStatus =
let current_status: LitellmStatus =
status_resource
.read()
.as_ref()
.cloned()
.unwrap_or(OllamaStatus {
.unwrap_or(LitellmStatus {
online: false,
models: Vec::new(),
});
@@ -52,9 +52,9 @@ pub fn DashboardSidebar(
rsx! {
aside { class: "dashboard-sidebar",
// -- Ollama Status Section --
// -- LiteLLM Status Section --
div { class: "sidebar-section",
h4 { class: "sidebar-section-title", "{t(l, \"dashboard.ollama_status\")}" }
h4 { class: "sidebar-section-title", "{t(l, \"dashboard.litellm_status\")}" }
div { class: "sidebar-status-row",
span { class: if current_status.online { "sidebar-status-dot sidebar-status-dot--online" } else { "sidebar-status-dot sidebar-status-dot--offline" } }
span { class: "sidebar-status-label",

View File

@@ -1,59 +0,0 @@
use dioxus::prelude::*;
use crate::i18n::{t, Locale};
use crate::models::KnowledgeFile;
/// Renders a table row for a knowledge base file.
///
/// Columns: icon + name, kind label, human-readable size, chunk count,
/// upload timestamp, and a delete action button.
///
/// # Arguments
///
/// * `file` - The knowledge file data to render
/// * `on_delete` - Callback fired with the file id when delete is clicked
#[component]
pub fn FileRow(file: KnowledgeFile, on_delete: EventHandler<String>) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
// Format file size for human readability (Python devs: similar to humanize.naturalsize)
let size_display = format_size(file.size_bytes);
rsx! {
tr { class: "file-row",
td { class: "file-row-name",
span { class: "file-row-icon", "{file.kind.icon()}" }
"{file.name}"
}
td { "{file.kind.label()}" }
td { "{size_display}" }
td { "{file.chunk_count} {t(l, \"common.chunks\")}" }
td { "{file.uploaded_at}" }
td {
button {
class: "btn-icon btn-danger",
// Clone the id into the closure so the row data can be
// dropped while the handler stays callable.
onclick: {
let id = file.id.clone();
move |_| on_delete.call(id.clone())
},
"{t(l, \"common.delete\")}"
}
}
}
}
}
/// Formats a byte count into a human-readable string (e.g. "1.2 MB").
///
/// Uses binary units (1 KB = 1024 B). Values at or above a unit's
/// threshold are shown with one decimal place; smaller values fall
/// through to a plain byte count.
fn format_size(bytes: u64) -> String {
    // Largest unit first so the first matching threshold wins.
    const UNITS: [(u64, &str); 3] = [(1 << 30, "GB"), (1 << 20, "MB"), (1 << 10, "KB")];
    UNITS
        .iter()
        .find(|(threshold, _)| bytes >= *threshold)
        .map(|(threshold, unit)| format!("{:.1} {unit}", bytes as f64 / *threshold as f64))
        .unwrap_or_else(|| format!("{bytes} B"))
}

View File

@@ -1,14 +1,7 @@
mod app_shell;
mod article_detail;
mod card;
mod chat_action_bar;
mod chat_bubble;
mod chat_input_bar;
mod chat_message_list;
mod chat_model_selector;
mod chat_sidebar;
mod dashboard_sidebar;
mod file_row;
mod login;
mod member_row;
pub mod news_card;
@@ -16,23 +9,16 @@ mod page_header;
mod pricing_card;
pub mod sidebar;
pub mod sub_nav;
mod tool_card;
mod tool_embed;
pub use app_shell::*;
pub use article_detail::*;
pub use card::*;
pub use chat_action_bar::*;
pub use chat_bubble::*;
pub use chat_input_bar::*;
pub use chat_message_list::*;
pub use chat_model_selector::*;
pub use chat_sidebar::*;
pub use dashboard_sidebar::*;
pub use file_row::*;
pub use login::*;
pub use member_row::*;
pub use news_card::*;
pub use page_header::*;
pub use pricing_card::*;
pub use sub_nav::*;
pub use tool_card::*;
pub use tool_embed::*;

View File

@@ -112,12 +112,12 @@ pub fn mock_news() -> Vec<NewsCardModel> {
published_at: "2026-02-16".into(),
},
NewsCardModel {
title: "Ollama Adds Multi-GPU Scheduling".into(),
source: "Ollama".into(),
summary: "Run large models across multiple GPUs with automatic sharding.".into(),
content: "Ollama now supports multi-GPU scheduling with automatic \
model sharding. Users can run models across multiple GPUs \
for improved inference performance."
title: "LiteLLM Adds Multi-Provider Routing".into(),
source: "LiteLLM".into(),
summary: "Route requests across multiple LLM providers with automatic fallback.".into(),
content: "LiteLLM now supports multi-provider routing with automatic \
fallback. Users can route requests across multiple providers \
for improved reliability and cost optimization."
.into(),
category: "Infrastructure".into(),
url: "#".into(),

View File

@@ -1,13 +1,21 @@
use dioxus::prelude::*;
use dioxus_free_icons::icons::bs_icons::{
BsBoxArrowRight, BsBuilding, BsChatDots, BsCloudArrowUp, BsCodeSlash, BsCollection, BsGithub,
BsGlobe2, BsGrid, BsHouseDoor, BsMoonFill, BsPuzzle, BsSunFill,
BsBoxArrowRight, BsBuilding, BsChatDots, BsCloudArrowUp, BsCodeSlash, BsGithub, BsGlobe2,
BsGrid, BsHouseDoor, BsMoonFill, BsSunFill,
};
use dioxus_free_icons::Icon;
use crate::i18n::{t, Locale};
use crate::Route;
/// Destination for a sidebar link: either an internal route or an external URL.
enum NavTarget {
/// Internal Dioxus route (rendered as `Link { to: route }`).
Internal(Route),
/// External URL opened in a new tab (rendered as `<a href>`).
External(String),
}
/// Navigation entry for the sidebar.
///
/// `key` is a stable identifier used for active-route detection and never
@@ -15,7 +23,7 @@ use crate::Route;
struct NavItem {
key: &'static str,
label: String,
route: Route,
target: NavTarget,
/// Bootstrap icon element rendered beside the label.
icon: Element,
}
@@ -35,6 +43,7 @@ pub fn Sidebar(
name: String,
email: String,
avatar_url: String,
#[props(default = "http://localhost:3080".to_string())] librechat_url: String,
#[props(default = "sidebar".to_string())] class: String,
#[props(default)] on_nav: EventHandler<()>,
) -> Element {
@@ -45,43 +54,32 @@ pub fn Sidebar(
NavItem {
key: "dashboard",
label: t(locale_val, "nav.dashboard"),
route: Route::DashboardPage {},
target: NavTarget::Internal(Route::DashboardPage {}),
icon: rsx! { Icon { icon: BsHouseDoor, width: 18, height: 18 } },
},
NavItem {
key: "providers",
label: t(locale_val, "nav.providers"),
route: Route::ProvidersPage {},
target: NavTarget::Internal(Route::ProvidersPage {}),
icon: rsx! { Icon { icon: BsCloudArrowUp, width: 18, height: 18 } },
},
NavItem {
key: "chat",
label: t(locale_val, "nav.chat"),
route: Route::ChatPage {},
// Opens LibreChat in a new tab; SSO via shared Keycloak realm.
target: NavTarget::External(librechat_url.clone()),
icon: rsx! { Icon { icon: BsChatDots, width: 18, height: 18 } },
},
NavItem {
key: "tools",
label: t(locale_val, "nav.tools"),
route: Route::ToolsPage {},
icon: rsx! { Icon { icon: BsPuzzle, width: 18, height: 18 } },
},
NavItem {
key: "knowledge_base",
label: t(locale_val, "nav.knowledge_base"),
route: Route::KnowledgePage {},
icon: rsx! { Icon { icon: BsCollection, width: 18, height: 18 } },
},
NavItem {
key: "developer",
label: t(locale_val, "nav.developer"),
route: Route::AgentsPage {},
target: NavTarget::Internal(Route::AgentsPage {}),
icon: rsx! { Icon { icon: BsCodeSlash, width: 18, height: 18 } },
},
NavItem {
key: "organization",
label: t(locale_val, "nav.organization"),
route: Route::OrgPricingPage {},
target: NavTarget::Internal(Route::OrgPricingPage {}),
icon: rsx! { Icon { icon: BsBuilding, width: 18, height: 18 } },
},
];
@@ -100,25 +98,45 @@ pub fn Sidebar(
nav { class: "sidebar-nav",
for item in nav_items {
{
// Active detection for nested routes: highlight the parent nav
// item when any child route within the nested shell is active.
let is_active = match &current_route {
Route::AgentsPage {} | Route::FlowPage {} | Route::AnalyticsPage {} => {
item.key == "developer"
match &item.target {
NavTarget::Internal(route) => {
// Active detection for nested routes: highlight the parent
// nav item when any child route within the nested shell
// is active.
let is_active = match &current_route {
Route::AgentsPage {} | Route::FlowPage {} | Route::AnalyticsPage {} => {
item.key == "developer"
}
Route::OrgPricingPage {} | Route::OrgDashboardPage {} => {
item.key == "organization"
}
_ => *route == current_route,
};
let cls = if is_active { "sidebar-link active" } else { "sidebar-link" };
let route = route.clone();
rsx! {
Link {
to: route,
class: cls,
onclick: move |_| on_nav.call(()),
{item.icon}
span { "{item.label}" }
}
}
}
Route::OrgPricingPage {} | Route::OrgDashboardPage {} => {
item.key == "organization"
}
_ => item.route == current_route,
};
let cls = if is_active { "sidebar-link active" } else { "sidebar-link" };
rsx! {
Link {
to: item.route,
class: cls,
onclick: move |_| on_nav.call(()),
{item.icon}
span { "{item.label}" }
NavTarget::External(url) => {
let url = url.clone();
rsx! {
a {
href: url,
target: "_blank",
rel: "noopener noreferrer",
class: "sidebar-link",
onclick: move |_| on_nav.call(()),
{item.icon}
span { "{item.label}" }
}
}
}
}
}

View File

@@ -1,49 +0,0 @@
use dioxus::prelude::*;
use crate::i18n::{t, Locale};
use crate::models::McpTool;
/// Renders an MCP tool card with name, description, status indicator, and toggle.
///
/// The status dot's appearance is driven purely by CSS via a
/// `tool-status--<state>` modifier class; the toggle button shows a
/// localized On/Off label based on `tool.enabled`.
///
/// # Arguments
///
/// * `tool` - The MCP tool data to render
/// * `on_toggle` - Callback fired with the tool id when the toggle is clicked
#[component]
pub fn ToolCard(tool: McpTool, on_toggle: EventHandler<String>) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
// Status styling comes from the tool's own CSS-class mapping.
let status_class = format!("tool-status tool-status--{}", tool.status.css_class());
let toggle_class = if tool.enabled {
"tool-toggle tool-toggle--on"
} else {
"tool-toggle tool-toggle--off"
};
rsx! {
div { class: "tool-card",
div { class: "tool-card-header",
// U+2699 is the gear glyph used as a generic tool icon.
div { class: "tool-card-icon", "\u{2699}" }
span { class: "{status_class}", "" }
}
h3 { class: "tool-card-name", "{tool.name}" }
p { class: "tool-card-desc", "{tool.description}" }
div { class: "tool-card-footer",
span { class: "tool-card-category", "{tool.category.label()}" }
button {
class: "{toggle_class}",
// Clone the id into the closure so the handler outlives the props.
onclick: {
let id = tool.id.clone();
move |_| on_toggle.call(id.clone())
},
if tool.enabled {
"{t(l, \"common.on\")}"
} else {
"{t(l, \"common.off\")}"
}
}
}
}
}
}

View File

@@ -0,0 +1,81 @@
use dioxus::prelude::*;
use crate::i18n::{t, Locale};
/// Properties for the [`ToolEmbed`] component.
///
/// # Fields
///
/// * `url` - Service URL; when empty, a "Not Configured" placeholder is shown
/// * `title` - Display title for the tool (e.g. "Agent Builder")
/// * `description` - Description text shown in the placeholder card
/// * `icon` - Single-character icon for the placeholder card
/// * `launch_label` - Label for the disabled button in the placeholder
// PartialEq is required so Dioxus can skip re-renders when props are unchanged.
#[derive(Props, Clone, PartialEq)]
pub struct ToolEmbedProps {
/// Service URL. Empty string means "not configured".
pub url: String,
/// Display title shown in the toolbar / placeholder heading.
pub title: String,
/// Description shown in the "not configured" placeholder.
pub description: String,
/// Single-character icon for the placeholder card.
pub icon: &'static str,
/// Label for the disabled launch button in placeholder mode.
pub launch_label: String,
}
/// Hybrid iframe / placeholder component for developer tool pages.
///
/// When `url` is non-empty, renders a toolbar (title + pop-out button)
/// above a full-height iframe embedding the service. When `url` is
/// empty, renders the existing placeholder card with a "Not Configured"
/// badge instead of "Coming Soon".
#[component]
pub fn ToolEmbed(props: ToolEmbedProps) -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
if props.url.is_empty() {
// Not configured -- show placeholder card
rsx! {
section { class: "placeholder-page",
div { class: "placeholder-card",
div { class: "placeholder-icon", "{props.icon}" }
h2 { "{props.title}" }
p { class: "placeholder-desc", "{props.description}" }
// Disabled launch button: purely visual, signals what the
// page would offer once the service URL is configured.
button {
class: "btn-primary",
disabled: true,
"{props.launch_label}"
}
span { class: "placeholder-badge",
"{t(l, \"developer.not_configured\")}"
}
}
}
}
} else {
// URL is set -- render toolbar + iframe
let pop_out_url = props.url.clone();
rsx! {
div { class: "tool-embed",
div { class: "tool-embed-toolbar",
span { class: "tool-embed-title", "{props.title}" }
// noopener/noreferrer: the embedded service must not get a
// handle back to this window when popped out.
a {
class: "tool-embed-popout-btn",
href: "{pop_out_url}",
target: "_blank",
rel: "noopener noreferrer",
"{t(l, \"developer.open_new_tab\")}"
}
}
iframe {
class: "tool-embed-iframe",
src: "{props.url}",
title: "{props.title}",
}
}
}
}
}

View File

@@ -174,8 +174,8 @@ pub fn t(locale: Locale, key: &str) -> String {
///
/// ```
/// use dashboard::i18n::{tw, Locale};
/// let text = tw(Locale::En, "chat.minutes_ago", &[("n", "5")]);
/// assert_eq!(text, "5m ago");
/// let text = tw(Locale::En, "common.up_to_seats", &[("n", "5")]);
/// assert_eq!(text, "Up to 5 seats");
/// ```
pub fn tw(locale: Locale, key: &str, vars: &[(&str, &str)]) -> String {
let mut result = t(locale, key);
@@ -221,8 +221,8 @@ mod tests {
#[test]
fn variable_substitution() {
let result = tw(Locale::En, "chat.minutes_ago", &[("n", "5")]);
assert_eq!(result, "5m ago");
let result = tw(Locale::En, "common.up_to_seats", &[("n", "5")]);
assert_eq!(result, "Up to 5 seats");
}
#[test]

View File

@@ -24,9 +24,9 @@ pub const LOGGED_IN_USER_SESS_KEY: &str = "logged-in-user";
/// post-login redirect URL and the PKCE code verifier needed for the
/// token exchange.
#[derive(Debug, Clone)]
struct PendingOAuthEntry {
redirect_url: Option<String>,
code_verifier: String,
pub(crate) struct PendingOAuthEntry {
pub(crate) redirect_url: Option<String>,
pub(crate) code_verifier: String,
}
/// In-memory store for pending OAuth states. Keyed by the random state
@@ -38,7 +38,7 @@ pub struct PendingOAuthStore(Arc<RwLock<HashMap<String, PendingOAuthEntry>>>);
impl PendingOAuthStore {
/// Insert a pending state with an optional redirect URL and PKCE verifier.
fn insert(&self, state: String, entry: PendingOAuthEntry) {
pub(crate) fn insert(&self, state: String, entry: PendingOAuthEntry) {
// RwLock::write only panics if the lock is poisoned, which
// indicates a prior panic -- propagating is acceptable here.
#[allow(clippy::expect_used)]
@@ -50,7 +50,7 @@ impl PendingOAuthStore {
/// Remove and return the entry if the state was pending.
/// Returns `None` if the state was never stored (CSRF failure).
fn take(&self, state: &str) -> Option<PendingOAuthEntry> {
pub(crate) fn take(&self, state: &str) -> Option<PendingOAuthEntry> {
#[allow(clippy::expect_used)]
self.0
.write()
@@ -60,7 +60,8 @@ impl PendingOAuthStore {
}
/// Generate a cryptographically random state string for CSRF protection.
fn generate_state() -> String {
#[cfg_attr(test, allow(dead_code))]
pub(crate) fn generate_state() -> String {
let bytes: [u8; 32] = rand::rng().random();
// Encode as hex to produce a URL-safe string without padding.
bytes.iter().fold(String::with_capacity(64), |mut acc, b| {
@@ -75,7 +76,7 @@ fn generate_state() -> String {
///
/// Uses 32 random bytes encoded as base64url (no padding) to produce
/// a 43-character verifier per RFC 7636.
fn generate_code_verifier() -> String {
pub(crate) fn generate_code_verifier() -> String {
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
let bytes: [u8; 32] = rand::rng().random();
@@ -85,7 +86,7 @@ fn generate_code_verifier() -> String {
/// Derive the S256 code challenge from a code verifier per RFC 7636.
///
/// `code_challenge = BASE64URL(SHA256(code_verifier))`
fn derive_code_challenge(verifier: &str) -> String {
pub(crate) fn derive_code_challenge(verifier: &str) -> String {
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
use sha2::{Digest, Sha256};
@@ -304,3 +305,117 @@ pub async fn set_login_session(session: Session, data: UserStateInner) -> Result
.await
.map_err(|e| Error::StateError(format!("session insert failed: {e}")))
}
#[cfg(test)]
mod tests {
// unwrap/expect are acceptable in tests: a panic is a test failure.
#![allow(clippy::unwrap_used, clippy::expect_used)]
use super::*;
use pretty_assertions::assert_eq;
// -----------------------------------------------------------------------
// generate_state()
// -----------------------------------------------------------------------
#[test]
fn generate_state_length_is_64() {
// 32 random bytes hex-encoded -> 64 characters.
let state = generate_state();
assert_eq!(state.len(), 64);
}
#[test]
fn generate_state_chars_are_hex() {
let state = generate_state();
assert!(state.chars().all(|c| c.is_ascii_hexdigit()));
}
#[test]
fn generate_state_two_calls_differ() {
// Probabilistic uniqueness check: a collision over 256 bits of
// randomness would indicate a broken RNG, not bad luck.
let a = generate_state();
let b = generate_state();
assert_ne!(a, b);
}
// -----------------------------------------------------------------------
// generate_code_verifier()
// -----------------------------------------------------------------------
#[test]
fn code_verifier_length_is_43() {
// 32 bytes base64url-encoded without padding -> 43 characters,
// matching the RFC 7636 minimum-entropy verifier length.
let verifier = generate_code_verifier();
assert_eq!(verifier.len(), 43);
}
#[test]
fn code_verifier_chars_are_url_safe_base64() {
let verifier = generate_code_verifier();
// URL-safe base64 without padding uses [A-Za-z0-9_-]
assert!(verifier
.chars()
.all(|c| c.is_ascii_alphanumeric() || c == '-' || c == '_'));
}
// -----------------------------------------------------------------------
// derive_code_challenge()
// -----------------------------------------------------------------------
#[test]
fn code_challenge_deterministic() {
// SHA-256 is a pure function: same verifier -> same challenge.
let verifier = "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk";
let a = derive_code_challenge(verifier);
let b = derive_code_challenge(verifier);
assert_eq!(a, b);
}
#[test]
fn code_challenge_rfc7636_test_vector() {
// RFC 7636 Appendix B test vector:
// verifier = "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
// expected challenge = "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM"
let verifier = "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk";
let challenge = derive_code_challenge(verifier);
assert_eq!(challenge, "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM");
}
// -----------------------------------------------------------------------
// PendingOAuthStore
// -----------------------------------------------------------------------
#[test]
fn pending_store_insert_and_take() {
let store = PendingOAuthStore::default();
store.insert(
"state-1".into(),
PendingOAuthEntry {
redirect_url: Some("/dashboard".into()),
code_verifier: "verifier-1".into(),
},
);
let entry = store.take("state-1");
assert!(entry.is_some());
let entry = entry.unwrap();
assert_eq!(entry.redirect_url, Some("/dashboard".into()));
assert_eq!(entry.code_verifier, "verifier-1");
}
#[test]
fn pending_store_take_removes_entry() {
// `take` must be one-shot: OAuth states are single-use by design
// (replaying a consumed state is treated as a CSRF failure).
let store = PendingOAuthStore::default();
store.insert(
"state-2".into(),
PendingOAuthEntry {
redirect_url: None,
code_verifier: "v2".into(),
},
);
let _ = store.take("state-2");
// Second take should return None since the entry was removed.
assert!(store.take("state-2").is_none());
}
#[test]
fn pending_store_take_unknown_returns_none() {
let store = PendingOAuthStore::default();
assert!(store.take("nonexistent").is_none());
}
}

View File

@@ -24,13 +24,30 @@ pub async fn check_auth() -> Result<AuthInfo, ServerFnError> {
.map_err(|e| ServerFnError::new(format!("session read failed: {e}")))?;
match user_state {
Some(u) => Ok(AuthInfo {
authenticated: true,
sub: u.sub,
email: u.user.email,
name: u.user.name,
avatar_url: u.user.avatar_url,
}),
Some(u) => {
let librechat_url =
std::env::var("LIBRECHAT_URL").unwrap_or_else(|_| "http://localhost:3080".into());
// Extract service URLs from server state so the frontend can
// embed developer tools (LangGraph, LangFlow, Langfuse).
let state: crate::infrastructure::server_state::ServerState =
FullstackContext::extract().await?;
let langgraph_url = state.services.langgraph_url.clone();
let langflow_url = state.services.langflow_url.clone();
let langfuse_url = state.services.langfuse_url.clone();
Ok(AuthInfo {
authenticated: true,
sub: u.sub,
email: u.user.email,
name: u.user.name,
avatar_url: u.user.avatar_url,
librechat_url,
langgraph_url,
langflow_url,
langfuse_url,
})
}
None => Ok(AuthInfo::default()),
}
}

View File

@@ -134,7 +134,7 @@ pub async fn list_chat_sessions() -> Result<Vec<ChatSession>, ServerFnError> {
///
/// * `title` - Display title for the session
/// * `namespace` - Namespace string: `"General"` or `"News"`
/// * `provider` - LLM provider name (e.g. "ollama")
/// * `provider` - LLM provider name (e.g. "litellm")
/// * `model` - Model ID (e.g. "llama3.1:8b")
/// * `article_url` - Source article URL (only for `News` namespace, empty if none)
///
@@ -440,7 +440,12 @@ pub async fn chat_complete(
let session = doc_to_chat_session(&session_doc);
// Resolve provider URL and model
let (base_url, model) = resolve_provider_url(&state, &session.provider, &session.model);
let (base_url, model) = resolve_provider_url(
&state.services.litellm_url,
&state.services.litellm_model,
&session.provider,
&session.model,
);
// Parse messages from JSON
let chat_msgs: Vec<serde_json::Value> = serde_json::from_str(&messages_json)
@@ -480,10 +485,22 @@ pub async fn chat_complete(
.ok_or_else(|| ServerFnError::new("empty LLM response"))
}
/// Resolve the base URL for a provider, falling back to server defaults.
/// Resolve the base URL for a provider, falling back to LiteLLM defaults.
///
/// # Arguments
///
/// * `litellm_url` - Default LiteLLM base URL from config
/// * `litellm_model` - Default LiteLLM model from config
/// * `provider` - Provider name (e.g. "openai", "anthropic", "huggingface")
/// * `model` - Model ID (may be empty for LiteLLM default)
///
/// # Returns
///
/// A `(base_url, model)` tuple resolved for the given provider.
#[cfg(feature = "server")]
fn resolve_provider_url(
state: &crate::infrastructure::ServerState,
pub(crate) fn resolve_provider_url(
litellm_url: &str,
litellm_model: &str,
provider: &str,
model: &str,
) -> (String, String) {
@@ -494,14 +511,231 @@ fn resolve_provider_url(
format!("https://api-inference.huggingface.co/models/{}", model),
model.to_string(),
),
// Default to Ollama
// Default to LiteLLM
_ => (
state.services.ollama_url.clone(),
litellm_url.to_string(),
if model.is_empty() {
state.services.ollama_model.clone()
litellm_model.to_string()
} else {
model.to_string()
},
),
}
}
#[cfg(test)]
mod tests {
// -----------------------------------------------------------------------
// BSON document conversion tests (server feature required)
// -----------------------------------------------------------------------
#[cfg(feature = "server")]
mod server_tests {
use super::super::{doc_to_chat_message, doc_to_chat_session, resolve_provider_url};
use crate::models::{ChatNamespace, ChatRole};
use mongodb::bson::{doc, oid::ObjectId, Document};
use pretty_assertions::assert_eq;
// -- doc_to_chat_session --
fn sample_session_doc() -> (ObjectId, Document) {
let oid = ObjectId::new();
let doc = doc! {
"_id": oid,
"user_sub": "user-42",
"title": "Test Session",
"namespace": "News",
"provider": "openai",
"model": "gpt-4",
"created_at": "2025-01-01T00:00:00Z",
"updated_at": "2025-01-02T00:00:00Z",
"article_url": "https://example.com/article",
};
(oid, doc)
}
#[test]
fn doc_to_chat_session_extracts_id_as_hex() {
let (oid, doc) = sample_session_doc();
let session = doc_to_chat_session(&doc);
assert_eq!(session.id, oid.to_hex());
}
#[test]
fn doc_to_chat_session_maps_news_namespace() {
let (_, doc) = sample_session_doc();
let session = doc_to_chat_session(&doc);
assert_eq!(session.namespace, ChatNamespace::News);
}
#[test]
fn doc_to_chat_session_defaults_to_general_for_unknown() {
let mut doc = sample_session_doc().1;
doc.insert("namespace", "SomethingElse");
let session = doc_to_chat_session(&doc);
assert_eq!(session.namespace, ChatNamespace::General);
}
#[test]
fn doc_to_chat_session_extracts_all_string_fields() {
let (_, doc) = sample_session_doc();
let session = doc_to_chat_session(&doc);
assert_eq!(session.user_sub, "user-42");
assert_eq!(session.title, "Test Session");
assert_eq!(session.provider, "openai");
assert_eq!(session.model, "gpt-4");
assert_eq!(session.created_at, "2025-01-01T00:00:00Z");
assert_eq!(session.updated_at, "2025-01-02T00:00:00Z");
}
#[test]
fn doc_to_chat_session_handles_missing_article_url() {
let oid = ObjectId::new();
let doc = doc! {
"_id": oid,
"user_sub": "u",
"title": "t",
"provider": "litellm",
"model": "m",
"created_at": "c",
"updated_at": "u",
};
let session = doc_to_chat_session(&doc);
assert_eq!(session.article_url, None);
}
#[test]
fn doc_to_chat_session_filters_empty_article_url() {
let oid = ObjectId::new();
let doc = doc! {
"_id": oid,
"user_sub": "u",
"title": "t",
"namespace": "News",
"provider": "litellm",
"model": "m",
"created_at": "c",
"updated_at": "u",
"article_url": "",
};
let session = doc_to_chat_session(&doc);
assert_eq!(session.article_url, None);
}
// -- doc_to_chat_message --
/// Build a minimal chat-message document and return it with the
/// `ObjectId` it was keyed under, so tests can check hex round-tripping.
fn sample_message_doc() -> (ObjectId, Document) {
    let oid = ObjectId::new();
    let doc = doc! {
        "_id": oid,
        "session_id": "sess-1",
        "role": "Assistant",
        "content": "Hello there!",
        "timestamp": "2025-01-01T12:00:00Z",
    };
    (oid, doc)
}
#[test]
fn doc_to_chat_message_extracts_id_as_hex() {
    // The BSON ObjectId should surface as its 24-char hex string.
    let (oid, doc) = sample_message_doc();
    let msg = doc_to_chat_message(&doc);
    assert_eq!(msg.id, oid.to_hex());
}
#[test]
fn doc_to_chat_message_maps_assistant_role() {
    let (_, doc) = sample_message_doc();
    let msg = doc_to_chat_message(&doc);
    assert_eq!(msg.role, ChatRole::Assistant);
}
#[test]
fn doc_to_chat_message_maps_system_role() {
    let mut doc = sample_message_doc().1;
    doc.insert("role", "System");
    let msg = doc_to_chat_message(&doc);
    assert_eq!(msg.role, ChatRole::System);
}
#[test]
fn doc_to_chat_message_defaults_to_user_for_unknown() {
    // Unrecognized role strings fall back to `User`.
    let mut doc = sample_message_doc().1;
    doc.insert("role", "SomethingElse");
    let msg = doc_to_chat_message(&doc);
    assert_eq!(msg.role, ChatRole::User);
}
#[test]
fn doc_to_chat_message_extracts_content_and_timestamp() {
    let (_, doc) = sample_message_doc();
    let msg = doc_to_chat_message(&doc);
    assert_eq!(msg.content, "Hello there!");
    assert_eq!(msg.timestamp, "2025-01-01T12:00:00Z");
    assert_eq!(msg.session_id, "sess-1");
}
#[test]
fn doc_to_chat_message_attachments_always_empty() {
    // Attachments are never stored in the document, so the mapper
    // always produces an empty list.
    let (_, doc) = sample_message_doc();
    let msg = doc_to_chat_message(&doc);
    assert!(msg.attachments.is_empty());
}
// -- resolve_provider_url --
// Server-configured defaults passed as fallback inputs to the resolver.
const TEST_LITELLM_URL: &str = "http://localhost:4000";
const TEST_LITELLM_MODEL: &str = "qwen3-32b";
#[test]
fn resolve_openai_returns_api_openai() {
    // Known provider "openai" maps to its fixed public endpoint and
    // keeps the requested model unchanged.
    let (url, model) =
        resolve_provider_url(TEST_LITELLM_URL, TEST_LITELLM_MODEL, "openai", "gpt-4o");
    assert_eq!(url, "https://api.openai.com");
    assert_eq!(model, "gpt-4o");
}
#[test]
fn resolve_anthropic_returns_api_anthropic() {
    let (url, model) = resolve_provider_url(
        TEST_LITELLM_URL,
        TEST_LITELLM_MODEL,
        "anthropic",
        "claude-3-opus",
    );
    assert_eq!(url, "https://api.anthropic.com");
    assert_eq!(model, "claude-3-opus");
}
#[test]
fn resolve_huggingface_returns_model_url() {
    // Hugging Face embeds the model id in the inference URL path.
    let (url, model) = resolve_provider_url(
        TEST_LITELLM_URL,
        TEST_LITELLM_MODEL,
        "huggingface",
        "meta-llama/Llama-2-7b",
    );
    assert_eq!(
        url,
        "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b"
    );
    assert_eq!(model, "meta-llama/Llama-2-7b");
}
#[test]
fn resolve_unknown_defaults_to_litellm() {
    // Any provider not explicitly matched routes through the LiteLLM proxy.
    let (url, model) =
        resolve_provider_url(TEST_LITELLM_URL, TEST_LITELLM_MODEL, "litellm", "qwen3-32b");
    assert_eq!(url, TEST_LITELLM_URL);
    assert_eq!(model, "qwen3-32b");
}
#[test]
fn resolve_empty_model_falls_back_to_server_default() {
    // An empty model string selects the server-configured default model.
    let (url, model) =
        resolve_provider_url(TEST_LITELLM_URL, TEST_LITELLM_MODEL, "litellm", "");
    assert_eq!(url, TEST_LITELLM_URL);
    assert_eq!(model, TEST_LITELLM_MODEL);
}
}
}

View File

@@ -1,266 +0,0 @@
//! SSE streaming endpoint for chat completions.
//!
//! Exposes `GET /api/chat/stream?session_id=<id>` which:
//! 1. Authenticates the user via tower-sessions
//! 2. Loads the session and its messages from MongoDB
//! 3. Streams LLM tokens as SSE events to the frontend
//! 4. Persists the complete assistant message on finish
use axum::{
extract::Query,
response::{
sse::{Event, KeepAlive, Sse},
IntoResponse, Response,
},
Extension,
};
use futures::stream::Stream;
use reqwest::StatusCode;
use serde::Deserialize;
use tower_sessions::Session;
use super::{
auth::LOGGED_IN_USER_SESS_KEY,
chat::{doc_to_chat_message, doc_to_chat_session},
provider_client::{send_chat_request, ProviderMessage},
server_state::ServerState,
state::UserStateInner,
};
use crate::models::{ChatMessage, ChatRole};
/// Query parameters for the SSE stream endpoint.
#[derive(Deserialize)]
pub struct StreamQuery {
    /// Hex-encoded `ObjectId` of the chat session to stream
    /// (parsed with `ObjectId::parse_str` in the handler).
    session_id: String,
}
/// SSE streaming handler for chat completions.
///
/// Reads the session's provider/model config, loads conversation history,
/// sends to the LLM with `stream: true`, and forwards tokens as SSE events.
///
/// # SSE Event Format
///
/// - `data: {"token": "..."}` -- partial token
/// - `data: {"done": true, "message_id": "..."}` -- stream complete
/// - `data: {"error": "..."}` -- on failure
pub async fn chat_stream_handler(
    session: Session,
    Extension(state): Extension<ServerState>,
    Query(params): Query<StreamQuery>,
) -> Response {
    // Authenticate. A session-store failure and a missing user are
    // distinguished in the body text but both return 401.
    let user_state: Option<UserStateInner> = match session.get(LOGGED_IN_USER_SESS_KEY).await {
        Ok(u) => u,
        Err(_) => return (StatusCode::UNAUTHORIZED, "session error").into_response(),
    };
    let user = match user_state {
        Some(u) => u,
        None => return (StatusCode::UNAUTHORIZED, "not authenticated").into_response(),
    };
    // Load session from MongoDB (raw document to handle ObjectId -> String).
    // Filtering on `user_sub` ensures a user can only stream their own sessions.
    let chat_session = {
        use mongodb::bson::{doc, oid::ObjectId};
        let oid = match ObjectId::parse_str(&params.session_id) {
            Ok(o) => o,
            Err(_) => return (StatusCode::BAD_REQUEST, "invalid session_id").into_response(),
        };
        match state
            .db
            .raw_collection("chat_sessions")
            .find_one(doc! { "_id": oid, "user_sub": &user.sub })
            .await
        {
            Ok(Some(doc)) => doc_to_chat_session(&doc),
            Ok(None) => return (StatusCode::NOT_FOUND, "session not found").into_response(),
            Err(e) => {
                tracing::error!("db error loading session: {e}");
                return (StatusCode::INTERNAL_SERVER_ERROR, "db error").into_response();
            }
        }
    };
    // Load messages oldest-first (raw documents to handle ObjectId -> String).
    let messages = {
        use mongodb::bson::doc;
        use mongodb::options::FindOptions;
        let opts = FindOptions::builder().sort(doc! { "timestamp": 1 }).build();
        match state
            .db
            .raw_collection("chat_messages")
            .find(doc! { "session_id": &params.session_id })
            .with_options(opts)
            .await
        {
            Ok(mut cursor) => {
                use futures::TryStreamExt;
                let mut msgs = Vec::new();
                // NOTE(review): `unwrap_or(None)` silently ends iteration on a
                // cursor error, which can truncate the history sent to the LLM
                // instead of surfacing a 500 — confirm this is intentional.
                while let Some(doc) = TryStreamExt::try_next(&mut cursor).await.unwrap_or(None) {
                    msgs.push(doc_to_chat_message(&doc));
                }
                msgs
            }
            Err(e) => {
                tracing::error!("db error loading messages: {e}");
                return (StatusCode::INTERNAL_SERVER_ERROR, "db error").into_response();
            }
        }
    };
    // Convert stored messages to the provider wire format (role as string).
    let provider_msgs: Vec<ProviderMessage> = messages
        .iter()
        .map(|m| ProviderMessage {
            role: match m.role {
                ChatRole::User => "user".to_string(),
                ChatRole::Assistant => "assistant".to_string(),
                ChatRole::System => "system".to_string(),
            },
            content: m.content.clone(),
        })
        .collect();
    let provider = chat_session.provider.clone();
    let model = chat_session.model.clone();
    let session_id = params.session_id.clone();
    // TODO: Load user's API key from preferences for non-Ollama providers.
    // For now, Ollama (no key needed) is the default path.
    let api_key: Option<String> = None;
    // Send streaming request to LLM (`true` = stream tokens).
    let llm_resp = match send_chat_request(
        &state,
        &provider,
        &model,
        &provider_msgs,
        api_key.as_deref(),
        true,
    )
    .await
    {
        Ok(r) => r,
        Err(e) => {
            tracing::error!("LLM request failed: {e}");
            return (StatusCode::BAD_GATEWAY, "LLM request failed").into_response();
        }
    };
    // Non-2xx from the LLM is surfaced as 502 with the upstream status.
    if !llm_resp.status().is_success() {
        let status = llm_resp.status();
        let body = llm_resp.text().await.unwrap_or_default();
        tracing::error!("LLM returned {status}: {body}");
        return (StatusCode::BAD_GATEWAY, format!("LLM error: {status}")).into_response();
    }
    // Stream the response bytes as SSE events; persistence happens inside
    // the stream when the LLM finishes.
    let byte_stream = llm_resp.bytes_stream();
    let state_clone = state.clone();
    let sse_stream = build_sse_stream(byte_stream, state_clone, session_id, provider.clone());
    Sse::new(sse_stream)
        .keep_alive(KeepAlive::default())
        .into_response()
}
/// Build an SSE stream that parses OpenAI-compatible streaming chunks
/// and emits token events. On completion, persists the full message.
///
/// The upstream `byte_stream` carries `data: {...}\n`-framed JSON chunks;
/// tokens are read from `choices[0].delta.content`. Unparseable lines are
/// skipped silently. After the upstream ends, the accumulated text is
/// written to `chat_messages` and the session's `updated_at` is bumped.
fn build_sse_stream(
    byte_stream: impl Stream<Item = Result<bytes::Bytes, reqwest::Error>> + Send + 'static,
    state: ServerState,
    session_id: String,
    _provider: String,
) -> impl Stream<Item = Result<Event, std::convert::Infallible>> + Send + 'static {
    // Use an async stream to process chunks
    async_stream::stream! {
        use futures::StreamExt;
        // Accumulates the complete assistant reply for persistence.
        let mut full_content = String::new();
        // Holds partial lines across chunk boundaries (a chunk may end
        // mid-line, so we only consume up to the last '\n').
        let mut buffer = String::new();
        // Pin the byte stream for iteration
        let mut stream = std::pin::pin!(byte_stream);
        while let Some(chunk_result) = StreamExt::next(&mut stream).await {
            let chunk = match chunk_result {
                Ok(bytes) => bytes,
                Err(e) => {
                    // Forward the transport error to the client, then stop.
                    let err_json = serde_json::json!({ "error": e.to_string() });
                    yield Ok(Event::default().data(err_json.to_string()));
                    break;
                }
            };
            let text = String::from_utf8_lossy(&chunk);
            buffer.push_str(&text);
            // Process complete SSE lines from the buffer.
            // OpenAI streaming format: `data: {...}\n\n`
            while let Some(line_end) = buffer.find('\n') {
                let line = buffer[..line_end].trim().to_string();
                buffer = buffer[line_end + 1..].to_string();
                if line.is_empty() || line == "data: [DONE]" {
                    continue;
                }
                if let Some(json_str) = line.strip_prefix("data: ") {
                    if let Ok(parsed) = serde_json::from_str::<serde_json::Value>(json_str) {
                        // Extract token from OpenAI delta format
                        if let Some(token) = parsed["choices"][0]["delta"]["content"].as_str() {
                            full_content.push_str(token);
                            let event_data = serde_json::json!({ "token": token });
                            yield Ok(Event::default().data(event_data.to_string()));
                        }
                    }
                }
            }
        }
        // Persist the complete assistant message. Note: if no tokens arrived
        // (empty reply or early error), nothing is persisted and no `done`
        // event is emitted.
        if !full_content.is_empty() {
            let now = chrono::Utc::now().to_rfc3339();
            let message = ChatMessage {
                id: String::new(),
                session_id: session_id.clone(),
                role: ChatRole::Assistant,
                content: full_content,
                attachments: Vec::new(),
                timestamp: now.clone(),
            };
            // A persistence failure is logged but still yields `done` with
            // an empty message_id so the client can finish gracefully.
            let msg_id = match state.db.chat_messages().insert_one(&message).await {
                Ok(result) => result
                    .inserted_id
                    .as_object_id()
                    .map(|oid| oid.to_hex())
                    .unwrap_or_default(),
                Err(e) => {
                    tracing::error!("failed to persist assistant message: {e}");
                    String::new()
                }
            };
            // Update session timestamp (best-effort; errors ignored).
            if let Ok(session_oid) =
                mongodb::bson::oid::ObjectId::parse_str(&session_id)
            {
                let _ = state
                    .db
                    .chat_sessions()
                    .update_one(
                        mongodb::bson::doc! { "_id": session_oid },
                        mongodb::bson::doc! { "$set": { "updated_at": &now } },
                    )
                    .await;
            }
            let done_data = serde_json::json!({ "done": true, "message_id": msg_id });
            yield Ok(Event::default().data(done_data.to_string()));
        }
    }
}

View File

@@ -141,19 +141,23 @@ impl SmtpConfig {
// ServiceUrls
// ---------------------------------------------------------------------------
/// URLs and credentials for external services (Ollama, SearXNG, S3, etc.).
/// URLs and credentials for external services (LiteLLM, SearXNG, S3, etc.).
#[derive(Debug)]
pub struct ServiceUrls {
/// Ollama LLM instance base URL.
pub ollama_url: String,
/// Default Ollama model to use.
pub ollama_model: String,
/// LiteLLM proxy base URL.
pub litellm_url: String,
/// Default LiteLLM model to use.
pub litellm_model: String,
/// LiteLLM API key for authenticated requests.
pub litellm_api_key: String,
/// SearXNG meta-search engine base URL.
pub searxng_url: String,
/// LangChain service URL.
pub langchain_url: String,
/// LangGraph service URL.
pub langgraph_url: String,
/// LangFlow visual workflow builder URL.
pub langflow_url: String,
/// Langfuse observability URL.
pub langfuse_url: String,
/// Vector database URL.
@@ -176,13 +180,15 @@ impl ServiceUrls {
/// Currently infallible but returns `Result` for consistency.
pub fn from_env() -> Result<Self, Error> {
Ok(Self {
ollama_url: std::env::var("OLLAMA_URL")
.unwrap_or_else(|_| "http://localhost:11434".into()),
ollama_model: std::env::var("OLLAMA_MODEL").unwrap_or_else(|_| "llama3.1:8b".into()),
litellm_url: std::env::var("LITELLM_URL")
.unwrap_or_else(|_| "http://localhost:4000".into()),
litellm_model: std::env::var("LITELLM_MODEL").unwrap_or_else(|_| "qwen3-32b".into()),
litellm_api_key: optional_env("LITELLM_API_KEY"),
searxng_url: std::env::var("SEARXNG_URL")
.unwrap_or_else(|_| "http://localhost:8888".into()),
langchain_url: optional_env("LANGCHAIN_URL"),
langgraph_url: optional_env("LANGGRAPH_URL"),
langflow_url: optional_env("LANGFLOW_URL"),
langfuse_url: optional_env("LANGFUSE_URL"),
vectordb_url: optional_env("VECTORDB_URL"),
s3_url: optional_env("S3_URL"),
@@ -228,7 +234,7 @@ impl StripeConfig {
/// Comma-separated list of enabled LLM provider identifiers.
///
/// For example: `LLM_PROVIDERS=ollama,openai,anthropic`
/// For example: `LLM_PROVIDERS=litellm,openai,anthropic`
#[derive(Debug)]
pub struct LlmProvidersConfig {
/// Parsed provider names.
@@ -251,3 +257,160 @@ impl LlmProvidersConfig {
Ok(Self { providers })
}
}
#[cfg(test)]
mod tests {
    #![allow(clippy::unwrap_used, clippy::expect_used)]
    use super::*;
    use pretty_assertions::assert_eq;
    use serial_test::serial;
    // -----------------------------------------------------------------------
    // KeycloakConfig endpoint methods (no env vars needed)
    // -----------------------------------------------------------------------
    /// Fixture with a fixed base URL/realm so the derived OIDC endpoint
    /// URLs can be asserted exactly.
    fn sample_keycloak() -> KeycloakConfig {
        KeycloakConfig {
            url: "https://auth.example.com".into(),
            realm: "myrealm".into(),
            client_id: "dashboard".into(),
            redirect_uri: "https://app.example.com/callback".into(),
            app_url: "https://app.example.com".into(),
            admin_client_id: String::new(),
            admin_client_secret: SecretString::from(String::new()),
        }
    }
    #[test]
    fn keycloak_auth_endpoint() {
        let kc = sample_keycloak();
        assert_eq!(
            kc.auth_endpoint(),
            "https://auth.example.com/realms/myrealm/protocol/openid-connect/auth"
        );
    }
    #[test]
    fn keycloak_token_endpoint() {
        let kc = sample_keycloak();
        assert_eq!(
            kc.token_endpoint(),
            "https://auth.example.com/realms/myrealm/protocol/openid-connect/token"
        );
    }
    #[test]
    fn keycloak_userinfo_endpoint() {
        let kc = sample_keycloak();
        assert_eq!(
            kc.userinfo_endpoint(),
            "https://auth.example.com/realms/myrealm/protocol/openid-connect/userinfo"
        );
    }
    #[test]
    fn keycloak_logout_endpoint() {
        let kc = sample_keycloak();
        assert_eq!(
            kc.logout_endpoint(),
            "https://auth.example.com/realms/myrealm/protocol/openid-connect/logout"
        );
    }
    // -----------------------------------------------------------------------
    // LlmProvidersConfig::from_env()
    //
    // These tests mutate process-wide environment variables, so they are
    // marked #[serial] to avoid racing with each other.
    // -----------------------------------------------------------------------
    #[test]
    #[serial]
    fn llm_providers_empty_string() {
        std::env::set_var("LLM_PROVIDERS", "");
        let cfg = LlmProvidersConfig::from_env().unwrap();
        assert!(cfg.providers.is_empty());
        std::env::remove_var("LLM_PROVIDERS");
    }
    #[test]
    #[serial]
    fn llm_providers_single() {
        std::env::set_var("LLM_PROVIDERS", "litellm");
        let cfg = LlmProvidersConfig::from_env().unwrap();
        assert_eq!(cfg.providers, vec!["litellm"]);
        std::env::remove_var("LLM_PROVIDERS");
    }
    #[test]
    #[serial]
    fn llm_providers_multiple() {
        std::env::set_var("LLM_PROVIDERS", "litellm,openai,anthropic");
        let cfg = LlmProvidersConfig::from_env().unwrap();
        assert_eq!(cfg.providers, vec!["litellm", "openai", "anthropic"]);
        std::env::remove_var("LLM_PROVIDERS");
    }
    #[test]
    #[serial]
    fn llm_providers_trims_whitespace() {
        std::env::set_var("LLM_PROVIDERS", " litellm , openai ");
        let cfg = LlmProvidersConfig::from_env().unwrap();
        assert_eq!(cfg.providers, vec!["litellm", "openai"]);
        std::env::remove_var("LLM_PROVIDERS");
    }
    #[test]
    #[serial]
    fn llm_providers_filters_empty_entries() {
        // Consecutive or trailing commas must not produce empty entries.
        std::env::set_var("LLM_PROVIDERS", "litellm,,openai,");
        let cfg = LlmProvidersConfig::from_env().unwrap();
        assert_eq!(cfg.providers, vec!["litellm", "openai"]);
        std::env::remove_var("LLM_PROVIDERS");
    }
    // -----------------------------------------------------------------------
    // ServiceUrls::from_env() defaults
    // -----------------------------------------------------------------------
    #[test]
    #[serial]
    fn service_urls_default_litellm_url() {
        std::env::remove_var("LITELLM_URL");
        let svc = ServiceUrls::from_env().unwrap();
        assert_eq!(svc.litellm_url, "http://localhost:4000");
    }
    #[test]
    #[serial]
    fn service_urls_default_litellm_model() {
        std::env::remove_var("LITELLM_MODEL");
        let svc = ServiceUrls::from_env().unwrap();
        assert_eq!(svc.litellm_model, "qwen3-32b");
    }
    #[test]
    #[serial]
    fn service_urls_default_searxng_url() {
        std::env::remove_var("SEARXNG_URL");
        let svc = ServiceUrls::from_env().unwrap();
        assert_eq!(svc.searxng_url, "http://localhost:8888");
    }
    #[test]
    #[serial]
    fn service_urls_custom_litellm_url() {
        std::env::set_var("LITELLM_URL", "http://litellm-host:4000");
        let svc = ServiceUrls::from_env().unwrap();
        assert_eq!(svc.litellm_url, "http://litellm-host:4000");
        std::env::remove_var("LITELLM_URL");
    }
    #[test]
    #[serial]
    fn required_env_missing_returns_config_error() {
        // The error message must name the missing variable for operators.
        std::env::remove_var("__TEST_REQUIRED_MISSING__");
        let result = required_env("__TEST_REQUIRED_MISSING__");
        assert!(result.is_err());
        let err_msg = result.unwrap_err().to_string();
        assert!(err_msg.contains("__TEST_REQUIRED_MISSING__"));
    }
}

View File

@@ -41,3 +41,53 @@ impl IntoResponse for Error {
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use axum::response::IntoResponse;
    use pretty_assertions::assert_eq;
    // Display formatting: each variant has a distinct, stable prefix.
    #[test]
    fn state_error_display() {
        let err = Error::StateError("bad state".into());
        assert_eq!(err.to_string(), "bad state");
    }
    #[test]
    fn database_error_display() {
        let err = Error::DatabaseError("connection lost".into());
        assert_eq!(err.to_string(), "database error: connection lost");
    }
    #[test]
    fn config_error_display() {
        let err = Error::ConfigError("missing var".into());
        assert_eq!(err.to_string(), "configuration error: missing var");
    }
    // HTTP mapping: database outages are 503, everything else is 500.
    #[test]
    fn state_error_into_response_500() {
        let resp = Error::StateError("oops".into()).into_response();
        assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
    }
    #[test]
    fn database_error_into_response_503() {
        let resp = Error::DatabaseError("down".into()).into_response();
        assert_eq!(resp.status(), StatusCode::SERVICE_UNAVAILABLE);
    }
    #[test]
    fn config_error_into_response_500() {
        let resp = Error::ConfigError("bad cfg".into()).into_response();
        assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
    }
    #[test]
    fn io_error_into_response_500() {
        let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "not found");
        let resp = Error::IoError(io_err).into_response();
        assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
    }
}

View File

@@ -0,0 +1,108 @@
use dioxus::prelude::*;
#[cfg(feature = "server")]
use serde::Deserialize;
use crate::models::AgentEntry;
/// Raw assistant object returned by the LangGraph `POST /assistants/search`
/// endpoint. Only the fields we display are deserialized; unknown keys are
/// silently ignored thanks to serde defaults.
#[cfg(feature = "server")]
#[derive(Deserialize)]
struct LangGraphAssistant {
    /// Unique assistant identifier (required in the response).
    assistant_id: String,
    /// Display name; may be empty, in which case `graph_id` is used.
    #[serde(default)]
    name: String,
    /// Identifier of the underlying graph this assistant runs.
    #[serde(default)]
    graph_id: String,
    /// Free-form metadata object; a `description` key is extracted when present.
    #[serde(default)]
    metadata: serde_json::Value,
}
/// Fetch the list of assistants (agents) from a LangGraph instance.
///
/// Calls `POST <langgraph_url>/assistants/search` with an empty body to
/// retrieve every registered assistant. Each result is mapped to the
/// frontend-friendly `AgentEntry` model.
///
/// # Returns
///
/// A vector of `AgentEntry` structs. Returns an empty vector when the
/// LangGraph URL is not configured or the service is unreachable.
///
/// # Errors
///
/// Returns `ServerFnError` on network or deserialization failures that
/// indicate a misconfigured (but present) LangGraph instance.
#[server(endpoint = "list-langgraph-agents")]
pub async fn list_langgraph_agents() -> Result<Vec<AgentEntry>, ServerFnError> {
    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;
    let base_url = state.services.langgraph_url.clone();
    // LangGraph is optional; an unset URL means "feature disabled".
    if base_url.is_empty() {
        return Ok(Vec::new());
    }
    let url = format!("{}/assistants/search", base_url.trim_end_matches('/'));
    // Short timeout so a dead LangGraph doesn't stall the dashboard.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(5))
        .build()
        .map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
    // LangGraph expects a POST with a JSON body (empty object = no filters).
    // Non-2xx and transport errors are logged and degrade to an empty list.
    let resp = match client
        .post(&url)
        .header("content-type", "application/json")
        .body("{}")
        .send()
        .await
    {
        Ok(r) if r.status().is_success() => r,
        Ok(r) => {
            let status = r.status();
            let body = r.text().await.unwrap_or_default();
            tracing::error!("LangGraph returned {status}: {body}");
            return Ok(Vec::new());
        }
        Err(e) => {
            tracing::error!("LangGraph request failed: {e}");
            return Ok(Vec::new());
        }
    };
    // A malformed body from a reachable instance is a hard error, since it
    // indicates misconfiguration rather than absence.
    let assistants: Vec<LangGraphAssistant> = resp
        .json()
        .await
        .map_err(|e| ServerFnError::new(format!("Failed to parse LangGraph response: {e}")))?;
    let entries = assistants
        .into_iter()
        .map(|a| {
            // Use the assistant name if present, otherwise fall back to graph_id.
            let name = if a.name.is_empty() {
                a.graph_id.clone()
            } else {
                a.name
            };
            // Extract a description from metadata if available.
            let description = a
                .metadata
                .get("description")
                .and_then(|v| v.as_str())
                .unwrap_or("")
                .to_string();
            AgentEntry {
                id: a.assistant_id,
                name,
                description,
                status: "active".to_string(),
            }
        })
        .collect();
    Ok(entries)
}

View File

@@ -0,0 +1,403 @@
#[cfg(feature = "server")]
use std::collections::HashMap;
use dioxus::prelude::*;
use serde::{Deserialize, Serialize};
use crate::models::LitellmUsageStats;
#[cfg(feature = "server")]
use crate::models::ModelUsage;
/// Status of a LiteLLM proxy instance, including connectivity and available models.
///
/// # Fields
///
/// * `online` - Whether the LiteLLM API responded successfully
/// * `models` - List of model IDs available through the proxy
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct LitellmStatus {
    /// True when `GET /v1/models` returned a 2xx response.
    pub online: bool,
    /// Model IDs from the proxy; empty when offline or unparseable.
    pub models: Vec<String>,
}
/// Response from LiteLLM's `GET /v1/models` endpoint (OpenAI-compatible).
#[cfg(feature = "server")]
#[derive(Deserialize)]
struct ModelsResponse {
    /// List of available models; only the `id` of each entry is used.
    data: Vec<ModelObject>,
}
/// A single model entry from the OpenAI-compatible models list.
#[cfg(feature = "server")]
#[derive(Deserialize)]
struct ModelObject {
    /// Model identifier string (e.g. "qwen3-32b").
    id: String,
}
/// Check the status of a LiteLLM proxy by querying its models endpoint.
///
/// Calls `GET <litellm_url>/v1/models` to list available models and determine
/// whether the instance is reachable. Sends the API key as a Bearer token
/// if configured.
///
/// # Arguments
///
/// * `litellm_url` - Base URL of the LiteLLM proxy (e.g. "http://localhost:4000")
///
/// # Returns
///
/// A `LitellmStatus` with `online: true` and model IDs if reachable,
/// or `online: false` with an empty model list on failure
///
/// # Errors
///
/// Returns `ServerFnError` only on serialization issues; network failures
/// are caught and returned as `online: false`
#[post("/api/litellm-status")]
pub async fn get_litellm_status(litellm_url: String) -> Result<LitellmStatus, ServerFnError> {
    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;
    // An empty caller-supplied URL falls back to the server-configured one.
    let base_url = if litellm_url.is_empty() {
        state.services.litellm_url.clone()
    } else {
        litellm_url
    };
    let api_key = state.services.litellm_api_key.clone();
    let url = format!("{}/v1/models", base_url.trim_end_matches('/'));
    // Short timeout so a dead proxy doesn't stall the dashboard.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(5))
        .build()
        .map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
    let mut request = client.get(&url);
    // The API key is optional; only attach the header when configured.
    if !api_key.is_empty() {
        request = request.header("Authorization", format!("Bearer {api_key}"));
    }
    // Any transport error or non-2xx status reports the proxy as offline.
    let resp = match request.send().await {
        Ok(r) if r.status().is_success() => r,
        _ => {
            return Ok(LitellmStatus {
                online: false,
                models: Vec::new(),
            });
        }
    };
    // A 2xx with an unparseable body still counts as online (reachable),
    // just with no model list.
    let body: ModelsResponse = match resp.json().await {
        Ok(b) => b,
        Err(_) => {
            return Ok(LitellmStatus {
                online: true,
                models: Vec::new(),
            });
        }
    };
    let models = body.data.into_iter().map(|m| m.id).collect();
    Ok(LitellmStatus {
        online: true,
        models,
    })
}
/// Response from LiteLLM's `GET /global/activity` endpoint.
///
/// Returns aggregate token counts and API request totals for a date range.
/// Available on the free tier (no Enterprise license needed).
#[cfg(feature = "server")]
#[derive(Debug, Deserialize)]
struct ActivityResponse {
    /// Total tokens across all models in the date range
    /// (defaults to 0 when the field is absent).
    #[serde(default)]
    sum_total_tokens: u64,
}
/// Per-model entry from `GET /global/activity/model`.
///
/// Each entry contains a model name and its aggregated token total.
#[cfg(feature = "server")]
#[derive(Debug, Deserialize)]
struct ActivityModelEntry {
    /// Model identifier (may be empty for unattributed traffic)
    #[serde(default)]
    model: String,
    /// Sum of tokens used by this model in the date range
    #[serde(default)]
    sum_total_tokens: u64,
}
/// Per-model spend entry from `GET /global/spend/models`.
///
/// Each entry maps a model name to its total spend in USD.
#[cfg(feature = "server")]
#[derive(Debug, Deserialize)]
struct SpendModelEntry {
    /// Model identifier
    #[serde(default)]
    model: String,
    /// Total spend in USD
    #[serde(default)]
    total_spend: f64,
}
/// Merge per-model token counts and spend data into `ModelUsage` entries.
///
/// Joins `activity_models` (tokens) and `spend_models` (spend) by model
/// name using a HashMap for O(n + m) merge. Entries with empty model
/// names are skipped.
///
/// # Arguments
///
/// * `activity_models` - Per-model token data from `/global/activity/model`
/// * `spend_models` - Per-model spend data from `/global/spend/models`
///
/// # Returns
///
/// Merged list sorted by total tokens descending; ties are broken by
/// model name ascending so the output order is deterministic.
#[cfg(feature = "server")]
fn merge_model_data(
    activity_models: Vec<ActivityModelEntry>,
    spend_models: Vec<SpendModelEntry>,
) -> Vec<ModelUsage> {
    let mut model_map: HashMap<String, ModelUsage> = HashMap::new();
    // First pass: record token totals keyed by model name.
    for entry in activity_models {
        if entry.model.is_empty() {
            continue;
        }
        model_map
            .entry(entry.model.clone())
            .or_insert_with(|| ModelUsage {
                model: entry.model,
                ..Default::default()
            })
            .total_tokens = entry.sum_total_tokens;
    }
    // Second pass: merge spend into existing entries or insert fresh ones
    // for models that appear only in the spend report.
    for entry in spend_models {
        if entry.model.is_empty() {
            continue;
        }
        model_map
            .entry(entry.model.clone())
            .or_insert_with(|| ModelUsage {
                model: entry.model,
                ..Default::default()
            })
            .spend = entry.total_spend;
    }
    let mut result: Vec<ModelUsage> = model_map.into_values().collect();
    // Sort by tokens descending. The tie-break on model name makes the
    // output deterministic: HashMap iteration order is randomized, so
    // without it, equal-token models would shuffle between calls.
    result.sort_by(|a, b| {
        b.total_tokens
            .cmp(&a.total_tokens)
            .then_with(|| a.model.cmp(&b.model))
    });
    result
}
/// Fetch aggregated usage statistics from LiteLLM's free-tier APIs.
///
/// Combines three endpoints to build a complete usage picture:
/// - `GET /global/activity` - total token counts
/// - `GET /global/activity/model` - per-model token breakdown
/// - `GET /global/spend/models` - per-model spend in USD
///
/// # Arguments
///
/// * `start_date` - Start of the reporting period in `YYYY-MM-DD` format
/// * `end_date` - End of the reporting period in `YYYY-MM-DD` format
///
/// # Returns
///
/// Aggregated usage stats; returns default (zeroed) stats on network
/// failure or permission errors
///
/// # Errors
///
/// Returns `ServerFnError` only on HTTP client construction failure
#[post("/api/litellm-usage")]
pub async fn get_litellm_usage(
    start_date: String,
    end_date: String,
) -> Result<LitellmUsageStats, ServerFnError> {
    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;
    let base_url = &state.services.litellm_url;
    let api_key = &state.services.litellm_api_key;
    // LiteLLM is optional; an unset URL yields zeroed stats.
    if base_url.is_empty() {
        return Ok(LitellmUsageStats::default());
    }
    let base = base_url.trim_end_matches('/');
    // NOTE(review): start_date/end_date are interpolated unencoded; values
    // are expected to be plain YYYY-MM-DD strings — confirm callers.
    let date_params = format!("start_date={start_date}&end_date={end_date}");
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(10))
        .build()
        .map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
    // Helper closure to build an authenticated GET request
    let auth_get = |url: String| {
        let mut req = client.get(url);
        if !api_key.is_empty() {
            req = req.header("Authorization", format!("Bearer {api_key}"));
        }
        req
    };
    // Fire all three requests concurrently to minimise latency
    let (activity_res, model_activity_res, model_spend_res) = tokio::join!(
        auth_get(format!("{base}/global/activity?{date_params}")).send(),
        auth_get(format!("{base}/global/activity/model?{date_params}")).send(),
        auth_get(format!("{base}/global/spend/models?{date_params}")).send(),
    );
    // Parse total token count from /global/activity; any failure in any of
    // the three responses degrades to zero/empty rather than erroring out.
    let total_tokens = match activity_res {
        Ok(r) if r.status().is_success() => r
            .json::<ActivityResponse>()
            .await
            .map(|a| a.sum_total_tokens)
            .unwrap_or(0),
        _ => 0,
    };
    // Parse per-model token breakdown from /global/activity/model
    let activity_models: Vec<ActivityModelEntry> = match model_activity_res {
        Ok(r) if r.status().is_success() => r.json().await.unwrap_or_default(),
        _ => Vec::new(),
    };
    // Parse per-model spend from /global/spend/models
    let spend_models: Vec<SpendModelEntry> = match model_spend_res {
        Ok(r) if r.status().is_success() => r.json().await.unwrap_or_default(),
        _ => Vec::new(),
    };
    let total_spend: f64 = spend_models.iter().map(|m| m.total_spend).sum();
    let model_breakdown = merge_model_data(activity_models, spend_models);
    Ok(LitellmUsageStats {
        total_spend,
        // Free-tier endpoints don't provide prompt/completion split;
        // total_tokens comes from /global/activity.
        total_prompt_tokens: 0,
        total_completion_tokens: 0,
        total_tokens,
        model_breakdown,
    })
}
// Gated on the server feature because merge_model_data and its input
// structs only exist in server builds.
#[cfg(all(test, feature = "server"))]
mod tests {
    use super::*;
    #[test]
    fn merge_empty_inputs() {
        let result = merge_model_data(Vec::new(), Vec::new());
        assert!(result.is_empty());
    }
    #[test]
    fn merge_activity_only() {
        // Token data without spend data leaves spend at its default (0.0).
        let activity = vec![ActivityModelEntry {
            model: "gpt-4".into(),
            sum_total_tokens: 1500,
        }];
        let result = merge_model_data(activity, Vec::new());
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].model, "gpt-4");
        assert_eq!(result[0].total_tokens, 1500);
        assert_eq!(result[0].spend, 0.0);
    }
    #[test]
    fn merge_spend_only() {
        // Spend data without token data leaves total_tokens at 0.
        let spend = vec![SpendModelEntry {
            model: "gpt-4".into(),
            total_spend: 2.5,
        }];
        let result = merge_model_data(Vec::new(), spend);
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].model, "gpt-4");
        assert_eq!(result[0].spend, 2.5);
        assert_eq!(result[0].total_tokens, 0);
    }
    #[test]
    fn merge_joins_by_model_name() {
        let activity = vec![
            ActivityModelEntry {
                model: "gpt-4".into(),
                sum_total_tokens: 5000,
            },
            ActivityModelEntry {
                model: "claude-3".into(),
                sum_total_tokens: 3000,
            },
        ];
        let spend = vec![
            SpendModelEntry {
                model: "gpt-4".into(),
                total_spend: 1.0,
            },
            SpendModelEntry {
                model: "claude-3".into(),
                total_spend: 0.5,
            },
        ];
        let result = merge_model_data(activity, spend);
        assert_eq!(result.len(), 2);
        // Sorted by tokens descending: gpt-4 (5000) before claude-3 (3000)
        assert_eq!(result[0].model, "gpt-4");
        assert_eq!(result[0].total_tokens, 5000);
        assert_eq!(result[0].spend, 1.0);
        assert_eq!(result[1].model, "claude-3");
        assert_eq!(result[1].total_tokens, 3000);
        assert_eq!(result[1].spend, 0.5);
    }
    #[test]
    fn merge_skips_empty_model_names() {
        // Unattributed traffic (empty model name) is dropped on both sides.
        let activity = vec![
            ActivityModelEntry {
                model: "".into(),
                sum_total_tokens: 100,
            },
            ActivityModelEntry {
                model: "gpt-4".into(),
                sum_total_tokens: 500,
            },
        ];
        let spend = vec![SpendModelEntry {
            model: "".into(),
            total_spend: 0.01,
        }];
        let result = merge_model_data(activity, spend);
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].model, "gpt-4");
    }
    #[test]
    fn merge_unmatched_models_appear_in_both_directions() {
        // Models present in only one source still appear in the merge.
        let activity = vec![ActivityModelEntry {
            model: "tokens-only".into(),
            sum_total_tokens: 1000,
        }];
        let spend = vec![SpendModelEntry {
            model: "spend-only".into(),
            total_spend: 0.5,
        }];
        let result = merge_model_data(activity, spend);
        assert_eq!(result.len(), 2);
        // tokens-only has 1000 tokens, spend-only has 0 tokens
        assert_eq!(result[0].model, "tokens-only");
        assert_eq!(result[0].total_tokens, 1000);
        assert_eq!(result[1].model, "spend-only");
        assert_eq!(result[1].spend, 0.5);
    }
}

View File

@@ -4,23 +4,23 @@ use dioxus::prelude::*;
mod inner {
use serde::{Deserialize, Serialize};
/// A single message in the OpenAI-compatible chat format used by Ollama.
/// A single message in the OpenAI-compatible chat format used by LiteLLM.
#[derive(Serialize)]
pub(super) struct ChatMessage {
pub role: String,
pub content: String,
}
/// Request body for Ollama's OpenAI-compatible chat completions endpoint.
/// Request body for the OpenAI-compatible chat completions endpoint.
#[derive(Serialize)]
pub(super) struct OllamaChatRequest {
pub(super) struct ChatCompletionRequest {
pub model: String,
pub messages: Vec<ChatMessage>,
/// Disable streaming so we get a single JSON response.
pub stream: bool,
}
/// A single choice in the Ollama chat completions response.
/// A single choice in the chat completions response.
#[derive(Deserialize)]
pub(super) struct ChatChoice {
pub message: ChatResponseMessage,
@@ -32,9 +32,9 @@ mod inner {
pub content: String,
}
/// Top-level response from Ollama's `/v1/chat/completions` endpoint.
/// Top-level response from the `/v1/chat/completions` endpoint.
#[derive(Deserialize)]
pub(super) struct OllamaChatResponse {
pub(super) struct ChatCompletionResponse {
pub choices: Vec<ChatChoice>,
}
@@ -72,7 +72,25 @@ mod inner {
}
let html = resp.text().await.ok()?;
let document = scraper::Html::parse_document(&html);
parse_article_html(&html)
}
/// Parse article text from raw HTML without any network I/O.
///
/// Uses a tiered extraction strategy:
/// 1. Try content within `<article>`, `<main>`, or `[role="main"]`
/// 2. Fall back to all `<p>` tags outside excluded containers
///
/// # Arguments
///
/// * `html` - Raw HTML string to parse
///
/// # Returns
///
/// The extracted text, or `None` if extraction yields < 100 chars.
/// Output is capped at 8000 characters.
pub(crate) fn parse_article_html(html: &str) -> Option<String> {
let document = scraper::Html::parse_document(html);
// Strategy 1: Extract from semantic article containers.
// Most news sites wrap the main content in <article>, <main>,
@@ -134,12 +152,12 @@ mod inner {
}
/// Sum the total character length of all collected text parts.
fn joined_len(parts: &[String]) -> usize {
pub(crate) fn joined_len(parts: &[String]) -> usize {
parts.iter().map(|s| s.len()).sum()
}
}
/// Summarize an article using a local Ollama instance.
/// Summarize an article using a LiteLLM proxy.
///
/// First attempts to fetch the full article text from the provided URL.
/// If that fails (paywall, timeout, etc.), falls back to the search snippet.
@@ -149,8 +167,8 @@ mod inner {
///
/// * `snippet` - The search result snippet (fallback content)
/// * `article_url` - The original article URL to fetch full text from
/// * `ollama_url` - Base URL of the Ollama instance (e.g. "http://localhost:11434")
/// * `model` - The Ollama model ID to use (e.g. "llama3.1:8b")
/// * `litellm_url` - Base URL of the LiteLLM proxy (e.g. "http://localhost:4000")
/// * `model` - The model ID to use (e.g. "qwen3-32b")
///
/// # Returns
///
@@ -158,36 +176,38 @@ mod inner {
///
/// # Errors
///
/// Returns `ServerFnError` if the Ollama request fails or response parsing fails
/// Returns `ServerFnError` if the LiteLLM request fails or response parsing fails
#[post("/api/summarize")]
pub async fn summarize_article(
snippet: String,
article_url: String,
ollama_url: String,
litellm_url: String,
model: String,
) -> Result<String, ServerFnError> {
use inner::{fetch_article_text, ChatMessage, OllamaChatRequest, OllamaChatResponse};
use inner::{fetch_article_text, ChatCompletionRequest, ChatCompletionResponse, ChatMessage};
let state: crate::infrastructure::ServerState =
dioxus_fullstack::FullstackContext::extract().await?;
// Use caller-provided values or fall back to ServerState config
let base_url = if ollama_url.is_empty() {
state.services.ollama_url.clone()
let base_url = if litellm_url.is_empty() {
state.services.litellm_url.clone()
} else {
ollama_url
litellm_url
};
let model = if model.is_empty() {
state.services.ollama_model.clone()
state.services.litellm_model.clone()
} else {
model
};
let api_key = state.services.litellm_api_key.clone();
// Try to fetch the full article; fall back to the search snippet
let article_text = fetch_article_text(&article_url).await.unwrap_or(snippet);
let request_body = OllamaChatRequest {
let request_body = ChatCompletionRequest {
model,
stream: false,
messages: vec![ChatMessage {
@@ -205,42 +225,48 @@ pub async fn summarize_article(
let url = format!("{}/v1/chat/completions", base_url.trim_end_matches('/'));
let client = reqwest::Client::new();
let resp = client
let mut request = client
.post(&url)
.header("content-type", "application/json")
.json(&request_body)
.json(&request_body);
if !api_key.is_empty() {
request = request.header("Authorization", format!("Bearer {api_key}"));
}
let resp = request
.send()
.await
.map_err(|e| ServerFnError::new(format!("Ollama request failed: {e}")))?;
.map_err(|e| ServerFnError::new(format!("LiteLLM request failed: {e}")))?;
if !resp.status().is_success() {
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
return Err(ServerFnError::new(format!(
"Ollama returned {status}: {body}"
"LiteLLM returned {status}: {body}"
)));
}
let body: OllamaChatResponse = resp
let body: ChatCompletionResponse = resp
.json()
.await
.map_err(|e| ServerFnError::new(format!("Failed to parse Ollama response: {e}")))?;
.map_err(|e| ServerFnError::new(format!("Failed to parse LiteLLM response: {e}")))?;
body.choices
.first()
.map(|choice| choice.message.content.clone())
.ok_or_else(|| ServerFnError::new("Empty response from Ollama"))
.ok_or_else(|| ServerFnError::new("Empty response from LiteLLM"))
}
/// A lightweight chat message for the follow-up conversation.
/// Uses simple String role ("system"/"user"/"assistant") for Ollama compatibility.
/// Uses simple String role ("system"/"user"/"assistant") for OpenAI compatibility.
#[derive(Debug, Clone, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct FollowUpMessage {
pub role: String,
pub content: String,
}
/// Send a follow-up question about an article using a local Ollama instance.
/// Send a follow-up question about an article using a LiteLLM proxy.
///
/// Accepts the full conversation history (system context + prior turns) and
/// returns the assistant's next response. The system message should contain
@@ -249,8 +275,8 @@ pub struct FollowUpMessage {
/// # Arguments
///
/// * `messages` - The conversation history including system context
/// * `ollama_url` - Base URL of the Ollama instance
/// * `model` - The Ollama model ID to use
/// * `litellm_url` - Base URL of the LiteLLM proxy
/// * `model` - The model ID to use
///
/// # Returns
///
@@ -258,30 +284,32 @@ pub struct FollowUpMessage {
///
/// # Errors
///
/// Returns `ServerFnError` if the Ollama request fails or response parsing fails
/// Returns `ServerFnError` if the LiteLLM request fails or response parsing fails
#[post("/api/chat")]
pub async fn chat_followup(
messages: Vec<FollowUpMessage>,
ollama_url: String,
litellm_url: String,
model: String,
) -> Result<String, ServerFnError> {
use inner::{ChatMessage, OllamaChatRequest, OllamaChatResponse};
use inner::{ChatCompletionRequest, ChatCompletionResponse, ChatMessage};
let state: crate::infrastructure::ServerState =
dioxus_fullstack::FullstackContext::extract().await?;
let base_url = if ollama_url.is_empty() {
state.services.ollama_url.clone()
let base_url = if litellm_url.is_empty() {
state.services.litellm_url.clone()
} else {
ollama_url
litellm_url
};
let model = if model.is_empty() {
state.services.ollama_model.clone()
state.services.litellm_model.clone()
} else {
model
};
let api_key = state.services.litellm_api_key.clone();
// Convert FollowUpMessage to inner ChatMessage for the request
let chat_messages: Vec<ChatMessage> = messages
.into_iter()
@@ -291,7 +319,7 @@ pub async fn chat_followup(
})
.collect();
let request_body = OllamaChatRequest {
let request_body = ChatCompletionRequest {
model,
stream: false,
messages: chat_messages,
@@ -299,29 +327,182 @@ pub async fn chat_followup(
let url = format!("{}/v1/chat/completions", base_url.trim_end_matches('/'));
let client = reqwest::Client::new();
let resp = client
let mut request = client
.post(&url)
.header("content-type", "application/json")
.json(&request_body)
.json(&request_body);
if !api_key.is_empty() {
request = request.header("Authorization", format!("Bearer {api_key}"));
}
let resp = request
.send()
.await
.map_err(|e| ServerFnError::new(format!("Ollama request failed: {e}")))?;
.map_err(|e| ServerFnError::new(format!("LiteLLM request failed: {e}")))?;
if !resp.status().is_success() {
let status = resp.status();
let body = resp.text().await.unwrap_or_default();
return Err(ServerFnError::new(format!(
"Ollama returned {status}: {body}"
"LiteLLM returned {status}: {body}"
)));
}
let body: OllamaChatResponse = resp
let body: ChatCompletionResponse = resp
.json()
.await
.map_err(|e| ServerFnError::new(format!("Failed to parse Ollama response: {e}")))?;
.map_err(|e| ServerFnError::new(format!("Failed to parse LiteLLM response: {e}")))?;
body.choices
.first()
.map(|choice| choice.message.content.clone())
.ok_or_else(|| ServerFnError::new("Empty response from Ollama"))
.ok_or_else(|| ServerFnError::new("Empty response from LiteLLM"))
}
#[cfg(test)]
mod tests {
    //! Tests for the summarize/chat module: serde behavior of the
    //! `FollowUpMessage` wire type, plus (server feature only) the pure
    //! HTML-extraction helpers `joined_len` and `parse_article_html`.
    use super::*;
    use pretty_assertions::assert_eq;
    // -----------------------------------------------------------------------
    // FollowUpMessage serde tests
    // -----------------------------------------------------------------------
    // Serialize then deserialize must reproduce the value exactly
    // (relies on the derived PartialEq).
    #[test]
    fn followup_message_serde_round_trip() {
        let msg = FollowUpMessage {
            role: "assistant".into(),
            content: "Here is my answer.".into(),
        };
        let json = serde_json::to_string(&msg).expect("serialize FollowUpMessage");
        let back: FollowUpMessage =
            serde_json::from_str(&json).expect("deserialize FollowUpMessage");
        assert_eq!(msg, back);
    }
    // A hand-written JSON object maps onto the struct field-for-field.
    #[test]
    fn followup_message_deserialize_from_json_literal() {
        let json = r#"{"role":"system","content":"You are helpful."}"#;
        let msg: FollowUpMessage = serde_json::from_str(json).expect("deserialize literal");
        assert_eq!(msg.role, "system");
        assert_eq!(msg.content, "You are helpful.");
    }
    // -----------------------------------------------------------------------
    // joined_len and parse_article_html tests (server feature required)
    // -----------------------------------------------------------------------
    #[cfg(feature = "server")]
    mod server_tests {
        use super::super::inner::{joined_len, parse_article_html};
        use pretty_assertions::assert_eq;
        // Empty slice sums to zero.
        #[test]
        fn joined_len_empty_input() {
            assert_eq!(joined_len(&[]), 0);
        }
        // 3 + 2 + 5 characters across the parts.
        #[test]
        fn joined_len_sums_correctly() {
            let parts = vec!["abc".into(), "de".into(), "fghij".into()];
            assert_eq!(joined_len(&parts), 10);
        }
        // -------------------------------------------------------------------
        // parse_article_html tests
        // -------------------------------------------------------------------
        // Helper: generate a string of exactly `len` chars from a repeated
        // word sequence (55-char base phrase, repeated then truncated).
        fn lorem(len: usize) -> String {
            "Lorem ipsum dolor sit amet consectetur adipiscing elit "
                .repeat((len / 55) + 1)
                .chars()
                .take(len)
                .collect()
        }
        // Strategy 1: content inside <article> is extracted.
        #[test]
        fn article_tag_extracts_text() {
            let body = lorem(250);
            let html = format!("<html><body><article><p>{body}</p></article></body></html>");
            let result = parse_article_html(&html);
            assert!(result.is_some(), "expected Some for article tag");
            assert!(result.unwrap().contains("Lorem"));
        }
        // Strategy 1 also covers <main>.
        #[test]
        fn main_tag_extracts_text() {
            let body = lorem(250);
            let html = format!("<html><body><main><p>{body}</p></main></body></html>");
            let result = parse_article_html(&html);
            assert!(result.is_some(), "expected Some for main tag");
        }
        #[test]
        fn fallback_to_p_tags_when_article_main_yield_little() {
            // No <article>/<main>, so falls back to <p> tags
            let body = lorem(250);
            let html = format!("<html><body><div><p>{body}</p></div></body></html>");
            let result = parse_article_html(&html);
            assert!(result.is_some(), "expected fallback to <p> tags");
        }
        #[test]
        fn excludes_nav_footer_aside_content() {
            // Content only inside excluded containers -- should be excluded
            let body = lorem(250);
            let html = format!(
                "<html><body>\
                <nav><p>{body}</p></nav>\
                <footer><p>{body}</p></footer>\
                <aside><p>{body}</p></aside>\
                </body></html>"
            );
            let result = parse_article_html(&html);
            assert!(result.is_none(), "expected None for excluded-only content");
        }
        // Extraction yielding < 100 chars is rejected (see parse_article_html docs).
        #[test]
        fn returns_none_when_text_too_short() {
            let html = "<html><body><p>Short.</p></body></html>";
            let result = parse_article_html(html);
            assert!(result.is_none(), "expected None for short text");
        }
        // Output is capped at 8000 characters regardless of input size.
        #[test]
        fn truncates_at_8000_chars() {
            let body = lorem(10000);
            let html = format!("<html><body><article><p>{body}</p></article></body></html>");
            let result = parse_article_html(&html).expect("expected Some");
            assert!(
                result.len() <= 8000,
                "expected <= 8000 chars, got {}",
                result.len()
            );
        }
        #[test]
        fn skips_fragments_under_30_chars() {
            // Only fragments < 30 chars -- should yield None
            let html = "<html><body><article>\
                <p>Short frag one</p>\
                <p>Another tiny bit</p>\
                </article></body></html>";
            let result = parse_article_html(html);
            assert!(result.is_none(), "expected None for tiny fragments");
        }
        // Strategy 1 also matches the ARIA [role="main"] container.
        #[test]
        fn extracts_from_role_main_attribute() {
            let body = lorem(250);
            let html = format!(
                "<html><body>\
                <div role=\"main\"><p>{body}</p></div>\
                </body></html>"
            );
            let result = parse_article_html(&html);
            assert!(result.is_some(), "expected Some for role=main");
        }
    }
}

View File

@@ -2,8 +2,9 @@
// the #[server] macro generates client stubs for the web target)
pub mod auth_check;
pub mod chat;
pub mod langgraph;
pub mod litellm;
pub mod llm;
pub mod ollama;
pub mod searxng;
// Server-only modules (Axum handlers, state, configs, DB, etc.)
@@ -12,8 +13,6 @@ mod auth;
#[cfg(feature = "server")]
mod auth_middleware;
#[cfg(feature = "server")]
mod chat_stream;
#[cfg(feature = "server")]
pub mod config;
#[cfg(feature = "server")]
pub mod database;
@@ -33,8 +32,6 @@ pub use auth::*;
#[cfg(feature = "server")]
pub use auth_middleware::*;
#[cfg(feature = "server")]
pub use chat_stream::*;
#[cfg(feature = "server")]
pub use error::*;
#[cfg(feature = "server")]
pub use server::*;

View File

@@ -1,92 +0,0 @@
use dioxus::prelude::*;
use serde::{Deserialize, Serialize};
/// Status of a local Ollama instance, including connectivity and loaded models.
///
/// # Fields
///
/// * `online` - Whether the Ollama API responded successfully
/// * `models` - List of model names currently available on the instance
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct OllamaStatus {
    /// True when `GET /api/tags` returned a 2xx response.
    pub online: bool,
    /// Model names reported by the instance; empty when offline or
    /// when the response body could not be parsed.
    pub models: Vec<String>,
}
/// Response from Ollama's `GET /api/tags` endpoint.
///
/// Only the `models` array is deserialized; any other response fields
/// are ignored.
#[cfg(feature = "server")]
#[derive(Deserialize)]
struct OllamaTagsResponse {
    /// Models currently available on the instance.
    models: Vec<OllamaModel>,
}
/// A single model entry from Ollama's tags API.
///
/// Only the model `name` is kept; the remaining per-model fields are
/// dropped during deserialization.
#[cfg(feature = "server")]
#[derive(Deserialize)]
struct OllamaModel {
    /// Model name as reported by Ollama.
    name: String,
}
/// Check the status of a local Ollama instance by querying its tags endpoint.
///
/// Issues `GET <ollama_url>/api/tags` to determine reachability and list
/// the models the instance currently serves.
///
/// # Arguments
///
/// * `ollama_url` - Base URL of the Ollama instance (e.g. "http://localhost:11434");
///   an empty string falls back to the server-configured URL
///
/// # Returns
///
/// An `OllamaStatus` with `online: true` plus model names when reachable,
/// or `online: false` with an empty model list on any network/HTTP failure
///
/// # Errors
///
/// Returns `ServerFnError` only for client-construction or context
/// extraction issues; network failures are folded into `online: false`
#[post("/api/ollama-status")]
pub async fn get_ollama_status(ollama_url: String) -> Result<OllamaStatus, ServerFnError> {
    let state: crate::infrastructure::ServerState =
        dioxus_fullstack::FullstackContext::extract().await?;
    // A caller-supplied URL wins; otherwise use the configured default.
    let base_url = if ollama_url.is_empty() {
        state.services.ollama_url.clone()
    } else {
        ollama_url
    };
    let endpoint = format!("{}/api/tags", base_url.trim_end_matches('/'));
    // Short timeout: this is a liveness probe, not a data fetch.
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(5))
        .build()
        .map_err(|e| ServerFnError::new(format!("HTTP client error: {e}")))?;
    // Any transport error or non-2xx status is reported as "offline".
    let response = match client.get(&endpoint).send().await {
        Ok(r) if r.status().is_success() => r,
        _ => {
            return Ok(OllamaStatus {
                online: false,
                models: Vec::new(),
            });
        }
    };
    // Reachable but unparseable body: still online, just no model list.
    let tags = match response.json::<OllamaTagsResponse>().await {
        Ok(parsed) => parsed,
        Err(_) => {
            return Ok(OllamaStatus {
                online: true,
                models: Vec::new(),
            });
        }
    };
    Ok(OllamaStatus {
        online: true,
        models: tags.models.into_iter().map(|m| m.name).collect(),
    })
}

View File

@@ -1,6 +1,6 @@
//! Unified LLM provider dispatch.
//!
//! Routes chat completion requests to Ollama, OpenAI, Anthropic, or
//! Routes chat completion requests to LiteLLM, OpenAI, Anthropic, or
//! HuggingFace based on the session's provider setting. All providers
//! except Anthropic use the OpenAI-compatible chat completions format.
@@ -20,11 +20,11 @@ pub struct ProviderMessage {
///
/// # Arguments
///
/// * `state` - Server state (for default Ollama URL/model)
/// * `provider` - Provider name (`"ollama"`, `"openai"`, `"anthropic"`, `"huggingface"`)
/// * `state` - Server state (for default LiteLLM URL/model)
/// * `provider` - Provider name (`"litellm"`, `"openai"`, `"anthropic"`, `"huggingface"`)
/// * `model` - Model ID
/// * `messages` - Conversation history
/// * `api_key` - API key (required for non-Ollama providers)
/// * `api_key` - API key (required for non-LiteLLM providers; LiteLLM uses server config)
/// * `stream` - Whether to request streaming
///
/// # Returns
@@ -123,11 +123,11 @@ pub async fn send_chat_request(
.send()
.await
}
// Default: Ollama (OpenAI-compatible endpoint)
// Default: LiteLLM proxy (OpenAI-compatible endpoint)
_ => {
let base_url = &state.services.ollama_url;
let base_url = &state.services.litellm_url;
let resolved_model = if model.is_empty() {
&state.services.ollama_model
&state.services.litellm_model
} else {
model
};
@@ -137,12 +137,42 @@ pub async fn send_chat_request(
"messages": messages,
"stream": stream,
});
client
let litellm_key = &state.services.litellm_api_key;
let mut request = client
.post(&url)
.header("content-type", "application/json")
.json(&body)
.send()
.await
.json(&body);
if !litellm_key.is_empty() {
request = request.header("Authorization", format!("Bearer {litellm_key}"));
}
request.send().await
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    /// Encoding a `ProviderMessage` to JSON and decoding it back must
    /// preserve both fields exactly.
    #[test]
    fn provider_message_serde_round_trip() {
        let original = ProviderMessage {
            role: "assistant".into(),
            content: "Hello, world!".into(),
        };
        let encoded = serde_json::to_string(&original).expect("serialize ProviderMessage");
        let decoded: ProviderMessage =
            serde_json::from_str(&encoded).expect("deserialize ProviderMessage");
        assert_eq!(original.role, decoded.role);
        assert_eq!(original.content, decoded.content);
    }

    /// A hand-written JSON object maps onto the struct field-for-field.
    #[test]
    fn provider_message_deserialize_from_json_literal() {
        let raw = r#"{"role":"user","content":"What is Rust?"}"#;
        let parsed: ProviderMessage =
            serde_json::from_str(raw).expect("deserialize from literal");
        assert_eq!(parsed.role, "user");
        assert_eq!(parsed.content, "What is Rust?");
    }
}

View File

@@ -5,13 +5,13 @@ use dioxus::prelude::*;
// The #[server] macro generates a client stub for the web build that
// sends a network request instead of executing this function body.
#[cfg(feature = "server")]
mod inner {
pub(crate) mod inner {
use serde::Deserialize;
use std::collections::HashSet;
/// Individual result from the SearXNG search API.
#[derive(Debug, Deserialize)]
pub(super) struct SearxngResult {
pub(crate) struct SearxngResult {
pub title: String,
pub url: String,
pub content: Option<String>,
@@ -25,7 +25,7 @@ mod inner {
/// Top-level response from the SearXNG search API.
#[derive(Debug, Deserialize)]
pub(super) struct SearxngResponse {
pub(crate) struct SearxngResponse {
pub results: Vec<SearxngResult>,
}
@@ -40,7 +40,7 @@ mod inner {
/// # Returns
///
/// The domain host or a fallback "Web" string
pub(super) fn extract_source(url_str: &str) -> String {
pub(crate) fn extract_source(url_str: &str) -> String {
url::Url::parse(url_str)
.ok()
.and_then(|u| u.host_str().map(String::from))
@@ -64,7 +64,7 @@ mod inner {
/// # Returns
///
/// Filtered, deduplicated, and ranked results
pub(super) fn rank_and_deduplicate(
pub(crate) fn rank_and_deduplicate(
mut results: Vec<SearxngResult>,
max_results: usize,
) -> Vec<SearxngResult> {
@@ -285,3 +285,166 @@ pub async fn get_trending_topics() -> Result<Vec<String>, ServerFnError> {
Ok(topics)
}
#[cfg(all(test, feature = "server"))]
mod tests {
    //! Tests for the SearXNG pure helpers: `extract_source` (URL -> display
    //! domain) and `rank_and_deduplicate` (filter, dedupe by domain, sort).
    #![allow(clippy::unwrap_used, clippy::expect_used)]
    use super::inner::*;
    use pretty_assertions::assert_eq;
    // -----------------------------------------------------------------------
    // extract_source()
    // -----------------------------------------------------------------------
    // A leading "www." is removed from the host.
    #[test]
    fn extract_source_strips_www() {
        assert_eq!(
            extract_source("https://www.example.com/page"),
            "example.com"
        );
    }
    // Path components are dropped; only the host is returned.
    #[test]
    fn extract_source_returns_domain() {
        assert_eq!(
            extract_source("https://techcrunch.com/article"),
            "techcrunch.com"
        );
    }
    // Unparseable input falls back to the literal "Web".
    #[test]
    fn extract_source_invalid_url_returns_web() {
        assert_eq!(extract_source("not-a-url"), "Web");
    }
    #[test]
    fn extract_source_no_scheme_returns_web() {
        // url::Url::parse requires a scheme; bare domain fails
        assert_eq!(extract_source("example.com/path"), "Web");
    }
    // -----------------------------------------------------------------------
    // rank_and_deduplicate()
    // -----------------------------------------------------------------------
    // Fixture builder: empty `content` becomes None so the content-presence
    // filter can be exercised.
    fn make_result(url: &str, content: &str, score: f64) -> SearxngResult {
        SearxngResult {
            title: "Title".into(),
            url: url.into(),
            content: if content.is_empty() {
                None
            } else {
                Some(content.into())
            },
            published_date: None,
            thumbnail: None,
            score,
        }
    }
    // Results with no snippet text are dropped entirely.
    #[test]
    fn rank_filters_empty_content() {
        let results = vec![
            make_result("https://a.com", "", 10.0),
            make_result(
                "https://b.com",
                "This is meaningful content that passes the length filter",
                5.0,
            ),
        ];
        let ranked = rank_and_deduplicate(results, 10);
        assert_eq!(ranked.len(), 1);
        assert_eq!(ranked[0].url, "https://b.com");
    }
    // Snippets under the minimum length (20 chars) are dropped.
    #[test]
    fn rank_filters_short_content() {
        let results = vec![
            make_result("https://a.com", "short", 10.0),
            make_result(
                "https://b.com",
                "This content is long enough to pass the 20-char filter threshold",
                5.0,
            ),
        ];
        let ranked = rank_and_deduplicate(results, 10);
        assert_eq!(ranked.len(), 1);
    }
    // Only one result per domain survives: the highest-scored one.
    #[test]
    fn rank_deduplicates_by_domain_keeps_highest() {
        let results = vec![
            make_result(
                "https://example.com/page1",
                "First result with enough content here for the filter",
                3.0,
            ),
            make_result(
                "https://example.com/page2",
                "Second result with enough content here for the filter",
                8.0,
            ),
        ];
        let ranked = rank_and_deduplicate(results, 10);
        assert_eq!(ranked.len(), 1);
        // Should keep the highest-scored one (page2 with score 8.0)
        assert_eq!(ranked[0].url, "https://example.com/page2");
    }
    // Output ordering is by score, highest first.
    #[test]
    fn rank_sorts_by_score_descending() {
        let results = vec![
            make_result(
                "https://a.com/p",
                "Content A that is long enough to pass the filter check",
                1.0,
            ),
            make_result(
                "https://b.com/p",
                "Content B that is long enough to pass the filter check",
                5.0,
            ),
            make_result(
                "https://c.com/p",
                "Content C that is long enough to pass the filter check",
                3.0,
            ),
        ];
        let ranked = rank_and_deduplicate(results, 10);
        assert_eq!(ranked.len(), 3);
        assert!(ranked[0].score >= ranked[1].score);
        assert!(ranked[1].score >= ranked[2].score);
    }
    // 20 distinct-domain results, max_results = 5 -> exactly 5 returned.
    #[test]
    fn rank_truncates_to_max_results() {
        let results: Vec<_> = (0..20)
            .map(|i| {
                make_result(
                    &format!("https://site{i}.com/page"),
                    &format!("Content for site {i} that is long enough to pass the filter"),
                    i as f64,
                )
            })
            .collect();
        let ranked = rank_and_deduplicate(results, 5);
        assert_eq!(ranked.len(), 5);
    }
    // Degenerate input: nothing in, nothing out.
    #[test]
    fn rank_empty_input_returns_empty() {
        let ranked = rank_and_deduplicate(vec![], 10);
        assert!(ranked.is_empty());
    }
    // Every candidate filtered out (empty / too-short content) -> empty output.
    #[test]
    fn rank_all_filtered_returns_empty() {
        let results = vec![
            make_result("https://a.com", "", 10.0),
            make_result("https://b.com", "too short", 5.0),
        ];
        let ranked = rank_and_deduplicate(results, 10);
        assert!(ranked.is_empty());
    }
}

View File

@@ -6,7 +6,7 @@ use time::Duration;
use tower_sessions::{cookie::Key, MemoryStore, SessionManagerLayer};
use crate::infrastructure::{
auth_callback, auth_login, chat_stream_handler,
auth_callback, auth_login,
config::{KeycloakConfig, LlmProvidersConfig, ServiceUrls, SmtpConfig, StripeConfig},
database::Database,
logout, require_auth,
@@ -82,7 +82,6 @@ pub fn server_start(app: fn() -> Element) -> Result<(), super::Error> {
.route("/auth", get(auth_login))
.route("/auth/callback", get(auth_callback))
.route("/logout", get(logout))
.route("/api/chat/stream", get(chat_stream_handler))
.serve_dioxus_application(ServeConfig::new(), app)
.layer(Extension(PendingOAuthStore::default()))
.layer(Extension(server_state))

View File

@@ -45,7 +45,7 @@ pub struct ServerStateInner {
pub keycloak: &'static KeycloakConfig,
/// Outbound email settings.
pub smtp: &'static SmtpConfig,
/// URLs for Ollama, SearXNG, LangChain, S3, etc.
/// URLs for LiteLLM, SearXNG, LangChain, S3, etc.
pub services: &'static ServiceUrls,
/// Stripe billing keys.
pub stripe: &'static StripeConfig,

View File

@@ -44,3 +44,91 @@ pub struct User {
/// Avatar / profile picture URL.
pub avatar_url: String,
}
#[cfg(test)]
mod tests {
    //! Tests for `User`, `UserStateInner`, and `UserState`: defaults,
    //! serde round trips, and `From`/`Deref` behavior.
    use super::*;
    use pretty_assertions::assert_eq;
    // Default construction yields all-empty string fields.
    #[test]
    fn user_state_inner_default_has_empty_strings() {
        let inner = UserStateInner::default();
        assert_eq!(inner.sub, "");
        assert_eq!(inner.access_token, "");
        assert_eq!(inner.refresh_token, "");
        assert_eq!(inner.user.email, "");
        assert_eq!(inner.user.name, "");
        assert_eq!(inner.user.avatar_url, "");
    }
    #[test]
    fn user_default_has_empty_strings() {
        let user = User::default();
        assert_eq!(user.email, "");
        assert_eq!(user.name, "");
        assert_eq!(user.avatar_url, "");
    }
    // JSON encode/decode must preserve every field, including the nested User.
    #[test]
    fn user_state_inner_serde_round_trip() {
        let inner = UserStateInner {
            sub: "user-123".into(),
            access_token: "tok-abc".into(),
            refresh_token: "ref-xyz".into(),
            user: User {
                email: "a@b.com".into(),
                name: "Alice".into(),
                avatar_url: "https://img.example.com/a.png".into(),
            },
        };
        let json = serde_json::to_string(&inner).expect("serialize UserStateInner");
        let back: UserStateInner = serde_json::from_str(&json).expect("deserialize UserStateInner");
        assert_eq!(inner.sub, back.sub);
        assert_eq!(inner.access_token, back.access_token);
        assert_eq!(inner.refresh_token, back.refresh_token);
        assert_eq!(inner.user.email, back.user.email);
        assert_eq!(inner.user.name, back.user.name);
        assert_eq!(inner.user.avatar_url, back.user.avatar_url);
    }
    // UserState wraps UserStateInner via From, and field access goes
    // through Deref to the inner value.
    #[test]
    fn user_state_from_inner_and_deref() {
        let inner = UserStateInner {
            sub: "sub-1".into(),
            access_token: "at".into(),
            refresh_token: "rt".into(),
            user: User {
                email: "e@e.com".into(),
                name: "Eve".into(),
                avatar_url: "".into(),
            },
        };
        let state = UserState::from(inner);
        // Deref should give access to inner fields
        assert_eq!(state.sub, "sub-1");
        assert_eq!(state.user.name, "Eve");
    }
    #[test]
    fn user_serde_round_trip() {
        let user = User {
            email: "bob@test.com".into(),
            name: "Bob".into(),
            avatar_url: "https://avatars.io/bob".into(),
        };
        let json = serde_json::to_string(&user).expect("serialize User");
        let back: User = serde_json::from_str(&json).expect("deserialize User");
        assert_eq!(user.email, back.email);
        assert_eq!(user.name, back.name);
        assert_eq!(user.avatar_url, back.avatar_url);
    }
    #[test]
    fn user_state_clone_is_cheap() {
        let inner = UserStateInner::default();
        let state = UserState::from(inner);
        let cloned = state.clone();
        // Both point to the same Arc allocation
        // NOTE(review): this assertion only checks field equality, which
        // would also pass for a deep copy; an Arc::ptr_eq on the inner
        // allocation would actually prove the clone is cheap — TODO confirm
        // UserState internals expose that.
        assert_eq!(state.sub, cloned.sub);
    }
}

View File

@@ -60,8 +60,8 @@ pub struct Attachment {
/// * `user_sub` - Keycloak subject ID (session owner)
/// * `title` - Display title (auto-generated or user-renamed)
/// * `namespace` - Grouping for sidebar sections
/// * `provider` - LLM provider used (e.g. "ollama", "openai")
/// * `model` - Model ID used (e.g. "llama3.1:8b")
/// * `provider` - LLM provider used (e.g. "litellm", "openai")
/// * `model` - Model ID used (e.g. "qwen3-32b")
/// * `created_at` - ISO 8601 creation timestamp
/// * `updated_at` - ISO 8601 last-activity timestamp
/// * `article_url` - Source article URL (for News namespace sessions)
@@ -105,3 +105,163 @@ pub struct ChatMessage {
pub attachments: Vec<Attachment>,
pub timestamp: String,
}
#[cfg(test)]
mod tests {
    //! Tests for the chat domain models: enum defaults, serde round trips,
    //! MongoDB `_id` aliasing, and skip-serialization behavior.
    use super::*;
    use pretty_assertions::assert_eq;
    // New sessions default to the General namespace.
    #[test]
    fn chat_namespace_default_is_general() {
        assert_eq!(ChatNamespace::default(), ChatNamespace::General);
    }
    // Every role variant survives a JSON encode/decode cycle.
    // NOTE(review): `unwrap_or_else(|_| panic!(...))` discards the serde
    // error; `|e| panic!("serialize {role:?}: {e}")` would make failures
    // easier to diagnose.
    #[test]
    fn chat_role_serde_round_trip() {
        for role in [ChatRole::User, ChatRole::Assistant, ChatRole::System] {
            let json =
                serde_json::to_string(&role).unwrap_or_else(|_| panic!("serialize {:?}", role));
            let back: ChatRole =
                serde_json::from_str(&json).unwrap_or_else(|_| panic!("deserialize {:?}", role));
            assert_eq!(role, back);
        }
    }
    // Both namespace variants round-trip through JSON.
    #[test]
    fn chat_namespace_serde_round_trip() {
        for ns in [ChatNamespace::General, ChatNamespace::News] {
            let json = serde_json::to_string(&ns).unwrap_or_else(|_| panic!("serialize {:?}", ns));
            let back: ChatNamespace =
                serde_json::from_str(&json).unwrap_or_else(|_| panic!("deserialize {:?}", ns));
            assert_eq!(ns, back);
        }
    }
    // Every attachment kind variant round-trips through JSON.
    #[test]
    fn attachment_kind_serde_round_trip() {
        for kind in [
            AttachmentKind::Image,
            AttachmentKind::Document,
            AttachmentKind::Code,
        ] {
            let json =
                serde_json::to_string(&kind).unwrap_or_else(|_| panic!("serialize {:?}", kind));
            let back: AttachmentKind =
                serde_json::from_str(&json).unwrap_or_else(|_| panic!("deserialize {:?}", kind));
            assert_eq!(kind, back);
        }
    }
    #[test]
    fn attachment_serde_round_trip() {
        let att = Attachment {
            name: "photo.png".into(),
            kind: AttachmentKind::Image,
            size_bytes: 2048,
        };
        let json = serde_json::to_string(&att).expect("serialize Attachment");
        let back: Attachment = serde_json::from_str(&json).expect("deserialize Attachment");
        assert_eq!(att, back);
    }
    // A fully-populated session round-trips unchanged.
    #[test]
    fn chat_session_serde_round_trip() {
        let session = ChatSession {
            id: "abc123".into(),
            user_sub: "user-1".into(),
            title: "Test Chat".into(),
            namespace: ChatNamespace::General,
            provider: "litellm".into(),
            model: "qwen3-32b".into(),
            created_at: "2025-01-01T00:00:00Z".into(),
            updated_at: "2025-01-01T01:00:00Z".into(),
            article_url: None,
        };
        let json = serde_json::to_string(&session).expect("serialize ChatSession");
        let back: ChatSession = serde_json::from_str(&json).expect("deserialize ChatSession");
        assert_eq!(session, back);
    }
    // Documents stored by MongoDB carry `_id`; the serde alias must map
    // it onto `id`. Note `namespace` and `article_url` are omitted here,
    // exercising their defaults as well.
    #[test]
    fn chat_session_id_alias_deserialization() {
        // MongoDB returns `_id` instead of `id`
        let json = r#"{
            "_id": "mongo-id",
            "user_sub": "u1",
            "title": "t",
            "provider": "litellm",
            "model": "m",
            "created_at": "2025-01-01",
            "updated_at": "2025-01-01"
        }"#;
        let session: ChatSession = serde_json::from_str(json).expect("deserialize with _id");
        assert_eq!(session.id, "mongo-id");
    }
    // An empty id must be omitted from the JSON so MongoDB can assign one.
    #[test]
    fn chat_session_empty_id_skips_serialization() {
        let session = ChatSession {
            id: String::new(),
            user_sub: "u1".into(),
            title: "t".into(),
            namespace: ChatNamespace::default(),
            provider: "litellm".into(),
            model: "m".into(),
            created_at: "2025-01-01".into(),
            updated_at: "2025-01-01".into(),
            article_url: None,
        };
        let json = serde_json::to_string(&session).expect("serialize");
        // `id` field should be absent when empty due to skip_serializing_if
        assert!(!json.contains("\"id\""));
    }
    // `article_url: None` must likewise be omitted from the output JSON.
    #[test]
    fn chat_session_none_article_url_skips_serialization() {
        let session = ChatSession {
            id: "s1".into(),
            user_sub: "u1".into(),
            title: "t".into(),
            namespace: ChatNamespace::default(),
            provider: "litellm".into(),
            model: "m".into(),
            created_at: "2025-01-01".into(),
            updated_at: "2025-01-01".into(),
            article_url: None,
        };
        let json = serde_json::to_string(&session).expect("serialize");
        assert!(!json.contains("article_url"));
    }
    // A message with attachments round-trips unchanged.
    #[test]
    fn chat_message_serde_round_trip() {
        let msg = ChatMessage {
            id: "msg-1".into(),
            session_id: "s1".into(),
            role: ChatRole::User,
            content: "Hello AI".into(),
            attachments: vec![Attachment {
                name: "doc.pdf".into(),
                kind: AttachmentKind::Document,
                size_bytes: 4096,
            }],
            timestamp: "2025-01-01T00:00:00Z".into(),
        };
        let json = serde_json::to_string(&msg).expect("serialize ChatMessage");
        let back: ChatMessage = serde_json::from_str(&json).expect("deserialize ChatMessage");
        assert_eq!(msg, back);
    }
    // ChatMessage also aliases MongoDB's `_id` onto `id`; `attachments`
    // is omitted, exercising its default.
    #[test]
    fn chat_message_id_alias_deserialization() {
        let json = r#"{
            "_id": "mongo-msg-id",
            "session_id": "s1",
            "role": "User",
            "content": "hi",
            "timestamp": "2025-01-01"
        }"#;
        let msg: ChatMessage = serde_json::from_str(json).expect("deserialize with _id");
        assert_eq!(msg.id, "mongo-msg-id");
    }
}

View File

@@ -45,3 +45,63 @@ pub struct AnalyticsMetric {
pub value: String,
pub change_pct: f64,
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    /// An `AgentEntry` survives a JSON encode/decode cycle unchanged.
    #[test]
    fn agent_entry_serde_round_trip() {
        let original = AgentEntry {
            id: "a1".into(),
            name: "RAG Agent".into(),
            description: "Retrieval-augmented generation".into(),
            status: "running".into(),
        };
        let encoded = serde_json::to_string(&original).expect("serialize AgentEntry");
        let decoded: AgentEntry = serde_json::from_str(&encoded).expect("deserialize AgentEntry");
        assert_eq!(original, decoded);
    }

    /// A `FlowEntry` with a populated `last_run` round-trips unchanged.
    #[test]
    fn flow_entry_serde_round_trip() {
        let original = FlowEntry {
            id: "f1".into(),
            name: "Data Pipeline".into(),
            node_count: 5,
            last_run: Some("2025-06-01T12:00:00Z".into()),
        };
        let encoded = serde_json::to_string(&original).expect("serialize FlowEntry");
        let decoded: FlowEntry = serde_json::from_str(&encoded).expect("deserialize FlowEntry");
        assert_eq!(original, decoded);
    }

    /// `last_run: None` must survive the round trip as `None`.
    #[test]
    fn flow_entry_with_none_last_run() {
        let original = FlowEntry {
            id: "f2".into(),
            name: "New Flow".into(),
            node_count: 0,
            last_run: None,
        };
        let encoded = serde_json::to_string(&original).expect("serialize");
        let decoded: FlowEntry = serde_json::from_str(&encoded).expect("deserialize");
        assert_eq!(original, decoded);
        assert_eq!(decoded.last_run, None);
    }

    /// Negative percentage changes are preserved exactly through serde.
    #[test]
    fn analytics_metric_negative_change_pct() {
        let original = AnalyticsMetric {
            label: "Latency".into(),
            value: "120ms".into(),
            change_pct: -15.5,
        };
        let encoded = serde_json::to_string(&original).expect("serialize AnalyticsMetric");
        let decoded: AnalyticsMetric =
            serde_json::from_str(&encoded).expect("deserialize AnalyticsMetric");
        assert_eq!(original, decoded);
        assert!(decoded.change_pct < 0.0);
    }
}

View File

@@ -1,60 +0,0 @@
use serde::{Deserialize, Serialize};
/// The type of file stored in the knowledge base.
///
/// This is a fieldless enum, so `Copy`, `Eq`, and `Hash` are derived in
/// addition to the original traits: values can be passed by value without
/// cloning and used as map keys, and a fieldless `PartialEq` enum should
/// also be `Eq` (see clippy's `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum FileKind {
    /// PDF document
    Pdf,
    /// Plain text or markdown file
    Text,
    /// Spreadsheet (csv, xlsx)
    Spreadsheet,
    /// Source code file
    Code,
    /// Image file
    Image,
}
impl FileKind {
    /// Single source of truth mapping each kind to its
    /// `(display label, icon identifier)` pair.
    fn meta(&self) -> (&'static str, &'static str) {
        match self {
            Self::Pdf => ("PDF", "file-pdf"),
            Self::Text => ("Text", "file-text"),
            Self::Spreadsheet => ("Spreadsheet", "file-spreadsheet"),
            Self::Code => ("Code", "file-code"),
            Self::Image => ("Image", "file-image"),
        }
    }

    /// Returns the display label for a file kind.
    pub fn label(&self) -> &'static str {
        self.meta().0
    }

    /// Returns an icon identifier for rendering.
    pub fn icon(&self) -> &'static str {
        self.meta().1
    }
}
/// A file stored in the knowledge base for RAG retrieval.
///
/// # Fields
///
/// * `id` - Unique file identifier
/// * `name` - Original filename
/// * `kind` - Type classification of the file
/// * `size_bytes` - File size in bytes
/// * `uploaded_at` - ISO 8601 upload timestamp
/// * `chunk_count` - Number of vector chunks created from this file
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct KnowledgeFile {
    /// Unique file identifier.
    pub id: String,
    /// Original filename as uploaded by the user.
    pub name: String,
    /// Type classification of the file.
    pub kind: FileKind,
    /// File size in bytes.
    pub size_bytes: u64,
    /// ISO 8601 upload timestamp (stored as a string, not parsed).
    pub uploaded_at: String,
    /// Number of vector chunks created from this file during indexing.
    pub chunk_count: u32,
}

View File

@@ -1,17 +1,15 @@
mod chat;
mod developer;
mod knowledge;
mod news;
mod organization;
mod provider;
mod tool;
mod services;
mod user;
pub use chat::*;
pub use developer::*;
pub use knowledge::*;
pub use news::*;
pub use organization::*;
pub use provider::*;
pub use tool::*;
pub use services::*;
pub use user::*;

View File

@@ -23,3 +23,61 @@ pub struct NewsCard {
pub thumbnail_url: Option<String>,
pub published_at: String,
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    /// A fully-populated card must survive a JSON round trip.
    #[test]
    fn news_card_serde_round_trip() {
        let original = NewsCard {
            title: "AI Breakthrough".into(),
            source: "techcrunch.com".into(),
            summary: "New model released".into(),
            content: "Full article content here".into(),
            category: "AI".into(),
            url: "https://example.com/article".into(),
            thumbnail_url: Some("https://example.com/thumb.jpg".into()),
            published_at: "2025-06-01".into(),
        };
        let encoded = serde_json::to_string(&original).expect("serialize NewsCard");
        let decoded: NewsCard = serde_json::from_str(&encoded).expect("deserialize NewsCard");
        assert_eq!(original, decoded);
    }

    /// A missing thumbnail (`None`) is a valid state and must round-trip.
    #[test]
    fn news_card_thumbnail_none() {
        let original = NewsCard {
            title: "No Thumb".into(),
            source: "bbc.com".into(),
            summary: "Summary".into(),
            content: "Content".into(),
            category: "Tech".into(),
            url: "https://bbc.com/article".into(),
            thumbnail_url: None,
            published_at: "2025-06-01".into(),
        };
        let encoded = serde_json::to_string(&original).expect("serialize");
        let decoded: NewsCard = serde_json::from_str(&encoded).expect("deserialize");
        assert_eq!(original, decoded);
    }

    /// A present thumbnail URL appears in the JSON and survives decoding.
    #[test]
    fn news_card_thumbnail_some() {
        let original = NewsCard {
            title: "With Thumb".into(),
            source: "cnn.com".into(),
            summary: "Summary".into(),
            content: "Content".into(),
            category: "News".into(),
            url: "https://cnn.com/article".into(),
            thumbnail_url: Some("https://cnn.com/img.jpg".into()),
            published_at: "2025-06-01".into(),
        };
        let encoded = serde_json::to_string(&original).expect("serialize");
        assert!(encoded.contains("img.jpg"));
        let decoded: NewsCard = serde_json::from_str(&encoded).expect("deserialize");
        assert_eq!(original.thumbnail_url, decoded.thumbnail_url);
    }
}

View File

@@ -83,6 +83,42 @@ pub struct BillingUsage {
pub billing_cycle_end: String,
}
/// Aggregated token usage statistics from LiteLLM's spend tracking API.
///
/// # Fields
///
/// * `total_spend` - Total cost in USD across all models
/// * `total_prompt_tokens` - Sum of prompt (input) tokens
/// * `total_completion_tokens` - Sum of completion (output) tokens
/// * `total_tokens` - Sum of all tokens (prompt + completion)
/// * `model_breakdown` - Per-model usage breakdown
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct LitellmUsageStats {
    /// Total cost in USD across all models.
    pub total_spend: f64,
    /// Sum of prompt (input) tokens.
    pub total_prompt_tokens: u64,
    /// Sum of completion (output) tokens.
    pub total_completion_tokens: u64,
    /// Sum of all tokens (prompt + completion).
    // NOTE(review): not enforced to equal prompt + completion here —
    // presumably the LiteLLM API supplies it; verify at the call site.
    pub total_tokens: u64,
    /// Per-model usage breakdown.
    pub model_breakdown: Vec<ModelUsage>,
}
/// Token and spend usage for a single LLM model.
///
/// # Fields
///
/// * `model` - Model identifier (e.g. "gpt-4", "claude-3-opus")
/// * `spend` - Cost in USD for this model
/// * `prompt_tokens` - Prompt (input) tokens consumed
/// * `completion_tokens` - Completion (output) tokens generated
/// * `total_tokens` - Total tokens (prompt + completion)
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ModelUsage {
    /// Model identifier (e.g. "gpt-4", "claude-3-opus").
    pub model: String,
    /// Cost in USD for this model.
    pub spend: f64,
    /// Prompt (input) tokens consumed.
    pub prompt_tokens: u64,
    /// Completion (output) tokens generated.
    pub completion_tokens: u64,
    /// Total tokens (prompt + completion).
    pub total_tokens: u64,
}
/// Organisation-level settings stored in MongoDB.
///
/// These complement Keycloak's Organizations feature with
@@ -116,3 +152,200 @@ pub struct OrgBillingRecord {
/// Number of tokens consumed during this cycle.
pub tokens_used: u64,
}
// Unit tests for the organization models: MemberRole labels/ordering,
// serde round trips for members/plans/billing, and Default invariants
// for the LiteLLM usage types.
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    #[test]
    fn member_role_label_admin() {
        assert_eq!(MemberRole::Admin.label(), "Admin");
    }
    #[test]
    fn member_role_label_member() {
        assert_eq!(MemberRole::Member.label(), "Member");
    }
    #[test]
    fn member_role_label_viewer() {
        assert_eq!(MemberRole::Viewer.label(), "Viewer");
    }
    // The UI renders roles in this exact order, so pin it.
    #[test]
    fn member_role_all_returns_three_in_order() {
        let all = MemberRole::all();
        assert_eq!(all.len(), 3);
        assert_eq!(all[0], MemberRole::Admin);
        assert_eq!(all[1], MemberRole::Member);
        assert_eq!(all[2], MemberRole::Viewer);
    }
    #[test]
    fn member_role_serde_round_trip() {
        for role in MemberRole::all() {
            let json =
                serde_json::to_string(role).unwrap_or_else(|_| panic!("serialize {:?}", role));
            let back: MemberRole =
                serde_json::from_str(&json).unwrap_or_else(|_| panic!("deserialize {:?}", role));
            assert_eq!(*role, back);
        }
    }
    #[test]
    fn org_member_serde_round_trip() {
        let member = OrgMember {
            id: "m1".into(),
            name: "Alice".into(),
            email: "alice@example.com".into(),
            role: MemberRole::Admin,
            joined_at: "2025-01-01T00:00:00Z".into(),
        };
        let json = serde_json::to_string(&member).expect("serialize OrgMember");
        let back: OrgMember = serde_json::from_str(&json).expect("deserialize OrgMember");
        assert_eq!(member, back);
    }
    #[test]
    fn pricing_plan_with_max_seats() {
        let plan = PricingPlan {
            id: "team".into(),
            name: "Team".into(),
            price_eur: 49,
            features: vec!["SSO".into(), "Priority".into()],
            highlighted: true,
            max_seats: Some(25),
        };
        let json = serde_json::to_string(&plan).expect("serialize PricingPlan");
        let back: PricingPlan = serde_json::from_str(&json).expect("deserialize PricingPlan");
        assert_eq!(plan, back);
    }
    #[test]
    fn pricing_plan_without_max_seats() {
        let plan = PricingPlan {
            id: "enterprise".into(),
            name: "Enterprise".into(),
            price_eur: 199,
            features: vec!["Unlimited".into()],
            highlighted: false,
            max_seats: None,
        };
        let json = serde_json::to_string(&plan).expect("serialize PricingPlan");
        let back: PricingPlan = serde_json::from_str(&json).expect("deserialize PricingPlan");
        assert_eq!(plan, back);
        // `None` may serialize as `null` or be skipped entirely, depending on
        // the field's serde attributes — accept either encoding.
        assert!(json.contains("null") || !json.contains("max_seats"));
    }
    #[test]
    fn billing_usage_serde_round_trip() {
        let usage = BillingUsage {
            seats_used: 5,
            seats_total: 10,
            tokens_used: 1_000_000,
            tokens_limit: 5_000_000,
            billing_cycle_end: "2025-12-31".into(),
        };
        let json = serde_json::to_string(&usage).expect("serialize BillingUsage");
        let back: BillingUsage = serde_json::from_str(&json).expect("deserialize BillingUsage");
        assert_eq!(usage, back);
    }
    // Default values must be "empty"/zero so an unconfigured org is inert.
    #[test]
    fn org_settings_default() {
        let settings = OrgSettings::default();
        assert_eq!(settings.org_id, "");
        assert_eq!(settings.plan_id, "");
        assert!(settings.enabled_features.is_empty());
        assert_eq!(settings.stripe_customer_id, "");
    }
    #[test]
    fn org_billing_record_default() {
        let record = OrgBillingRecord::default();
        assert_eq!(record.org_id, "");
        assert_eq!(record.cycle_start, "");
        assert_eq!(record.cycle_end, "");
        assert_eq!(record.seats_used, 0);
        assert_eq!(record.tokens_used, 0);
    }
    #[test]
    fn litellm_usage_stats_default() {
        let stats = LitellmUsageStats::default();
        assert_eq!(stats.total_spend, 0.0);
        assert_eq!(stats.total_prompt_tokens, 0);
        assert_eq!(stats.total_completion_tokens, 0);
        assert_eq!(stats.total_tokens, 0);
        assert!(stats.model_breakdown.is_empty());
    }
    // Round trip with a non-trivial per-model breakdown (two models).
    #[test]
    fn litellm_usage_stats_serde_round_trip() {
        let stats = LitellmUsageStats {
            total_spend: 12.34,
            total_prompt_tokens: 50_000,
            total_completion_tokens: 25_000,
            total_tokens: 75_000,
            model_breakdown: vec![
                ModelUsage {
                    model: "gpt-4".into(),
                    spend: 10.0,
                    prompt_tokens: 40_000,
                    completion_tokens: 20_000,
                    total_tokens: 60_000,
                },
                ModelUsage {
                    model: "claude-3-opus".into(),
                    spend: 2.34,
                    prompt_tokens: 10_000,
                    completion_tokens: 5_000,
                    total_tokens: 15_000,
                },
            ],
        };
        let json = serde_json::to_string(&stats).expect("serialize LitellmUsageStats");
        let back: LitellmUsageStats =
            serde_json::from_str(&json).expect("deserialize LitellmUsageStats");
        assert_eq!(stats, back);
    }
    #[test]
    fn model_usage_default() {
        let usage = ModelUsage::default();
        assert_eq!(usage.model, "");
        assert_eq!(usage.spend, 0.0);
        assert_eq!(usage.prompt_tokens, 0);
        assert_eq!(usage.completion_tokens, 0);
        assert_eq!(usage.total_tokens, 0);
    }
    #[test]
    fn model_usage_serde_round_trip() {
        let usage = ModelUsage {
            model: "gpt-4-turbo".into(),
            spend: 5.67,
            prompt_tokens: 30_000,
            completion_tokens: 15_000,
            total_tokens: 45_000,
        };
        let json = serde_json::to_string(&usage).expect("serialize ModelUsage");
        let back: ModelUsage = serde_json::from_str(&json).expect("deserialize ModelUsage");
        assert_eq!(usage, back);
    }
    // An empty breakdown (no traffic yet) must also round-trip cleanly.
    #[test]
    fn litellm_usage_stats_empty_breakdown_round_trip() {
        let stats = LitellmUsageStats {
            total_spend: 0.0,
            total_prompt_tokens: 0,
            total_completion_tokens: 0,
            total_tokens: 0,
            model_breakdown: Vec::new(),
        };
        let json = serde_json::to_string(&stats).expect("serialize empty stats");
        let back: LitellmUsageStats = serde_json::from_str(&json).expect("deserialize empty stats");
        assert_eq!(stats, back);
    }
}

View File

@@ -3,8 +3,8 @@ use serde::{Deserialize, Serialize};
/// Supported LLM provider backends.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum LlmProvider {
/// Self-hosted models via Ollama
Ollama,
/// LiteLLM proxy for unified model access
LiteLlm,
/// Hugging Face Inference API
HuggingFace,
/// OpenAI-compatible endpoints
@@ -17,7 +17,7 @@ impl LlmProvider {
/// Returns the display name for a provider.
pub fn label(&self) -> &'static str {
match self {
Self::Ollama => "Ollama",
Self::LiteLlm => "LiteLLM",
Self::HuggingFace => "Hugging Face",
Self::OpenAi => "OpenAI",
Self::Anthropic => "Anthropic",
@@ -29,7 +29,7 @@ impl LlmProvider {
///
/// # Fields
///
/// * `id` - Unique model identifier (e.g. "llama3.1:8b")
/// * `id` - Unique model identifier (e.g. "qwen3-32b")
/// * `name` - Human-readable display name
/// * `provider` - Which provider hosts this model
/// * `context_window` - Maximum context length in tokens
@@ -72,3 +72,84 @@ pub struct ProviderConfig {
pub selected_embedding: String,
pub api_key_set: bool,
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    // Display labels are user-facing; pin each one exactly.
    #[test]
    fn llm_provider_label_litellm() {
        assert_eq!("LiteLLM", LlmProvider::LiteLlm.label());
    }
    #[test]
    fn llm_provider_label_hugging_face() {
        assert_eq!("Hugging Face", LlmProvider::HuggingFace.label());
    }
    #[test]
    fn llm_provider_label_openai() {
        assert_eq!("OpenAI", LlmProvider::OpenAi.label());
    }
    #[test]
    fn llm_provider_label_anthropic() {
        assert_eq!("Anthropic", LlmProvider::Anthropic.label());
    }

    /// Every provider variant must survive a JSON round trip.
    #[test]
    fn llm_provider_serde_round_trip() {
        let cases = [
            LlmProvider::LiteLlm,
            LlmProvider::HuggingFace,
            LlmProvider::OpenAi,
            LlmProvider::Anthropic,
        ];
        for case in cases {
            let encoded =
                serde_json::to_string(&case).unwrap_or_else(|_| panic!("serialize {:?}", case));
            let decoded: LlmProvider =
                serde_json::from_str(&encoded).unwrap_or_else(|_| panic!("deserialize {:?}", case));
            assert_eq!(case, decoded);
        }
    }

    #[test]
    fn model_entry_serde_round_trip() {
        let original = ModelEntry {
            id: "qwen3-32b".into(),
            name: "Qwen3 32B".into(),
            provider: LlmProvider::LiteLlm,
            context_window: 32,
        };
        let encoded = serde_json::to_string(&original).expect("serialize ModelEntry");
        let decoded: ModelEntry = serde_json::from_str(&encoded).expect("deserialize ModelEntry");
        assert_eq!(original, decoded);
    }

    #[test]
    fn embedding_entry_serde_round_trip() {
        let original = EmbeddingEntry {
            id: "nomic-embed".into(),
            name: "Nomic Embed".into(),
            provider: LlmProvider::HuggingFace,
            dimensions: 768,
        };
        let encoded = serde_json::to_string(&original).expect("serialize EmbeddingEntry");
        let decoded: EmbeddingEntry =
            serde_json::from_str(&encoded).expect("deserialize EmbeddingEntry");
        assert_eq!(original, decoded);
    }

    #[test]
    fn provider_config_serde_round_trip() {
        let original = ProviderConfig {
            provider: LlmProvider::Anthropic,
            selected_model: "claude-3".into(),
            selected_embedding: "embed-v1".into(),
            api_key_set: true,
        };
        let encoded = serde_json::to_string(&original).expect("serialize ProviderConfig");
        let decoded: ProviderConfig =
            serde_json::from_str(&encoded).expect("deserialize ProviderConfig");
        assert_eq!(original, decoded);
    }
}

43
src/models/services.rs Normal file
View File

@@ -0,0 +1,43 @@
use serde::{Deserialize, Serialize};
/// Frontend-facing URLs for developer tool services.
///
/// Provided as a context signal in `AppShell` so that developer pages
/// can read the configured URLs without threading props through layouts.
/// An empty string indicates the service is not configured.
///
/// `Default` therefore yields all-empty URLs, i.e. "nothing configured".
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct ServiceUrlsContext {
    /// LangGraph agent builder URL (empty if not configured)
    pub langgraph_url: String,
    /// LangFlow visual workflow builder URL (empty if not configured)
    pub langflow_url: String,
    /// Langfuse observability URL (empty if not configured)
    pub langfuse_url: String,
}
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;

    /// A default context means "no services configured": every URL empty.
    #[test]
    fn default_urls_are_empty() {
        let defaults = ServiceUrlsContext::default();
        assert_eq!(defaults.langgraph_url, "");
        assert_eq!(defaults.langflow_url, "");
        assert_eq!(defaults.langfuse_url, "");
    }

    /// A fully configured context must survive a JSON round trip.
    #[test]
    fn serde_round_trip() {
        let original = ServiceUrlsContext {
            langgraph_url: "http://localhost:8123".into(),
            langflow_url: "http://localhost:7860".into(),
            langfuse_url: "http://localhost:3000".into(),
        };
        let encoded = serde_json::to_string(&original).expect("serialize ServiceUrlsContext");
        let decoded: ServiceUrlsContext =
            serde_json::from_str(&encoded).expect("deserialize ServiceUrlsContext");
        assert_eq!(original, decoded);
    }
}

View File

@@ -1,73 +0,0 @@
use serde::{Deserialize, Serialize};
/// Category grouping for MCP tools.
///
/// Fieldless enum: `Copy`, `Eq`, and `Hash` are derived in addition to the
/// original traits so values can be passed by value and used as map keys
/// (a fieldless `PartialEq` enum should also be `Eq` — clippy's
/// `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ToolCategory {
    /// Web search and browsing tools
    Search,
    /// File and document processing tools
    FileSystem,
    /// Computation and math tools
    Compute,
    /// Code execution and analysis tools
    Code,
    /// Communication and notification tools
    Communication,
}
impl ToolCategory {
    /// Returns the display label for a tool category.
    pub fn label(&self) -> &'static str {
        // Match on the dereferenced value; unit-variant patterns bind
        // nothing, so no move occurs. Arm order is irrelevant — the
        // patterns are disjoint.
        match *self {
            Self::Communication => "Communication",
            Self::Code => "Code",
            Self::Compute => "Compute",
            Self::FileSystem => "File System",
            Self::Search => "Search",
        }
    }
}
/// Status of an MCP tool instance.
///
/// Fieldless enum: `Copy`, `Eq`, and `Hash` are derived in addition to the
/// original traits (a fieldless `PartialEq` enum should also be `Eq` —
/// clippy's `derive_partial_eq_without_eq`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum ToolStatus {
    /// Tool is running and available
    Active,
    /// Tool is installed but not running
    Inactive,
    /// Tool encountered an error
    Error,
}
impl ToolStatus {
    /// Returns the CSS class suffix for status styling.
    pub fn css_class(&self) -> &'static str {
        // Dereferenced match; unit-variant patterns bind nothing, so this
        // does not move out of `self`. Arm order is irrelevant.
        match *self {
            Self::Error => "error",
            Self::Inactive => "inactive",
            Self::Active => "active",
        }
    }
}
/// An MCP (Model Context Protocol) tool entry.
///
/// # Fields
///
/// * `id` - Unique tool identifier
/// * `name` - Human-readable display name
/// * `description` - Brief description of what the tool does
/// * `category` - Classification category
/// * `status` - Current running status
/// * `enabled` - Whether the tool is toggled on by the user
/// * `icon` - Icon identifier for rendering
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct McpTool {
    /// Unique tool identifier.
    pub id: String,
    /// Human-readable display name.
    pub name: String,
    /// Brief description of what the tool does.
    pub description: String,
    /// Classification category.
    pub category: ToolCategory,
    /// Current running status (distinct from `enabled`: a tool can be
    /// toggled on by the user yet be `Inactive` or in `Error`).
    pub status: ToolStatus,
    /// Whether the tool is toggled on by the user.
    pub enabled: bool,
    /// Icon identifier for rendering.
    pub icon: String,
}

View File

@@ -22,17 +22,25 @@ pub struct AuthInfo {
pub name: String,
/// Avatar URL (from Keycloak picture claim)
pub avatar_url: String,
/// LibreChat instance URL for the sidebar chat link
pub librechat_url: String,
/// LangGraph agent builder URL (empty if not configured)
pub langgraph_url: String,
/// LangFlow visual workflow builder URL (empty if not configured)
pub langflow_url: String,
/// Langfuse observability URL (empty if not configured)
pub langfuse_url: String,
}
/// Per-user LLM provider configuration stored in MongoDB.
///
/// Controls which provider and model the user's chat sessions default
/// to, and stores API keys for non-Ollama providers.
/// to, and stores API keys for non-LiteLLM providers.
#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)]
pub struct UserProviderConfig {
/// Default provider name (e.g. "ollama", "openai")
/// Default provider name (e.g. "litellm", "openai")
pub default_provider: String,
/// Default model ID (e.g. "llama3.1:8b", "gpt-4o")
/// Default model ID (e.g. "qwen3-32b", "gpt-4o")
pub default_model: String,
/// OpenAI API key (empty if not configured)
#[serde(default, skip_serializing_if = "Option::is_none")]
@@ -43,8 +51,8 @@ pub struct UserProviderConfig {
/// HuggingFace API key
#[serde(default, skip_serializing_if = "Option::is_none")]
pub huggingface_api_key: Option<String>,
/// Custom Ollama URL override (empty = use server default)
pub ollama_url_override: String,
/// Custom LiteLLM URL override (empty = use server default)
pub litellm_url_override: String,
}
/// Per-user preferences stored in MongoDB.
@@ -58,13 +66,97 @@ pub struct UserPreferences {
pub org_id: String,
/// User-selected news/search topics
pub custom_topics: Vec<String>,
/// Per-user Ollama URL override (empty = use server default)
pub ollama_url_override: String,
/// Per-user Ollama model override (empty = use server default)
pub ollama_model_override: String,
/// Per-user LiteLLM URL override (empty = use server default)
pub litellm_url_override: String,
/// Per-user LiteLLM model override (empty = use server default)
pub litellm_model_override: String,
/// Recently searched queries for quick access
pub recent_searches: Vec<String>,
/// LLM provider configuration
#[serde(default)]
pub provider_config: UserProviderConfig,
}
// Unit tests for user models: Default invariants, AuthInfo round trip,
// and UserProviderConfig's skip_serializing_if behaviour for API keys.
#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    #[test]
    fn user_data_default() {
        let ud = UserData::default();
        assert_eq!(ud.name, "");
    }
    // A default AuthInfo must represent an anonymous, unconfigured session.
    #[test]
    fn auth_info_default_not_authenticated() {
        let info = AuthInfo::default();
        assert!(!info.authenticated);
        assert_eq!(info.sub, "");
        assert_eq!(info.email, "");
        assert_eq!(info.name, "");
        assert_eq!(info.avatar_url, "");
        assert_eq!(info.librechat_url, "");
        assert_eq!(info.langgraph_url, "");
        assert_eq!(info.langflow_url, "");
        assert_eq!(info.langfuse_url, "");
    }
    #[test]
    fn auth_info_serde_round_trip() {
        let info = AuthInfo {
            authenticated: true,
            sub: "sub-123".into(),
            email: "test@example.com".into(),
            name: "Test User".into(),
            avatar_url: "https://example.com/avatar.png".into(),
            librechat_url: "https://chat.example.com".into(),
            langgraph_url: "http://localhost:8123".into(),
            langflow_url: "http://localhost:7860".into(),
            langfuse_url: "http://localhost:3000".into(),
        };
        let json = serde_json::to_string(&info).expect("serialize AuthInfo");
        let back: AuthInfo = serde_json::from_str(&json).expect("deserialize AuthInfo");
        assert_eq!(info, back);
    }
    #[test]
    fn user_preferences_default() {
        let prefs = UserPreferences::default();
        assert_eq!(prefs.sub, "");
        assert_eq!(prefs.org_id, "");
        assert!(prefs.custom_topics.is_empty());
        assert!(prefs.recent_searches.is_empty());
    }
    // `None` API keys must be omitted from the JSON entirely
    // (skip_serializing_if = "Option::is_none"), not emitted as null —
    // keeps secrets-shaped fields out of stored documents.
    #[test]
    fn user_provider_config_optional_keys_skip_none() {
        let cfg = UserProviderConfig {
            default_provider: "litellm".into(),
            default_model: "qwen3-32b".into(),
            openai_api_key: None,
            anthropic_api_key: None,
            huggingface_api_key: None,
            litellm_url_override: String::new(),
        };
        let json = serde_json::to_string(&cfg).expect("serialize UserProviderConfig");
        assert!(!json.contains("openai_api_key"));
        assert!(!json.contains("anthropic_api_key"));
        assert!(!json.contains("huggingface_api_key"));
    }
    #[test]
    fn user_provider_config_serde_round_trip_with_keys() {
        let cfg = UserProviderConfig {
            default_provider: "openai".into(),
            default_model: "gpt-4o".into(),
            openai_api_key: Some("sk-test".into()),
            anthropic_api_key: Some("ak-test".into()),
            huggingface_api_key: None,
            litellm_url_override: "http://custom:4000".into(),
        };
        let json = serde_json::to_string(&cfg).expect("serialize");
        let back: UserProviderConfig = serde_json::from_str(&json).expect("deserialize");
        assert_eq!(cfg, back);
    }
}

View File

@@ -1,344 +0,0 @@
use crate::components::{
ChatActionBar, ChatInputBar, ChatMessageList, ChatModelSelector, ChatSidebar,
};
use crate::i18n::{t, Locale};
use crate::infrastructure::chat::{
chat_complete, create_chat_session, delete_chat_session, list_chat_messages,
list_chat_sessions, rename_chat_session, save_chat_message,
};
use crate::infrastructure::ollama::get_ollama_status;
use crate::models::{ChatMessage, ChatRole};
use dioxus::prelude::*;
/// LibreChat-inspired chat interface with MongoDB persistence and SSE streaming.
///
/// Layout: sidebar (session list) | main panel (model selector, messages, input).
/// Messages stream via `EventSource` connected to `/api/chat/stream`.
// NOTE(review): the completion path below is non-streaming (`chat_complete`);
// `streaming_content` is only ever cleared and `is_streaming` acts as a
// "thinking" indicator — confirm whether the SSE claim above is still accurate.
#[component]
pub fn ChatPage() -> Element {
    let locale = use_context::<Signal<Locale>>();
    let l = *locale.read();
    // ---- Signals ----
    let mut active_session_id: Signal<Option<String>> = use_signal(|| None);
    let mut messages: Signal<Vec<ChatMessage>> = use_signal(Vec::new);
    let mut input_text: Signal<String> = use_signal(String::new);
    let mut is_streaming: Signal<bool> = use_signal(|| false);
    let mut streaming_content: Signal<String> = use_signal(String::new);
    let mut selected_model: Signal<String> = use_signal(String::new);
    // ---- Resources ----
    // Load sessions list (re-fetches when dependency changes)
    let mut sessions_resource =
        use_resource(move || async move { list_chat_sessions().await.unwrap_or_default() });
    // Load available Ollama models
    // Empty URL argument means "use the server-side default".
    let models_resource = use_resource(move || async move {
        get_ollama_status(String::new())
            .await
            .map(|s| s.models)
            .unwrap_or_default()
    });
    let sessions = sessions_resource.read().clone().unwrap_or_default();
    let available_models = models_resource.read().clone().unwrap_or_default();
    // Set default model if not yet chosen
    if selected_model.read().is_empty() {
        if let Some(first) = available_models.first() {
            selected_model.set(first.clone());
        }
    }
    // Load messages when active session changes.
    // The signal read MUST happen inside the closure so use_resource
    // tracks it as a dependency and re-fetches on change.
    let _messages_loader = use_resource(move || {
        let session_id = active_session_id.read().clone();
        async move {
            if let Some(id) = session_id {
                match list_chat_messages(id).await {
                    Ok(msgs) => messages.set(msgs),
                    Err(e) => tracing::error!("failed to load messages: {e}"),
                }
            } else {
                // No active session: show an empty conversation.
                messages.set(Vec::new());
            }
        }
    });
    // ---- Callbacks ----
    // Create new session
    let on_new = move |_: ()| {
        let model = selected_model.read().clone();
        let new_chat_title = t(l, "chat.new_chat");
        spawn(async move {
            match create_chat_session(
                new_chat_title,
                "General".to_string(),
                "ollama".to_string(),
                model,
                String::new(),
            )
            .await
            {
                Ok(session) => {
                    active_session_id.set(Some(session.id));
                    messages.set(Vec::new());
                    sessions_resource.restart();
                }
                Err(e) => tracing::error!("failed to create session: {e}"),
            }
        });
    };
    // Select session
    let on_select = move |id: String| {
        active_session_id.set(Some(id));
    };
    // Rename session
    let on_rename = move |(id, new_title): (String, String)| {
        spawn(async move {
            if let Err(e) = rename_chat_session(id, new_title).await {
                tracing::error!("failed to rename: {e}");
            }
            // Refresh the list even on error so the UI reflects server state.
            sessions_resource.restart();
        });
    };
    // Delete session
    let on_delete = move |id: String| {
        // Capture before the async move: deleting the active session must
        // also clear the current conversation view.
        let is_active = active_session_id.read().as_deref() == Some(&id);
        spawn(async move {
            if let Err(e) = delete_chat_session(id).await {
                tracing::error!("failed to delete: {e}");
            }
            if is_active {
                active_session_id.set(None);
                messages.set(Vec::new());
            }
            sessions_resource.restart();
        });
    };
    // Model change
    let on_model_change = move |model: String| {
        selected_model.set(model);
    };
    // Send message
    let on_send = move |text: String| {
        let session_id = active_session_id.read().clone();
        let model = selected_model.read().clone();
        spawn(async move {
            // If no active session, create one first
            let sid = if let Some(id) = session_id {
                id
            } else {
                match create_chat_session(
                    // Use first ~50 chars of message as title
                    text.chars().take(50).collect::<String>(),
                    "General".to_string(),
                    "ollama".to_string(),
                    model,
                    String::new(),
                )
                .await
                {
                    Ok(session) => {
                        let id = session.id.clone();
                        active_session_id.set(Some(id.clone()));
                        sessions_resource.restart();
                        id
                    }
                    Err(e) => {
                        tracing::error!("failed to create session: {e}");
                        return;
                    }
                }
            };
            // Save user message
            match save_chat_message(sid.clone(), "user".to_string(), text).await {
                Ok(msg) => {
                    messages.write().push(msg);
                }
                Err(e) => {
                    tracing::error!("failed to save message: {e}");
                    return;
                }
            }
            // Show thinking indicator
            is_streaming.set(true);
            streaming_content.set(String::new());
            // Build message history as JSON for the server
            // (includes the user message just pushed above).
            let history: Vec<serde_json::Value> = messages
                .read()
                .iter()
                .map(|m| {
                    let role = match m.role {
                        ChatRole::User => "user",
                        ChatRole::Assistant => "assistant",
                        ChatRole::System => "system",
                    };
                    serde_json::json!({"role": role, "content": m.content})
                })
                .collect();
            let messages_json = serde_json::to_string(&history).unwrap_or_default();
            // Non-streaming completion
            match chat_complete(sid.clone(), messages_json).await {
                Ok(response) => {
                    // Save assistant message
                    match save_chat_message(sid, "assistant".to_string(), response).await {
                        Ok(msg) => {
                            messages.write().push(msg);
                        }
                        Err(e) => tracing::error!("failed to save assistant msg: {e}"),
                    }
                    sessions_resource.restart();
                }
                Err(e) => tracing::error!("chat completion failed: {e}"),
            }
            is_streaming.set(false);
        });
    };
    // ---- Action bar state ----
    let has_messages = !messages.read().is_empty();
    let has_assistant_message = messages
        .read()
        .iter()
        .any(|m| m.role == ChatRole::Assistant);
    let has_user_message = messages.read().iter().any(|m| m.role == ChatRole::User);
    // Copy last assistant response to clipboard
    let on_copy = move |_: ()| {
        #[cfg(feature = "web")]
        {
            let last_assistant = messages
                .read()
                .iter()
                .rev()
                .find(|m| m.role == ChatRole::Assistant)
                .map(|m| m.content.clone());
            if let Some(text) = last_assistant {
                if let Some(window) = web_sys::window() {
                    let clipboard = window.navigator().clipboard();
                    // Fire-and-forget: clipboard write returns a promise
                    // whose result is intentionally ignored.
                    let _ = clipboard.write_text(&text);
                }
            }
        }
    };
    // Copy full conversation as text to clipboard
    let on_share = move |_: ()| {
        #[cfg(feature = "web")]
        {
            let you_label = t(l, "chat.you");
            let assistant_label = t(l, "chat.assistant");
            let text: String = messages
                .read()
                .iter()
                .filter(|m| m.role != ChatRole::System)
                .map(|m| {
                    let label = match m.role {
                        ChatRole::User => &you_label,
                        ChatRole::Assistant => &assistant_label,
                        // Filtered out above, but required for exhaustive match
                        ChatRole::System => "System",
                    };
                    format!("{label}:\n{}\n", m.content)
                })
                .collect::<Vec<_>>()
                .join("\n");
            if let Some(window) = web_sys::window() {
                let clipboard = window.navigator().clipboard();
                let _ = clipboard.write_text(&text);
            }
        }
    };
    // Edit last user message: remove it and place text back in input
    let on_edit = move |_: ()| {
        let last_user = messages
            .read()
            .iter()
            .rev()
            .find(|m| m.role == ChatRole::User)
            .map(|m| m.content.clone());
        if let Some(text) = last_user {
            // Remove the last user message (and any assistant reply after it)
            let mut msgs = messages.read().clone();
            if let Some(pos) = msgs.iter().rposition(|m| m.role == ChatRole::User) {
                msgs.truncate(pos);
                messages.set(msgs);
            }
            input_text.set(text);
        }
    };
    // Scroll to bottom when messages or streaming content changes
    let msg_count = messages.read().len();
    let stream_len = streaming_content.read().len();
    use_effect(move || {
        // Track dependencies
        let _ = msg_count;
        let _ = stream_len;
        // Scroll the message list to bottom
        #[cfg(feature = "web")]
        {
            if let Some(window) = web_sys::window() {
                if let Some(doc) = window.document() {
                    if let Some(el) = doc.get_element_by_id("chat-message-list") {
                        let height = el.scroll_height();
                        el.set_scroll_top(height);
                    }
                }
            }
        }
    });
    rsx! {
        section { class: "chat-page",
            ChatSidebar {
                sessions: sessions,
                active_session_id: active_session_id.read().clone(),
                on_select: on_select,
                on_new: on_new,
                on_rename: on_rename,
                on_delete: on_delete,
            }
            div { class: "chat-main-panel",
                ChatModelSelector {
                    selected_model: selected_model.read().clone(),
                    available_models: available_models,
                    on_change: on_model_change,
                }
                ChatMessageList {
                    messages: messages.read().clone(),
                    streaming_content: streaming_content.read().clone(),
                    is_streaming: *is_streaming.read(),
                }
                ChatActionBar {
                    on_copy: on_copy,
                    on_share: on_share,
                    on_edit: on_edit,
                    has_messages: has_messages,
                    has_assistant_message: has_assistant_message,
                    has_user_message: has_user_message,
                }
                ChatInputBar {
                    input_text: input_text,
                    on_send: on_send,
                    is_streaming: *is_streaming.read(),
                }
            }
        }
    }
}

View File

@@ -25,8 +25,8 @@ const DEFAULT_TOPICS: &[&str] = &[
///
/// State is persisted across sessions using localStorage:
/// - `certifai_topics`: custom user-defined search topics
/// - `certifai_ollama_url`: Ollama instance URL for summarization
/// - `certifai_ollama_model`: Ollama model ID for summarization
/// - `certifai_litellm_url`: LiteLLM proxy URL for summarization
/// - `certifai_litellm_model`: LiteLLM model ID for summarization
#[component]
pub fn DashboardPage() -> Element {
let locale = use_context::<Signal<Locale>>();
@@ -34,11 +34,11 @@ pub fn DashboardPage() -> Element {
// Persistent state stored in localStorage
let mut custom_topics = use_persistent("certifai_topics".to_string(), Vec::<String>::new);
// Default to empty so the server functions use OLLAMA_URL / OLLAMA_MODEL
// Default to empty so the server functions use LITELLM_URL / LITELLM_MODEL
// from .env. Only stores a non-empty value when the user explicitly saves
// an override via the Settings panel.
let mut ollama_url = use_persistent("certifai_ollama_url".to_string(), String::new);
let mut ollama_model = use_persistent("certifai_ollama_model".to_string(), String::new);
let mut litellm_url = use_persistent("certifai_litellm_url".to_string(), String::new);
let mut litellm_model = use_persistent("certifai_litellm_model".to_string(), String::new);
// Reactive signals for UI state
let mut active_topic = use_signal(|| "AI".to_string());
@@ -235,8 +235,8 @@ pub fn DashboardPage() -> Element {
onclick: move |_| {
let currently_shown = *show_settings.read();
if !currently_shown {
settings_url.set(ollama_url.read().clone());
settings_model.set(ollama_model.read().clone());
settings_url.set(litellm_url.read().clone());
settings_model.set(litellm_model.read().clone());
}
show_settings.set(!currently_shown);
},
@@ -247,16 +247,16 @@ pub fn DashboardPage() -> Element {
// Settings panel (collapsible)
if *show_settings.read() {
div { class: "settings-panel",
h4 { class: "settings-panel-title", "{t(l, \"dashboard.ollama_settings\")}" }
h4 { class: "settings-panel-title", "{t(l, \"dashboard.litellm_settings\")}" }
p { class: "settings-hint",
"{t(l, \"dashboard.settings_hint\")}"
}
div { class: "settings-field",
label { "{t(l, \"dashboard.ollama_url\")}" }
label { "{t(l, \"dashboard.litellm_url\")}" }
input {
class: "settings-input",
r#type: "text",
placeholder: "{t(l, \"dashboard.ollama_url_placeholder\")}",
placeholder: "{t(l, \"dashboard.litellm_url_placeholder\")}",
value: "{settings_url}",
oninput: move |e| settings_url.set(e.value()),
}
@@ -274,8 +274,8 @@ pub fn DashboardPage() -> Element {
button {
class: "btn btn-primary",
onclick: move |_| {
*ollama_url.write() = settings_url.read().trim().to_string();
*ollama_model.write() = settings_model.read().trim().to_string();
*litellm_url.write() = settings_url.read().trim().to_string();
*litellm_model.write() = settings_model.read().trim().to_string();
show_settings.set(false);
},
"{t(l, \"common.save\")}"
@@ -320,14 +320,14 @@ pub fn DashboardPage() -> Element {
news_session_id.set(None);
let oll_url = ollama_url.read().clone();
let mdl = ollama_model.read().clone();
let ll_url = litellm_url.read().clone();
let mdl = litellm_model.read().clone();
spawn(async move {
is_summarizing.set(true);
match crate::infrastructure::llm::summarize_article(
snippet.clone(),
article_url,
oll_url,
ll_url,
mdl,
)
.await
@@ -373,8 +373,8 @@ pub fn DashboardPage() -> Element {
chat_messages: chat_messages.read().clone(),
is_chatting: *is_chatting.read(),
on_chat_send: move |question: String| {
let oll_url = ollama_url.read().clone();
let mdl = ollama_model.read().clone();
let ll_url = litellm_url.read().clone();
let mdl = litellm_model.read().clone();
let ctx = article_context.read().clone();
// Capture article info for News session creation
let card_title = selected_card
@@ -394,7 +394,7 @@ pub fn DashboardPage() -> Element {
content: question.clone(),
});
// Build full message history for Ollama
// Build full message history for LiteLLM
let system_msg = format!(
"You are a helpful assistant. The user is reading \
a news article. Use the following context to answer \
@@ -422,7 +422,7 @@ pub fn DashboardPage() -> Element {
match create_chat_session(
card_title,
"News".to_string(),
"ollama".to_string(),
"litellm".to_string(),
mdl.clone(),
card_url,
)
@@ -458,7 +458,7 @@ pub fn DashboardPage() -> Element {
}
match crate::infrastructure::llm::chat_followup(
msgs, oll_url, mdl,
msgs, ll_url, mdl,
)
.await
{
@@ -495,7 +495,7 @@ pub fn DashboardPage() -> Element {
// Right: sidebar (when no card selected)
if !has_selection {
DashboardSidebar {
ollama_url: ollama_url.read().clone(),
litellm_url: litellm_url.read().clone(),
trending: trending_topics.clone(),
recent_searches: recent_searches.read().clone(),
on_topic_click: move |topic: String| {

View File

@@ -1,26 +1,239 @@
use dioxus::prelude::*;
use dioxus_free_icons::icons::bs_icons::{
BsBook, BsBoxArrowUpRight, BsCodeSquare, BsCpu, BsGithub, BsLightningCharge,
};
use dioxus_free_icons::Icon;
use crate::i18n::{t, Locale};
use crate::models::ServiceUrlsContext;
/// Agents page placeholder for the LangGraph agent builder.
/// Agents informational landing page for LangGraph.
///
/// Shows a "Coming Soon" card with a disabled launch button.
/// Will eventually integrate with the LangGraph framework.
/// Since LangGraph is API-only (no web UI), this page displays a hero section
/// explaining its role, a connection status indicator, a card grid linking
/// to documentation, and a live table of registered agents fetched from the
/// LangGraph assistants API.
#[component]
pub fn AgentsPage() -> Element {
let locale = use_context::<Signal<Locale>>();
let svc = use_context::<Signal<ServiceUrlsContext>>();
let l = *locale.read();
let url = svc.read().langgraph_url.clone();
// Derive whether a LangGraph URL is configured
let connected = !url.is_empty();
// Build the API reference URL from the configured base, falling back to "#"
let api_ref_href = if connected {
format!("{}/docs", url)
} else {
"#".to_string()
};
// Fetch agents from LangGraph when connected
let agents_resource = use_resource(move || async move {
match crate::infrastructure::langgraph::list_langgraph_agents().await {
Ok(agents) => agents,
Err(e) => {
tracing::error!("Failed to fetch agents: {e}");
Vec::new()
}
}
});
rsx! {
section { class: "placeholder-page",
div { class: "placeholder-card",
div { class: "placeholder-icon", "A" }
h2 { "{t(l, \"developer.agents_title\")}" }
p { class: "placeholder-desc",
"{t(l, \"developer.agents_desc\")}"
div { class: "agents-page",
// -- Hero section --
div { class: "agents-hero",
div { class: "agents-hero-row",
div { class: "agents-hero-icon",
Icon { icon: BsCpu, width: 24, height: 24 }
}
h2 { class: "agents-hero-title",
{t(l, "developer.agents_title")}
}
}
p { class: "agents-hero-desc",
{t(l, "developer.agents_desc")}
}
// -- Connection status --
if connected {
div { class: "agents-status",
span {
class: "agents-status-dot agents-status-dot--on",
}
span { {t(l, "developer.agents_status_connected")} }
code { class: "agents-status-url", {url.clone()} }
}
} else {
div { class: "agents-status",
span {
class: "agents-status-dot agents-status-dot--off",
}
span { {t(l, "developer.agents_status_not_connected")} }
span { class: "agents-status-hint",
{t(l, "developer.agents_config_hint")}
}
}
}
}
// -- Running Agents table --
div { class: "agents-table-section",
h3 { class: "agents-section-title",
{t(l, "developer.agents_running_title")}
}
match agents_resource.read().as_ref() {
None => {
rsx! {
p { class: "agents-table-loading",
{t(l, "common.loading")}
}
}
}
Some(agents) if agents.is_empty() => {
rsx! {
p { class: "agents-table-empty",
{t(l, "developer.agents_none")}
}
}
}
Some(agents) => {
rsx! {
div { class: "agents-table-wrap",
table { class: "agents-table",
thead {
tr {
th { {t(l, "developer.agents_col_name")} }
th { {t(l, "developer.agents_col_id")} }
th { {t(l, "developer.agents_col_description")} }
th { {t(l, "developer.agents_col_status")} }
}
}
tbody {
for agent in agents.iter() {
tr { key: "{agent.id}",
td { class: "agents-cell-name",
{agent.name.clone()}
}
td {
code { class: "agents-cell-id",
{agent.id.clone()}
}
}
td { class: "agents-cell-desc",
if agent.description.is_empty() {
span { class: "agents-cell-none", "--" }
} else {
{agent.description.clone()}
}
}
td {
span { class: "agents-badge agents-badge--active",
{agent.status.clone()}
}
}
}
}
}
}
}
}
}
}
}
// -- Quick Start card grid --
h3 { class: "agents-section-title",
{t(l, "developer.agents_quick_start")}
}
div { class: "agents-grid",
// Documentation
a {
class: "agents-card",
href: "https://langchain-ai.github.io/langgraph/",
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsBook, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.agents_docs")}
}
div { class: "agents-card-desc",
{t(l, "developer.agents_docs_desc")}
}
}
// Getting Started
a {
class: "agents-card",
href: "https://langchain-ai.github.io/langgraph/tutorials/introduction/",
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsLightningCharge, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.agents_getting_started")}
}
div { class: "agents-card-desc",
{t(l, "developer.agents_getting_started_desc")}
}
}
// GitHub
a {
class: "agents-card",
href: "https://github.com/langchain-ai/langgraph",
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsGithub, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.agents_github")}
}
div { class: "agents-card-desc",
{t(l, "developer.agents_github_desc")}
}
}
// Examples
a {
class: "agents-card",
href: "https://github.com/langchain-ai/langgraph/tree/main/examples",
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsCodeSquare, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.agents_examples")}
}
div { class: "agents-card-desc",
{t(l, "developer.agents_examples_desc")}
}
}
// API Reference (disabled when URL is empty)
a {
class: if connected { "agents-card" } else { "agents-card agents-card--disabled" },
href: "{api_ref_href}",
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsBoxArrowUpRight, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.agents_api_ref")}
}
div { class: "agents-card-desc",
{t(l, "developer.agents_api_ref_desc")}
}
}
button { class: "btn-primary", disabled: true, "{t(l, \"developer.launch_agents\")}" }
span { class: "placeholder-badge", "{t(l, \"common.coming_soon\")}" }
}
}
}

View File

@@ -1,40 +1,142 @@
use dioxus::prelude::*;
use dioxus_free_icons::icons::bs_icons::{
BsBarChartLine, BsBoxArrowUpRight, BsGraphUp, BsSpeedometer,
};
use dioxus_free_icons::Icon;
use crate::i18n::{t, Locale};
use crate::models::AnalyticsMetric;
use crate::models::{AnalyticsMetric, ServiceUrlsContext};
/// Analytics page placeholder for LangFuse integration.
/// Analytics & Observability page for Langfuse.
///
/// Shows a "Coming Soon" card with a disabled launch button,
/// plus a mock stats bar showing sample metrics.
/// Langfuse is configured with Keycloak SSO (shared realm with CERTifAI).
/// When users open Langfuse, the existing Keycloak session auto-authenticates
/// them transparently. This page shows a metrics bar, connection status,
/// and a prominent button to open Langfuse in a new tab.
#[component]
pub fn AnalyticsPage() -> Element {
let locale = use_context::<Signal<Locale>>();
let svc = use_context::<Signal<ServiceUrlsContext>>();
let l = *locale.read();
let url = svc.read().langfuse_url.clone();
let connected = !url.is_empty();
let metrics = mock_metrics(l);
rsx! {
section { class: "placeholder-page",
div { class: "analytics-page",
// -- Hero section --
div { class: "analytics-hero",
div { class: "analytics-hero-row",
div { class: "analytics-hero-icon",
Icon { icon: BsGraphUp, width: 24, height: 24 }
}
h2 { class: "analytics-hero-title",
{t(l, "developer.analytics_title")}
}
}
p { class: "analytics-hero-desc",
{t(l, "developer.analytics_desc")}
}
// -- Connection status --
if connected {
div { class: "agents-status",
span {
class: "agents-status-dot agents-status-dot--on",
}
span { {t(l, "developer.analytics_status_connected")} }
code { class: "agents-status-url", {url.clone()} }
}
} else {
div { class: "agents-status",
span {
class: "agents-status-dot agents-status-dot--off",
}
span { {t(l, "developer.analytics_status_not_connected")} }
span { class: "agents-status-hint",
{t(l, "developer.analytics_config_hint")}
}
}
}
// -- SSO info --
if connected {
p { class: "analytics-sso-hint",
{t(l, "developer.analytics_sso_hint")}
}
}
}
// -- Metrics bar --
div { class: "analytics-stats-bar",
for metric in &metrics {
div { class: "analytics-stat",
span { class: "analytics-stat-value", "{metric.value}" }
span { class: "analytics-stat-label", "{metric.label}" }
span { class: if metric.change_pct >= 0.0 { "analytics-stat-change analytics-stat-change--up" } else { "analytics-stat-change analytics-stat-change--down" },
span {
class: if metric.change_pct >= 0.0 {
"analytics-stat-change analytics-stat-change--up"
} else {
"analytics-stat-change analytics-stat-change--down"
},
"{metric.change_pct:+.1}%"
}
}
}
}
div { class: "placeholder-card",
div { class: "placeholder-icon", "L" }
h2 { "{t(l, \"developer.analytics_title\")}" }
p { class: "placeholder-desc",
"{t(l, \"developer.analytics_desc\")}"
// -- Open Langfuse button --
if connected {
a {
class: "analytics-launch-btn",
href: "{url}",
target: "_blank",
rel: "noopener noreferrer",
Icon { icon: BsBoxArrowUpRight, width: 16, height: 16 }
span { {t(l, "developer.launch_analytics")} }
}
}
// -- Quick actions --
h3 { class: "agents-section-title",
{t(l, "developer.analytics_quick_actions")}
}
div { class: "agents-grid",
// Traces
a {
class: if connected { "agents-card" } else { "agents-card agents-card--disabled" },
href: if connected { format!("{url}/project") } else { "#".to_string() },
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsBarChartLine, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.analytics_traces")}
}
div { class: "agents-card-desc",
{t(l, "developer.analytics_traces_desc")}
}
}
// Dashboard
a {
class: if connected { "agents-card" } else { "agents-card agents-card--disabled" },
href: if connected { format!("{url}/project") } else { "#".to_string() },
target: "_blank",
rel: "noopener noreferrer",
div { class: "agents-card-icon",
Icon { icon: BsSpeedometer, width: 18, height: 18 }
}
div { class: "agents-card-title",
{t(l, "developer.analytics_dashboard")}
}
div { class: "agents-card-desc",
{t(l, "developer.analytics_dashboard_desc")}
}
}
button { class: "btn-primary", disabled: true, "{t(l, \"developer.launch_analytics\")}" }
span { class: "placeholder-badge", "{t(l, \"common.coming_soon\")}" }
}
}
}

View File

@@ -1,27 +1,27 @@
use dioxus::prelude::*;
use crate::components::ToolEmbed;
use crate::i18n::{t, Locale};
use crate::models::ServiceUrlsContext;
/// Flow page placeholder for the LangFlow visual workflow builder.
/// Flow page embedding the LangFlow visual workflow builder.
///
/// Shows a "Coming Soon" card with a disabled launch button.
/// Will eventually integrate with LangFlow for visual flow design.
/// When `langflow_url` is configured, embeds the service in an iframe
/// with a pop-out button. Otherwise shows a "Not Configured" placeholder.
#[component]
pub fn FlowPage() -> Element {
let locale = use_context::<Signal<Locale>>();
let svc = use_context::<Signal<ServiceUrlsContext>>();
let l = *locale.read();
let url = svc.read().langflow_url.clone();
rsx! {
section { class: "placeholder-page",
div { class: "placeholder-card",
div { class: "placeholder-icon", "F" }
h2 { "{t(l, \"developer.flow_title\")}" }
p { class: "placeholder-desc",
"{t(l, \"developer.flow_desc\")}"
}
button { class: "btn-primary", disabled: true, "{t(l, \"developer.launch_flow\")}" }
span { class: "placeholder-badge", "{t(l, \"common.coming_soon\")}" }
}
ToolEmbed {
url,
title: t(l, "developer.flow_title"),
description: t(l, "developer.flow_desc"),
icon: "F",
launch_label: t(l, "developer.launch_flow"),
}
}
}

View File

@@ -1,128 +0,0 @@
use dioxus::prelude::*;
use crate::components::{FileRow, PageHeader};
use crate::i18n::{t, Locale};
use crate::models::{FileKind, KnowledgeFile};
/// Knowledge Base page with file explorer table and upload controls.
///
/// Displays uploaded documents used for RAG retrieval with their
/// metadata, chunk counts, and management actions.
///
/// Local state:
/// - `files`: the (mock) document list; mutated in place on delete.
/// - `search_query`: live contents of the search box.
#[component]
pub fn KnowledgePage() -> Element {
    let locale = use_context::<Signal<Locale>>();
    let l = *locale.read();
    // Seed the signal with mock data; row deletion mutates it directly.
    let mut files = use_signal(mock_files);
    let mut search_query = use_signal(String::new);
    // Filter files by search query (case-insensitive name match).
    // An empty query matches every file.
    let query = search_query.read().to_lowercase();
    let filtered: Vec<_> = files
        .read()
        .iter()
        .filter(|f| query.is_empty() || f.name.to_lowercase().contains(&query))
        // Clone so the rsx body below doesn't hold the signal borrow.
        .cloned()
        .collect();
    // Remove a file by ID (invoked from each row's delete action).
    let on_delete = move |id: String| {
        files.write().retain(|f| f.id != id);
    };
    rsx! {
        section { class: "knowledge-page",
            PageHeader {
                title: t(l, "knowledge.title"),
                subtitle: t(l, "knowledge.subtitle"),
                // NOTE(review): upload button has no onclick handler yet —
                // presumably wired up when a real backend exists; confirm.
                actions: rsx! {
                    button { class: "btn-primary", {t(l, "common.upload_file")} }
                },
            }
            // Search toolbar: updates `search_query` on every keystroke,
            // which re-runs the filter above on the next render.
            div { class: "knowledge-toolbar",
                input {
                    class: "form-input knowledge-search",
                    r#type: "text",
                    placeholder: t(l, "knowledge.search_placeholder"),
                    value: "{search_query}",
                    oninput: move |evt: Event<FormData>| {
                        search_query.set(evt.value());
                    },
                }
            }
            // File explorer table: one FileRow per filtered file.
            div { class: "knowledge-table-wrapper",
                table { class: "knowledge-table",
                    thead {
                        tr {
                            th { {t(l, "knowledge.name")} }
                            th { {t(l, "knowledge.type")} }
                            th { {t(l, "knowledge.size")} }
                            th { {t(l, "knowledge.chunks")} }
                            th { {t(l, "knowledge.uploaded")} }
                            th { {t(l, "knowledge.actions")} }
                        }
                    }
                    tbody {
                        for file in filtered {
                            FileRow { key: "{file.id}", file, on_delete }
                        }
                    }
                }
            }
        }
    }
}
/// Returns mock knowledge base files.
fn mock_files() -> Vec<KnowledgeFile> {
vec![
KnowledgeFile {
id: "f1".into(),
name: "company-handbook.pdf".into(),
kind: FileKind::Pdf,
size_bytes: 2_450_000,
uploaded_at: "2026-02-15".into(),
chunk_count: 142,
},
KnowledgeFile {
id: "f2".into(),
name: "api-reference.md".into(),
kind: FileKind::Text,
size_bytes: 89_000,
uploaded_at: "2026-02-14".into(),
chunk_count: 34,
},
KnowledgeFile {
id: "f3".into(),
name: "sales-data-q4.csv".into(),
kind: FileKind::Spreadsheet,
size_bytes: 1_200_000,
uploaded_at: "2026-02-12".into(),
chunk_count: 67,
},
KnowledgeFile {
id: "f4".into(),
name: "deployment-guide.pdf".into(),
kind: FileKind::Pdf,
size_bytes: 540_000,
uploaded_at: "2026-02-10".into(),
chunk_count: 28,
},
KnowledgeFile {
id: "f5".into(),
name: "onboarding-checklist.md".into(),
kind: FileKind::Text,
size_bytes: 12_000,
uploaded_at: "2026-02-08".into(),
chunk_count: 8,
},
KnowledgeFile {
id: "f6".into(),
name: "architecture-diagram.png".into(),
kind: FileKind::Image,
size_bytes: 3_800_000,
uploaded_at: "2026-02-05".into(),
chunk_count: 1,
},
]
}

View File

@@ -1,21 +1,15 @@
mod chat;
mod dashboard;
pub mod developer;
mod impressum;
mod knowledge;
mod landing;
pub mod organization;
mod privacy;
mod providers;
mod tools;
pub use chat::*;
pub use dashboard::*;
pub use developer::*;
pub use impressum::*;
pub use knowledge::*;
pub use landing::*;
pub use organization::*;
pub use privacy::*;
pub use providers::*;
pub use tools::*;

View File

@@ -2,12 +2,14 @@ use dioxus::prelude::*;
use crate::components::{MemberRow, PageHeader};
use crate::i18n::{t, tw, Locale};
use crate::models::{BillingUsage, MemberRole, OrgMember};
use crate::infrastructure::litellm::get_litellm_usage;
use crate::models::{BillingUsage, LitellmUsageStats, MemberRole, OrgMember};
/// Organization dashboard with billing stats, member table, and invite modal.
///
/// Shows current billing usage, a table of organization members
/// with role management, and a button to invite new members.
/// Shows current billing usage (fetched from LiteLLM), a per-model
/// breakdown table, a table of organization members with role
/// management, and a button to invite new members.
#[component]
pub fn OrgDashboardPage() -> Element {
let locale = use_context::<Signal<Locale>>();
@@ -20,6 +22,20 @@ pub fn OrgDashboardPage() -> Element {
let members_list = members.read().clone();
// Compute date range: 1st of current month to today
let (start_date, end_date) = current_month_range();
// Fetch real usage stats from LiteLLM via server function.
// use_resource memoises and won't re-fire on parent re-renders.
let usage_resource = use_resource(move || {
let start = start_date.clone();
let end = end_date.clone();
async move { get_litellm_usage(start, end).await }
});
// Clone out of Signal to avoid holding the borrow across rsx!
let usage_snapshot = usage_resource.read().clone();
// Format token counts for display
let tokens_display = format_tokens(usage.tokens_used);
let tokens_limit_display = format_tokens(usage.tokens_limit);
@@ -30,26 +46,39 @@ pub fn OrgDashboardPage() -> Element {
title: t(l, "org.title"),
subtitle: t(l, "org.subtitle"),
actions: rsx! {
button { class: "btn-primary", onclick: move |_| show_invite.set(true), {t(l, "org.invite_member")} }
button {
class: "btn-primary",
onclick: move |_| show_invite.set(true),
{t(l, "org.invite_member")}
}
},
}
// Stats bar
div { class: "org-stats-bar",
div { class: "org-stat",
span { class: "org-stat-value", "{usage.seats_used}/{usage.seats_total}" }
span { class: "org-stat-value",
"{usage.seats_used}/{usage.seats_total}"
}
span { class: "org-stat-label", {t(l, "org.seats_used")} }
}
div { class: "org-stat",
span { class: "org-stat-value", "{tokens_display}" }
span { class: "org-stat-label", {tw(l, "org.of_tokens", &[("limit", &tokens_limit_display)])} }
span { class: "org-stat-label",
{tw(l, "org.of_tokens", &[("limit", &tokens_limit_display)])}
}
}
div { class: "org-stat",
span { class: "org-stat-value", "{usage.billing_cycle_end}" }
span { class: "org-stat-value",
"{usage.billing_cycle_end}"
}
span { class: "org-stat-label", {t(l, "org.cycle_ends")} }
}
}
// LiteLLM usage stats section
{render_usage_section(l, &usage_snapshot)}
// Members table
div { class: "org-table-wrapper",
table { class: "org-table",
@@ -114,6 +143,144 @@ pub fn OrgDashboardPage() -> Element {
}
}
/// Render the LiteLLM usage stats section: totals bar + per-model table.
///
/// Shows a loading state while the resource is pending, an error/empty
/// message on failure, and the full breakdown on success.
///
/// # Arguments
///
/// * `l` - Current locale for translating labels
/// * `snapshot` - Cloned `use_resource` state: `None` while pending,
///   `Some(Err)` on server-function failure, `Some(Ok)` with the stats.
fn render_usage_section(
    l: Locale,
    snapshot: &Option<Result<LitellmUsageStats, ServerFnError>>,
) -> Element {
    match snapshot {
        // Resource hasn't resolved yet -> loading indicator.
        None => rsx! {
            div { class: "org-usage-loading",
                span { {t(l, "org.loading_usage")} }
            }
        },
        // Server function failed -> "unavailable" message (error detail
        // intentionally not surfaced to the user).
        Some(Err(_)) => rsx! {
            div { class: "org-usage-unavailable",
                span { {t(l, "org.usage_unavailable")} }
            }
        },
        // Succeeded but returned no data at all -> treat as unavailable
        // rather than rendering an all-zero dashboard.
        Some(Ok(stats)) if stats.total_tokens == 0 && stats.model_breakdown.is_empty() => {
            rsx! {
                div { class: "org-usage-unavailable",
                    span { {t(l, "org.usage_unavailable")} }
                }
            }
        }
        Some(Ok(stats)) => {
            let spend_display = format!("${:.2}", stats.total_spend);
            let total_display = format_tokens(stats.total_tokens);
            // Free-tier LiteLLM doesn't provide prompt/completion split
            let has_token_split =
                stats.total_prompt_tokens > 0 || stats.total_completion_tokens > 0;
            rsx! {
                // Usage totals bar
                div { class: "org-stats-bar",
                    div { class: "org-stat",
                        span { class: "org-stat-value", "{spend_display}" }
                        span { class: "org-stat-label",
                            {t(l, "org.total_spend")}
                        }
                    }
                    div { class: "org-stat",
                        span { class: "org-stat-value",
                            "{total_display}"
                        }
                        span { class: "org-stat-label",
                            {t(l, "org.total_tokens")}
                        }
                    }
                    // Only show prompt/completion split when available
                    if has_token_split {
                        div { class: "org-stat",
                            span { class: "org-stat-value",
                                {format_tokens(stats.total_prompt_tokens)}
                            }
                            span { class: "org-stat-label",
                                {t(l, "org.prompt_tokens")}
                            }
                        }
                        div { class: "org-stat",
                            span { class: "org-stat-value",
                                {format_tokens(stats.total_completion_tokens)}
                            }
                            span { class: "org-stat-label",
                                {t(l, "org.completion_tokens")}
                            }
                        }
                    }
                }
                // Per-model breakdown table (hidden when no per-model data)
                if !stats.model_breakdown.is_empty() {
                    h3 { class: "org-section-title",
                        {t(l, "org.model_usage")}
                    }
                    div { class: "org-table-wrapper",
                        table { class: "org-table",
                            thead {
                                tr {
                                    th { {t(l, "org.model")} }
                                    th { {t(l, "org.tokens")} }
                                    th { {t(l, "org.spend")} }
                                }
                            }
                            tbody {
                                for model in &stats.model_breakdown {
                                    // Model name doubles as the row key —
                                    // assumes names are unique per response.
                                    tr { key: "{model.model}",
                                        td { "{model.model}" }
                                        td {
                                            {format_tokens(model.total_tokens)}
                                        }
                                        td {
                                            {format!(
                                                "${:.2}", model.spend
                                            )}
                                        }
                                    }
                                }
                            }
                        }
                    }
                }
            }
        }
    }
}
/// Compute the date range for the current billing month.
///
/// Returns `(start_date, end_date)` as `YYYY-MM-DD` strings where
/// start_date is the 1st of the current month and end_date is today.
///
/// On the web target this uses `js_sys::Date` to read the browser clock.
/// On the server target (SSR) it falls back to `chrono::Utc::now()`.
///
/// NOTE(review): the web branch reads the browser's *local* date while
/// the SSR branch uses UTC, so the two targets can disagree around
/// midnight / month boundaries — confirm this is acceptable for a
/// billing display.
fn current_month_range() -> (String, String) {
    #[cfg(feature = "web")]
    {
        // js_sys::Date accesses the browser's local clock in WASM.
        let now = js_sys::Date::new_0();
        let year = now.get_full_year();
        // JS months are 0-indexed, so add 1 for calendar month
        let month = now.get_month() + 1;
        let day = now.get_date();
        let start = format!("{year:04}-{month:02}-01");
        let end = format!("{year:04}-{month:02}-{day:02}");
        (start, end)
    }
    #[cfg(not(feature = "web"))]
    {
        use chrono::Datelike;
        let today = chrono::Utc::now().date_naive();
        let start = format!("{:04}-{:02}-01", today.year(), today.month());
        let end = today.format("%Y-%m-%d").to_string();
        (start, end)
    }
}
/// Formats a token count into a human-readable string (e.g. "1.2M").
fn format_tokens(count: u64) -> String {
const M: u64 = 1_000_000;

View File

@@ -13,8 +13,8 @@ pub fn ProvidersPage() -> Element {
let locale = use_context::<Signal<Locale>>();
let l = *locale.read();
let mut selected_provider = use_signal(|| LlmProvider::Ollama);
let mut selected_model = use_signal(|| "llama3.1:8b".to_string());
let mut selected_provider = use_signal(|| LlmProvider::LiteLlm);
let mut selected_model = use_signal(|| "qwen3-32b".to_string());
let mut selected_embedding = use_signal(|| "nomic-embed-text".to_string());
let mut api_key = use_signal(String::new);
let mut saved = use_signal(|| false);
@@ -59,12 +59,12 @@ pub fn ProvidersPage() -> Element {
"Hugging Face" => LlmProvider::HuggingFace,
"OpenAI" => LlmProvider::OpenAi,
"Anthropic" => LlmProvider::Anthropic,
_ => LlmProvider::Ollama,
_ => LlmProvider::LiteLlm,
};
selected_provider.set(prov);
saved.set(false);
},
option { value: "Ollama", "Ollama" }
option { value: "LiteLLM", "LiteLLM" }
option { value: "Hugging Face", "Hugging Face" }
option { value: "OpenAI", "OpenAI" }
option { value: "Anthropic", "Anthropic" }
@@ -156,23 +156,29 @@ pub fn ProvidersPage() -> Element {
fn mock_models() -> Vec<ModelEntry> {
vec![
ModelEntry {
id: "llama3.1:8b".into(),
name: "Llama 3.1 8B".into(),
provider: LlmProvider::Ollama,
context_window: 128,
},
ModelEntry {
id: "llama3.1:70b".into(),
name: "Llama 3.1 70B".into(),
provider: LlmProvider::Ollama,
context_window: 128,
},
ModelEntry {
id: "mistral:7b".into(),
name: "Mistral 7B".into(),
provider: LlmProvider::Ollama,
id: "qwen3-32b".into(),
name: "Qwen3 32B".into(),
provider: LlmProvider::LiteLlm,
context_window: 32,
},
ModelEntry {
id: "llama-3.3-70b".into(),
name: "Llama 3.3 70B".into(),
provider: LlmProvider::LiteLlm,
context_window: 128,
},
ModelEntry {
id: "mistral-small-24b".into(),
name: "Mistral Small 24B".into(),
provider: LlmProvider::LiteLlm,
context_window: 32,
},
ModelEntry {
id: "deepseek-r1-70b".into(),
name: "DeepSeek R1 70B".into(),
provider: LlmProvider::LiteLlm,
context_window: 64,
},
ModelEntry {
id: "meta-llama/Llama-3.1-8B".into(),
name: "Llama 3.1 8B".into(),
@@ -200,7 +206,7 @@ fn mock_embeddings() -> Vec<EmbeddingEntry> {
EmbeddingEntry {
id: "nomic-embed-text".into(),
name: "Nomic Embed Text".into(),
provider: LlmProvider::Ollama,
provider: LlmProvider::LiteLlm,
dimensions: 768,
},
EmbeddingEntry {

View File

@@ -1,149 +0,0 @@
use std::collections::HashMap;
use dioxus::prelude::*;
use crate::components::{PageHeader, ToolCard};
use crate::i18n::{t, Locale};
use crate::models::{McpTool, ToolCategory, ToolStatus};
/// Tools page displaying a grid of MCP tool cards with toggle switches.
///
/// Shows all available MCP tools with their status and allows
/// enabling/disabling them via toggle buttons.
///
/// Tool definitions (including translated names) are rebuilt from
/// `mock_tools(l)` on every render; only the user's on/off overrides
/// live in component state, so the list reacts to locale changes.
#[component]
pub fn ToolsPage() -> Element {
    let locale = use_context::<Signal<Locale>>();
    let l = *locale.read();
    // Track which tool IDs have been toggled off/on by the user.
    // The canonical tool definitions (including translated names) come
    // from `mock_tools(l)` on every render so they react to locale changes.
    let mut enabled_overrides = use_signal(HashMap::<String, bool>::new);
    // Build the display list: translated names from mock_tools, with
    // enabled state merged from user overrides.
    let tool_list: Vec<McpTool> = mock_tools(l)
        .into_iter()
        .map(|mut tool| {
            if let Some(&enabled) = enabled_overrides.read().get(&tool.id) {
                tool.enabled = enabled;
            }
            tool
        })
        .collect();
    // Toggle a tool's enabled state by its ID.
    // Reads the current state from overrides (or falls back to the default
    // enabled value from mock_tools) and flips it.
    let on_toggle = move |id: String| {
        // Rebuilt on each toggle so the fallback defaults match the
        // current locale's list.
        let defaults = mock_tools(l);
        let current = enabled_overrides
            .read()
            .get(&id)
            .copied()
            .unwrap_or_else(|| {
                defaults
                    .iter()
                    .find(|tool| tool.id == id)
                    .map(|tool| tool.enabled)
                    // Unknown IDs are treated as disabled.
                    .unwrap_or(false)
            });
        enabled_overrides.write().insert(id, !current);
    };
    rsx! {
        section { class: "tools-page",
            PageHeader {
                title: t(l, "tools.title"),
                subtitle: t(l, "tools.subtitle"),
            }
            // One card per tool; `on_toggle` flips that tool's override.
            div { class: "tools-grid",
                for tool in tool_list {
                    ToolCard { key: "{tool.id}", tool, on_toggle }
                }
            }
        }
    }
}
/// Returns mock MCP tools for the tools grid with translated names.
///
/// # Arguments
///
/// * `l` - The current locale for translating tool names and descriptions
fn mock_tools(l: Locale) -> Vec<McpTool> {
vec![
McpTool {
id: "calculator".into(),
name: t(l, "tools.calculator"),
description: t(l, "tools.calculator_desc"),
category: ToolCategory::Compute,
status: ToolStatus::Active,
enabled: true,
icon: "calculator".into(),
},
McpTool {
id: "tavily".into(),
name: t(l, "tools.tavily"),
description: t(l, "tools.tavily_desc"),
category: ToolCategory::Search,
status: ToolStatus::Active,
enabled: true,
icon: "search".into(),
},
McpTool {
id: "searxng".into(),
name: t(l, "tools.searxng"),
description: t(l, "tools.searxng_desc"),
category: ToolCategory::Search,
status: ToolStatus::Active,
enabled: true,
icon: "globe".into(),
},
McpTool {
id: "file-reader".into(),
name: t(l, "tools.file_reader"),
description: t(l, "tools.file_reader_desc"),
category: ToolCategory::FileSystem,
status: ToolStatus::Active,
enabled: true,
icon: "file".into(),
},
McpTool {
id: "code-exec".into(),
name: t(l, "tools.code_executor"),
description: t(l, "tools.code_executor_desc"),
category: ToolCategory::Code,
status: ToolStatus::Inactive,
enabled: false,
icon: "terminal".into(),
},
McpTool {
id: "web-scraper".into(),
name: t(l, "tools.web_scraper"),
description: t(l, "tools.web_scraper_desc"),
category: ToolCategory::Search,
status: ToolStatus::Active,
enabled: true,
icon: "download".into(),
},
McpTool {
id: "email".into(),
name: t(l, "tools.email_sender"),
description: t(l, "tools.email_sender_desc"),
category: ToolCategory::Communication,
status: ToolStatus::Inactive,
enabled: false,
icon: "mail".into(),
},
McpTool {
id: "git".into(),
name: t(l, "tools.git_ops"),
description: t(l, "tools.git_ops_desc"),
category: ToolCategory::Code,
status: ToolStatus::Active,
enabled: true,
icon: "git".into(),
},
]
}