From 52a15b24fe47d0bbc86e2f5ec6ee4bf9bf15a7bf Mon Sep 17 00:00:00 2001 From: Benjamin Admin Date: Wed, 29 Apr 2026 15:24:13 +0200 Subject: [PATCH] Add custom word entry + language pair support for learning units MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - New UnitBuilder component with language pair selector (DE⇄EN, ES, FR, etc.) - Manual word entry form with auto-suggest from Kaikki dictionary (6M words) - "No results" prompt to add multi-word terms (e.g. "schottisches Hochland") - New backend endpoint GET /vocabulary/lookup-translation (any→any via EN hub) - Updated POST /vocabulary/units: accepts custom_words + source_lang/target_lang - Split unit endpoints into vocabulary/unit_api.py (500 LOC budget) Co-Authored-By: Claude Opus 4.6 (1M context) --- backend-lehrer/main.py | 4 + backend-lehrer/vocabulary/api.py | 130 +------ backend-lehrer/vocabulary/unit_api.py | 356 ++++++++++++++++++ .../app/vocabulary/components/UnitBuilder.tsx | 307 +++++++++++++++ studio-v2/app/vocabulary/page.tsx | 260 +++++-------- 5 files changed, 762 insertions(+), 295 deletions(-) create mode 100644 backend-lehrer/vocabulary/unit_api.py create mode 100644 studio-v2/app/vocabulary/components/UnitBuilder.tsx diff --git a/backend-lehrer/main.py b/backend-lehrer/main.py index 2d18c5c..13b0c1b 100644 --- a/backend-lehrer/main.py +++ b/backend-lehrer/main.py @@ -119,6 +119,10 @@ app.include_router(progress_router, prefix="/api") from vocabulary.api import router as vocabulary_router app.include_router(vocabulary_router, prefix="/api") +# --- 4c2. Vocabulary Unit Creation + Translation --- +from vocabulary.unit_api import router as vocab_unit_router +app.include_router(vocab_unit_router, prefix="/api") + # --- 4d. 
User Language Preferences --- from api.user_language import router as user_language_router app.include_router(user_language_router, prefix="/api") diff --git a/backend-lehrer/vocabulary/api.py b/backend-lehrer/vocabulary/api.py index ff26f20..375d48c 100644 --- a/backend-lehrer/vocabulary/api.py +++ b/backend-lehrer/vocabulary/api.py @@ -22,11 +22,6 @@ from .db import ( get_all_pos, VocabularyWord, ) -from units.learning import ( - LearningUnitCreate, - create_learning_unit, - get_learning_unit, -) logger = logging.getLogger(__name__) @@ -239,130 +234,7 @@ async def api_tts(text: str = Query("", min_length=1), lang: str = Query("de")): # --------------------------------------------------------------------------- -class CreateUnitFromWordsPayload(BaseModel): - title: str - word_ids: List[str] - grade: Optional[str] = None - language: Optional[str] = "de" - - -@router.post("/units") -async def api_create_unit_from_words(payload: CreateUnitFromWordsPayload): - """Create a learning unit from selected vocabulary word IDs. - - Fetches full word details, creates a LearningUnit in the - learning_units system, and stores the vocabulary data. 
- """ - if not payload.word_ids: - raise HTTPException(status_code=400, detail="Keine Woerter ausgewaehlt") - - # Fetch all selected words - words = [] - for wid in payload.word_ids: - word = await get_word(wid) - if word: - words.append(word) - - if not words: - raise HTTPException(status_code=404, detail="Keine der Woerter gefunden") - - # Create learning unit - lu = create_learning_unit(LearningUnitCreate( - title=payload.title, - topic="Vocabulary", - grade_level=payload.grade or "5-8", - language=payload.language or "de", - status="raw", - )) - - # Save vocabulary data as analysis JSON for generators - import os - analysis_dir = os.path.expanduser("~/Arbeitsblaetter/Lerneinheiten") - os.makedirs(analysis_dir, exist_ok=True) - - vocab_data = [w.to_dict() for w in words] - analysis_path = os.path.join(analysis_dir, f"{lu.id}_vocab.json") - with open(analysis_path, "w", encoding="utf-8") as f: - json.dump({"words": vocab_data, "title": payload.title}, f, ensure_ascii=False, indent=2) - - # Also save as QA items for flashcards/type trainer - qa_items = [] - for i, w in enumerate(words): - qa_items.append({ - "id": f"qa_{i+1}", - "question": w.english, - "answer": w.german, - "question_type": "knowledge", - "key_terms": [w.english], - "difficulty": w.difficulty, - "source_hint": w.part_of_speech, - "leitner_box": 0, - "correct_count": 0, - "incorrect_count": 0, - "last_seen": None, - "next_review": None, - # Extra fields for enhanced flashcards - "ipa_en": w.ipa_en, - "ipa_de": w.ipa_de, - "syllables_en": w.syllables_en, - "syllables_de": w.syllables_de, - "example_en": w.example_en, - "example_de": w.example_de, - "image_url": w.image_url, - "audio_url_en": w.audio_url_en, - "audio_url_de": w.audio_url_de, - "part_of_speech": w.part_of_speech, - "translations": w.translations, - }) - - qa_path = os.path.join(analysis_dir, f"{lu.id}_qa.json") - with open(qa_path, "w", encoding="utf-8") as f: - json.dump({ - "qa_items": qa_items, - "metadata": { - "subject": 
"English Vocabulary", - "grade_level": payload.grade or "5-8", - "source_title": payload.title, - "total_questions": len(qa_items), - }, - }, f, ensure_ascii=False, indent=2) - - # Auto-enrich words with images (Wikipedia + emoji fallback) - try: - from services.image_service import enrich_words_with_images - await enrich_words_with_images(payload.word_ids) - except Exception as e: - logger.warning(f"Image enrichment failed (non-critical): {e}") - - logger.info(f"Created vocab unit {lu.id} with {len(words)} words") - - return { - "unit_id": lu.id, - "title": payload.title, - "word_count": len(words), - "status": "created", - } - - -@router.get("/units/{unit_id}") -async def api_get_unit_words(unit_id: str): - """Get all words for a learning unit.""" - import os - vocab_path = os.path.join( - os.path.expanduser("~/Arbeitsblaetter/Lerneinheiten"), - f"{unit_id}_vocab.json", - ) - if not os.path.exists(vocab_path): - raise HTTPException(status_code=404, detail="Unit nicht gefunden") - - with open(vocab_path, "r", encoding="utf-8") as f: - data = json.load(f) - - return { - "unit_id": unit_id, - "title": data.get("title", ""), - "words": data.get("words", []), - } +# Unit creation and translation lookup moved to vocabulary/unit_api.py # --------------------------------------------------------------------------- diff --git a/backend-lehrer/vocabulary/unit_api.py b/backend-lehrer/vocabulary/unit_api.py new file mode 100644 index 0000000..3dedd96 --- /dev/null +++ b/backend-lehrer/vocabulary/unit_api.py @@ -0,0 +1,356 @@ +""" +Vocabulary Unit API — Create learning units, translate words, manage language pairs. + +Endpoints for teachers to build vocabulary learning units with custom words, +auto-translation via Kaikki dictionary, and flexible language pair support. 
"""
Vocabulary Unit API — Create learning units, translate words, manage language pairs.

Endpoints for teachers to build vocabulary learning units with custom words,
auto-translation via Kaikki dictionary, and flexible language pair support.

Translation model: the Kaikki table stores English headwords whose
``translations`` JSONB column maps language codes to translated text, so EN
acts as a hub — any pair X→Y is answered directly when EN is on either side,
or bridged through the matching EN entry otherwise.
"""

import json
import logging
import os
import uuid
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel

from .db import get_word, VocabularyWord, get_pool
from units.learning import LearningUnitCreate, create_learning_unit

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/vocabulary", tags=["vocabulary"])

# All supported language codes (whitelist — also guards the SQL below).
SUPPORTED_LANGS = {
    "en", "de", "fr", "es", "it", "pt", "nl", "tr", "ru", "ar",
    "uk", "pl", "sv", "fi", "da", "ro", "el", "hu", "cs", "bg",
    "lv", "lt", "sk", "et", "sl", "hr",
}


# ---------------------------------------------------------------------------
# Translation Lookup (auto-suggest)
# ---------------------------------------------------------------------------


@router.get("/lookup-translation")
async def api_lookup_translation(
    word: str = Query("", min_length=1, description="Word to translate"),
    source: str = Query("en", description="Source language code"),
    target: str = Query("de", description="Target language code"),
    limit: int = Query(5, ge=1, le=20),
):
    """Look up translations between any two languages via Kaikki dictionary.

    Uses EN entries as a hub: all EN words have translations to 24 languages.
    - EN → X: direct lookup (word in EN, translation from JSONB)
    - X → EN: reverse lookup (search EN entries where translations.X matches)
    - X → Y: bridge via EN (find EN word via X, then get Y translation)

    Returns up to ``limit`` prefix matches as
    ``{"results": [...], "word": ..., "source": ..., "target": ...}``.
    Raises HTTP 400 for unsupported language codes.
    """
    if source not in SUPPORTED_LANGS or target not in SUPPORTED_LANGS:
        raise HTTPException(status_code=400, detail="Sprache nicht unterstuetzt")
    if source == target:
        return {"results": [], "word": word, "source": source, "target": target}

    q = word.strip()
    if not q:
        # An empty query would turn the LIKE pattern into a bare '%' and
        # match the entire table — return nothing instead.
        return {"results": [], "word": q, "source": source, "target": target}

    pool = await get_pool()
    results = []

    async with pool.acquire() as conn:
        if source == "en":
            # Direct: search EN word, return target translation.
            rows = await conn.fetch(
                """SELECT word, pos, ipa, translations
                     FROM vocabulary_kaikki
                    WHERE lang = 'en' AND lower(word) LIKE $1
                    ORDER BY length(word), lower(word)
                    LIMIT $2""",
                f"{q.lower()}%", limit,
            )
            for r in rows:
                tr = _parse_translations(r["translations"])
                target_text = tr.get(target, {}).get("text", "")
                if target_text:
                    results.append({
                        "source_text": r["word"],
                        "target_text": target_text,
                        "pos": r["pos"],
                        "ipa": r["ipa"] or "",
                    })

        elif target == "en":
            # Reverse: search EN entries whose translations.<source> matches.
            # The language code is passed as a bind parameter ($3) rather than
            # interpolated into the SQL string (defense in depth on top of the
            # SUPPORTED_LANGS whitelist).
            rows = await conn.fetch(
                """SELECT word, pos, ipa, translations->$3::text->>'text' AS src_text
                     FROM vocabulary_kaikki
                    WHERE lang = 'en'
                      AND translations->$3::text->>'text' ILIKE $1
                    ORDER BY length(word)
                    LIMIT $2""",
                f"{q}%", limit, source,
            )
            for r in rows:
                results.append({
                    "source_text": r["src_text"],
                    "target_text": r["word"],
                    "pos": r["pos"],
                    "ipa": r["ipa"] or "",
                })

        else:
            # Bridge via EN: find the EN entry through the source language,
            # then read the target translation off the same row.
            rows = await conn.fetch(
                """SELECT word, pos, ipa, translations
                     FROM vocabulary_kaikki
                    WHERE lang = 'en'
                      AND translations->$3::text->>'text' ILIKE $1
                    ORDER BY length(word)
                    LIMIT $2""",
                f"{q}%", limit, source,
            )
            for r in rows:
                tr = _parse_translations(r["translations"])
                src_text = tr.get(source, {}).get("text", "")
                target_text = tr.get(target, {}).get("text", "")
                if src_text and target_text:
                    results.append({
                        "source_text": src_text,
                        "target_text": target_text,
                        "pos": r["pos"],
                        "ipa": "",
                    })

    return {"results": results, "word": q, "source": source, "target": target}


def _parse_translations(tr) -> dict:
    """Parse a translations field (JSONB dict, JSON string, or None) into a dict."""
    if isinstance(tr, str):
        return json.loads(tr)
    return tr or {}


# ---------------------------------------------------------------------------
# Unit Creation (with custom words + language pair)
# ---------------------------------------------------------------------------


class CustomWord(BaseModel):
    # A manually entered source/target pair (e.g. a multi-word term that is
    # not in the dictionary).
    source_text: str
    target_text: str


class CreateUnitPayload(BaseModel):
    title: str
    word_ids: List[str] = []          # dictionary word IDs (vocabulary_words or Kaikki)
    custom_words: List[CustomWord] = []
    source_lang: str = "en"
    target_lang: str = "de"
    grade: Optional[str] = None


@router.post("/units")
async def api_create_unit_from_words(payload: CreateUnitPayload):
    """Create a learning unit from dictionary words and/or custom word pairs.

    Supports any language pair. Words can come from:
    1. word_ids — looked up in Kaikki dictionary
    2. custom_words — manually entered source/target pairs

    Writes ``<unit_id>_vocab.json`` and ``<unit_id>_qa.json`` into the
    learning-unit directory and returns a summary dict. Raises HTTP 400 when
    no words were supplied or none could be resolved.
    """
    if not payload.word_ids and not payload.custom_words:
        raise HTTPException(status_code=400, detail="Keine Woerter ausgewaehlt")

    qa_items = []
    vocab_data = []
    idx = 0

    # 1. Process dictionary words.
    for wid in payload.word_ids:
        word = await get_word(wid)
        if not word:
            # Not in vocabulary_words — try the Kaikki table instead.
            kaikki_word = await _get_kaikki_word(wid, payload.source_lang, payload.target_lang)
            if kaikki_word:
                qa_items.append(_make_qa_item(idx, kaikki_word, payload.source_lang, payload.target_lang))
                vocab_data.append(kaikki_word)
                idx += 1
            continue
        # Manual vocabulary_words entry.
        source_text, target_text = _get_word_pair(word, payload.source_lang, payload.target_lang)
        qa_items.append(_make_word_qa_item(idx, word, source_text, target_text))
        vocab_data.append(word.to_dict())
        idx += 1

    # 2. Process custom words (manually entered by teacher).
    for cw in payload.custom_words:
        qa_items.append({
            "id": f"qa_{idx+1}",
            "question": cw.source_text,
            "answer": cw.target_text,
            "question_type": "knowledge",
            "key_terms": [cw.source_text],
            "difficulty": 1,
            "source_hint": "",
            "leitner_box": 0,
            "correct_count": 0,
            "incorrect_count": 0,
            "last_seen": None,
            "next_review": None,
            "part_of_speech": "",
            "translations": {},
        })
        vocab_data.append({
            # Legacy english/german fields are only filled when the pair
            # actually involves that language.
            "english": cw.source_text if payload.source_lang == "en" else cw.target_text if payload.target_lang == "en" else "",
            "german": cw.source_text if payload.source_lang == "de" else cw.target_text if payload.target_lang == "de" else "",
            "word": cw.source_text,
            "translation": cw.target_text,
            "source_lang": payload.source_lang,
            "target_lang": payload.target_lang,
        })
        idx += 1

    if not qa_items:
        raise HTTPException(status_code=400, detail="Keine gültigen Woerter")

    # Create the learning-unit shell.
    lang_label = f"{payload.source_lang.upper()}→{payload.target_lang.upper()}"
    lu = create_learning_unit(LearningUnitCreate(
        title=payload.title,
        topic="Vocabulary",
        grade_level=payload.grade or "5-8",
        language=payload.target_lang,
        status="raw",
    ))

    # Persist vocab + QA files for the downstream generators/trainers.
    analysis_dir = os.path.expanduser("~/Arbeitsblaetter/Lerneinheiten")
    os.makedirs(analysis_dir, exist_ok=True)

    with open(os.path.join(analysis_dir, f"{lu.id}_vocab.json"), "w", encoding="utf-8") as f:
        json.dump({"words": vocab_data, "title": payload.title}, f, ensure_ascii=False, indent=2)

    with open(os.path.join(analysis_dir, f"{lu.id}_qa.json"), "w", encoding="utf-8") as f:
        json.dump({
            "qa_items": qa_items,
            "metadata": {
                "subject": f"Vocabulary {lang_label}",
                "grade_level": payload.grade or "5-8",
                "source_title": payload.title,
                "total_questions": len(qa_items),
                "source_lang": payload.source_lang,
                "target_lang": payload.target_lang,
            },
        }, f, ensure_ascii=False, indent=2)

    # Auto-enrich images for dictionary words (best effort, non-critical).
    dict_ids = list(payload.word_ids)
    if dict_ids:
        try:
            from services.image_service import enrich_words_with_images
            await enrich_words_with_images(dict_ids)
        except Exception as e:
            logger.warning(f"Image enrichment failed (non-critical): {e}")

    logger.info(f"Created vocab unit {lu.id} ({lang_label}) with {len(qa_items)} words")
    return {
        "unit_id": lu.id,
        "title": payload.title,
        "word_count": len(qa_items),
        "source_lang": payload.source_lang,
        "target_lang": payload.target_lang,
        "status": "created",
    }


def _make_word_qa_item(idx: int, word: VocabularyWord, source_text: str, target_text: str) -> dict:
    """Create a QA item (flashcard/type-trainer record) from a VocabularyWord."""
    return {
        "id": f"qa_{idx+1}",
        "question": source_text,
        "answer": target_text,
        "question_type": "knowledge",
        "key_terms": [source_text],
        "difficulty": word.difficulty,
        "source_hint": word.part_of_speech,
        "leitner_box": 0,
        "correct_count": 0,
        "incorrect_count": 0,
        "last_seen": None,
        "next_review": None,
        # Extra fields for enhanced flashcards.
        "ipa_en": word.ipa_en,
        "ipa_de": word.ipa_de,
        "syllables_en": word.syllables_en,
        "syllables_de": word.syllables_de,
        "example_en": word.example_en,
        "example_de": word.example_de,
        "image_url": word.image_url,
        "audio_url_en": word.audio_url_en,
        "audio_url_de": word.audio_url_de,
        "part_of_speech": word.part_of_speech,
        "translations": word.translations,
    }


def _get_word_pair(word: VocabularyWord, source_lang: str, target_lang: str):
    """Extract (source_text, target_text) from a VocabularyWord for a language pair.

    EN/DE come from the dedicated columns; other languages are read from the
    word's ``translations`` mapping — assumed to have the Kaikki shape
    ``{lang: {"text": ...}}`` (TODO confirm against the db module) — falling
    back to english/german when missing.
    """
    lang_map = {"en": word.english, "de": word.german}
    if source_lang not in lang_map:
        tr = word.translations or {}
        lang_map[source_lang] = tr.get(source_lang, {}).get("text", word.english)
    if target_lang not in lang_map:
        tr = word.translations or {}
        lang_map[target_lang] = tr.get(target_lang, {}).get("text", word.german)
    return lang_map.get(source_lang, word.english), lang_map.get(target_lang, word.german)


async def _get_kaikki_word(word_id: str, source_lang: str, target_lang: str) -> Optional[dict]:
    """Look up a word by ID in the Kaikki table and return a vocab dict (or None)."""
    pool = await get_pool()
    async with pool.acquire() as conn:
        row = await conn.fetchrow(
            "SELECT id, word, lang, pos, ipa, translations, example FROM vocabulary_kaikki WHERE id = $1",
            _to_uuid(word_id),
        )
    if not row:
        return None
    tr = _parse_translations(row["translations"])
    # The headword itself may already be in the source or target language.
    src = row["word"] if row["lang"] == source_lang else tr.get(source_lang, {}).get("text", "")
    tgt = tr.get(target_lang, {}).get("text", "") if row["lang"] != target_lang else row["word"]
    return {
        "id": str(row["id"]),
        "word": row["word"],
        "lang": row["lang"],
        "source_text": src or row["word"],
        "target_text": tgt,
        "pos": row["pos"],
        "ipa": row["ipa"] or "",
        "example": row["example"] or "",
        "translations": tr,
    }


def _make_qa_item(idx: int, kw: dict, source_lang: str, target_lang: str) -> dict:
    """Create a QA item from a Kaikki word dict (see ``_get_kaikki_word``)."""
    return {
        "id": f"qa_{idx+1}",
        "question": kw.get("source_text", kw.get("word", "")),
        "answer": kw.get("target_text", ""),
        "question_type": "knowledge",
        "key_terms": [kw.get("source_text", kw.get("word", ""))],
        "difficulty": 0,
        "source_hint": kw.get("pos", ""),
        "leitner_box": 0,
        "correct_count": 0,
        "incorrect_count": 0,
        "last_seen": None,
        "next_review": None,
        # IPA is stored per headword language; only attach it to the matching side.
        "ipa_en": kw.get("ipa", "") if source_lang == "en" else "",
        "ipa_de": kw.get("ipa", "") if source_lang == "de" else "",
        "part_of_speech": kw.get("pos", ""),
        "translations": kw.get("translations", {}),
    }


def _to_uuid(s: str):
    """Best-effort conversion of *s* to ``uuid.UUID``.

    Returns the input unchanged when it is not a valid UUID (or not a str),
    so the caller's query can still match tables with a text id column.
    """
    try:
        return uuid.UUID(s)
    except (ValueError, AttributeError):
        return s
'Finnisch' }, + { code: 'el', label: 'Griechisch' }, + { code: 'hu', label: 'Ungarisch' }, + { code: 'cs', label: 'Tschechisch' }, + { code: 'ro', label: 'Rumaenisch' }, +] + +export interface UnitWord { + id: string + source_text: string + target_text: string + pos?: string + is_custom?: boolean +} + +interface Props { + isDark: boolean + glassCard: string + glassInput: string + selectedWords: UnitWord[] + onWordsChange: (words: UnitWord[]) => void + onCreateUnit: (title: string, sourceLang: string, targetLang: string) => void + isCreating: boolean + noSearchResults?: boolean + searchQuery?: string +} + +export default function UnitBuilder({ + isDark, glassCard, glassInput, + selectedWords, onWordsChange, onCreateUnit, isCreating, + noSearchResults, searchQuery, +}: Props) { + const [unitTitle, setUnitTitle] = useState('') + const [sourceLang, setSourceLang] = useState('de') + const [targetLang, setTargetLang] = useState('en') + const [showManualEntry, setShowManualEntry] = useState(false) + const [manualSource, setManualSource] = useState('') + const [manualTarget, setManualTarget] = useState('') + const [suggestions, setSuggestions] = useState<{ source_text: string; target_text: string; pos?: string }[]>([]) + const [isLookingUp, setIsLookingUp] = useState(false) + const debounceRef = useRef>() + + // Auto-suggest translation when typing in source field + useEffect(() => { + if (!manualSource.trim() || manualSource.length < 2) { + setSuggestions([]) + return + } + if (debounceRef.current) clearTimeout(debounceRef.current) + debounceRef.current = setTimeout(async () => { + setIsLookingUp(true) + try { + const resp = await fetch( + `/api/vocabulary/lookup-translation?word=${encodeURIComponent(manualSource)}&source=${sourceLang}&target=${targetLang}&limit=5` + ) + if (resp.ok) { + const data = await resp.json() + setSuggestions(data.results || []) + // Auto-fill target if exactly one match + if (data.results?.length === 1 && !manualTarget) { + 
setManualTarget(data.results[0].target_text) + } + } + } catch { /* ignore */ } + setIsLookingUp(false) + }, 400) + return () => { if (debounceRef.current) clearTimeout(debounceRef.current) } + }, [manualSource, sourceLang, targetLang]) // eslint-disable-line react-hooks/exhaustive-deps + + const addManualWord = useCallback(() => { + if (!manualSource.trim() || !manualTarget.trim()) return + const newWord: UnitWord = { + id: `custom_${Date.now()}_${Math.random().toString(36).slice(2, 6)}`, + source_text: manualSource.trim(), + target_text: manualTarget.trim(), + is_custom: true, + } + onWordsChange([...selectedWords, newWord]) + setManualSource('') + setManualTarget('') + setSuggestions([]) + }, [manualSource, manualTarget, selectedWords, onWordsChange]) + + const removeWord = useCallback((id: string) => { + onWordsChange(selectedWords.filter(w => w.id !== id)) + }, [selectedWords, onWordsChange]) + + const swapLanguages = useCallback(() => { + setSourceLang(targetLang) + setTargetLang(sourceLang) + }, [sourceLang, targetLang]) + + const srcLabel = LANGUAGES.find(l => l.code === sourceLang)?.label || sourceLang.toUpperCase() + const tgtLabel = LANGUAGES.find(l => l.code === targetLang)?.label || targetLang.toUpperCase() + + return ( +
+
+

+ Lernunit erstellen +

+ + {/* Language pair selector */} +
+ + + +
+ + setUnitTitle(e.target.value)} + placeholder="Titel (z.B. Unit 3 - Food)" + className={`w-full px-4 py-2.5 rounded-xl border outline-none text-sm mb-3 ${glassInput}`} + /> + + {/* Manual word entry toggle */} + + + {/* Manual entry form */} + {showManualEntry && ( +
+
+
+ + setManualSource(e.target.value)} + placeholder={`z.B. ${sourceLang === 'de' ? 'schottisches Hochland' : 'Scottish Highlands'}`} + className={`w-full px-3 py-2 rounded-lg border outline-none text-sm ${glassInput}`} + onKeyDown={e => e.key === 'Enter' && manualTarget && addManualWord()} + /> +
+
+ + setManualTarget(e.target.value)} + placeholder={`z.B. ${targetLang === 'en' ? 'Scottish Highlands' : 'schottisches Hochland'}`} + className={`w-full px-3 py-2 rounded-lg border outline-none text-sm ${glassInput}`} + onKeyDown={e => e.key === 'Enter' && manualSource && addManualWord()} + /> +
+ {/* Auto-suggest results */} + {suggestions.length > 1 && ( +
+ Vorschlaege: + {suggestions.map((s, i) => ( + + ))} +
+ )} + +
+
+ )} + + {/* "No results" prompt to add manually */} + {noSearchResults && searchQuery && !showManualEntry && ( +
+

+ "{searchQuery}" nicht im Woerterbuch +

+ +
+ )} + + {/* Word list */} + {selectedWords.length === 0 ? ( +

+ Woerter aus dem Woerterbuch auswaehlen oder eigene eingeben +

+ ) : ( +
+ {selectedWords.map((w, i) => ( +
+
+ {i+1} +
+ + {w.source_text} + + + {w.target_text} + +
+ {w.is_custom && ( + + eigen + + )} +
+ +
+ ))} +
+ )} + +
+ {selectedWords.length} Woerter · {sourceLang.toUpperCase()} → {targetLang.toUpperCase()} +
+ + +
+
+ ) +} diff --git a/studio-v2/app/vocabulary/page.tsx b/studio-v2/app/vocabulary/page.tsx index c89f5cf..b8de604 100644 --- a/studio-v2/app/vocabulary/page.tsx +++ b/studio-v2/app/vocabulary/page.tsx @@ -5,11 +5,14 @@ import { useRouter } from 'next/navigation' import { useTheme } from '@/lib/ThemeContext' import { Sidebar } from '@/components/Sidebar' import { AudioButton } from '@/components/learn/AudioButton' +import UnitBuilder, { type UnitWord } from './components/UnitBuilder' interface VocabWord { id: string english: string german: string + word?: string + lang?: string ipa_en: string ipa_de: string part_of_speech: string @@ -20,11 +23,18 @@ interface VocabWord { image_url: string difficulty: number tags: string[] + translations?: Record } -/** Use Next.js API proxy to avoid mixed-content/CORS issues */ -function getApiBase() { - return '' // Same-origin: /api/vocabulary/... proxied by Next.js +function vocabToUnit(w: VocabWord, searchLang: string): UnitWord { + // Source = the word in the language we searched, Target = the translation + const src = w.word || w.english || '' + const tgt = searchLang === 'en' + ? (w.german || '') + : searchLang === 'de' + ? 
(w.english || '') + : (w.english || w.german || '') + return { id: w.id, source_text: src, target_text: tgt, pos: w.part_of_speech } } export default function VocabularyPage() { @@ -39,11 +49,9 @@ export default function VocabularyPage() { const [diffFilter, setDiffFilter] = useState(0) const [searchLang, setSearchLang] = useState('en') const [topics, setTopics] = useState<{ topic: string; words: string[]; display_words?: string[]; word_count: number }[]>([]) - const [showTopics, setShowTopics] = useState(false) - // Unit builder - const [selectedWords, setSelectedWords] = useState([]) - const [unitTitle, setUnitTitle] = useState('') + // Unit builder state (UnitWord format) + const [unitWords, setUnitWords] = useState([]) const [isCreating, setIsCreating] = useState(false) const glassCard = isDark @@ -54,9 +62,8 @@ export default function VocabularyPage() { ? 'bg-white/10 border-white/20 text-white placeholder-white/40' : 'bg-white border-slate-200 text-slate-900 placeholder-slate-400' - // Load filters on mount useEffect(() => { - fetch(`${getApiBase()}/api/vocabulary/filters`) + fetch('/api/vocabulary/filters') .then(r => r.ok ? 
r.json() : null) .then(d => { if (d) setFilters(d) }) .catch(() => {}) @@ -66,30 +73,28 @@ export default function VocabularyPage() { useEffect(() => { if (!query.trim() && !posFilter && !diffFilter) { setResults([]) + setTopics([]) return } - const timer = setTimeout(async () => { setIsSearching(true) try { let url: string if (query.trim()) { - url = `${getApiBase()}/api/vocabulary/search?q=${encodeURIComponent(query)}&lang=${searchLang}&limit=30&source=kaikki` + url = `/api/vocabulary/search?q=${encodeURIComponent(query)}&lang=${searchLang}&limit=30&source=kaikki` } else { const params = new URLSearchParams({ limit: '30' }) if (posFilter) params.set('pos', posFilter) if (diffFilter) params.set('difficulty', String(diffFilter)) - url = `${getApiBase()}/api/vocabulary/browse?${params}` + url = `/api/vocabulary/browse?${params}` } const resp = await fetch(url) if (resp.ok) { const data = await resp.json() setResults(data.words || []) } - - // Also search for matching topics if (query.trim()) { - const topicResp = await fetch(`${getApiBase()}/api/vocabulary/topics?q=${encodeURIComponent(query)}&lang=${searchLang}`) + const topicResp = await fetch(`/api/vocabulary/topics?q=${encodeURIComponent(query)}&lang=${searchLang}`) if (topicResp.ok) { const topicData = await topicResp.json() setTopics(topicData.topics || []) @@ -101,28 +106,38 @@ export default function VocabularyPage() { setIsSearching(false) } }, 300) - return () => clearTimeout(timer) }, [query, posFilter, diffFilter, searchLang]) const toggleWord = useCallback((word: VocabWord) => { - setSelectedWords(prev => { + setUnitWords(prev => { const exists = prev.find(w => w.id === word.id) if (exists) return prev.filter(w => w.id !== word.id) - return [...prev, word] + return [...prev, vocabToUnit(word, searchLang)] }) - }, []) + }, [searchLang]) - const createUnit = useCallback(async () => { - if (!unitTitle.trim() || selectedWords.length === 0) return + const isSelected = (wordId: string) => unitWords.some(w => 
w.id === wordId) + + const createUnit = useCallback(async (title: string, sourceLang: string, targetLang: string) => { + if (!title.trim() || unitWords.length === 0) return setIsCreating(true) try { - const resp = await fetch(`${getApiBase()}/api/vocabulary/units`, { + const dictWords = unitWords.filter(w => !w.is_custom) + const customWords = unitWords.filter(w => w.is_custom) + + const resp = await fetch('/api/vocabulary/units', { method: 'POST', headers: { 'Content-Type': 'application/json' }, body: JSON.stringify({ - title: unitTitle, - word_ids: selectedWords.map(w => w.id), + title, + word_ids: dictWords.map(w => w.id), + custom_words: customWords.map(w => ({ + source_text: w.source_text, + target_text: w.target_text, + })), + source_lang: sourceLang, + target_lang: targetLang, }), }) if (resp.ok) { @@ -134,9 +149,29 @@ export default function VocabularyPage() { } finally { setIsCreating(false) } - }, [unitTitle, selectedWords, router]) + }, [unitWords, router]) - const isSelected = (wordId: string) => selectedWords.some(w => w.id === wordId) + const addTopicWords = useCallback(async (topic: { words: string[] }, showOnly: boolean) => { + setIsSearching(true) + const topicWords: VocabWord[] = [] + for (const w of topic.words) { + const r = await fetch(`/api/vocabulary/search?q=${encodeURIComponent(w)}&lang=en&limit=1&source=kaikki`) + if (r.ok) { + const d = await r.json() + if (d.words?.[0]) topicWords.push(d.words[0]) + } + } + if (!showOnly) { + const newUnitWords = topicWords + .filter(tw => !unitWords.find(uw => uw.id === tw.id)) + .map(tw => vocabToUnit(tw, 'en')) + setUnitWords(prev => [...prev, ...newUnitWords]) + } + setResults(topicWords) + setIsSearching(false) + }, [unitWords]) + + const noSearchResults = !isSearching && results.length === 0 && !!query.trim() && topics.length === 0 return (

Woerterbuch

- {(filters as any).kaikki_total > 0 ? `${((filters as any).kaikki_total as number).toLocaleString()} Woerter in ${(filters as any).kaikki_languages} Sprachen` : filters.total_words > 0 ? `${filters.total_words.toLocaleString()} Woerter` : 'Woerter suchen und Lernunits erstellen'} + {(filters as any).kaikki_total > 0 ? `${((filters as any).kaikki_total as number).toLocaleString()} Woerter in ${(filters as any).kaikki_languages} Sprachen` : 'Woerter suchen und Lernunits erstellen'}

@@ -184,18 +219,6 @@ export default function VocabularyPage() { - - - - - - - - - - - - - - - {/* Results */} {isSearching && (
@@ -236,7 +244,7 @@ export default function VocabularyPage() {
- 💡 {topic.topic} ({topic.word_count}) + {topic.topic} ({topic.word_count})
@@ -248,44 +256,13 @@ export default function VocabularyPage() { )}
- -
@@ -293,12 +270,17 @@ export default function VocabularyPage() {
)} - {!isSearching && results.length === 0 && query.trim() && topics.length === 0 && ( + {/* No results message */} + {noSearchResults && (

Keine Ergebnisse fuer "{query}"

+

+ Du kannst das Wort rechts manuell hinzufuegen → +

)} + {/* Result list */}
{results.map(word => (
toggleWord(word)} > - {/* Image or emoji placeholder */}
{word.image_url ? ( {word.english} @@ -318,33 +299,22 @@ export default function VocabularyPage() { 📝 )}
- - {/* Word info */}
- {word.english} + {word.word || word.english} {word.ipa_en && {word.ipa_en}} - +
- {word.german} - -
-
- {word.part_of_speech && ( - - {word.part_of_speech} - - )} - {word.syllables_en.length > 0 && ( - - {word.syllables_en.join(' · ')} - - )} + {word.german || word.english} + {word.german && }
+ {word.part_of_speech && ( + + {word.part_of_speech} + + )}
- - {/* Select indicator */}
{/* Right: Unit Builder */} -
-
-

- Lernunit erstellen -

- - setUnitTitle(e.target.value)} - placeholder="Titel (z.B. Unit 3 - Food)" - className={`w-full px-4 py-2.5 rounded-xl border outline-none text-sm mb-4 ${glassInput}`} - /> - - {selectedWords.length === 0 ? ( -

- Klicke auf Woerter um sie hinzuzufuegen -

- ) : ( -
- {selectedWords.map((w, i) => ( -
-
- {i+1} - {w.english} - {w.german} -
- -
- ))} -
- )} - -
- {selectedWords.length} Woerter ausgewaehlt -
- - -
-
+