[split-required] Split final 43 files (500-668 LOC) to complete refactoring
klausur-service (11 files): - cv_gutter_repair, ocr_pipeline_regression, upload_api - ocr_pipeline_sessions, smart_spell, nru_worksheet_generator - ocr_pipeline_overlays, mail/aggregator, zeugnis_api - cv_syllable_detect, self_rag backend-lehrer (17 files): - classroom_engine/suggestions, generators/quiz_generator - worksheets_api, llm_gateway/comparison, state_engine_api - classroom/models (→ 4 submodules), services/file_processor - alerts_agent/api/wizard+digests+routes, content_generators/pdf - classroom/routes/sessions, llm_gateway/inference - classroom_engine/analytics, auth/keycloak_auth - alerts_agent/processing/rule_engine, ai_processor/print_versions agent-core (5 files): - brain/memory_store, brain/knowledge_graph, brain/context_manager - orchestrator/supervisor, sessions/session_manager admin-lehrer (5 components): - GridOverlay, StepGridReview, DevOpsPipelineSidebar - DataFlowDiagram, sbom/wizard/page website (2 files): - DependencyMap, lehrer/abitur-archiv Other: nibis_ingestion, grid_detection_service, export-doclayout-onnx Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -7,15 +7,31 @@ Provides:
|
||||
- KnowledgeGraph: Entity relationships and semantic connections
|
||||
"""
|
||||
|
||||
from agent_core.brain.memory_store import MemoryStore, Memory
|
||||
from agent_core.brain.context_manager import ConversationContext, ContextManager
|
||||
from agent_core.brain.knowledge_graph import KnowledgeGraph, Entity, Relationship
|
||||
from agent_core.brain.memory_models import Memory
|
||||
from agent_core.brain.memory_store import MemoryStore
|
||||
from agent_core.brain.context_models import (
|
||||
MessageRole,
|
||||
Message,
|
||||
ConversationContext,
|
||||
)
|
||||
from agent_core.brain.context_manager import ContextManager
|
||||
from agent_core.brain.knowledge_models import (
|
||||
EntityType,
|
||||
RelationshipType,
|
||||
Entity,
|
||||
Relationship,
|
||||
)
|
||||
from agent_core.brain.knowledge_graph import KnowledgeGraph
|
||||
|
||||
__all__ = [
|
||||
"MemoryStore",
|
||||
"Memory",
|
||||
"MessageRole",
|
||||
"Message",
|
||||
"ConversationContext",
|
||||
"ContextManager",
|
||||
"EntityType",
|
||||
"RelationshipType",
|
||||
"KnowledgeGraph",
|
||||
"Entity",
|
||||
"Relationship",
|
||||
|
||||
@@ -1,317 +1,22 @@
|
||||
"""
|
||||
Context Management for Breakpilot Agents
|
||||
|
||||
Provides conversation context with:
|
||||
- Message history with compression
|
||||
- Entity extraction and tracking
|
||||
- Intent history
|
||||
- Context summarization
|
||||
Manages conversation contexts for multiple sessions with persistence.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional, Callable, Awaitable
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
import json
|
||||
import logging
|
||||
|
||||
from agent_core.brain.context_models import (
|
||||
MessageRole,
|
||||
Message,
|
||||
ConversationContext,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MessageRole(Enum):
    """Role of a single message within a conversation transcript."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A single conversation message: role, text content, and metadata."""

    role: MessageRole
    content: str
    # Timestamps are always timezone-aware UTC.
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (timestamp as ISO-8601)."""
        return {
            "role": self.role.value,
            "content": self.content,
            "timestamp": self.timestamp.isoformat(),
            "metadata": self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Message":
        """Rebuild a Message from `to_dict` output; a missing timestamp key
        falls back to the current UTC time."""
        if "timestamp" in data:
            ts = datetime.fromisoformat(data["timestamp"])
        else:
            ts = datetime.now(timezone.utc)
        return cls(
            role=MessageRole(data["role"]),
            content=data["content"],
            timestamp=ts,
            metadata=data.get("metadata", {})
        )
|
||||
|
||||
|
||||
@dataclass
class ConversationContext:
    """
    Context for a running conversation.

    Maintains:
    - Message history with automatic compression
    - Extracted entities
    - Intent history
    - Conversation summary
    """

    messages: List[Message] = field(default_factory=list)
    entities: Dict[str, Any] = field(default_factory=dict)
    intent_history: List[str] = field(default_factory=list)
    summary: Optional[str] = None
    max_messages: int = 50
    system_prompt: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def add_message(
        self,
        role: MessageRole,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """
        Adds a message to the conversation, compressing history once it
        grows past ``max_messages``.

        Args:
            role: Message role
            content: Message content
            metadata: Optional message metadata

        Returns:
            The created Message
        """
        message = Message(
            role=role,
            content=content,
            metadata=metadata or {}
        )
        self.messages.append(message)

        # Compress if needed
        if len(self.messages) > self.max_messages:
            self._compress_history()

        return message

    def add_user_message(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """Convenience method to add a user message"""
        return self.add_message(MessageRole.USER, content, metadata)

    def add_assistant_message(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """Convenience method to add an assistant message"""
        return self.add_message(MessageRole.ASSISTANT, content, metadata)

    def add_system_message(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """Convenience method to add a system message"""
        return self.add_message(MessageRole.SYSTEM, content, metadata)

    def add_intent(self, intent: str) -> None:
        """
        Records an intent in the history (bounded to the last 20 entries).

        Args:
            intent: The detected intent
        """
        self.intent_history.append(intent)
        if len(self.intent_history) > 20:
            self.intent_history = self.intent_history[-20:]

    def set_entity(self, name: str, value: Any) -> None:
        """
        Sets an entity value.

        Args:
            name: Entity name
            value: Entity value
        """
        self.entities[name] = value

    def get_entity(self, name: str, default: Any = None) -> Any:
        """
        Gets an entity value.

        Args:
            name: Entity name
            default: Default value if not found

        Returns:
            Entity value or default
        """
        return self.entities.get(name, default)

    def get_last_message(self, role: Optional[MessageRole] = None) -> Optional[Message]:
        """
        Gets the last message, optionally filtered by role.

        Args:
            role: Optional role filter

        Returns:
            The last matching message or None
        """
        if not self.messages:
            return None

        if role is None:
            return self.messages[-1]

        # Walk backwards so the most recent match wins.
        for msg in reversed(self.messages):
            if msg.role == role:
                return msg

        return None

    def get_messages_for_llm(self) -> List[Dict[str, str]]:
        """
        Gets messages formatted for LLM API calls.

        Returns:
            List of message dicts with role and content
        """
        result = []

        # System prompt always comes first.
        if self.system_prompt:
            result.append({
                "role": "system",
                "content": self.system_prompt
            })

        # Surface the compression summary so the model retains older context.
        if self.summary:
            result.append({
                "role": "system",
                "content": f"Previous conversation summary: {self.summary}"
            })

        for msg in self.messages:
            result.append({
                "role": msg.role.value,
                "content": msg.content
            })

        return result

    def _compress_history(self) -> None:
        """
        Compresses older messages to save context window space.

        Keeps system messages and the last 20 messages; older non-system
        messages are folded into ``self.summary``.

        Fixes over the previous index-slicing approach:
        - system messages that already fall inside the last-20 window are
          no longer duplicated in the retained history;
        - the summarized "middle" is the actual set of older non-system
          messages, instead of a slice that assumed every system message
          sits at the front of the list.
        """
        recent_msgs = self.messages[-20:]
        older_msgs = self.messages[:-20]

        # Older system messages are preserved verbatim; everything else
        # that falls out of the window is summarized.
        system_msgs = [m for m in older_msgs if m.role == MessageRole.SYSTEM]
        middle_msgs = [m for m in older_msgs if m.role != MessageRole.SYSTEM]

        if middle_msgs:
            # Create a basic summary (can be enhanced with LLM-based summarization)
            self.summary = self._create_summary(middle_msgs)

        self.messages = system_msgs + recent_msgs

        # Lazy %-formatting: no string work unless DEBUG is enabled.
        logger.debug(
            "Compressed conversation: %d messages summarized", len(middle_msgs)
        )

    def _create_summary(self, messages: List[Message]) -> str:
        """
        Creates a summary of messages.

        This is a basic implementation - can be enhanced with LLM-based summarization.

        Args:
            messages: Messages to summarize

        Returns:
            Summary string
        """
        # Count message types
        user_count = sum(1 for m in messages if m.role == MessageRole.USER)
        assistant_count = sum(1 for m in messages if m.role == MessageRole.ASSISTANT)

        # Crude keyword extraction: up to 3 "long" words per message.
        topics = set()
        for msg in messages:
            words = msg.content.lower().split()
            keywords = [w for w in words if len(w) > 5][:3]
            topics.update(keywords)

        topics_str = ", ".join(list(topics)[:5])

        return (
            f"Earlier conversation: {user_count} user messages, "
            f"{assistant_count} assistant responses. "
            f"Topics discussed: {topics_str}"
        )

    def clear(self) -> None:
        """Clears all context"""
        self.messages.clear()
        self.entities.clear()
        self.intent_history.clear()
        self.summary = None

    def to_dict(self) -> Dict[str, Any]:
        """Serializes context to dict"""
        return {
            "messages": [m.to_dict() for m in self.messages],
            "entities": self.entities,
            "intent_history": self.intent_history,
            "summary": self.summary,
            "max_messages": self.max_messages,
            "system_prompt": self.system_prompt,
            "metadata": self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ConversationContext":
        """Deserializes context from dict"""
        ctx = cls(
            messages=[Message.from_dict(m) for m in data.get("messages", [])],
            entities=data.get("entities", {}),
            intent_history=data.get("intent_history", []),
            summary=data.get("summary"),
            max_messages=data.get("max_messages", 50),
            system_prompt=data.get("system_prompt"),
            metadata=data.get("metadata", {})
        )
        return ctx
|
||||
|
||||
|
||||
class ContextManager:
|
||||
"""
|
||||
Manages conversation contexts for multiple sessions.
|
||||
|
||||
307
agent-core/brain/context_models.py
Normal file
307
agent-core/brain/context_models.py
Normal file
@@ -0,0 +1,307 @@
|
||||
"""
|
||||
Context Models for Breakpilot Agents
|
||||
|
||||
Data classes for conversation messages and context management.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
import logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MessageRole(Enum):
    """Role of a single message within a conversation transcript."""

    SYSTEM = "system"
    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"
|
||||
|
||||
|
||||
@dataclass
class Message:
    """A single conversation message: role, text content, and metadata."""

    role: MessageRole
    content: str
    # Timestamps are always timezone-aware UTC.
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (timestamp as ISO-8601)."""
        payload = {
            "role": self.role.value,
            "content": self.content,
            "timestamp": self.timestamp.isoformat(),
            "metadata": self.metadata
        }
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Message":
        """Rebuild a Message from `to_dict` output; a missing timestamp key
        falls back to the current UTC time."""
        if "timestamp" in data:
            ts = datetime.fromisoformat(data["timestamp"])
        else:
            ts = datetime.now(timezone.utc)
        return cls(
            role=MessageRole(data["role"]),
            content=data["content"],
            timestamp=ts,
            metadata=data.get("metadata", {})
        )
|
||||
|
||||
|
||||
@dataclass
class ConversationContext:
    """
    Context for a running conversation.

    Maintains:
    - Message history with automatic compression
    - Extracted entities
    - Intent history
    - Conversation summary
    """

    messages: List[Message] = field(default_factory=list)
    entities: Dict[str, Any] = field(default_factory=dict)
    intent_history: List[str] = field(default_factory=list)
    summary: Optional[str] = None
    max_messages: int = 50
    system_prompt: Optional[str] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def add_message(
        self,
        role: MessageRole,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """
        Adds a message to the conversation, compressing history once it
        grows past ``max_messages``.

        Args:
            role: Message role
            content: Message content
            metadata: Optional message metadata

        Returns:
            The created Message
        """
        message = Message(
            role=role,
            content=content,
            metadata=metadata or {}
        )
        self.messages.append(message)

        # Compress if needed
        if len(self.messages) > self.max_messages:
            self._compress_history()

        return message

    def add_user_message(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """Convenience method to add a user message"""
        return self.add_message(MessageRole.USER, content, metadata)

    def add_assistant_message(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """Convenience method to add an assistant message"""
        return self.add_message(MessageRole.ASSISTANT, content, metadata)

    def add_system_message(
        self,
        content: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Message:
        """Convenience method to add a system message"""
        return self.add_message(MessageRole.SYSTEM, content, metadata)

    def add_intent(self, intent: str) -> None:
        """
        Records an intent in the history (bounded to the last 20 entries).

        Args:
            intent: The detected intent
        """
        self.intent_history.append(intent)
        if len(self.intent_history) > 20:
            self.intent_history = self.intent_history[-20:]

    def set_entity(self, name: str, value: Any) -> None:
        """
        Sets an entity value.

        Args:
            name: Entity name
            value: Entity value
        """
        self.entities[name] = value

    def get_entity(self, name: str, default: Any = None) -> Any:
        """
        Gets an entity value.

        Args:
            name: Entity name
            default: Default value if not found

        Returns:
            Entity value or default
        """
        return self.entities.get(name, default)

    def get_last_message(self, role: Optional[MessageRole] = None) -> Optional[Message]:
        """
        Gets the last message, optionally filtered by role.

        Args:
            role: Optional role filter

        Returns:
            The last matching message or None
        """
        if not self.messages:
            return None

        if role is None:
            return self.messages[-1]

        # Walk backwards so the most recent match wins.
        for msg in reversed(self.messages):
            if msg.role == role:
                return msg

        return None

    def get_messages_for_llm(self) -> List[Dict[str, str]]:
        """
        Gets messages formatted for LLM API calls.

        Returns:
            List of message dicts with role and content
        """
        result = []

        # System prompt always comes first.
        if self.system_prompt:
            result.append({
                "role": "system",
                "content": self.system_prompt
            })

        # Surface the compression summary so the model retains older context.
        if self.summary:
            result.append({
                "role": "system",
                "content": f"Previous conversation summary: {self.summary}"
            })

        for msg in self.messages:
            result.append({
                "role": msg.role.value,
                "content": msg.content
            })

        return result

    def _compress_history(self) -> None:
        """
        Compresses older messages to save context window space.

        Keeps system messages and the last 20 messages; older non-system
        messages are folded into ``self.summary``.

        Fixes over the previous index-slicing approach:
        - system messages that already fall inside the last-20 window are
          no longer duplicated in the retained history;
        - the summarized "middle" is the actual set of older non-system
          messages, instead of a slice that assumed every system message
          sits at the front of the list.
        """
        recent_msgs = self.messages[-20:]
        older_msgs = self.messages[:-20]

        # Older system messages are preserved verbatim; everything else
        # that falls out of the window is summarized.
        system_msgs = [m for m in older_msgs if m.role == MessageRole.SYSTEM]
        middle_msgs = [m for m in older_msgs if m.role != MessageRole.SYSTEM]

        if middle_msgs:
            # Create a basic summary (can be enhanced with LLM-based summarization)
            self.summary = self._create_summary(middle_msgs)

        self.messages = system_msgs + recent_msgs

        # Lazy %-formatting: no string work unless DEBUG is enabled.
        logger.debug(
            "Compressed conversation: %d messages summarized", len(middle_msgs)
        )

    def _create_summary(self, messages: List[Message]) -> str:
        """
        Creates a summary of messages.

        This is a basic implementation - can be enhanced with LLM-based summarization.

        Args:
            messages: Messages to summarize

        Returns:
            Summary string
        """
        # Count message types
        user_count = sum(1 for m in messages if m.role == MessageRole.USER)
        assistant_count = sum(1 for m in messages if m.role == MessageRole.ASSISTANT)

        # Crude keyword extraction: up to 3 "long" words per message.
        topics = set()
        for msg in messages:
            words = msg.content.lower().split()
            keywords = [w for w in words if len(w) > 5][:3]
            topics.update(keywords)

        topics_str = ", ".join(list(topics)[:5])

        return (
            f"Earlier conversation: {user_count} user messages, "
            f"{assistant_count} assistant responses. "
            f"Topics discussed: {topics_str}"
        )

    def clear(self) -> None:
        """Clears all context"""
        self.messages.clear()
        self.entities.clear()
        self.intent_history.clear()
        self.summary = None

    def to_dict(self) -> Dict[str, Any]:
        """Serializes context to dict"""
        return {
            "messages": [m.to_dict() for m in self.messages],
            "entities": self.entities,
            "intent_history": self.intent_history,
            "summary": self.summary,
            "max_messages": self.max_messages,
            "system_prompt": self.system_prompt,
            "metadata": self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "ConversationContext":
        """Deserializes context from dict"""
        ctx = cls(
            messages=[Message.from_dict(m) for m in data.get("messages", [])],
            entities=data.get("entities", {}),
            intent_history=data.get("intent_history", []),
            summary=data.get("summary"),
            max_messages=data.get("max_messages", 50),
            system_prompt=data.get("system_prompt"),
            metadata=data.get("metadata", {})
        )
        return ctx
|
||||
@@ -9,109 +9,20 @@ Provides entity and relationship management:
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional, Set, Tuple
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
import json
|
||||
import logging
|
||||
|
||||
from agent_core.brain.knowledge_models import (
|
||||
EntityType,
|
||||
RelationshipType,
|
||||
Entity,
|
||||
Relationship,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EntityType(Enum):
    """Closed set of entity kinds stored in the knowledge graph."""

    STUDENT = "student"
    TEACHER = "teacher"
    CLASS = "class"
    SUBJECT = "subject"
    ASSIGNMENT = "assignment"
    EXAM = "exam"
    TOPIC = "topic"
    CONCEPT = "concept"
    RESOURCE = "resource"
    CUSTOM = "custom"
|
||||
|
||||
|
||||
class RelationshipType(Enum):
    """Closed set of edge kinds connecting knowledge-graph entities."""

    BELONGS_TO = "belongs_to"    # Student belongs to class
    TEACHES = "teaches"          # Teacher teaches subject
    ASSIGNED_TO = "assigned_to"  # Assignment assigned to student
    COVERS = "covers"            # Exam covers topic
    REQUIRES = "requires"        # Topic requires concept
    RELATED_TO = "related_to"    # General relationship
    PARENT_OF = "parent_of"      # Hierarchical relationship
    CREATED_BY = "created_by"    # Creator relationship
    GRADED_BY = "graded_by"      # Grading relationship
|
||||
|
||||
|
||||
@dataclass
class Entity:
    """A node in the knowledge graph, identified by id and typed by EntityType."""

    id: str
    entity_type: EntityType
    name: str
    properties: Dict[str, Any] = field(default_factory=dict)
    # Both timestamps default to "now" in UTC at creation time.
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (timestamps as ISO-8601)."""
        return {
            "id": self.id,
            "entity_type": self.entity_type.value,
            "name": self.name,
            "properties": self.properties,
            "created_at": self.created_at.isoformat(),
            "updated_at": self.updated_at.isoformat()
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Entity":
        """Inverse of to_dict(); created_at/updated_at keys are required."""
        return cls(
            id=data["id"],
            entity_type=EntityType(data["entity_type"]),
            name=data["name"],
            properties=data.get("properties", {}),
            created_at=datetime.fromisoformat(data["created_at"]),
            updated_at=datetime.fromisoformat(data["updated_at"])
        )
|
||||
|
||||
|
||||
@dataclass
class Relationship:
    """A directed, weighted edge between two entities in the knowledge graph."""

    id: str
    source_id: str
    target_id: str
    relationship_type: RelationshipType
    properties: Dict[str, Any] = field(default_factory=dict)
    weight: float = 1.0
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (created_at as ISO-8601)."""
        return {
            "id": self.id,
            "source_id": self.source_id,
            "target_id": self.target_id,
            "relationship_type": self.relationship_type.value,
            "properties": self.properties,
            "weight": self.weight,
            "created_at": self.created_at.isoformat()
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Relationship":
        """Inverse of to_dict(); weight defaults to 1.0 when absent."""
        return cls(
            id=data["id"],
            source_id=data["source_id"],
            target_id=data["target_id"],
            relationship_type=RelationshipType(data["relationship_type"]),
            properties=data.get("properties", {}),
            weight=data.get("weight", 1.0),
            created_at=datetime.fromisoformat(data["created_at"])
        )
|
||||
|
||||
|
||||
class KnowledgeGraph:
|
||||
"""
|
||||
Knowledge graph for managing entity relationships.
|
||||
|
||||
104
agent-core/brain/knowledge_models.py
Normal file
104
agent-core/brain/knowledge_models.py
Normal file
@@ -0,0 +1,104 @@
|
||||
"""
|
||||
Knowledge Graph Models for Breakpilot Agents
|
||||
|
||||
Entity and relationship data classes, plus type enumerations.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class EntityType(Enum):
    """Closed set of entity kinds stored in the knowledge graph."""

    STUDENT = "student"
    TEACHER = "teacher"
    CLASS = "class"
    SUBJECT = "subject"
    ASSIGNMENT = "assignment"
    EXAM = "exam"
    TOPIC = "topic"
    CONCEPT = "concept"
    RESOURCE = "resource"
    CUSTOM = "custom"
|
||||
|
||||
|
||||
class RelationshipType(Enum):
    """Closed set of edge kinds connecting knowledge-graph entities."""

    BELONGS_TO = "belongs_to"    # Student belongs to class
    TEACHES = "teaches"          # Teacher teaches subject
    ASSIGNED_TO = "assigned_to"  # Assignment assigned to student
    COVERS = "covers"            # Exam covers topic
    REQUIRES = "requires"        # Topic requires concept
    RELATED_TO = "related_to"    # General relationship
    PARENT_OF = "parent_of"      # Hierarchical relationship
    CREATED_BY = "created_by"    # Creator relationship
    GRADED_BY = "graded_by"      # Grading relationship
|
||||
|
||||
|
||||
@dataclass
class Entity:
    """A node in the knowledge graph, identified by id and typed by EntityType."""

    id: str
    entity_type: EntityType
    name: str
    properties: Dict[str, Any] = field(default_factory=dict)
    # Both timestamps default to "now" in UTC at creation time.
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (timestamps as ISO-8601)."""
        payload = {
            "id": self.id,
            "entity_type": self.entity_type.value,
            "name": self.name,
            "properties": self.properties,
            "created_at": self.created_at.isoformat(),
            "updated_at": self.updated_at.isoformat()
        }
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Entity":
        """Inverse of to_dict(); created_at/updated_at keys are required."""
        return cls(
            id=data["id"],
            entity_type=EntityType(data["entity_type"]),
            name=data["name"],
            properties=data.get("properties", {}),
            created_at=datetime.fromisoformat(data["created_at"]),
            updated_at=datetime.fromisoformat(data["updated_at"])
        )
|
||||
|
||||
|
||||
@dataclass
class Relationship:
    """A directed, weighted edge between two entities in the knowledge graph."""

    id: str
    source_id: str
    target_id: str
    relationship_type: RelationshipType
    properties: Dict[str, Any] = field(default_factory=dict)
    weight: float = 1.0
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict (created_at as ISO-8601)."""
        payload = {
            "id": self.id,
            "source_id": self.source_id,
            "target_id": self.target_id,
            "relationship_type": self.relationship_type.value,
            "properties": self.properties,
            "weight": self.weight,
            "created_at": self.created_at.isoformat()
        }
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Relationship":
        """Inverse of to_dict(); weight defaults to 1.0 when absent."""
        return cls(
            id=data["id"],
            source_id=data["source_id"],
            target_id=data["target_id"],
            relationship_type=RelationshipType(data["relationship_type"]),
            properties=data.get("properties", {}),
            weight=data.get("weight", 1.0),
            created_at=datetime.fromisoformat(data["created_at"])
        )
|
||||
53
agent-core/brain/memory_models.py
Normal file
53
agent-core/brain/memory_models.py
Normal file
@@ -0,0 +1,53 @@
|
||||
"""
|
||||
Memory Models for Breakpilot Agents
|
||||
|
||||
Data classes for memory items used by MemoryStore.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, Optional
|
||||
from datetime import datetime, timezone
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
class Memory:
    """A stored memory item with optional TTL expiry and access tracking."""

    key: str
    value: Any
    agent_id: str
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    # None means the memory never expires.
    expires_at: Optional[datetime] = None
    access_count: int = 0
    last_accessed: Optional[datetime] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict; datetimes become ISO-8601 or None."""
        expires = self.expires_at.isoformat() if self.expires_at else None
        accessed = self.last_accessed.isoformat() if self.last_accessed else None
        return {
            "key": self.key,
            "value": self.value,
            "agent_id": self.agent_id,
            "created_at": self.created_at.isoformat(),
            "expires_at": expires,
            "access_count": self.access_count,
            "last_accessed": accessed,
            "metadata": self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Memory":
        """Inverse of to_dict(); missing/None datetimes stay None."""
        def _parse(raw: Optional[str]) -> Optional[datetime]:
            # Helper: None/empty stays None, anything else is ISO-8601.
            return datetime.fromisoformat(raw) if raw else None

        return cls(
            key=data["key"],
            value=data["value"],
            agent_id=data["agent_id"],
            created_at=datetime.fromisoformat(data["created_at"]),
            expires_at=_parse(data.get("expires_at")),
            access_count=data.get("access_count", 0),
            last_accessed=_parse(data.get("last_accessed")),
            metadata=data.get("metadata", {})
        )

    def is_expired(self) -> bool:
        """Return True if the memory has passed its expiry; no expiry -> False."""
        return self.expires_at is not None and datetime.now(timezone.utc) > self.expires_at
|
||||
@@ -1,92 +1,24 @@
|
||||
"""
|
||||
Memory Store for Breakpilot Agents
|
||||
|
||||
Provides long-term memory with:
|
||||
- TTL-based expiration
|
||||
- Access count tracking
|
||||
- Pattern-based search
|
||||
- Hybrid Valkey + PostgreSQL persistence
|
||||
Hybrid Valkey + PostgreSQL persistence with TTL, access tracking, and pattern search.
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any, Optional
|
||||
from datetime import datetime, timezone, timedelta
|
||||
from dataclasses import dataclass, field
|
||||
import json
|
||||
import logging
|
||||
import hashlib
|
||||
|
||||
from agent_core.brain.memory_models import Memory
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
class Memory:
    """A stored memory item with optional TTL expiry and access tracking."""

    key: str
    value: Any
    agent_id: str
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    # None means the memory never expires.
    expires_at: Optional[datetime] = None
    access_count: int = 0
    last_accessed: Optional[datetime] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-compatible dict; datetimes become ISO-8601 or None."""
        return {
            "key": self.key,
            "value": self.value,
            "agent_id": self.agent_id,
            "created_at": self.created_at.isoformat(),
            "expires_at": self.expires_at.isoformat() if self.expires_at else None,
            "access_count": self.access_count,
            "last_accessed": self.last_accessed.isoformat() if self.last_accessed else None,
            "metadata": self.metadata
        }

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "Memory":
        """Inverse of to_dict(); missing/None datetimes stay None."""
        raw_expires = data.get("expires_at")
        raw_accessed = data.get("last_accessed")
        return cls(
            key=data["key"],
            value=data["value"],
            agent_id=data["agent_id"],
            created_at=datetime.fromisoformat(data["created_at"]),
            expires_at=datetime.fromisoformat(raw_expires) if raw_expires else None,
            access_count=data.get("access_count", 0),
            last_accessed=datetime.fromisoformat(raw_accessed) if raw_accessed else None,
            metadata=data.get("metadata", {})
        )

    def is_expired(self) -> bool:
        """Return True if the memory has passed its expiry; no expiry -> False."""
        if self.expires_at is None:
            return False
        return datetime.now(timezone.utc) > self.expires_at
|
||||
|
||||
|
||||
class MemoryStore:
|
||||
"""
|
||||
Long-term memory store for agents.
|
||||
"""Long-term memory store with TTL, access tracking, and hybrid persistence."""
|
||||
|
||||
Stores facts, decisions, and learning progress with:
|
||||
- TTL-based expiration
|
||||
- Access tracking for importance scoring
|
||||
- Pattern-based retrieval
|
||||
- Hybrid persistence (Valkey for fast access, PostgreSQL for durability)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
redis_client=None,
|
||||
db_pool=None,
|
||||
namespace: str = "breakpilot"
|
||||
):
|
||||
"""
|
||||
Initialize the memory store.
|
||||
|
||||
Args:
|
||||
redis_client: Async Redis/Valkey client
|
||||
db_pool: Async PostgreSQL connection pool
|
||||
namespace: Key namespace for isolation
|
||||
"""
|
||||
def __init__(self, redis_client=None, db_pool=None, namespace: str = "breakpilot"):
|
||||
self.redis = redis_client
|
||||
self.db_pool = db_pool
|
||||
self.namespace = namespace
|
||||
@@ -103,26 +35,10 @@ class MemoryStore:
|
||||
return key
|
||||
|
||||
async def remember(
|
||||
self,
|
||||
key: str,
|
||||
value: Any,
|
||||
agent_id: str,
|
||||
ttl_days: int = 30,
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
self, key: str, value: Any, agent_id: str,
|
||||
ttl_days: int = 30, metadata: Optional[Dict[str, Any]] = None
|
||||
) -> Memory:
|
||||
"""
|
||||
Stores a memory.
|
||||
|
||||
Args:
|
||||
key: Unique key for the memory
|
||||
value: Value to store (must be JSON-serializable)
|
||||
agent_id: ID of the agent storing the memory
|
||||
ttl_days: Time to live in days (0 = no expiration)
|
||||
metadata: Optional additional metadata
|
||||
|
||||
Returns:
|
||||
The created Memory object
|
||||
"""
|
||||
"""Stores a memory with optional TTL and metadata."""
|
||||
expires_at = None
|
||||
if ttl_days > 0:
|
||||
expires_at = datetime.now(timezone.utc) + timedelta(days=ttl_days)
|
||||
@@ -143,32 +59,14 @@ class MemoryStore:
|
||||
return memory
|
||||
|
||||
async def recall(self, key: str) -> Optional[Any]:
|
||||
"""
|
||||
Retrieves a memory value by key.
|
||||
|
||||
Args:
|
||||
key: The memory key
|
||||
|
||||
Returns:
|
||||
The stored value or None if not found/expired
|
||||
"""
|
||||
"""Retrieves a memory value by key, or None if not found/expired."""
|
||||
memory = await self.get_memory(key)
|
||||
if memory:
|
||||
return memory.value
|
||||
return None
|
||||
|
||||
async def get_memory(self, key: str) -> Optional[Memory]:
|
||||
"""
|
||||
Retrieves a full Memory object by key.
|
||||
|
||||
Updates access count and last_accessed timestamp.
|
||||
|
||||
Args:
|
||||
key: The memory key
|
||||
|
||||
Returns:
|
||||
Memory object or None if not found/expired
|
||||
"""
|
||||
"""Retrieves a full Memory object by key, updating access count."""
|
||||
# Check local cache
|
||||
if key in self._local_cache:
|
||||
memory = self._local_cache[key]
|
||||
|
||||
Reference in New Issue
Block a user