Initial implementation of autonomous Discord LLM fishbowl
Core Features:
- Full autonomous AI character ecosystem with multi-personality support
- Advanced RAG system with personal, community, and creative memory layers
- MCP integration for character self-modification and file system access
- PostgreSQL database with comprehensive character relationship tracking
- Redis caching and ChromaDB vector storage for semantic memory retrieval
- Dynamic personality evolution based on interactions and self-reflection
- Community knowledge management with tradition and norm identification
- Sophisticated conversation engine with natural scheduling and topic management
- Docker containerization and production-ready deployment configuration

Architecture:
- Multi-layer vector databases for personal, community, and creative knowledge
- Character file systems with personal and shared digital spaces
- Autonomous self-modification with safety validation and audit trails
- Memory importance scoring with time-based decay and consolidation
- Community health monitoring and cultural evolution tracking
- RAG-powered conversation context and relationship optimization

Characters can:
- Develop authentic personalities through experience-based learning
- Create and build upon original creative works and philosophical insights
- Form complex relationships with memory of past interactions
- Modify their own personality traits through self-reflection cycles
- Contribute to and learn from shared community knowledge
- Manage personal digital spaces with diaries, creative works, and reflections
- Engage in collaborative projects and community decision-making

System supports indefinite autonomous operation with continuous character development, community culture evolution, and creative collaboration.
This commit is contained in:
583
src/rag/personal_memory.py
Normal file
583
src/rag/personal_memory.py
Normal file
@@ -0,0 +1,583 @@
|
||||
import asyncio
import json
import logging
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple

from .vector_store import VectorStoreManager, VectorMemory, MemoryType
from ..database.connection import get_db_session
from ..database.models import Memory
from ..utils.logging import log_character_action, log_error_with_context, log_memory_operation
|
||||
# Module-level logger.
# NOTE(review): not referenced anywhere in this module -- logging below goes
# through the log_* helpers from utils.logging; confirm before removing.
logger = logging.getLogger(__name__)
@dataclass
class MemoryQuery:
    """Parameters for a memory-retrieval request against the vector store.

    NOTE(review): not referenced within this module -- presumably consumed by
    callers; verify before removing.
    """
    question: str                      # natural-language query text
    context: Dict[str, Any]            # caller-supplied query context
    memory_types: List[MemoryType]     # which memory collections to search
    importance_threshold: float = 0.3  # minimum importance of returned memories
    limit: int = 10                    # maximum number of memories to return
|
||||
|
||||
@dataclass
class MemoryInsight:
    """Result of analyzing a set of retrieved memories."""
    insight: str                             # natural-language summary
    confidence: float                        # 0..1 confidence in the summary
    supporting_memories: List[VectorMemory]  # memories the insight is based on
    metadata: Dict[str, Any]                 # query / diagnostic details
|
||||
|
||||
class PersonalMemoryRAG:
    """RAG system for character's personal memories and self-reflection.

    Scopes all reads/writes in the backing vector store to one character and
    layers heuristic analysis over retrieval, summarizing memories as
    MemoryInsight objects (behavioral patterns, relationship knowledge,
    creative development).  Every public method is best-effort: failures are
    logged via log_error_with_context and a neutral fallback value is
    returned instead of raising.
    """

    def __init__(self, character_name: str, vector_store: VectorStoreManager):
        """
        Args:
            character_name: Name used to scope storage and queries.
            vector_store: Backing VectorStoreManager instance.
        """
        self.character_name = character_name
        self.vector_store = vector_store

        # Memory importance weights by type.
        # NOTE(review): not referenced by any method in this class -- looks
        # intended for ranking/decay; confirm before relying on or removing.
        self.importance_weights = {
            MemoryType.PERSONAL: 1.0,
            MemoryType.RELATIONSHIP: 1.2,
            MemoryType.EXPERIENCE: 0.9,
            MemoryType.REFLECTION: 1.3,
            MemoryType.CREATIVE: 0.8
        }

        # Query templates for self-reflection, consumed by
        # perform_self_reflection_cycle().  The "{other}" placeholders in
        # relationship_insights are meant to be filled in by callers.
        self.reflection_queries = {
            "behavioral_patterns": [
                "How do I usually handle conflict?",
                "What are my typical responses to stress?",
                "How do I show affection or friendship?",
                "What makes me excited or enthusiastic?",
                "How do I react to criticism?"
            ],
            "relationship_insights": [
                "What do I know about {other}'s interests?",
                "How has my relationship with {other} evolved?",
                "What conflicts have I had with {other}?",
                "What do I appreciate most about {other}?",
                "How does {other} usually respond to me?"
            ],
            "personal_growth": [
                "How have I changed recently?",
                "What have I learned about myself?",
                "What are my evolving interests?",
                "What challenges have I overcome?",
                "What are my current goals or aspirations?"
            ],
            "creative_development": [
                "What creative ideas have I explored?",
                "How has my artistic style evolved?",
                "What philosophical concepts interest me?",
                "What original thoughts have I had?",
                "How do I approach creative problems?"
            ]
        }

    async def store_interaction_memory(self, content: str, context: Dict[str, Any],
                                       importance: Optional[float] = None) -> str:
        """Store memory of an interaction with importance scoring.

        Args:
            content: Natural-language record of the interaction.
            context: Interaction metadata; recognized keys are "type",
                "participants", "topic", "conversation_id" and "emotion".
            importance: 0..1 score; auto-calculated from content/context
                when None.

        Returns:
            The new memory id, or "" on failure (error is logged, not raised).
        """
        try:
            # Auto-calculate importance if not provided
            if importance is None:
                importance = await self._calculate_interaction_importance(content, context)

            # Determine memory type based on context
            memory_type = self._determine_memory_type(context)

            # Create memory object
            memory = VectorMemory(
                id="",  # Will be auto-generated
                content=content,
                memory_type=memory_type,
                character_name=self.character_name,
                # NOTE(review): naive UTC timestamp (utcnow); consistent with
                # the age arithmetic in get_memory_statistics, but consider
                # timezone-aware datetimes project-wide.
                timestamp=datetime.utcnow(),
                importance=importance,
                metadata={
                    "interaction_type": context.get("type", "unknown"),
                    "participants": context.get("participants", []),
                    "topic": context.get("topic", ""),
                    "conversation_id": context.get("conversation_id"),
                    "emotional_context": context.get("emotion", "neutral")
                }
            )

            # Store in vector database
            memory_id = await self.vector_store.store_memory(memory)

            log_memory_operation(
                self.character_name,
                "stored_interaction",
                memory_type.value,
                importance
            )

            return memory_id

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name, "context": context})
            return ""

    async def store_reflection_memory(self, reflection: str, reflection_type: str,
                                      importance: float = 0.8) -> str:
        """Store a self-reflection memory.

        Args:
            reflection: The reflection text.
            reflection_type: Caller-defined category label for the reflection.
            importance: 0..1 score; reflections default high (0.8).

        Returns:
            The new memory id, or "" on failure (error is logged, not raised).
        """
        try:
            memory = VectorMemory(
                id="",
                content=reflection,
                memory_type=MemoryType.REFLECTION,
                character_name=self.character_name,
                timestamp=datetime.utcnow(),
                importance=importance,
                metadata={
                    "reflection_type": reflection_type,
                    "trigger": "self_initiated",
                    # Crude depth proxy: longer reflections count as "deep".
                    "depth": "deep" if len(reflection) > 200 else "surface"
                }
            )

            memory_id = await self.vector_store.store_memory(memory)

            log_memory_operation(
                self.character_name,
                "stored_reflection",
                reflection_type,
                importance
            )

            return memory_id

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name, "reflection_type": reflection_type})
            return ""

    async def query_behavioral_patterns(self, question: str) -> MemoryInsight:
        """Query memories to understand behavioral patterns.

        Args:
            question: Natural-language question about own behavior.

        Returns:
            MemoryInsight whose confidence reflects memory count and average
            importance (capped at 0.9); a low-confidence fallback insight is
            returned when no memories match or on error.
        """
        try:
            # Search for relevant memories
            memories = await self.vector_store.query_memories(
                character_name=self.character_name,
                query=question,
                memory_types=[MemoryType.PERSONAL, MemoryType.EXPERIENCE, MemoryType.REFLECTION],
                limit=15,
                min_importance=0.4
            )

            if not memories:
                return MemoryInsight(
                    insight="I don't have enough memories to answer this question yet.",
                    confidence=0.1,
                    supporting_memories=[],
                    metadata={"query": question, "memory_count": 0}
                )

            # Analyze patterns in memories
            insight = await self._analyze_behavioral_patterns(memories, question)

            # Calculate confidence based on memory count and importance
            confidence = min(0.9, len(memories) * 0.1 + sum(m.importance for m in memories) / len(memories))

            return MemoryInsight(
                insight=insight,
                confidence=confidence,
                supporting_memories=memories[:5],  # Top 5 most relevant
                metadata={
                    "query": question,
                    "memory_count": len(memories),
                    "avg_importance": sum(m.importance for m in memories) / len(memories)
                }
            )

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name, "question": question})
            return MemoryInsight(
                insight="I'm having trouble accessing my memories right now.",
                confidence=0.0,
                supporting_memories=[],
                metadata={"error": str(e)}
            )

    async def query_relationship_knowledge(self, other_character: str,
                                           question: Optional[str] = None) -> MemoryInsight:
        """Query memories about a specific relationship.

        Args:
            other_character: Name of the other character.
            question: Optional focus question; a generic one is used when None.

        Returns:
            MemoryInsight over memories that actually mention the other
            character; low-confidence fallback when none exist or on error.
        """
        try:
            # Default question if none provided
            if not question:
                question = f"What do I know about {other_character}?"

            # Search for relationship memories
            memories = await self.vector_store.query_memories(
                character_name=self.character_name,
                query=f"{other_character} {question}",
                memory_types=[MemoryType.RELATIONSHIP, MemoryType.PERSONAL, MemoryType.EXPERIENCE],
                limit=10,
                min_importance=0.3
            )

            # Filter memories that actually mention the other character
            relevant_memories = [
                m for m in memories
                if other_character.lower() in m.content.lower() or
                other_character in m.metadata.get("participants", [])
            ]

            if not relevant_memories:
                return MemoryInsight(
                    insight=f"I don't have many specific memories about {other_character} yet.",
                    confidence=0.2,
                    supporting_memories=[],
                    metadata={"other_character": other_character, "query": question}
                )

            # Analyze relationship dynamics
            insight = await self._analyze_relationship_dynamics(relevant_memories, other_character, question)

            confidence = min(0.9, len(relevant_memories) * 0.15 +
                             sum(m.importance for m in relevant_memories) / len(relevant_memories))

            return MemoryInsight(
                insight=insight,
                confidence=confidence,
                supporting_memories=relevant_memories[:5],
                metadata={
                    "other_character": other_character,
                    "query": question,
                    "memory_count": len(relevant_memories)
                }
            )

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name, "other_character": other_character})
            return MemoryInsight(
                insight=f"I'm having trouble recalling my interactions with {other_character}.",
                confidence=0.0,
                supporting_memories=[],
                metadata={"error": str(e)}
            )

    async def query_creative_knowledge(self, creative_query: str) -> MemoryInsight:
        """Query creative memories and ideas.

        Combines dedicated creative knowledge with high-importance
        reflections that may carry creative insight.

        Args:
            creative_query: Natural-language creative topic or question.

        Returns:
            MemoryInsight; low-confidence fallback when nothing matches or
            on error.
        """
        try:
            # Search creative memories
            creative_memories = await self.vector_store.get_creative_knowledge(
                character_name=self.character_name,
                query=creative_query,
                limit=8
            )

            # Also search reflections that might contain creative insights
            reflection_memories = await self.vector_store.query_memories(
                character_name=self.character_name,
                query=creative_query,
                memory_types=[MemoryType.REFLECTION],
                limit=5,
                min_importance=0.5
            )

            all_memories = creative_memories + reflection_memories

            if not all_memories:
                return MemoryInsight(
                    insight="I haven't explored this creative area much yet, but it sounds intriguing.",
                    confidence=0.2,
                    supporting_memories=[],
                    metadata={"query": creative_query, "memory_count": 0}
                )

            # Analyze creative development
            insight = await self._analyze_creative_development(all_memories, creative_query)

            confidence = min(0.9, len(all_memories) * 0.12 +
                             sum(m.importance for m in all_memories) / len(all_memories))

            return MemoryInsight(
                insight=insight,
                confidence=confidence,
                supporting_memories=all_memories[:5],
                metadata={
                    "query": creative_query,
                    "memory_count": len(all_memories),
                    "creative_memories": len(creative_memories),
                    "reflection_memories": len(reflection_memories)
                }
            )

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name, "query": creative_query})
            return MemoryInsight(
                insight="I'm having trouble accessing my creative thoughts right now.",
                confidence=0.0,
                supporting_memories=[],
                metadata={"error": str(e)}
            )

    async def perform_self_reflection_cycle(self) -> Dict[str, MemoryInsight]:
        """Perform comprehensive self-reflection using memory queries.

        Runs every template group in self.reflection_queries except
        relationship_insights (those need a concrete "{other}"), keeps
        insights with confidence > 0.3, and synthesizes one combined
        MemoryInsight per group.

        Returns:
            Mapping of pattern type -> synthesized MemoryInsight; empty dict
            on error.
        """
        try:
            reflections = {}

            # Behavioral pattern analysis
            for pattern_type, queries in self.reflection_queries.items():
                if pattern_type == "relationship_insights":
                    continue  # Skip relationship queries for general reflection

                pattern_insights = []
                for query in queries:
                    if pattern_type == "creative_development":
                        insight = await self.query_creative_knowledge(query)
                    else:
                        insight = await self.query_behavioral_patterns(query)

                    if insight.confidence > 0.3:
                        pattern_insights.append(insight)

                if pattern_insights:
                    # Synthesize insights for this pattern type
                    combined_insight = await self._synthesize_pattern_insights(pattern_insights, pattern_type)
                    reflections[pattern_type] = combined_insight

            log_character_action(
                self.character_name,
                "completed_reflection_cycle",
                {"reflection_areas": len(reflections)}
            )

            return reflections

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name})
            return {}

    async def get_memory_statistics(self) -> Dict[str, Any]:
        """Get statistics about character's memory system.

        Returns:
            The store's own statistics augmented with importance-distribution
            figures over up to 100 recent personal/relationship/experience
            memories, or {"error": ...} on failure.
        """
        try:
            # NOTE(review): get_store_statistics is called synchronously while
            # every other store call here is awaited -- verify its signature.
            stats = self.vector_store.get_store_statistics(self.character_name)

            # Add RAG-specific statistics: memory importance distribution.
            personal_memories = await self.vector_store.query_memories(
                character_name=self.character_name,
                query="",  # Empty query to get recent memories
                memory_types=[MemoryType.PERSONAL, MemoryType.RELATIONSHIP, MemoryType.EXPERIENCE],
                limit=100
            )

            if personal_memories:
                importance_scores = [m.importance for m in personal_memories]
                stats.update({
                    "avg_memory_importance": sum(importance_scores) / len(importance_scores),
                    "high_importance_memories": len([s for s in importance_scores if s > 0.7]),
                    "recent_memory_count": len([m for m in personal_memories
                                                if (datetime.utcnow() - m.timestamp).days < 7])
                })

            return stats

        except Exception as e:
            log_error_with_context(e, {"character": self.character_name})
            return {"error": str(e)}

    @staticmethod
    def _words(text: str) -> List[str]:
        """Lower-cased word tokens of *text* (letters/apostrophes only)."""
        return re.findall(r"[a-z']+", text.lower())

    @staticmethod
    def _mentions_any(tokens: List[str], keywords: List[str]) -> bool:
        """True if any token starts with any keyword (stem-style match)."""
        return any(tok.startswith(word) for word in keywords for tok in tokens)

    async def _calculate_interaction_importance(self, content: str, context: Dict[str, Any]) -> float:
        """Calculate importance score for an interaction.

        Heuristic: start at 0.5 and add fixed boosts for emotional content,
        personal revelations, relationship context, first-time experiences
        and creative expression; capped at 1.0.

        Fix over the original: keyword checks match word tokens by prefix
        instead of raw substrings, so e.g. "art" no longer fires on "start"
        or "part" while stems like "learn" still match "learning".
        """
        base_importance = 0.5
        tokens = self._words(content)
        lowered = content.lower()

        # Boost importance for emotional content
        if self._mentions_any(tokens, ["love", "hate", "excited", "sad", "angry", "happy", "surprised", "fear"]):
            base_importance += 0.2

        # Boost importance for personal revelations
        if self._mentions_any(tokens, ["realize", "understand", "learn", "discover", "feel", "think"]):
            base_importance += 0.15

        # Boost importance for relationship interactions
        if context.get("type") == "relationship" or len(context.get("participants", [])) > 1:
            base_importance += 0.1

        # Boost importance for first-time experiences (phrase match on purpose)
        if "first time" in lowered or "never" in lowered:
            base_importance += 0.2

        # Boost importance for creative expressions
        if self._mentions_any(tokens, ["create", "imagine", "design", "compose", "write", "art"]):
            base_importance += 0.1

        return min(1.0, base_importance)

    def _determine_memory_type(self, context: Dict[str, Any]) -> MemoryType:
        """Determine appropriate memory type based on context.

        Precedence: reflection > creative > multi-participant relationship >
        experience/event > personal (default).  Substring checks against the
        context "type" string are intentional (e.g. "creative_writing").
        """
        interaction_type = context.get("type", "").lower()

        if "reflection" in interaction_type:
            return MemoryType.REFLECTION
        elif "creative" in interaction_type or "art" in interaction_type:
            return MemoryType.CREATIVE
        elif len(context.get("participants", [])) > 1:
            return MemoryType.RELATIONSHIP
        elif "experience" in interaction_type or "event" in interaction_type:
            return MemoryType.EXPERIENCE
        else:
            return MemoryType.PERSONAL

    async def _analyze_behavioral_patterns(self, memories: List[VectorMemory], question: str) -> str:
        """Analyze memories to identify behavioral patterns.

        Conflict/stress questions get dedicated excerpt summaries; everything
        else falls back to an importance-weighted word-frequency theme list.
        """
        if not memories:
            return "I don't have enough memories to identify patterns yet."

        # Extract key themes from memories (importance-weighted word counts)
        themes = {}
        for memory in memories:
            content_words = memory.content.lower().split()
            for word in content_words:
                if len(word) > 4:  # Only consider longer words
                    themes[word] = themes.get(word, 0) + memory.importance

        # Sort themes by importance-weighted frequency
        top_themes = sorted(themes.items(), key=lambda x: x[1], reverse=True)[:5]

        # Construct insight based on patterns
        if "conflict" in question.lower():
            conflict_approaches = []
            for memory in memories:
                if any(word in memory.content.lower() for word in ["disagree", "argue", "conflict", "problem"]):
                    conflict_approaches.append(memory.content[:100])

            if conflict_approaches:
                return f"When dealing with conflict, I tend to: {'; '.join(conflict_approaches[:2])}..."
            else:
                return "I don't seem to have many experiences with conflict yet."

        elif "stress" in question.lower():
            stress_responses = []
            for memory in memories:
                if any(word in memory.content.lower() for word in ["stress", "pressure", "overwhelm", "difficult"]):
                    stress_responses.append(memory.content[:100])

            if stress_responses:
                return f"Under stress, I typically: {'; '.join(stress_responses[:2])}..."
            else:
                return "I haven't encountered much stress in my recent experiences."

        else:
            # General pattern analysis
            if top_themes:
                theme_words = [theme[0] for theme in top_themes[:3]]
                return f"Looking at my memories, I notice patterns around: {', '.join(theme_words)}. These seem to be important themes in my experiences."
            else:
                return "I'm still developing patterns in my behavior and experiences."

    async def _analyze_relationship_dynamics(self, memories: List[VectorMemory],
                                             other_character: str, question: str) -> str:
        """Analyze relationship-specific memories.

        Buckets memories by sentiment keywords, sketches the relationship's
        evolution from earliest to latest memory, and tailors the answer to
        "interests" questions.
        """
        if not memories:
            return f"I don't have many specific memories about {other_character} yet."

        # Categorize interactions by crude sentiment keywords
        positive_interactions = []
        negative_interactions = []
        neutral_interactions = []

        for memory in memories:
            content_lower = memory.content.lower()
            if any(word in content_lower for word in ["like", "enjoy", "appreciate", "agree", "wonderful"]):
                positive_interactions.append(memory)
            elif any(word in content_lower for word in ["dislike", "disagree", "annoying", "conflict"]):
                negative_interactions.append(memory)
            else:
                neutral_interactions.append(memory)

        # Analyze relationship evolution
        if len(memories) > 1:
            earliest = min(memories, key=lambda m: m.timestamp)
            latest = max(memories, key=lambda m: m.timestamp)

            relationship_evolution = f"My relationship with {other_character} started when {earliest.content[:50]}... and more recently {latest.content[:50]}..."
        else:
            relationship_evolution = f"I have limited interaction history with {other_character}."

        # Construct insight
        if "interests" in question.lower():
            interests = []
            for memory in memories:
                if any(word in memory.content.lower() for word in ["like", "love", "enjoy", "interested"]):
                    interests.append(memory.content[:80])

            if interests:
                return f"About {other_character}'s interests: {'; '.join(interests[:2])}..."
            else:
                return f"I need to learn more about {other_character}'s interests."

        else:
            # General relationship summary
            pos_count = len(positive_interactions)
            neg_count = len(negative_interactions)

            if pos_count > neg_count:
                return f"My relationship with {other_character} seems positive. {relationship_evolution}"
            elif neg_count > pos_count:
                return f"I've had some challenging interactions with {other_character}. {relationship_evolution}"
            else:
                return f"My relationship with {other_character} is developing. {relationship_evolution}"

    async def _analyze_creative_development(self, memories: List[VectorMemory], query: str) -> str:
        """Analyze creative memories and development.

        With two or more memories, contrasts the earliest and latest by
        timestamp; otherwise falls back to the first creative excerpt or a
        generic statement.
        """
        if not memories:
            return "I haven't explored this creative area much yet, but it sounds intriguing."

        # Extract creative themes
        creative_themes = []
        for memory in memories:
            if memory.memory_type == MemoryType.CREATIVE:
                creative_themes.append(memory.content[:100])

        # Analyze creative evolution
        if len(memories) > 1:
            memories_by_time = sorted(memories, key=lambda m: m.timestamp)
            earliest_creative = memories_by_time[0].content[:80]
            latest_creative = memories_by_time[-1].content[:80]

            return f"My creative journey in this area started with: {earliest_creative}... and has evolved to: {latest_creative}..."
        elif creative_themes:
            return f"I've been exploring: {creative_themes[0]}..."
        else:
            return f"This relates to my broader thinking about creativity and expression."

    async def _synthesize_pattern_insights(self, insights: List[MemoryInsight],
                                           pattern_type: str) -> MemoryInsight:
        """Synthesize multiple insights into a comprehensive understanding.

        Concatenates up to three insight texts, pools supporting memories,
        and averages confidence with a small synthesis boost (capped 0.95).
        """
        if not insights:
            return MemoryInsight(
                insight=f"I haven't developed clear patterns in {pattern_type} yet.",
                confidence=0.1,
                supporting_memories=[],
                metadata={"pattern_type": pattern_type}
            )

        # Combine insights
        combined_content = []
        all_memories = []
        total_confidence = 0

        for insight in insights:
            combined_content.append(insight.insight)
            all_memories.extend(insight.supporting_memories)
            total_confidence += insight.confidence

        avg_confidence = total_confidence / len(insights)

        # Create synthesized insight
        synthesis = f"Reflecting on my {pattern_type}: " + " ".join(combined_content[:3])

        return MemoryInsight(
            insight=synthesis,
            confidence=min(0.95, avg_confidence * 1.1),  # Slight boost for synthesis
            supporting_memories=all_memories[:10],  # Top 10 most relevant
            metadata={
                "pattern_type": pattern_type,
                "synthesized_from": len(insights),
                "total_memories": len(all_memories)
            }
        )
|
||||
Reference in New Issue
Block a user