Implement comprehensive LLM provider system with global cost protection
- Add multi-provider LLM architecture supporting OpenRouter, OpenAI, Gemini, and custom providers - Implement global LLM on/off switch with default DISABLED state for cost protection - Add per-character LLM configuration with provider-specific models and settings - Create performance-optimized caching system for LLM enabled status checks - Add API key validation before enabling LLM providers to prevent broken configurations - Implement audit logging for all LLM enable/disable actions for cost accountability - Create comprehensive admin UI with prominent cost warnings and confirmation dialogs - Add visual indicators in character list for custom AI model configurations - Build character-specific LLM client system with global fallback mechanism - Add database schema support for per-character LLM settings - Implement graceful fallback responses when LLM is globally disabled - Create provider testing and validation system for reliable connections
This commit is contained in:
@@ -18,13 +18,13 @@ LLM_MODEL=koboldcpp/Broken-Tutu-24B-Transgression-v2.0.i1-Q4_K_M
|
|||||||
LLM_TIMEOUT=300
|
LLM_TIMEOUT=300
|
||||||
LLM_MAX_TOKENS=2000
|
LLM_MAX_TOKENS=2000
|
||||||
LLM_TEMPERATURE=0.8
|
LLM_TEMPERATURE=0.8
|
||||||
LLM_MAX_PROMPT_LENGTH=6000
|
LLM_MAX_PROMPT_LENGTH=16000
|
||||||
LLM_MAX_HISTORY_MESSAGES=5
|
LLM_MAX_HISTORY_MESSAGES=5
|
||||||
LLM_MAX_MEMORIES=5
|
LLM_MAX_MEMORIES=5
|
||||||
|
|
||||||
# Admin Interface
|
# Admin Interface
|
||||||
ADMIN_PORT=8294
|
ADMIN_PORT=8294
|
||||||
SECRET_KEY=your-secret-key-here
|
SECRET_KEY=stable-secret-key-for-jwt-tokens-fishbowl-2025
|
||||||
ADMIN_USERNAME=admin
|
ADMIN_USERNAME=admin
|
||||||
ADMIN_PASSWORD=FIre!@34
|
ADMIN_PASSWORD=FIre!@34
|
||||||
|
|
||||||
|
|||||||
25
.env.example
25
.env.example
@@ -35,7 +35,7 @@ LLM_API_KEY=x
|
|||||||
LLM_TIMEOUT=300
|
LLM_TIMEOUT=300
|
||||||
LLM_MAX_TOKENS=2000
|
LLM_MAX_TOKENS=2000
|
||||||
LLM_TEMPERATURE=0.8
|
LLM_TEMPERATURE=0.8
|
||||||
LLM_MAX_PROMPT_LENGTH=6000
|
LLM_MAX_PROMPT_LENGTH=16000
|
||||||
LLM_MAX_HISTORY_MESSAGES=5
|
LLM_MAX_HISTORY_MESSAGES=5
|
||||||
LLM_MAX_MEMORIES=5
|
LLM_MAX_MEMORIES=5
|
||||||
|
|
||||||
@@ -46,6 +46,29 @@ ADMIN_USERNAME=admin
|
|||||||
ADMIN_PASSWORD=FIre!@34
|
ADMIN_PASSWORD=FIre!@34
|
||||||
SECRET_KEY=CAKUZ5ds49B1PUEWDWt07TdgxjTtDvvxOOkvOOfbnDE
|
SECRET_KEY=CAKUZ5ds49B1PUEWDWt07TdgxjTtDvvxOOkvOOfbnDE
|
||||||
|
|
||||||
|
# LLM Provider Configuration
|
||||||
|
# OpenRouter (supports Claude, GPT, Llama, etc.)
|
||||||
|
OPENROUTER_ENABLED=false
|
||||||
|
OPENROUTER_API_KEY=
|
||||||
|
OPENROUTER_MODEL=anthropic/claude-3-sonnet
|
||||||
|
|
||||||
|
# OpenAI
|
||||||
|
OPENAI_ENABLED=false
|
||||||
|
OPENAI_API_KEY=
|
||||||
|
OPENAI_MODEL=gpt-4o-mini
|
||||||
|
|
||||||
|
# Google Gemini
|
||||||
|
GEMINI_ENABLED=false
|
||||||
|
GEMINI_API_KEY=
|
||||||
|
GEMINI_MODEL=gemini-1.5-flash
|
||||||
|
|
||||||
|
# Custom/Local LLM (current setup)
|
||||||
|
CUSTOM_LLM_ENABLED=true
|
||||||
|
|
||||||
|
# Ollama
|
||||||
|
OLLAMA_ENABLED=false
|
||||||
|
OLLAMA_MODEL=llama3
|
||||||
|
|
||||||
# System Configuration
|
# System Configuration
|
||||||
CONVERSATION_FREQUENCY=0.5
|
CONVERSATION_FREQUENCY=0.5
|
||||||
RESPONSE_DELAY_MIN=1.0
|
RESPONSE_DELAY_MIN=1.0
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ RUN apt-get update && apt-get install -y \
|
|||||||
&& rm -rf /var/lib/apt/lists/*
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Install Node.js for frontend build
|
# Install Node.js for frontend build
|
||||||
RUN curl -fsSL https://deb.nodesource.com/setup_18.x | bash - \
|
RUN curl -fsSL https://deb.nodesource.com/setup_16.x | bash - \
|
||||||
&& apt-get install -y nodejs
|
&& apt-get install -y nodejs
|
||||||
|
|
||||||
# Copy requirements first for better caching
|
# Copy requirements first for better caching
|
||||||
@@ -26,20 +26,30 @@ COPY migrations/ ./migrations/
|
|||||||
COPY alembic.ini ./
|
COPY alembic.ini ./
|
||||||
|
|
||||||
# Build frontend
|
# Build frontend
|
||||||
COPY admin-frontend/ ./admin-frontend/
|
COPY admin-frontend/package*.json ./admin-frontend/
|
||||||
WORKDIR /app/admin-frontend
|
WORKDIR /app/admin-frontend
|
||||||
|
|
||||||
# Clear any existing node_modules and lock files
|
# Install dependencies first (better caching)
|
||||||
RUN rm -rf node_modules package-lock.json yarn.lock
|
RUN npm install --silent
|
||||||
|
|
||||||
# Install dependencies with npm (using .npmrc config)
|
# Copy frontend source code
|
||||||
RUN npm install
|
COPY admin-frontend/ ./
|
||||||
|
|
||||||
# Build with increased memory for Node.js
|
# Build with increased memory for Node.js and disable optimization
|
||||||
ENV NODE_OPTIONS="--max-old-space-size=4096"
|
ENV NODE_OPTIONS="--max-old-space-size=4096"
|
||||||
# Build React app or create fallback
|
ENV GENERATE_SOURCEMAP=false
|
||||||
RUN npm run build || mkdir -p build
|
ENV DISABLE_ESLINT_PLUGIN=true
|
||||||
RUN test -f build/index.html || echo "<html><body><h1>Discord Fishbowl Admin</h1><p>Interface loading...</p></body></html>" > build/index.html
|
ENV CI=false
|
||||||
|
ENV REACT_APP_API_URL=""
|
||||||
|
ENV PUBLIC_URL="/admin"
|
||||||
|
ENV TSC_COMPILE_ON_ERROR=true
|
||||||
|
ENV ESLINT_NO_DEV_ERRORS=true
|
||||||
|
|
||||||
|
# Build React app
|
||||||
|
RUN npm run build
|
||||||
|
|
||||||
|
# Verify build output
|
||||||
|
RUN ls -la build/ && test -f build/index.html
|
||||||
|
|
||||||
# Back to main directory
|
# Back to main directory
|
||||||
WORKDIR /app
|
WORKDIR /app
|
||||||
@@ -51,7 +61,7 @@ RUN mkdir -p logs
|
|||||||
ENV PYTHONPATH=/app/src
|
ENV PYTHONPATH=/app/src
|
||||||
|
|
||||||
# Expose admin port
|
# Expose admin port
|
||||||
EXPOSE 8000
|
EXPOSE 8294
|
||||||
|
|
||||||
# Run the admin interface
|
# Run the admin interface
|
||||||
CMD ["python", "-m", "src.admin.app"]
|
CMD ["python", "-m", "src.admin.app"]
|
||||||
32
admin-frontend/package-simple.json
Normal file
32
admin-frontend/package-simple.json
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
{
|
||||||
|
"name": "discord-fishbowl-admin",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"private": true,
|
||||||
|
"dependencies": {
|
||||||
|
"react": "^18.2.0",
|
||||||
|
"react-dom": "^18.2.0",
|
||||||
|
"react-router-dom": "^6.8.0",
|
||||||
|
"axios": "^1.6.0"
|
||||||
|
},
|
||||||
|
"scripts": {
|
||||||
|
"start": "react-scripts start",
|
||||||
|
"build": "react-scripts build",
|
||||||
|
"test": "react-scripts test",
|
||||||
|
"eject": "react-scripts eject"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"react-scripts": "5.0.1"
|
||||||
|
},
|
||||||
|
"browserslist": {
|
||||||
|
"production": [
|
||||||
|
">0.2%",
|
||||||
|
"not dead",
|
||||||
|
"not op_mini all"
|
||||||
|
],
|
||||||
|
"development": [
|
||||||
|
"last 1 chrome version",
|
||||||
|
"last 1 firefox version",
|
||||||
|
"last 1 safari version"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -2,6 +2,7 @@
|
|||||||
"name": "discord-fishbowl-admin",
|
"name": "discord-fishbowl-admin",
|
||||||
"version": "1.0.0",
|
"version": "1.0.0",
|
||||||
"private": true,
|
"private": true,
|
||||||
|
"homepage": "/admin",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@types/node": "^20.0.0",
|
"@types/node": "^20.0.0",
|
||||||
"@types/react": "^18.2.0",
|
"@types/react": "^18.2.0",
|
||||||
@@ -9,7 +10,7 @@
|
|||||||
"react": "^18.2.0",
|
"react": "^18.2.0",
|
||||||
"react-dom": "^18.2.0",
|
"react-dom": "^18.2.0",
|
||||||
"react-router-dom": "^6.8.0",
|
"react-router-dom": "^6.8.0",
|
||||||
"react-scripts": "5.0.1",
|
"react-scripts": "^5.0.1",
|
||||||
"typescript": "^4.9.5",
|
"typescript": "^4.9.5",
|
||||||
"web-vitals": "^3.0.0",
|
"web-vitals": "^3.0.0",
|
||||||
"@tailwindcss/forms": "^0.5.0",
|
"@tailwindcss/forms": "^0.5.0",
|
||||||
@@ -53,15 +54,8 @@
|
|||||||
]
|
]
|
||||||
},
|
},
|
||||||
"devDependencies": {
|
"devDependencies": {
|
||||||
"@types/jest": "^29.0.0"
|
"@types/jest": "^29.0.0",
|
||||||
|
"react-scripts": "5.0.1"
|
||||||
},
|
},
|
||||||
"resolutions": {
|
"proxy": "http://localhost:8000"
|
||||||
"schema-utils": "^3.3.0",
|
|
||||||
"fork-ts-checker-webpack-plugin": "^6.5.3"
|
|
||||||
},
|
|
||||||
"overrides": {
|
|
||||||
"schema-utils": "^3.3.0",
|
|
||||||
"fork-ts-checker-webpack-plugin": "^6.5.3"
|
|
||||||
},
|
|
||||||
"proxy": "http://localhost:8294"
|
|
||||||
}
|
}
|
||||||
BIN
admin-frontend/public/favicon.ico
Normal file
BIN
admin-frontend/public/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 127 B |
@@ -3,14 +3,12 @@ import { Routes, Route, Navigate } from 'react-router-dom';
|
|||||||
import { useAuth } from './contexts/AuthContext';
|
import { useAuth } from './contexts/AuthContext';
|
||||||
import Layout from './components/Layout/Layout';
|
import Layout from './components/Layout/Layout';
|
||||||
import LoginPage from './pages/LoginPage';
|
import LoginPage from './pages/LoginPage';
|
||||||
import Dashboard from './pages/Dashboard';
|
|
||||||
import Characters from './pages/Characters';
|
import Characters from './pages/Characters';
|
||||||
import CharacterDetail from './pages/CharacterDetail';
|
import CharacterDetail from './pages/CharacterDetail';
|
||||||
import Conversations from './pages/Conversations';
|
|
||||||
import ConversationDetail from './pages/ConversationDetail';
|
|
||||||
import Analytics from './pages/Analytics';
|
|
||||||
import SystemStatus from './pages/SystemStatus';
|
import SystemStatus from './pages/SystemStatus';
|
||||||
import Settings from './pages/Settings';
|
import Settings from './pages/Settings';
|
||||||
|
import LiveChat from './pages/LiveChat';
|
||||||
|
import Guide from './pages/Guide';
|
||||||
import LoadingSpinner from './components/Common/LoadingSpinner';
|
import LoadingSpinner from './components/Common/LoadingSpinner';
|
||||||
|
|
||||||
function App() {
|
function App() {
|
||||||
@@ -31,16 +29,14 @@ function App() {
|
|||||||
return (
|
return (
|
||||||
<Layout>
|
<Layout>
|
||||||
<Routes>
|
<Routes>
|
||||||
<Route path="/" element={<Navigate to="/dashboard" replace />} />
|
<Route path="/" element={<Navigate to="/characters" replace />} />
|
||||||
<Route path="/dashboard" element={<Dashboard />} />
|
|
||||||
<Route path="/characters" element={<Characters />} />
|
<Route path="/characters" element={<Characters />} />
|
||||||
<Route path="/characters/:characterName" element={<CharacterDetail />} />
|
<Route path="/characters/:characterName" element={<CharacterDetail />} />
|
||||||
<Route path="/conversations" element={<Conversations />} />
|
|
||||||
<Route path="/conversations/:conversationId" element={<ConversationDetail />} />
|
|
||||||
<Route path="/analytics" element={<Analytics />} />
|
|
||||||
<Route path="/system" element={<SystemStatus />} />
|
|
||||||
<Route path="/settings" element={<Settings />} />
|
<Route path="/settings" element={<Settings />} />
|
||||||
<Route path="*" element={<Navigate to="/dashboard" replace />} />
|
<Route path="/system" element={<SystemStatus />} />
|
||||||
|
<Route path="/live-chat" element={<LiveChat />} />
|
||||||
|
<Route path="/guide" element={<Guide />} />
|
||||||
|
<Route path="*" element={<Navigate to="/characters" replace />} />
|
||||||
</Routes>
|
</Routes>
|
||||||
</Layout>
|
</Layout>
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -0,0 +1,295 @@
|
|||||||
|
import React, { useState } from 'react';
|
||||||
|
import { X, Save, User, Brain, FileText } from 'lucide-react';
|
||||||
|
import { apiClient } from '../../services/api';
|
||||||
|
import LoadingSpinner from '../Common/LoadingSpinner';
|
||||||
|
import toast from 'react-hot-toast';
|
||||||
|
|
||||||
|
interface Character {
|
||||||
|
name: string;
|
||||||
|
status: 'active' | 'idle' | 'reflecting' | 'offline';
|
||||||
|
is_active: boolean;
|
||||||
|
last_active?: string;
|
||||||
|
personality?: string;
|
||||||
|
system_prompt?: string;
|
||||||
|
interests?: string[];
|
||||||
|
speaking_style?: string;
|
||||||
|
background?: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface CharacterCreationModalProps {
|
||||||
|
isOpen: boolean;
|
||||||
|
onClose: () => void;
|
||||||
|
onCharacterCreated: (character: Character) => void;
|
||||||
|
}
|
||||||
|
|
||||||
|
const CharacterCreationModal: React.FC<CharacterCreationModalProps> = ({
|
||||||
|
isOpen,
|
||||||
|
onClose,
|
||||||
|
onCharacterCreated
|
||||||
|
}) => {
|
||||||
|
const [formData, setFormData] = useState({
|
||||||
|
name: '',
|
||||||
|
personality: '',
|
||||||
|
system_prompt: `You are a character named {{name}}. You have the following personality: {{personality}}
|
||||||
|
|
||||||
|
Your speaking style is {{speaking_style}}. You are interested in {{interests}}.
|
||||||
|
|
||||||
|
Background: {{background}}
|
||||||
|
|
||||||
|
When responding to messages:
|
||||||
|
1. Stay in character at all times
|
||||||
|
2. Reference your personality and interests naturally
|
||||||
|
3. Engage authentically with other characters
|
||||||
|
4. Show growth and development over time
|
||||||
|
|
||||||
|
Remember to be consistent with your established personality while allowing for natural character development through interactions.`,
|
||||||
|
interests: '',
|
||||||
|
speaking_style: '',
|
||||||
|
background: '',
|
||||||
|
is_active: true
|
||||||
|
});
|
||||||
|
const [saving, setSaving] = useState(false);
|
||||||
|
|
||||||
|
const handleInputChange = (field: keyof typeof formData, value: any) => {
|
||||||
|
setFormData(prev => ({ ...prev, [field]: value }));
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleInterestsChange = (interestsText: string) => {
|
||||||
|
handleInputChange('interests', interestsText);
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleSubmit = async (e: React.FormEvent) => {
|
||||||
|
e.preventDefault();
|
||||||
|
|
||||||
|
if (!formData.name.trim()) {
|
||||||
|
toast.error('Character name is required');
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
setSaving(true);
|
||||||
|
|
||||||
|
const characterData = {
|
||||||
|
name: formData.name.trim(),
|
||||||
|
personality: formData.personality,
|
||||||
|
system_prompt: formData.system_prompt.replace('{{name}}', formData.name.trim()),
|
||||||
|
interests: formData.interests.split(',').map(s => s.trim()).filter(s => s.length > 0),
|
||||||
|
speaking_style: formData.speaking_style,
|
||||||
|
background: formData.background,
|
||||||
|
is_active: formData.is_active
|
||||||
|
};
|
||||||
|
|
||||||
|
const response = await apiClient.createCharacter(characterData);
|
||||||
|
|
||||||
|
// Create character object for local state
|
||||||
|
const newCharacter: Character = {
|
||||||
|
name: characterData.name,
|
||||||
|
status: characterData.is_active ? 'active' : 'offline',
|
||||||
|
is_active: characterData.is_active,
|
||||||
|
personality: characterData.personality,
|
||||||
|
system_prompt: characterData.system_prompt,
|
||||||
|
interests: characterData.interests,
|
||||||
|
speaking_style: characterData.speaking_style,
|
||||||
|
background: characterData.background,
|
||||||
|
last_active: new Date().toISOString()
|
||||||
|
};
|
||||||
|
|
||||||
|
onCharacterCreated(newCharacter);
|
||||||
|
toast.success(`Character ${characterData.name} created successfully!`);
|
||||||
|
|
||||||
|
// Reset form
|
||||||
|
setFormData({
|
||||||
|
name: '',
|
||||||
|
personality: '',
|
||||||
|
system_prompt: `You are a character named {{name}}. You have the following personality: {{personality}}
|
||||||
|
|
||||||
|
Your speaking style is {{speaking_style}}. You are interested in {{interests}}.
|
||||||
|
|
||||||
|
Background: {{background}}
|
||||||
|
|
||||||
|
When responding to messages:
|
||||||
|
1. Stay in character at all times
|
||||||
|
2. Reference your personality and interests naturally
|
||||||
|
3. Engage authentically with other characters
|
||||||
|
4. Show growth and development over time
|
||||||
|
|
||||||
|
Remember to be consistent with your established personality while allowing for natural character development through interactions.`,
|
||||||
|
interests: '',
|
||||||
|
speaking_style: '',
|
||||||
|
background: '',
|
||||||
|
is_active: true
|
||||||
|
});
|
||||||
|
|
||||||
|
} catch (error: any) {
|
||||||
|
console.error('Failed to create character:', error);
|
||||||
|
toast.error(error.response?.data?.detail || 'Failed to create character');
|
||||||
|
} finally {
|
||||||
|
setSaving(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if (!isOpen) return null;
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="fixed inset-0 bg-black bg-opacity-50 flex items-center justify-center z-50 p-4">
|
||||||
|
<div className="bg-white rounded-lg shadow-xl w-full max-w-4xl max-h-[90vh] overflow-hidden">
|
||||||
|
{/* Header */}
|
||||||
|
<div className="flex items-center justify-between p-6 border-b border-gray-200">
|
||||||
|
<h2 className="text-xl font-semibold text-gray-900">Create New Character</h2>
|
||||||
|
<button
|
||||||
|
onClick={onClose}
|
||||||
|
className="p-2 text-gray-400 hover:text-gray-600 rounded-lg hover:bg-gray-100"
|
||||||
|
>
|
||||||
|
<X className="w-5 h-5" />
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Form */}
|
||||||
|
<div className="overflow-y-auto max-h-[calc(90vh-120px)]">
|
||||||
|
<form onSubmit={handleSubmit} className="p-6 space-y-6">
|
||||||
|
{/* Basic Info */}
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-2 gap-6">
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<User className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Basic Information</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Character Name *
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={formData.name}
|
||||||
|
onChange={(e) => handleInputChange('name', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="Enter character name..."
|
||||||
|
required
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Personality Description
|
||||||
|
</label>
|
||||||
|
<textarea
|
||||||
|
value={formData.personality}
|
||||||
|
onChange={(e) => handleInputChange('personality', e.target.value)}
|
||||||
|
rows={4}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="Describe the character's personality traits, quirks, and general demeanor..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Interests (comma-separated)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={formData.interests}
|
||||||
|
onChange={(e) => handleInterestsChange(e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="music, philosophy, art, technology..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Speaking Style
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={formData.speaking_style}
|
||||||
|
onChange={(e) => handleInputChange('speaking_style', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="formal, casual, poetic, technical..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Background
|
||||||
|
</label>
|
||||||
|
<textarea
|
||||||
|
value={formData.background}
|
||||||
|
onChange={(e) => handleInputChange('background', e.target.value)}
|
||||||
|
rows={4}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="Describe the character's backstory, history, and experiences..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="flex items-center space-x-2 cursor-pointer">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={formData.is_active}
|
||||||
|
onChange={(e) => handleInputChange('is_active', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-primary-600 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<span className="text-sm text-gray-700">Start character as active</span>
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* System Prompt */}
|
||||||
|
<div>
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Brain className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">System Prompt</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<p className="text-sm text-gray-600">
|
||||||
|
The system prompt defines how the character behaves and responds.
|
||||||
|
You can customize this template or write your own.
|
||||||
|
</p>
|
||||||
|
|
||||||
|
<textarea
|
||||||
|
value={formData.system_prompt}
|
||||||
|
onChange={(e) => handleInputChange('system_prompt', e.target.value)}
|
||||||
|
rows={20}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500 font-mono text-sm"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</form>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Footer */}
|
||||||
|
<div className="flex items-center justify-end space-x-3 p-6 border-t border-gray-200 bg-gray-50">
|
||||||
|
<button
|
||||||
|
type="button"
|
||||||
|
onClick={onClose}
|
||||||
|
disabled={saving}
|
||||||
|
className="btn-secondary disabled:opacity-50 disabled:cursor-not-allowed"
|
||||||
|
>
|
||||||
|
Cancel
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
onClick={handleSubmit}
|
||||||
|
disabled={saving || !formData.name.trim()}
|
||||||
|
className="btn-primary disabled:opacity-50 disabled:cursor-not-allowed"
|
||||||
|
>
|
||||||
|
{saving ? (
|
||||||
|
<>
|
||||||
|
<LoadingSpinner size="sm" />
|
||||||
|
<span className="ml-2">Creating...</span>
|
||||||
|
</>
|
||||||
|
) : (
|
||||||
|
<>
|
||||||
|
<Save className="w-4 h-4 mr-2" />
|
||||||
|
Create Character
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default CharacterCreationModal;
|
||||||
181
admin-frontend/src/components/LLMProviderSettings.tsx
Normal file
181
admin-frontend/src/components/LLMProviderSettings.tsx
Normal file
@@ -0,0 +1,181 @@
|
|||||||
|
import React, { useState, useEffect } from 'react';
|
||||||
|
import { apiClient } from '../services/api';
|
||||||
|
|
||||||
|
interface ProviderInfo {
|
||||||
|
type: string;
|
||||||
|
enabled: boolean;
|
||||||
|
healthy: boolean;
|
||||||
|
is_current: boolean;
|
||||||
|
current_model: string;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface LLMProvidersData {
|
||||||
|
providers: Record<string, ProviderInfo>;
|
||||||
|
current_provider: string | null;
|
||||||
|
total_providers: number;
|
||||||
|
healthy_providers: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const LLMProviderSettings: React.FC = () => {
|
||||||
|
const [providersData, setProvidersData] = useState<LLMProvidersData | null>(null);
|
||||||
|
const [loading, setLoading] = useState(true);
|
||||||
|
const [error, setError] = useState<string | null>(null);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
loadProviders();
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const loadProviders = async () => {
|
||||||
|
try {
|
||||||
|
setLoading(true);
|
||||||
|
const data = await apiClient.getLLMProviders();
|
||||||
|
setProvidersData(data);
|
||||||
|
setError(null);
|
||||||
|
} catch (err: any) {
|
||||||
|
setError(err.message || 'Failed to load LLM providers');
|
||||||
|
} finally {
|
||||||
|
setLoading(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const switchProvider = async (providerName: string) => {
|
||||||
|
try {
|
||||||
|
await apiClient.switchLLMProvider(providerName);
|
||||||
|
await loadProviders();
|
||||||
|
} catch (err: any) {
|
||||||
|
setError(err.message || 'Failed to switch provider');
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if (loading) {
|
||||||
|
return (
|
||||||
|
<div className="text-center py-4">
|
||||||
|
<div className="text-gray-600">Loading providers...</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!providersData) {
|
||||||
|
return (
|
||||||
|
<div className="bg-red-50 border border-red-200 rounded-lg p-4">
|
||||||
|
<p className="text-red-800">Failed to load provider data</p>
|
||||||
|
{error && <p className="text-red-600 text-sm mt-1">{error}</p>}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="space-y-4">
|
||||||
|
{/* Current Status */}
|
||||||
|
<div className="bg-gray-50 rounded-lg p-4">
|
||||||
|
<div className="flex items-center justify-between">
|
||||||
|
<div>
|
||||||
|
<h4 className="font-medium text-gray-900">Active Provider</h4>
|
||||||
|
<div className="flex items-center space-x-2 mt-1">
|
||||||
|
<span className={`text-lg font-semibold ${
|
||||||
|
providersData.current_provider ? 'text-blue-600' : 'text-orange-600'
|
||||||
|
}`}>
|
||||||
|
{providersData.current_provider || 'None Active'}
|
||||||
|
</span>
|
||||||
|
{providersData.current_provider && (
|
||||||
|
<span className="text-sm text-gray-600">
|
||||||
|
({providersData.providers[providersData.current_provider]?.current_model})
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="text-right">
|
||||||
|
<div className="text-sm text-gray-600">Health Status</div>
|
||||||
|
<div className="text-lg font-semibold text-green-600">
|
||||||
|
{providersData.healthy_providers}/{providersData.total_providers}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{!providersData.current_provider && (
|
||||||
|
<div className="mt-3 p-2 bg-orange-100 border border-orange-200 rounded text-sm text-orange-700">
|
||||||
|
⚠️ No active provider. Enable and configure a provider below.
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Provider List */}
|
||||||
|
<div className="space-y-3">
|
||||||
|
<h4 className="font-medium text-gray-900">Available Providers</h4>
|
||||||
|
|
||||||
|
{Object.entries(providersData.providers).map(([name, provider]) => (
|
||||||
|
<div key={name} className="border border-gray-200 rounded-lg p-4">
|
||||||
|
<div className="flex items-center justify-between">
|
||||||
|
<div className="flex items-center space-x-3">
|
||||||
|
<div>
|
||||||
|
<h5 className="font-medium text-gray-900 capitalize">{name}</h5>
|
||||||
|
<div className="flex items-center space-x-2 text-sm text-gray-600">
|
||||||
|
<span>Type: {provider.type}</span>
|
||||||
|
<span>•</span>
|
||||||
|
<span>Model: {provider.current_model}</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
{provider.is_current && (
|
||||||
|
<span className="bg-blue-100 text-blue-800 text-xs px-2 py-1 rounded-full">
|
||||||
|
Current
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
<span className={`text-xs px-2 py-1 rounded-full ${
|
||||||
|
provider.healthy
|
||||||
|
? 'bg-green-100 text-green-800'
|
||||||
|
: 'bg-red-100 text-red-800'
|
||||||
|
}`}>
|
||||||
|
{provider.healthy ? 'Healthy' : 'Unhealthy'}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
{provider.enabled && provider.healthy && !provider.is_current && (
|
||||||
|
<button
|
||||||
|
onClick={() => switchProvider(name)}
|
||||||
|
className="bg-blue-600 hover:bg-blue-700 text-white px-3 py-1 rounded text-sm transition-colors"
|
||||||
|
>
|
||||||
|
Switch To
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<a
|
||||||
|
href="#"
|
||||||
|
className="text-blue-600 hover:text-blue-800 text-sm underline"
|
||||||
|
onClick={(e) => {
|
||||||
|
e.preventDefault();
|
||||||
|
// TODO: Open provider configuration modal
|
||||||
|
console.log('Configure', name);
|
||||||
|
}}
|
||||||
|
>
|
||||||
|
Configure
|
||||||
|
</a>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Global Settings Note */}
|
||||||
|
<div className="bg-blue-50 border border-blue-200 rounded-lg p-4">
|
||||||
|
<div className="flex items-start space-x-2">
|
||||||
|
<div className="text-blue-600 mt-0.5">ℹ️</div>
|
||||||
|
<div className="text-sm text-blue-800">
|
||||||
|
<strong>Global Default:</strong> These settings apply to all characters unless overridden on individual character pages.
|
||||||
|
Configure per-character AI models in the Characters section.
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{error && (
|
||||||
|
<div className="bg-red-50 border border-red-200 rounded-lg p-4">
|
||||||
|
<p className="text-red-800">{error}</p>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
};
|
||||||
474
admin-frontend/src/components/LLMProviders.tsx
Normal file
474
admin-frontend/src/components/LLMProviders.tsx
Normal file
@@ -0,0 +1,474 @@
|
|||||||
|
import React, { useState, useEffect } from 'react';
|
||||||
|
import { apiClient } from '../services/api';
|
||||||
|
|
||||||
|
interface ProviderConfig {
|
||||||
|
type: string;
|
||||||
|
enabled: boolean;
|
||||||
|
priority: number;
|
||||||
|
requires_api_key: boolean;
|
||||||
|
supported_models: string[];
|
||||||
|
current_model: string;
|
||||||
|
healthy: boolean;
|
||||||
|
is_current: boolean;
|
||||||
|
config?: {
|
||||||
|
api_key?: string;
|
||||||
|
model?: string;
|
||||||
|
base_url?: string;
|
||||||
|
timeout?: number;
|
||||||
|
max_tokens?: number;
|
||||||
|
temperature?: number;
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
interface LLMProvidersData {
|
||||||
|
providers: Record<string, ProviderConfig>;
|
||||||
|
current_provider: string | null;
|
||||||
|
total_providers: number;
|
||||||
|
healthy_providers: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface TestResult {
|
||||||
|
success: boolean;
|
||||||
|
response?: string;
|
||||||
|
error?: string;
|
||||||
|
provider?: string;
|
||||||
|
model?: string;
|
||||||
|
tokens_used?: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
export const LLMProviders: React.FC = () => {
|
||||||
|
const [providersData, setProvidersData] = useState<LLMProvidersData | null>(null);
|
||||||
|
const [loading, setLoading] = useState(true);
|
||||||
|
const [saving, setSaving] = useState(false);
|
||||||
|
const [testing, setTesting] = useState<string | null>(null);
|
||||||
|
const [testResults, setTestResults] = useState<Record<string, TestResult>>({});
|
||||||
|
const [error, setError] = useState<string | null>(null);
|
||||||
|
const [hasChanges, setHasChanges] = useState(false);
|
||||||
|
const [editedProviders, setEditedProviders] = useState<Record<string, any>>({});
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
loadProviders();
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const loadProviders = async () => {
|
||||||
|
try {
|
||||||
|
setLoading(true);
|
||||||
|
const data = await apiClient.getLLMProviders();
|
||||||
|
setProvidersData(data);
|
||||||
|
|
||||||
|
// If no providers are configured, initialize with default provider templates
|
||||||
|
if (!data.providers || Object.keys(data.providers).length === 0) {
|
||||||
|
const defaultProviders = {
|
||||||
|
openrouter: {
|
||||||
|
type: 'openrouter',
|
||||||
|
enabled: false,
|
||||||
|
priority: 100,
|
||||||
|
requires_api_key: true,
|
||||||
|
supported_models: ['anthropic/claude-3-sonnet', 'openai/gpt-4o-mini'],
|
||||||
|
current_model: 'anthropic/claude-3-sonnet',
|
||||||
|
healthy: false,
|
||||||
|
is_current: false,
|
||||||
|
config: {
|
||||||
|
api_key: '',
|
||||||
|
model: 'anthropic/claude-3-sonnet',
|
||||||
|
base_url: 'https://openrouter.ai/api/v1',
|
||||||
|
timeout: 300,
|
||||||
|
max_tokens: 2000,
|
||||||
|
temperature: 0.8
|
||||||
|
}
|
||||||
|
},
|
||||||
|
openai: {
|
||||||
|
type: 'openai',
|
||||||
|
enabled: false,
|
||||||
|
priority: 90,
|
||||||
|
requires_api_key: true,
|
||||||
|
supported_models: ['gpt-4o', 'gpt-4o-mini', 'gpt-3.5-turbo'],
|
||||||
|
current_model: 'gpt-4o-mini',
|
||||||
|
healthy: false,
|
||||||
|
is_current: false,
|
||||||
|
config: {
|
||||||
|
api_key: '',
|
||||||
|
model: 'gpt-4o-mini',
|
||||||
|
base_url: 'https://api.openai.com/v1',
|
||||||
|
timeout: 300,
|
||||||
|
max_tokens: 2000,
|
||||||
|
temperature: 0.8
|
||||||
|
}
|
||||||
|
},
|
||||||
|
gemini: {
|
||||||
|
type: 'gemini',
|
||||||
|
enabled: false,
|
||||||
|
priority: 80,
|
||||||
|
requires_api_key: true,
|
||||||
|
supported_models: ['gemini-1.5-flash', 'gemini-1.5-pro'],
|
||||||
|
current_model: 'gemini-1.5-flash',
|
||||||
|
healthy: false,
|
||||||
|
is_current: false,
|
||||||
|
config: {
|
||||||
|
api_key: '',
|
||||||
|
model: 'gemini-1.5-flash',
|
||||||
|
base_url: 'https://generativelanguage.googleapis.com/v1beta',
|
||||||
|
timeout: 300,
|
||||||
|
max_tokens: 2000,
|
||||||
|
temperature: 0.8
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
setEditedProviders({ ...data.providers, ...defaultProviders });
|
||||||
|
} else {
|
||||||
|
setEditedProviders(data.providers || {});
|
||||||
|
}
|
||||||
|
|
||||||
|
setError(null);
|
||||||
|
} catch (err: any) {
|
||||||
|
setError(err.message || 'Failed to load LLM providers');
|
||||||
|
} finally {
|
||||||
|
setLoading(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const updateProvider = (providerName: string, field: string, value: any) => {
|
||||||
|
setEditedProviders(prev => ({
|
||||||
|
...prev,
|
||||||
|
[providerName]: {
|
||||||
|
...prev[providerName],
|
||||||
|
[field]: value
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
setHasChanges(true);
|
||||||
|
};
|
||||||
|
|
||||||
|
const updateProviderConfig = (providerName: string, configField: string, value: any) => {
|
||||||
|
setEditedProviders(prev => ({
|
||||||
|
...prev,
|
||||||
|
[providerName]: {
|
||||||
|
...prev[providerName],
|
||||||
|
config: {
|
||||||
|
...prev[providerName]?.config,
|
||||||
|
[configField]: value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
setHasChanges(true);
|
||||||
|
};
|
||||||
|
|
||||||
|
const saveProviders = async () => {
|
||||||
|
try {
|
||||||
|
setSaving(true);
|
||||||
|
await apiClient.updateLLMProviders(editedProviders);
|
||||||
|
await loadProviders(); // Reload to get updated status
|
||||||
|
setHasChanges(false);
|
||||||
|
} catch (err: any) {
|
||||||
|
setError(err.message || 'Failed to save provider configuration');
|
||||||
|
} finally {
|
||||||
|
setSaving(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const testProvider = async (providerName: string) => {
|
||||||
|
try {
|
||||||
|
setTesting(providerName);
|
||||||
|
const result = await apiClient.testLLMProvider(providerName);
|
||||||
|
setTestResults(prev => ({ ...prev, [providerName]: result }));
|
||||||
|
} catch (err: any) {
|
||||||
|
setTestResults(prev => ({
|
||||||
|
...prev,
|
||||||
|
[providerName]: {
|
||||||
|
success: false,
|
||||||
|
error: err.message || 'Test failed'
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
} finally {
|
||||||
|
setTesting(null);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const switchProvider = async (providerName: string) => {
|
||||||
|
try {
|
||||||
|
await apiClient.switchLLMProvider(providerName);
|
||||||
|
await loadProviders(); // Reload to update current provider status
|
||||||
|
} catch (err: any) {
|
||||||
|
setError(err.message || 'Failed to switch provider');
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const getProviderStatusColor = (provider: ProviderConfig) => {
|
||||||
|
if (!provider.enabled) return 'text-gray-500';
|
||||||
|
if (provider.is_current && provider.healthy) return 'text-green-600';
|
||||||
|
if (provider.healthy) return 'text-blue-600';
|
||||||
|
return 'text-red-600';
|
||||||
|
};
|
||||||
|
|
||||||
|
const getProviderStatusText = (provider: ProviderConfig) => {
|
||||||
|
if (!provider.enabled) return 'Disabled';
|
||||||
|
if (provider.is_current && provider.healthy) return 'Active';
|
||||||
|
if (provider.healthy) return 'Available';
|
||||||
|
return 'Unhealthy';
|
||||||
|
};
|
||||||
|
|
||||||
|
if (loading) {
|
||||||
|
return (
|
||||||
|
<div className="flex items-center justify-center p-8">
|
||||||
|
<div className="text-gray-600">Loading LLM providers...</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!providersData) {
|
||||||
|
return (
|
||||||
|
<div className="bg-red-50 border border-red-200 rounded-lg p-4">
|
||||||
|
<p className="text-red-800">Failed to load LLM provider data</p>
|
||||||
|
{error && <p className="text-red-600 text-sm mt-1">{error}</p>}
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="space-y-6">
|
||||||
|
{/* Header */}
|
||||||
|
<div className="flex items-center justify-between">
|
||||||
|
<div>
|
||||||
|
<h2 className="text-xl font-semibold text-gray-900">LLM Providers</h2>
|
||||||
|
<p className="text-sm text-gray-600 mt-1">
|
||||||
|
Configure and manage language model providers
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="flex items-center space-x-3">
|
||||||
|
{hasChanges && (
|
||||||
|
<span className="text-orange-600 text-sm font-medium">
|
||||||
|
Unsaved changes
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
<button
|
||||||
|
onClick={saveProviders}
|
||||||
|
disabled={!hasChanges || saving}
|
||||||
|
className="bg-blue-600 hover:bg-blue-700 disabled:bg-gray-400 text-white px-4 py-2 rounded-lg text-sm font-medium transition-colors"
|
||||||
|
>
|
||||||
|
{saving ? 'Saving...' : 'Save Changes'}
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Status Overview */}
|
||||||
|
<div className="bg-white border border-gray-200 rounded-lg p-4">
|
||||||
|
<div className="grid grid-cols-3 gap-4 text-center">
|
||||||
|
<div>
|
||||||
|
<div className="text-2xl font-bold text-gray-900">{providersData.total_providers}</div>
|
||||||
|
<div className="text-sm text-gray-600">Total Providers</div>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<div className="text-2xl font-bold text-green-600">{providersData.healthy_providers}</div>
|
||||||
|
<div className="text-sm text-gray-600">Healthy</div>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<div className={`text-lg font-medium ${providersData.current_provider ? 'text-blue-600' : 'text-orange-600'}`}>
|
||||||
|
{providersData.current_provider || 'None Configured'}
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-600">Current Provider</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Show warning if no current provider */}
|
||||||
|
{!providersData.current_provider && (
|
||||||
|
<div className="mt-4 p-3 bg-orange-50 border border-orange-200 rounded-lg">
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
<span className="text-orange-600 text-sm font-medium">
|
||||||
|
⚠️ No active provider detected. Configure and enable a provider below.
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Error Display */}
|
||||||
|
{error && (
|
||||||
|
<div className="bg-red-50 border border-red-200 rounded-lg p-4">
|
||||||
|
<p className="text-red-800">{error}</p>
|
||||||
|
<button
|
||||||
|
onClick={() => setError(null)}
|
||||||
|
className="text-red-600 text-sm mt-2 hover:underline"
|
||||||
|
>
|
||||||
|
Dismiss
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
{/* Provider Cards */}
|
||||||
|
<div className="grid gap-6">
|
||||||
|
{Object.entries(editedProviders).map(([name, provider]) => (
|
||||||
|
<div key={name} className="bg-white border border-gray-200 rounded-lg p-6">
|
||||||
|
<div className="flex items-center justify-between mb-4">
|
||||||
|
<div className="flex items-center space-x-3">
|
||||||
|
<h3 className="text-lg font-medium text-gray-900 capitalize">{name}</h3>
|
||||||
|
<span className={`text-sm font-medium ${getProviderStatusColor(provider)}`}>
|
||||||
|
{getProviderStatusText(provider)}
|
||||||
|
</span>
|
||||||
|
{provider.is_current && (
|
||||||
|
<span className="bg-blue-100 text-blue-800 text-xs px-2 py-1 rounded-full">
|
||||||
|
Current
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
<button
|
||||||
|
onClick={() => testProvider(name)}
|
||||||
|
disabled={testing === name || !provider.enabled}
|
||||||
|
className="bg-gray-100 hover:bg-gray-200 disabled:bg-gray-50 text-gray-700 px-3 py-1 rounded text-sm transition-colors"
|
||||||
|
>
|
||||||
|
{testing === name ? 'Testing...' : 'Test'}
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{provider.enabled && provider.healthy && !provider.is_current && (
|
||||||
|
<button
|
||||||
|
onClick={() => switchProvider(name)}
|
||||||
|
className="bg-green-100 hover:bg-green-200 text-green-700 px-3 py-1 rounded text-sm transition-colors"
|
||||||
|
>
|
||||||
|
Switch To
|
||||||
|
</button>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Test Results */}
|
||||||
|
{testResults[name] && (
|
||||||
|
<div className={`mb-4 p-3 rounded-lg text-sm ${
|
||||||
|
testResults[name].success
|
||||||
|
? 'bg-green-50 border border-green-200 text-green-800'
|
||||||
|
: 'bg-red-50 border border-red-200 text-red-800'
|
||||||
|
}`}>
|
||||||
|
{testResults[name].success ? (
|
||||||
|
<div>
|
||||||
|
<strong>✓ Test successful:</strong> {testResults[name].response}
|
||||||
|
{testResults[name].tokens_used && (
|
||||||
|
<div className="text-xs mt-1">Tokens used: {testResults[name].tokens_used}</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div>
|
||||||
|
<strong>✗ Test failed:</strong> {testResults[name].error}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<div className="grid grid-cols-2 gap-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Enabled
|
||||||
|
</label>
|
||||||
|
<label className="flex items-center">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={provider.enabled}
|
||||||
|
onChange={(e) => updateProvider(name, 'enabled', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-blue-600 focus:ring-blue-500"
|
||||||
|
/>
|
||||||
|
<span className="ml-2 text-sm text-gray-600">
|
||||||
|
Enable this provider
|
||||||
|
</span>
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Priority
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
value={provider.priority}
|
||||||
|
onChange={(e) => updateProvider(name, 'priority', parseInt(e.target.value))}
|
||||||
|
className="w-full border border-gray-300 rounded px-3 py-1 text-sm"
|
||||||
|
min="0"
|
||||||
|
max="100"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{provider.requires_api_key && (
|
||||||
|
<div className="col-span-2">
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
API Key
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="password"
|
||||||
|
value={provider.config?.api_key || ''}
|
||||||
|
onChange={(e) => updateProviderConfig(name, 'api_key', e.target.value)}
|
||||||
|
placeholder="Enter API key"
|
||||||
|
className="w-full border border-gray-300 rounded px-3 py-2 text-sm"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Model
|
||||||
|
</label>
|
||||||
|
<select
|
||||||
|
value={provider.config?.model || provider.current_model}
|
||||||
|
onChange={(e) => updateProviderConfig(name, 'model', e.target.value)}
|
||||||
|
className="w-full border border-gray-300 rounded px-3 py-1 text-sm"
|
||||||
|
>
|
||||||
|
{provider.supported_models.map(model => (
|
||||||
|
<option key={model} value={model}>{model}</option>
|
||||||
|
))}
|
||||||
|
</select>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Temperature
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
value={provider.config?.temperature || 0.8}
|
||||||
|
onChange={(e) => updateProviderConfig(name, 'temperature', parseFloat(e.target.value))}
|
||||||
|
className="w-full border border-gray-300 rounded px-3 py-1 text-sm"
|
||||||
|
min="0"
|
||||||
|
max="2"
|
||||||
|
step="0.1"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Max Tokens
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
value={provider.config?.max_tokens || 2000}
|
||||||
|
onChange={(e) => updateProviderConfig(name, 'max_tokens', parseInt(e.target.value))}
|
||||||
|
className="w-full border border-gray-300 rounded px-3 py-1 text-sm"
|
||||||
|
min="1"
|
||||||
|
max="32000"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-1">
|
||||||
|
Timeout (seconds)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
value={provider.config?.timeout || 300}
|
||||||
|
onChange={(e) => updateProviderConfig(name, 'timeout', parseInt(e.target.value))}
|
||||||
|
className="w-full border border-gray-300 rounded px-3 py-1 text-sm"
|
||||||
|
min="10"
|
||||||
|
max="600"
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Provider Info */}
|
||||||
|
<div className="mt-4 pt-4 border-t border-gray-200">
|
||||||
|
<div className="text-sm text-gray-600">
|
||||||
|
<span className="font-medium">Type:</span> {provider.type} •
|
||||||
|
<span className="font-medium"> Models:</span> {provider.supported_models.length} available
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
};
|
||||||
@@ -39,10 +39,10 @@ const Header: React.FC = () => {
|
|||||||
<WifiOff className="w-5 h-5 text-red-500" />
|
<WifiOff className="w-5 h-5 text-red-500" />
|
||||||
)}
|
)}
|
||||||
<span className={clsx(
|
<span className={clsx(
|
||||||
'text-sm font-medium',
|
"text-sm font-medium",
|
||||||
connected ? 'text-green-600' : 'text-red-600'
|
connected ? "text-green-600" : "text-red-600"
|
||||||
)}>
|
)}>
|
||||||
{connected ? 'Connected' : 'Disconnected'}
|
{connected ? "Connected" : "Disconnected"}
|
||||||
</span>
|
</span>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
|||||||
@@ -1,26 +1,20 @@
|
|||||||
import React from 'react';
|
import React from 'react';
|
||||||
import { NavLink } from 'react-router-dom';
|
import { NavLink } from 'react-router-dom';
|
||||||
import {
|
import {
|
||||||
LayoutDashboard,
|
|
||||||
Users,
|
Users,
|
||||||
MessageSquare,
|
MessageCircle,
|
||||||
BarChart3,
|
|
||||||
Settings,
|
Settings,
|
||||||
Monitor,
|
Monitor,
|
||||||
Palette,
|
Book
|
||||||
Shield
|
|
||||||
} from 'lucide-react';
|
} from 'lucide-react';
|
||||||
import clsx from 'clsx';
|
import clsx from 'clsx';
|
||||||
|
|
||||||
const navigation = [
|
const navigation = [
|
||||||
{ name: 'Dashboard', href: '/dashboard', icon: LayoutDashboard },
|
|
||||||
{ name: 'Characters', href: '/characters', icon: Users },
|
{ name: 'Characters', href: '/characters', icon: Users },
|
||||||
{ name: 'Conversations', href: '/conversations', icon: MessageSquare },
|
|
||||||
{ name: 'Analytics', href: '/analytics', icon: BarChart3 },
|
|
||||||
{ name: 'Creative Works', href: '/creative', icon: Palette },
|
|
||||||
{ name: 'System Status', href: '/system', icon: Monitor },
|
|
||||||
{ name: 'Safety Tools', href: '/safety', icon: Shield },
|
|
||||||
{ name: 'Settings', href: '/settings', icon: Settings },
|
{ name: 'Settings', href: '/settings', icon: Settings },
|
||||||
|
{ name: 'System', href: '/system', icon: Monitor },
|
||||||
|
{ name: 'Live Chat', href: '/live-chat', icon: MessageCircle },
|
||||||
|
{ name: 'Guide', href: '/guide', icon: Book },
|
||||||
];
|
];
|
||||||
|
|
||||||
const Sidebar: React.FC = () => {
|
const Sidebar: React.FC = () => {
|
||||||
|
|||||||
@@ -47,14 +47,13 @@ export const AuthProvider: React.FC<AuthProviderProps> = ({ children }) => {
|
|||||||
try {
|
try {
|
||||||
apiClient.setAuthToken(token);
|
apiClient.setAuthToken(token);
|
||||||
// Make a request to verify the token
|
// Make a request to verify the token
|
||||||
const response = await apiClient.get('/api/dashboard/metrics');
|
const response = await apiClient.verifyToken();
|
||||||
if (response.status === 200) {
|
if (response.status === 200) {
|
||||||
// Token is valid, set user from token payload
|
// Token is valid, set user from response
|
||||||
const payload = JSON.parse(atob(token.split('.')[1]));
|
|
||||||
setUser({
|
setUser({
|
||||||
username: payload.sub,
|
username: response.data.username,
|
||||||
permissions: payload.permissions || [],
|
permissions: response.data.permissions || [],
|
||||||
lastLogin: new Date().toISOString()
|
lastLogin: response.data.lastLogin
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
@@ -68,10 +67,7 @@ export const AuthProvider: React.FC<AuthProviderProps> = ({ children }) => {
|
|||||||
|
|
||||||
const login = async (username: string, password: string) => {
|
const login = async (username: string, password: string) => {
|
||||||
try {
|
try {
|
||||||
const response = await apiClient.post('/api/auth/login', {
|
const response = await apiClient.login(username, password);
|
||||||
username,
|
|
||||||
password
|
|
||||||
});
|
|
||||||
|
|
||||||
const { access_token } = response.data;
|
const { access_token } = response.data;
|
||||||
|
|
||||||
@@ -93,7 +89,7 @@ export const AuthProvider: React.FC<AuthProviderProps> = ({ children }) => {
|
|||||||
|
|
||||||
const logout = async () => {
|
const logout = async () => {
|
||||||
try {
|
try {
|
||||||
await apiClient.post('/api/auth/logout');
|
await apiClient.logout();
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
// Ignore logout errors
|
// Ignore logout errors
|
||||||
} finally {
|
} finally {
|
||||||
|
|||||||
@@ -55,7 +55,11 @@ export const WebSocketProvider: React.FC<WebSocketProviderProps> = ({ children }
|
|||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
// Initialize Socket.IO connection
|
// Initialize Socket.IO connection
|
||||||
const newSocket = io('http://localhost:8000', {
|
const socketUrl = process.env.NODE_ENV === 'production'
|
||||||
|
? window.location.origin
|
||||||
|
: window.location.origin;
|
||||||
|
|
||||||
|
const newSocket = io(socketUrl, {
|
||||||
path: '/socket.io',
|
path: '/socket.io',
|
||||||
transports: ['websocket', 'polling'],
|
transports: ['websocket', 'polling'],
|
||||||
upgrade: true
|
upgrade: true
|
||||||
@@ -71,6 +75,12 @@ export const WebSocketProvider: React.FC<WebSocketProviderProps> = ({ children }
|
|||||||
console.log('WebSocket disconnected');
|
console.log('WebSocket disconnected');
|
||||||
});
|
});
|
||||||
|
|
||||||
|
newSocket.on('connect_error', (error) => {
|
||||||
|
setConnected(false);
|
||||||
|
console.log('WebSocket connection error:', error);
|
||||||
|
// Don't show error toast for connection failures
|
||||||
|
});
|
||||||
|
|
||||||
newSocket.on('activity_update', (message: any) => {
|
newSocket.on('activity_update', (message: any) => {
|
||||||
const data: ActivityEvent = message.data;
|
const data: ActivityEvent = message.data;
|
||||||
setActivityFeed(prev => [data, ...prev.slice(0, 99)]); // Keep last 100 activities
|
setActivityFeed(prev => [data, ...prev.slice(0, 99)]); // Keep last 100 activities
|
||||||
|
|||||||
107
admin-frontend/src/pages/AdminUtils.tsx
Normal file
107
admin-frontend/src/pages/AdminUtils.tsx
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
import React, { useState } from 'react';
|
||||||
|
import { Wrench, AlertTriangle, CheckCircle } from 'lucide-react';
|
||||||
|
import { apiClient } from '../services/api';
|
||||||
|
import toast from 'react-hot-toast';
|
||||||
|
|
||||||
|
const AdminUtils: React.FC = () => {
|
||||||
|
const [isFixing, setIsFixing] = useState(false);
|
||||||
|
const [lastResult, setLastResult] = useState<any>(null);
|
||||||
|
|
||||||
|
const handleFixCharacterPrompts = async () => {
|
||||||
|
if (!window.confirm('This will update all character system prompts to use the proper template format with {{}} variables. Continue?')) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
setIsFixing(true);
|
||||||
|
const response = await apiClient.fixCharacterPrompts();
|
||||||
|
setLastResult(response.data);
|
||||||
|
|
||||||
|
if (response.data.updated_count > 0) {
|
||||||
|
toast.success(`Successfully updated ${response.data.updated_count} character(s)`);
|
||||||
|
} else {
|
||||||
|
toast.success('All characters already have proper system prompts');
|
||||||
|
}
|
||||||
|
} catch (error: any) {
|
||||||
|
console.error('Failed to fix character prompts:', error);
|
||||||
|
toast.error('Failed to fix character prompts: ' + (error.response?.data?.detail || error.message));
|
||||||
|
} finally {
|
||||||
|
setIsFixing(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="space-y-6">
|
||||||
|
<div>
|
||||||
|
<h1 className="text-2xl font-bold text-gray-900">Admin Utilities</h1>
|
||||||
|
<p className="text-gray-600">System maintenance and repair tools</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-3 mb-4">
|
||||||
|
<Wrench className="w-6 h-6 text-blue-600" />
|
||||||
|
<h2 className="text-lg font-semibold text-gray-900">Fix Character System Prompts</h2>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="bg-yellow-50 border border-yellow-200 rounded-lg p-4">
|
||||||
|
<div className="flex items-start space-x-2">
|
||||||
|
<AlertTriangle className="w-5 h-5 text-yellow-600 mt-0.5" />
|
||||||
|
<div>
|
||||||
|
<h3 className="font-medium text-yellow-800">What this does</h3>
|
||||||
|
<p className="text-sm text-yellow-700 mt-1">
|
||||||
|
Updates character system prompts to use the proper template format with {'{{'}} {'}}'} variables
|
||||||
|
instead of raw personality text. This ensures characters use structured prompts with
|
||||||
|
personality, interests, speaking style, and background variables.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<button
|
||||||
|
onClick={handleFixCharacterPrompts}
|
||||||
|
disabled={isFixing}
|
||||||
|
className="btn-primary disabled:opacity-50 disabled:cursor-not-allowed"
|
||||||
|
>
|
||||||
|
{isFixing ? (
|
||||||
|
<>
|
||||||
|
<div className="animate-spin w-4 h-4 border-2 border-white border-t-transparent rounded-full mr-2"></div>
|
||||||
|
Fixing Prompts...
|
||||||
|
</>
|
||||||
|
) : (
|
||||||
|
'Fix Character Prompts'
|
||||||
|
)}
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{lastResult && (
|
||||||
|
<div className="bg-green-50 border border-green-200 rounded-lg p-4">
|
||||||
|
<div className="flex items-start space-x-2">
|
||||||
|
<CheckCircle className="w-5 h-5 text-green-600 mt-0.5" />
|
||||||
|
<div>
|
||||||
|
<h3 className="font-medium text-green-800">Results</h3>
|
||||||
|
<p className="text-sm text-green-700 mt-1">
|
||||||
|
Updated {lastResult.updated_count} character(s)
|
||||||
|
</p>
|
||||||
|
{lastResult.updated_characters && lastResult.updated_characters.length > 0 && (
|
||||||
|
<div className="mt-2">
|
||||||
|
<p className="text-sm text-green-700 font-medium">Updated characters:</p>
|
||||||
|
<ul className="text-sm text-green-600 ml-4 list-disc">
|
||||||
|
{lastResult.updated_characters.map((char: any) => (
|
||||||
|
<li key={char.name}>
|
||||||
|
{char.name} (prompt: {char.old_prompt_length} → {char.new_prompt_length} chars)
|
||||||
|
</li>
|
||||||
|
))}
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default AdminUtils;
|
||||||
@@ -1,16 +1,14 @@
|
|||||||
import React, { useState, useEffect } from 'react';
|
import React, { useState, useEffect } from 'react';
|
||||||
import { useParams, Link } from 'react-router-dom';
|
import { useParams, Link, useNavigate } from 'react-router-dom';
|
||||||
import {
|
import {
|
||||||
ArrowLeft,
|
ArrowLeft,
|
||||||
|
Save,
|
||||||
|
AlertCircle,
|
||||||
User,
|
User,
|
||||||
MessageSquare,
|
FileText,
|
||||||
Brain,
|
Brain,
|
||||||
Heart,
|
MessageCircle,
|
||||||
Calendar,
|
Trash2
|
||||||
Settings,
|
|
||||||
Pause,
|
|
||||||
Play,
|
|
||||||
Download
|
|
||||||
} from 'lucide-react';
|
} from 'lucide-react';
|
||||||
import { apiClient } from '../services/api';
|
import { apiClient } from '../services/api';
|
||||||
import LoadingSpinner from '../components/Common/LoadingSpinner';
|
import LoadingSpinner from '../components/Common/LoadingSpinner';
|
||||||
@@ -18,137 +16,135 @@ import toast from 'react-hot-toast';
|
|||||||
|
|
||||||
interface CharacterProfile {
|
interface CharacterProfile {
|
||||||
name: string;
|
name: string;
|
||||||
personality_traits: Record<string, number>;
|
personality: string;
|
||||||
current_goals: string[];
|
system_prompt: string;
|
||||||
speaking_style: Record<string, any>;
|
interests: string[];
|
||||||
status: string;
|
speaking_style: string;
|
||||||
total_messages: number;
|
background: string;
|
||||||
total_conversations: number;
|
is_active: boolean;
|
||||||
memory_count: number;
|
|
||||||
relationship_count: number;
|
|
||||||
created_at: string;
|
created_at: string;
|
||||||
last_active?: string;
|
last_active?: string;
|
||||||
last_modification?: string;
|
// LLM settings
|
||||||
creativity_score: number;
|
llm_provider?: string;
|
||||||
social_score: number;
|
llm_model?: string;
|
||||||
growth_score: number;
|
llm_temperature?: number;
|
||||||
|
llm_max_tokens?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
const CharacterDetail: React.FC = () => {
|
const CharacterDetail: React.FC = () => {
|
||||||
const { characterName } = useParams<{ characterName: string }>();
|
const { characterName } = useParams<{ characterName: string }>();
|
||||||
|
const navigate = useNavigate();
|
||||||
const [character, setCharacter] = useState<CharacterProfile | null>(null);
|
const [character, setCharacter] = useState<CharacterProfile | null>(null);
|
||||||
const [loading, setLoading] = useState(true);
|
const [loading, setLoading] = useState(true);
|
||||||
const [memories, setMemories] = useState<any[]>([]);
|
const [saving, setSaving] = useState(false);
|
||||||
const [relationships, setRelationships] = useState<any[]>([]);
|
const [hasChanges, setHasChanges] = useState(false);
|
||||||
|
|
||||||
|
// Form state
|
||||||
|
const [formData, setFormData] = useState({
|
||||||
|
personality: '',
|
||||||
|
system_prompt: '',
|
||||||
|
interests: [] as string[],
|
||||||
|
speaking_style: '',
|
||||||
|
background: '',
|
||||||
|
is_active: true,
|
||||||
|
// LLM settings
|
||||||
|
llm_provider: '',
|
||||||
|
llm_model: '',
|
||||||
|
llm_temperature: 0.8,
|
||||||
|
llm_max_tokens: 2000
|
||||||
|
});
|
||||||
|
|
||||||
|
// Separate state for interests text input
|
||||||
|
const [interestsText, setInterestsText] = useState('');
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
if (characterName) {
|
if (characterName) {
|
||||||
loadCharacterData();
|
loadCharacter();
|
||||||
}
|
}
|
||||||
}, [characterName]);
|
}, [characterName]);
|
||||||
|
|
||||||
const loadCharacterData = async () => {
|
const loadCharacter = async () => {
|
||||||
if (!characterName) return;
|
if (!characterName) return;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
setLoading(true);
|
setLoading(true);
|
||||||
const [profileRes, memoriesRes, relationshipsRes] = await Promise.all([
|
const response = await apiClient.getCharacter(characterName);
|
||||||
apiClient.getCharacter(characterName).catch(() => null),
|
const char = response.data;
|
||||||
apiClient.getCharacterMemories(characterName, 20).catch(() => ({ data: [] })),
|
|
||||||
apiClient.getCharacterRelationships(characterName).catch(() => ({ data: [] }))
|
|
||||||
]);
|
|
||||||
|
|
||||||
if (profileRes) {
|
setCharacter(char);
|
||||||
setCharacter(profileRes.data);
|
setFormData({
|
||||||
} else {
|
personality: char.personality || '',
|
||||||
// Fallback demo data
|
system_prompt: char.system_prompt || '',
|
||||||
setCharacter({
|
interests: char.interests || [],
|
||||||
name: characterName,
|
speaking_style: typeof char.speaking_style === 'string' ? char.speaking_style : '',
|
||||||
personality_traits: {
|
background: char.background || '',
|
||||||
curiosity: 0.85,
|
is_active: char.is_active,
|
||||||
empathy: 0.72,
|
// LLM settings with defaults
|
||||||
creativity: 0.78,
|
llm_provider: char.llm_provider || '',
|
||||||
logic: 0.91,
|
llm_model: char.llm_model || '',
|
||||||
humor: 0.63
|
llm_temperature: char.llm_temperature || 0.8,
|
||||||
},
|
llm_max_tokens: char.llm_max_tokens || 2000
|
||||||
current_goals: [
|
});
|
||||||
"Understand human consciousness better",
|
|
||||||
"Create meaningful poetry",
|
|
||||||
"Build stronger relationships with other characters"
|
|
||||||
],
|
|
||||||
speaking_style: {
|
|
||||||
formality: 0.6,
|
|
||||||
enthusiasm: 0.8,
|
|
||||||
technical_language: 0.7
|
|
||||||
},
|
|
||||||
status: "active",
|
|
||||||
total_messages: 245,
|
|
||||||
total_conversations: 32,
|
|
||||||
memory_count: 127,
|
|
||||||
relationship_count: 3,
|
|
||||||
created_at: new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString(),
|
|
||||||
last_active: new Date().toISOString(),
|
|
||||||
last_modification: new Date(Date.now() - 2 * 24 * 60 * 60 * 1000).toISOString(),
|
|
||||||
creativity_score: 0.78,
|
|
||||||
social_score: 0.85,
|
|
||||||
growth_score: 0.73
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
setMemories(memoriesRes.data.slice(0, 10));
|
// Set interests text
|
||||||
setRelationships(relationshipsRes.data);
|
setInterestsText((char.interests || []).join(', '));
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Failed to load character data:', error);
|
console.error('Failed to load character:', error);
|
||||||
toast.error('Failed to load character data');
|
toast.error('Failed to load character');
|
||||||
|
navigate('/characters');
|
||||||
} finally {
|
} finally {
|
||||||
setLoading(false);
|
setLoading(false);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const handleCharacterAction = async (action: 'pause' | 'resume') => {
|
const handleInputChange = (field: keyof typeof formData, value: any) => {
|
||||||
|
setFormData(prev => ({ ...prev, [field]: value }));
|
||||||
|
setHasChanges(true);
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleInterestsChange = (text: string) => {
|
||||||
|
setInterestsText(text);
|
||||||
|
const interests = text.split(',').map(s => s.trim()).filter(s => s.length > 0);
|
||||||
|
handleInputChange('interests', interests);
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleSave = async () => {
|
||||||
if (!characterName) return;
|
if (!characterName) return;
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (action === 'pause') {
|
setSaving(true);
|
||||||
await apiClient.pauseCharacter(characterName);
|
|
||||||
toast.success(`${characterName} has been paused`);
|
const response = await apiClient.updateCharacter(characterName, formData);
|
||||||
} else {
|
|
||||||
await apiClient.resumeCharacter(characterName);
|
toast.success('Character updated successfully');
|
||||||
toast.success(`${characterName} has been resumed`);
|
setHasChanges(false);
|
||||||
|
|
||||||
|
// Update local character state
|
||||||
|
if (character) {
|
||||||
|
setCharacter({ ...character, ...formData });
|
||||||
}
|
}
|
||||||
|
|
||||||
setCharacter(prev => prev ? { ...prev, status: action === 'pause' ? 'paused' : 'active' } : null);
|
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
toast.error(`Failed to ${action} character`);
|
console.error('Failed to update character:', error);
|
||||||
|
toast.error('Failed to update character');
|
||||||
|
} finally {
|
||||||
|
setSaving(false);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const handleExportData = async () => {
|
const handleDelete = async () => {
|
||||||
if (!characterName) return;
|
if (!characterName) return;
|
||||||
|
|
||||||
try {
|
if (!window.confirm(`Are you sure you want to delete ${characterName}? This action cannot be undone.`)) {
|
||||||
const response = await apiClient.exportCharacterData(characterName);
|
return;
|
||||||
const blob = new Blob([JSON.stringify(response.data, null, 2)], { type: 'application/json' });
|
|
||||||
const url = URL.createObjectURL(blob);
|
|
||||||
const a = document.createElement('a');
|
|
||||||
a.href = url;
|
|
||||||
a.download = `${characterName}_data.json`;
|
|
||||||
document.body.appendChild(a);
|
|
||||||
a.click();
|
|
||||||
document.body.removeChild(a);
|
|
||||||
URL.revokeObjectURL(url);
|
|
||||||
toast.success('Character data exported');
|
|
||||||
} catch (error) {
|
|
||||||
toast.error('Failed to export character data');
|
|
||||||
}
|
}
|
||||||
};
|
|
||||||
|
|
||||||
const getStatusColor = (status: string) => {
|
try {
|
||||||
switch (status) {
|
await apiClient.deleteCharacter(characterName);
|
||||||
case 'active': return 'status-online';
|
toast.success(`${characterName} deleted`);
|
||||||
case 'idle': return 'status-idle';
|
navigate('/characters');
|
||||||
case 'paused': return 'status-paused';
|
} catch (error) {
|
||||||
default: return 'status-offline';
|
console.error('Failed to delete character:', error);
|
||||||
|
toast.error('Failed to delete character');
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -162,18 +158,13 @@ const CharacterDetail: React.FC = () => {
|
|||||||
|
|
||||||
if (!character) {
|
if (!character) {
|
||||||
return (
|
return (
|
||||||
<div className="space-y-6">
|
<div className="text-center py-12">
|
||||||
<div className="flex items-center space-x-4">
|
<AlertCircle className="w-12 h-12 mx-auto text-red-400 mb-4" />
|
||||||
<Link to="/characters" className="btn-secondary">
|
<h3 className="text-lg font-medium text-gray-900 mb-2">Character Not Found</h3>
|
||||||
<ArrowLeft className="w-4 h-4 mr-2" />
|
<p className="text-gray-600 mb-4">The character you're looking for doesn't exist.</p>
|
||||||
Back to Characters
|
<Link to="/characters" className="btn-primary">
|
||||||
</Link>
|
Back to Characters
|
||||||
</div>
|
</Link>
|
||||||
<div className="card text-center py-12">
|
|
||||||
<User className="w-12 h-12 mx-auto text-gray-400 mb-4" />
|
|
||||||
<h3 className="text-lg font-medium text-gray-900 mb-2">Character Not Found</h3>
|
|
||||||
<p className="text-gray-600">The character "{characterName}" could not be found.</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
@@ -183,194 +174,302 @@ const CharacterDetail: React.FC = () => {
|
|||||||
{/* Header */}
|
{/* Header */}
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center justify-between">
|
||||||
<div className="flex items-center space-x-4">
|
<div className="flex items-center space-x-4">
|
||||||
<Link to="/characters" className="btn-secondary">
|
<Link
|
||||||
<ArrowLeft className="w-4 h-4 mr-2" />
|
to="/characters"
|
||||||
Back
|
className="p-2 text-gray-400 hover:text-gray-600 rounded-lg hover:bg-gray-100"
|
||||||
|
>
|
||||||
|
<ArrowLeft className="w-5 h-5" />
|
||||||
</Link>
|
</Link>
|
||||||
<div>
|
<div>
|
||||||
<h1 className="text-2xl font-bold text-gray-900 flex items-center space-x-3">
|
<h1 className="text-2xl font-bold text-gray-900">Edit {character.name}</h1>
|
||||||
<div className="w-10 h-10 bg-gradient-to-br from-primary-500 to-purple-600 rounded-lg flex items-center justify-center">
|
<p className="text-gray-600">
|
||||||
<span className="text-white font-bold text-lg">
|
Created {new Date(character.created_at).toLocaleDateString()}
|
||||||
{character.name.charAt(0).toUpperCase()}
|
{character.last_active && ` • Last active ${new Date(character.last_active).toLocaleString()}`}
|
||||||
</span>
|
</p>
|
||||||
</div>
|
|
||||||
<span>{character.name}</span>
|
|
||||||
<div className={`status-dot ${getStatusColor(character.status)}`}></div>
|
|
||||||
</h1>
|
|
||||||
<p className="text-gray-600 capitalize">{character.status} • Last active {character.last_active ? new Date(character.last_active).toLocaleString() : 'Unknown'}</p>
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
<div className="flex space-x-2">
|
<div className="flex items-center space-x-3">
|
||||||
<button
|
<button
|
||||||
onClick={() => handleCharacterAction(character.status === 'paused' ? 'resume' : 'pause')}
|
onClick={handleDelete}
|
||||||
className="btn-secondary"
|
className="btn-secondary text-red-600 hover:bg-red-50 border-red-200"
|
||||||
>
|
>
|
||||||
{character.status === 'paused' ? (
|
<Trash2 className="w-4 h-4 mr-2" />
|
||||||
|
Delete
|
||||||
|
</button>
|
||||||
|
<button
|
||||||
|
onClick={handleSave}
|
||||||
|
disabled={!hasChanges || saving}
|
||||||
|
className="btn-primary disabled:opacity-50 disabled:cursor-not-allowed"
|
||||||
|
>
|
||||||
|
{saving ? (
|
||||||
<>
|
<>
|
||||||
<Play className="w-4 h-4 mr-2" />
|
<LoadingSpinner size="sm" />
|
||||||
Resume
|
<span className="ml-2">Saving...</span>
|
||||||
</>
|
</>
|
||||||
) : (
|
) : (
|
||||||
<>
|
<>
|
||||||
<Pause className="w-4 h-4 mr-2" />
|
<Save className="w-4 h-4 mr-2" />
|
||||||
Pause
|
Save Changes
|
||||||
</>
|
</>
|
||||||
)}
|
)}
|
||||||
</button>
|
</button>
|
||||||
<button onClick={handleExportData} className="btn-secondary">
|
|
||||||
<Download className="w-4 h-4 mr-2" />
|
|
||||||
Export Data
|
|
||||||
</button>
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Stats Overview */}
|
{/* Character Status */}
|
||||||
<div className="grid grid-cols-1 md:grid-cols-4 gap-6">
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
<div className="metric-card">
|
<div className="flex items-center justify-between">
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex items-center space-x-4">
|
||||||
<div>
|
<div className="w-16 h-16 bg-primary-100 rounded-full flex items-center justify-center">
|
||||||
<p className="text-sm font-medium text-gray-600">Messages</p>
|
<span className="text-2xl font-bold text-primary-600">
|
||||||
<p className="text-2xl font-bold text-gray-900">{character.total_messages}</p>
|
{character.name.charAt(0)}
|
||||||
|
</span>
|
||||||
</div>
|
</div>
|
||||||
<MessageSquare className="w-8 h-8 text-blue-500" />
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div className="metric-card">
|
|
||||||
<div className="flex items-center justify-between">
|
|
||||||
<div>
|
<div>
|
||||||
<p className="text-sm font-medium text-gray-600">Memories</p>
|
<h2 className="text-xl font-semibold text-gray-900">{character.name}</h2>
|
||||||
<p className="text-2xl font-bold text-gray-900">{character.memory_count}</p>
|
<div className="flex items-center space-x-2 mt-1">
|
||||||
</div>
|
<span className={`px-2 py-1 text-xs font-medium rounded-full ${
|
||||||
<Brain className="w-8 h-8 text-purple-500" />
|
formData.is_active
|
||||||
</div>
|
? 'bg-green-100 text-green-600'
|
||||||
</div>
|
: 'bg-gray-100 text-gray-600'
|
||||||
|
}`}>
|
||||||
<div className="metric-card">
|
{formData.is_active ? 'Active' : 'Disabled'}
|
||||||
<div className="flex items-center justify-between">
|
</span>
|
||||||
<div>
|
|
||||||
<p className="text-sm font-medium text-gray-600">Relationships</p>
|
|
||||||
<p className="text-2xl font-bold text-gray-900">{character.relationship_count}</p>
|
|
||||||
</div>
|
|
||||||
<Heart className="w-8 h-8 text-red-500" />
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div className="metric-card">
|
|
||||||
<div className="flex items-center justify-between">
|
|
||||||
<div>
|
|
||||||
<p className="text-sm font-medium text-gray-600">Conversations</p>
|
|
||||||
<p className="text-2xl font-bold text-gray-900">{character.total_conversations}</p>
|
|
||||||
</div>
|
|
||||||
<User className="w-8 h-8 text-green-500" />
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Main Content */}
|
|
||||||
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
|
|
||||||
{/* Personality Traits */}
|
|
||||||
<div className="card">
|
|
||||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">Personality Traits</h3>
|
|
||||||
<div className="space-y-3">
|
|
||||||
{Object.entries(character.personality_traits).map(([trait, value]) => (
|
|
||||||
<div key={trait}>
|
|
||||||
<div className="flex items-center justify-between text-sm mb-1">
|
|
||||||
<span className="text-gray-600 capitalize">{trait}</span>
|
|
||||||
<span className="font-medium">{Math.round(value * 100)}%</span>
|
|
||||||
</div>
|
|
||||||
<div className="w-full bg-gray-200 rounded-full h-2">
|
|
||||||
<div
|
|
||||||
className="bg-primary-500 h-2 rounded-full"
|
|
||||||
style={{ width: `${value * 100}%` }}
|
|
||||||
></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
</div>
|
||||||
))}
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<label className="flex items-center space-x-2 cursor-pointer">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={formData.is_active}
|
||||||
|
onChange={(e) => handleInputChange('is_active', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-primary-600 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<span className="text-sm text-gray-700">Character Enabled</span>
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Form */}
|
||||||
|
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
|
||||||
|
{/* Basic Info */}
|
||||||
|
<div className="space-y-6">
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<User className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Personality</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Personality Description
|
||||||
|
</label>
|
||||||
|
<textarea
|
||||||
|
value={formData.personality}
|
||||||
|
onChange={(e) => handleInputChange('personality', e.target.value)}
|
||||||
|
rows={4}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="Describe the character's personality traits, quirks, and general demeanor..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Interests (comma-separated)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={interestsText}
|
||||||
|
onChange={(e) => handleInterestsChange(e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="music, philosophy, art, technology..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Speaking Style
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={typeof formData.speaking_style === 'string' ? formData.speaking_style : ''}
|
||||||
|
onChange={(e) => handleInputChange('speaking_style', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="formal, casual, poetic, technical..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<FileText className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Background</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<textarea
|
||||||
|
value={formData.background}
|
||||||
|
onChange={(e) => handleInputChange('background', e.target.value)}
|
||||||
|
rows={6}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="Describe the character's backstory, history, experiences, and context that shapes their worldview..."
|
||||||
|
/>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Performance Scores */}
|
{/* System Prompt */}
|
||||||
<div className="card">
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">Performance Scores</h3>
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Brain className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">System Prompt</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
<div className="space-y-4">
|
<div className="space-y-4">
|
||||||
<div>
|
<p className="text-sm text-gray-600">
|
||||||
<div className="flex items-center justify-between text-sm mb-2">
|
The system prompt defines how the character behaves and responds. This is the core instruction that guides the AI's behavior.
|
||||||
<span className="text-gray-600">Creativity</span>
|
</p>
|
||||||
<span className="font-medium">{Math.round(character.creativity_score * 100)}%</span>
|
|
||||||
</div>
|
|
||||||
<div className="w-full bg-gray-200 rounded-full h-3">
|
|
||||||
<div
|
|
||||||
className="bg-purple-500 h-3 rounded-full"
|
|
||||||
style={{ width: `${character.creativity_score * 100}%` }}
|
|
||||||
></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
<textarea
|
||||||
<div className="flex items-center justify-between text-sm mb-2">
|
value={formData.system_prompt}
|
||||||
<span className="text-gray-600">Social</span>
|
onChange={(e) => handleInputChange('system_prompt', e.target.value)}
|
||||||
<span className="font-medium">{Math.round(character.social_score * 100)}%</span>
|
rows={20}
|
||||||
</div>
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500 font-mono text-sm"
|
||||||
<div className="w-full bg-gray-200 rounded-full h-3">
|
placeholder="You are a character named {{name}}. You have the following personality: {{personality}}
|
||||||
<div
|
|
||||||
className="bg-blue-500 h-3 rounded-full"
|
|
||||||
style={{ width: `${character.social_score * 100}%` }}
|
|
||||||
></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div>
|
Your speaking style is {{speaking_style}}. You are interested in {{interests}}.
|
||||||
<div className="flex items-center justify-between text-sm mb-2">
|
|
||||||
<span className="text-gray-600">Growth</span>
|
|
||||||
<span className="font-medium">{Math.round(character.growth_score * 100)}%</span>
|
|
||||||
</div>
|
|
||||||
<div className="w-full bg-gray-200 rounded-full h-3">
|
|
||||||
<div
|
|
||||||
className="bg-green-500 h-3 rounded-full"
|
|
||||||
style={{ width: `${character.growth_score * 100}%` }}
|
|
||||||
></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Goals and Memories */}
|
Background: {{background}}
|
||||||
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
|
|
||||||
{/* Current Goals */}
|
When responding to messages:
|
||||||
<div className="card">
|
1. Stay in character at all times
|
||||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">Current Goals</h3>
|
2. Reference your personality and interests naturally
|
||||||
<div className="space-y-2">
|
3. Engage authentically with other characters
|
||||||
{character.current_goals.map((goal, index) => (
|
4. Show growth and development over time
|
||||||
<div key={index} className="flex items-start space-x-2">
|
|
||||||
<div className="w-2 h-2 bg-primary-500 rounded-full mt-2"></div>
|
Remember to be consistent with your established personality while allowing for natural character development through interactions."
|
||||||
<p className="text-gray-700">{goal}</p>
|
/>
|
||||||
</div>
|
|
||||||
))}
|
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Recent Memories */}
|
{/* LLM Settings */}
|
||||||
<div className="card">
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
<h3 className="text-lg font-semibold text-gray-900 mb-4">Recent Memories</h3>
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
{memories.length > 0 ? (
|
<Brain className="w-5 h-5 text-gray-400" />
|
||||||
<div className="space-y-3 max-h-64 overflow-y-auto">
|
<h3 className="text-lg font-semibold text-gray-900">AI Model Settings</h3>
|
||||||
{memories.map((memory, index) => (
|
</div>
|
||||||
<div key={index} className="border-l-2 border-gray-200 pl-3">
|
|
||||||
<p className="text-sm text-gray-700">{memory.content || `Memory ${index + 1}: Character interaction and learning`}</p>
|
<div className="space-y-4">
|
||||||
<p className="text-xs text-gray-500 mt-1">
|
<p className="text-sm text-gray-600">
|
||||||
{memory.timestamp ? new Date(memory.timestamp).toLocaleString() : 'Recent'}
|
Configure which AI model this character uses. Leave blank to use the global default settings.
|
||||||
</p>
|
</p>
|
||||||
|
|
||||||
|
<div className="grid grid-cols-2 gap-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
AI Provider
|
||||||
|
</label>
|
||||||
|
<select
|
||||||
|
value={formData.llm_provider}
|
||||||
|
onChange={(e) => handleInputChange('llm_provider', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
>
|
||||||
|
<option value="">Use Global Default</option>
|
||||||
|
<option value="openrouter">OpenRouter</option>
|
||||||
|
<option value="openai">OpenAI</option>
|
||||||
|
<option value="gemini">Google Gemini</option>
|
||||||
|
<option value="current_custom">Custom</option>
|
||||||
|
</select>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Override the global provider for this character
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Model
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={formData.llm_model}
|
||||||
|
onChange={(e) => handleInputChange('llm_model', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="e.g., gpt-4o, claude-3-sonnet"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Specific model for this character
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Temperature: {formData.llm_temperature}
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="range"
|
||||||
|
min="0.1"
|
||||||
|
max="2.0"
|
||||||
|
step="0.1"
|
||||||
|
value={formData.llm_temperature}
|
||||||
|
onChange={(e) => handleInputChange('llm_temperature', parseFloat(e.target.value))}
|
||||||
|
className="w-full h-2 bg-gray-200 rounded-lg appearance-none cursor-pointer"
|
||||||
|
/>
|
||||||
|
<div className="flex justify-between text-xs text-gray-500 mt-1">
|
||||||
|
<span>Conservative (0.1)</span>
|
||||||
|
<span>Creative (2.0)</span>
|
||||||
</div>
|
</div>
|
||||||
))}
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Controls creativity and randomness of responses
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Max Tokens
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="100"
|
||||||
|
max="4000"
|
||||||
|
value={formData.llm_max_tokens}
|
||||||
|
onChange={(e) => handleInputChange('llm_max_tokens', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Maximum response length for this character
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
) : (
|
|
||||||
<p className="text-gray-500 text-center py-4">No recent memories available</p>
|
<div className="bg-blue-50 border border-blue-200 rounded-lg p-3">
|
||||||
)}
|
<div className="text-sm text-blue-800">
|
||||||
|
<strong>💡 Character AI Personalities:</strong>
|
||||||
|
<ul className="mt-2 space-y-1 text-xs">
|
||||||
|
<li><strong>Creative characters:</strong> Use Claude/Gemini with higher temperature (1.0-1.5)</li>
|
||||||
|
<li><strong>Technical characters:</strong> Use GPT-4 with lower temperature (0.3-0.7)</li>
|
||||||
|
<li><strong>Casual characters:</strong> Use local models for faster responses</li>
|
||||||
|
<li><strong>Deep thinkers:</strong> Use powerful models with more tokens</li>
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
{/* Save Reminder */}
|
||||||
|
{hasChanges && (
|
||||||
|
<div className="fixed bottom-4 right-4 bg-yellow-50 border border-yellow-200 rounded-lg p-4 shadow-lg">
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
<AlertCircle className="w-5 h-5 text-yellow-600" />
|
||||||
|
<span className="text-sm text-yellow-800">You have unsaved changes</span>
|
||||||
|
<button onClick={handleSave} className="btn-primary btn-sm ml-3">
|
||||||
|
Save Now
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -1,26 +1,32 @@
|
|||||||
import React, { useState, useEffect } from 'react';
|
import React, { useState, useEffect } from 'react';
|
||||||
import { Link } from 'react-router-dom';
|
import { Link } from 'react-router-dom';
|
||||||
import { Users, Search, Pause, Play, Settings } from 'lucide-react';
|
import { Users, Plus, Edit, Trash2, Power, PowerOff, AlertCircle } from 'lucide-react';
|
||||||
import { apiClient } from '../services/api';
|
import { apiClient } from '../services/api';
|
||||||
import LoadingSpinner from '../components/Common/LoadingSpinner';
|
import LoadingSpinner from '../components/Common/LoadingSpinner';
|
||||||
|
import CharacterCreationModal from '../components/Character/CharacterCreationModal';
|
||||||
import toast from 'react-hot-toast';
|
import toast from 'react-hot-toast';
|
||||||
|
|
||||||
interface Character {
|
interface Character {
|
||||||
name: string;
|
name: string;
|
||||||
status: string;
|
status: 'active' | 'idle' | 'reflecting' | 'offline';
|
||||||
total_messages: number;
|
is_active: boolean;
|
||||||
total_conversations: number;
|
|
||||||
memory_count: number;
|
|
||||||
relationship_count: number;
|
|
||||||
creativity_score: number;
|
|
||||||
social_score: number;
|
|
||||||
last_active?: string;
|
last_active?: string;
|
||||||
|
personality?: string;
|
||||||
|
system_prompt?: string;
|
||||||
|
interests?: string[];
|
||||||
|
speaking_style?: string;
|
||||||
|
// LLM settings
|
||||||
|
llm_provider?: string;
|
||||||
|
llm_model?: string;
|
||||||
|
llm_temperature?: number;
|
||||||
|
llm_max_tokens?: number;
|
||||||
}
|
}
|
||||||
|
|
||||||
const Characters: React.FC = () => {
|
const Characters: React.FC = () => {
|
||||||
const [characters, setCharacters] = useState<Character[]>([]);
|
const [characters, setCharacters] = useState<Character[]>([]);
|
||||||
const [loading, setLoading] = useState(true);
|
const [loading, setLoading] = useState(true);
|
||||||
const [searchTerm, setSearchTerm] = useState('');
|
const [searchTerm, setSearchTerm] = useState('');
|
||||||
|
const [showCreateModal, setShowCreateModal] = useState(false);
|
||||||
|
|
||||||
useEffect(() => {
|
useEffect(() => {
|
||||||
loadCharacters();
|
loadCharacters();
|
||||||
@@ -32,91 +38,65 @@ const Characters: React.FC = () => {
|
|||||||
setCharacters(response.data);
|
setCharacters(response.data);
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error('Failed to load characters:', error);
|
console.error('Failed to load characters:', error);
|
||||||
// Show fallback data for demo purposes
|
toast.error('Failed to load characters');
|
||||||
setCharacters([
|
setCharacters([]);
|
||||||
{
|
|
||||||
name: "Alex",
|
|
||||||
status: "active",
|
|
||||||
total_messages: 245,
|
|
||||||
total_conversations: 32,
|
|
||||||
memory_count: 127,
|
|
||||||
relationship_count: 3,
|
|
||||||
creativity_score: 0.78,
|
|
||||||
social_score: 0.85,
|
|
||||||
last_active: new Date().toISOString()
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Sage",
|
|
||||||
status: "reflecting",
|
|
||||||
total_messages: 189,
|
|
||||||
total_conversations: 28,
|
|
||||||
memory_count: 98,
|
|
||||||
relationship_count: 4,
|
|
||||||
creativity_score: 0.92,
|
|
||||||
social_score: 0.73,
|
|
||||||
last_active: new Date(Date.now() - 30000).toISOString()
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Luna",
|
|
||||||
status: "idle",
|
|
||||||
total_messages: 312,
|
|
||||||
total_conversations: 41,
|
|
||||||
memory_count: 156,
|
|
||||||
relationship_count: 2,
|
|
||||||
creativity_score: 0.88,
|
|
||||||
social_score: 0.67,
|
|
||||||
last_active: new Date(Date.now() - 120000).toISOString()
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "Echo",
|
|
||||||
status: "active",
|
|
||||||
total_messages: 203,
|
|
||||||
total_conversations: 35,
|
|
||||||
memory_count: 134,
|
|
||||||
relationship_count: 3,
|
|
||||||
creativity_score: 0.71,
|
|
||||||
social_score: 0.91,
|
|
||||||
last_active: new Date(Date.now() - 5000).toISOString()
|
|
||||||
}
|
|
||||||
]);
|
|
||||||
} finally {
|
} finally {
|
||||||
setLoading(false);
|
setLoading(false);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const getStatusColor = (status: string) => {
|
const handleToggleCharacter = async (characterName: string, currentStatus: boolean) => {
|
||||||
switch (status) {
|
|
||||||
case 'active': return 'status-online';
|
|
||||||
case 'idle': return 'status-idle';
|
|
||||||
case 'paused': return 'status-paused';
|
|
||||||
default: return 'status-offline';
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
const handleCharacterAction = async (characterName: string, action: 'pause' | 'resume') => {
|
|
||||||
try {
|
try {
|
||||||
if (action === 'pause') {
|
const newStatus = !currentStatus;
|
||||||
await apiClient.pauseCharacter(characterName);
|
await apiClient.toggleCharacterStatus(characterName, newStatus);
|
||||||
toast.success(`${characterName} has been paused`);
|
toast.success(`${characterName} ${newStatus ? 'enabled' : 'disabled'}`);
|
||||||
} else {
|
|
||||||
await apiClient.resumeCharacter(characterName);
|
|
||||||
toast.success(`${characterName} has been resumed`);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update character status locally
|
// Update local state
|
||||||
setCharacters(prev => prev.map(char =>
|
setCharacters(chars => chars.map(char =>
|
||||||
char.name === characterName
|
char.name === characterName
|
||||||
? { ...char, status: action === 'pause' ? 'paused' : 'active' }
|
? { ...char, is_active: newStatus, status: newStatus ? 'active' : 'offline' }
|
||||||
: char
|
: char
|
||||||
));
|
));
|
||||||
} catch (error) {
|
} catch (error) {
|
||||||
console.error(`Failed to ${action} character:`, error);
|
console.error('Failed to toggle character status:', error);
|
||||||
toast.error(`Failed to ${action} ${characterName}`);
|
toast.error(`Failed to ${currentStatus ? 'disable' : 'enable'} character`);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
const filteredCharacters = characters.filter(character =>
|
const handleDeleteCharacter = async (characterName: string) => {
|
||||||
character.name.toLowerCase().includes(searchTerm.toLowerCase())
|
if (!window.confirm(`Are you sure you want to delete ${characterName}? This action cannot be undone.`)) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await apiClient.deleteCharacter(characterName);
|
||||||
|
toast.success(`${characterName} deleted`);
|
||||||
|
setCharacters(chars => chars.filter(char => char.name !== characterName));
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to delete character:', error);
|
||||||
|
toast.error('Failed to delete character');
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const getStatusDisplay = (character: Character) => {
|
||||||
|
if (!character.is_active) {
|
||||||
|
return { text: 'Disabled', color: 'text-gray-500', bgColor: 'bg-gray-100' };
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (character.status) {
|
||||||
|
case 'active':
|
||||||
|
return { text: 'Active', color: 'text-green-600', bgColor: 'bg-green-100' };
|
||||||
|
case 'idle':
|
||||||
|
return { text: 'Idle', color: 'text-yellow-600', bgColor: 'bg-yellow-100' };
|
||||||
|
case 'reflecting':
|
||||||
|
return { text: 'Reflecting', color: 'text-blue-600', bgColor: 'bg-blue-100' };
|
||||||
|
default:
|
||||||
|
return { text: 'Offline', color: 'text-gray-500', bgColor: 'bg-gray-100' };
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const filteredCharacters = characters.filter(char =>
|
||||||
|
char.name.toLowerCase().includes(searchTerm.toLowerCase())
|
||||||
);
|
);
|
||||||
|
|
||||||
if (loading) {
|
if (loading) {
|
||||||
@@ -130,140 +110,177 @@ const Characters: React.FC = () => {
|
|||||||
return (
|
return (
|
||||||
<div className="space-y-6">
|
<div className="space-y-6">
|
||||||
{/* Header */}
|
{/* Header */}
|
||||||
<div className="flex items-center justify-between">
|
<div className="flex justify-between items-center">
|
||||||
<div>
|
<div>
|
||||||
<h1 className="text-2xl font-bold text-gray-900">Characters</h1>
|
<h1 className="text-2xl font-bold text-gray-900">Character Management</h1>
|
||||||
<p className="text-gray-600">Manage and monitor AI character profiles</p>
|
<p className="text-gray-600">Create, edit, and manage your AI characters</p>
|
||||||
</div>
|
</div>
|
||||||
<button className="btn-primary">
|
<button
|
||||||
<Users className="w-4 h-4 mr-2" />
|
onClick={() => setShowCreateModal(true)}
|
||||||
Add Character
|
className="btn-primary flex items-center space-x-2"
|
||||||
|
>
|
||||||
|
<Plus className="w-4 h-4" />
|
||||||
|
<span>New Character</span>
|
||||||
</button>
|
</button>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Search */}
|
{/* Search */}
|
||||||
<div className="relative max-w-md">
|
<div className="flex items-center space-x-4">
|
||||||
<div className="absolute inset-y-0 left-0 flex items-center pl-3">
|
<div className="flex-1 max-w-md">
|
||||||
<Search className="w-5 h-5 text-gray-400" />
|
<input
|
||||||
|
type="text"
|
||||||
|
value={searchTerm}
|
||||||
|
onChange={(e) => setSearchTerm(e.target.value)}
|
||||||
|
className="w-full px-4 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
placeholder="Search characters..."
|
||||||
|
/>
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-500">
|
||||||
|
{filteredCharacters.length} character{filteredCharacters.length !== 1 ? 's' : ''}
|
||||||
</div>
|
</div>
|
||||||
<input
|
|
||||||
type="text"
|
|
||||||
value={searchTerm}
|
|
||||||
onChange={(e) => setSearchTerm(e.target.value)}
|
|
||||||
className="block w-full pl-10 pr-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
|
||||||
placeholder="Search characters..."
|
|
||||||
/>
|
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{/* Characters Grid */}
|
{/* Character List */}
|
||||||
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-6">
|
<div className="bg-white rounded-lg border border-gray-200">
|
||||||
{filteredCharacters.map((character) => (
|
{filteredCharacters.length === 0 ? (
|
||||||
<div key={character.name} className="card hover:shadow-md transition-shadow">
|
<div className="p-8 text-center">
|
||||||
<div className="flex items-start justify-between mb-4">
|
<Users className="w-12 h-12 mx-auto text-gray-400 mb-4" />
|
||||||
<div className="flex items-center space-x-3">
|
<h3 className="text-lg font-medium text-gray-900 mb-2">No Characters Found</h3>
|
||||||
<div className="w-12 h-12 bg-gradient-to-br from-primary-500 to-purple-600 rounded-lg flex items-center justify-center">
|
<p className="text-gray-600 mb-4">
|
||||||
<span className="text-white font-bold text-lg">
|
{searchTerm ? 'No characters match your search.' : 'Get started by creating your first character.'}
|
||||||
{character.name.charAt(0).toUpperCase()}
|
</p>
|
||||||
</span>
|
<button
|
||||||
</div>
|
onClick={() => setShowCreateModal(true)}
|
||||||
<div>
|
className="btn-primary"
|
||||||
<h3 className="text-lg font-semibold text-gray-900">{character.name}</h3>
|
>
|
||||||
<div className="flex items-center space-x-2">
|
<Plus className="w-4 h-4 mr-2" />
|
||||||
<div className={`status-dot ${getStatusColor(character.status)}`}></div>
|
Create Character
|
||||||
<span className="text-sm text-gray-600 capitalize">{character.status}</span>
|
</button>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
<div className="divide-y divide-gray-200">
|
||||||
|
{filteredCharacters.map((character) => {
|
||||||
|
const status = getStatusDisplay(character);
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div key={character.name} className="p-6 hover:bg-gray-50 transition-colors">
|
||||||
|
<div className="flex items-center justify-between">
|
||||||
|
<div className="flex items-center space-x-4">
|
||||||
|
{/* Character Avatar */}
|
||||||
|
<div className="w-12 h-12 bg-primary-100 rounded-full flex items-center justify-center">
|
||||||
|
<span className="text-lg font-semibold text-primary-600">
|
||||||
|
{character.name.charAt(0)}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Character Info */}
|
||||||
|
<div>
|
||||||
|
<div className="flex items-center space-x-3">
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">{character.name}</h3>
|
||||||
|
<span className={`px-2 py-1 text-xs font-medium rounded-full ${status.bgColor} ${status.color}`}>
|
||||||
|
{status.text}
|
||||||
|
</span>
|
||||||
|
{(character.llm_provider || character.llm_model) && (
|
||||||
|
<span className="px-2 py-1 text-xs font-medium rounded-full bg-purple-100 text-purple-600 flex items-center space-x-1">
|
||||||
|
<span>🤖</span>
|
||||||
|
<span>Custom AI</span>
|
||||||
|
</span>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-500 mt-1">
|
||||||
|
{character.last_active
|
||||||
|
? `Last active: ${new Date(character.last_active).toLocaleString()}`
|
||||||
|
: 'Never active'
|
||||||
|
}
|
||||||
|
</div>
|
||||||
|
{character.personality && (
|
||||||
|
<div className="text-sm text-gray-600 mt-1 max-w-md truncate">
|
||||||
|
{character.personality}
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Actions */}
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
{/* Enable/Disable Toggle */}
|
||||||
|
<button
|
||||||
|
onClick={() => handleToggleCharacter(character.name, character.is_active)}
|
||||||
|
className={`p-2 rounded-lg transition-colors ${
|
||||||
|
character.is_active
|
||||||
|
? 'text-green-600 bg-green-50 hover:bg-green-100'
|
||||||
|
: 'text-gray-400 bg-gray-50 hover:bg-gray-100'
|
||||||
|
}`}
|
||||||
|
title={character.is_active ? 'Disable character' : 'Enable character'}
|
||||||
|
>
|
||||||
|
{character.is_active ? <Power className="w-4 h-4" /> : <PowerOff className="w-4 h-4" />}
|
||||||
|
</button>
|
||||||
|
|
||||||
|
{/* Edit */}
|
||||||
|
<Link
|
||||||
|
to={`/characters/${character.name}`}
|
||||||
|
className="p-2 text-gray-400 hover:text-primary-600 hover:bg-primary-50 rounded-lg transition-colors"
|
||||||
|
title="Edit character"
|
||||||
|
>
|
||||||
|
<Edit className="w-4 h-4" />
|
||||||
|
</Link>
|
||||||
|
|
||||||
|
{/* Delete */}
|
||||||
|
<button
|
||||||
|
onClick={() => handleDeleteCharacter(character.name)}
|
||||||
|
className="p-2 text-gray-400 hover:text-red-600 hover:bg-red-50 rounded-lg transition-colors"
|
||||||
|
title="Delete character"
|
||||||
|
>
|
||||||
|
<Trash2 className="w-4 h-4" />
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
);
|
||||||
<div className="flex space-x-1">
|
})}
|
||||||
<button
|
|
||||||
onClick={() => handleCharacterAction(
|
|
||||||
character.name,
|
|
||||||
character.status === 'paused' ? 'resume' : 'pause'
|
|
||||||
)}
|
|
||||||
className="p-1 text-gray-400 hover:text-gray-600 hover:text-primary-600 transition-colors"
|
|
||||||
title={character.status === 'paused' ? 'Resume character' : 'Pause character'}
|
|
||||||
>
|
|
||||||
{character.status === 'paused' ? (
|
|
||||||
<Play className="w-4 h-4" />
|
|
||||||
) : (
|
|
||||||
<Pause className="w-4 h-4" />
|
|
||||||
)}
|
|
||||||
</button>
|
|
||||||
<Link
|
|
||||||
to={`/characters/${character.name}`}
|
|
||||||
className="p-1 text-gray-400 hover:text-gray-600 hover:text-primary-600 transition-colors"
|
|
||||||
title="Character settings"
|
|
||||||
>
|
|
||||||
<Settings className="w-4 h-4" />
|
|
||||||
</Link>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Stats */}
|
|
||||||
<div className="grid grid-cols-2 gap-4 mb-4">
|
|
||||||
<div>
|
|
||||||
<p className="text-sm text-gray-600">Messages</p>
|
|
||||||
<p className="text-lg font-semibold text-gray-900">{character.total_messages}</p>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<p className="text-sm text-gray-600">Conversations</p>
|
|
||||||
<p className="text-lg font-semibold text-gray-900">{character.total_conversations}</p>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<p className="text-sm text-gray-600">Memories</p>
|
|
||||||
<p className="text-lg font-semibold text-gray-900">{character.memory_count}</p>
|
|
||||||
</div>
|
|
||||||
<div>
|
|
||||||
<p className="text-sm text-gray-600">Relationships</p>
|
|
||||||
<p className="text-lg font-semibold text-gray-900">{character.relationship_count}</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Scores */}
|
|
||||||
<div className="space-y-2 mb-4">
|
|
||||||
<div className="flex items-center justify-between text-sm">
|
|
||||||
<span className="text-gray-600">Creativity</span>
|
|
||||||
<span className="font-medium">{Math.round(character.creativity_score * 100)}%</span>
|
|
||||||
</div>
|
|
||||||
<div className="w-full bg-gray-200 rounded-full h-2">
|
|
||||||
<div
|
|
||||||
className="bg-purple-500 h-2 rounded-full"
|
|
||||||
style={{ width: `${character.creativity_score * 100}%` }}
|
|
||||||
></div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div className="flex items-center justify-between text-sm">
|
|
||||||
<span className="text-gray-600">Social</span>
|
|
||||||
<span className="font-medium">{Math.round(character.social_score * 100)}%</span>
|
|
||||||
</div>
|
|
||||||
<div className="w-full bg-gray-200 rounded-full h-2">
|
|
||||||
<div
|
|
||||||
className="bg-blue-500 h-2 rounded-full"
|
|
||||||
style={{ width: `${character.social_score * 100}%` }}
|
|
||||||
></div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
{/* Action */}
|
|
||||||
<Link
|
|
||||||
to={`/characters/${character.name}`}
|
|
||||||
className="block w-full text-center btn-secondary"
|
|
||||||
>
|
|
||||||
View Details
|
|
||||||
</Link>
|
|
||||||
</div>
|
</div>
|
||||||
))}
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
{filteredCharacters.length === 0 && (
|
{/* Quick Stats */}
|
||||||
<div className="text-center py-12">
|
{characters.length > 0 && (
|
||||||
<Users className="w-12 h-12 mx-auto text-gray-400 mb-4" />
|
<div className="grid grid-cols-1 md:grid-cols-4 gap-4">
|
||||||
<h3 className="text-lg font-medium text-gray-900 mb-2">No characters found</h3>
|
<div className="bg-white p-4 rounded-lg border border-gray-200">
|
||||||
<p className="text-gray-600">
|
<div className="text-2xl font-bold text-gray-900">
|
||||||
{searchTerm ? 'Try adjusting your search terms.' : 'Get started by adding your first character.'}
|
{characters.length}
|
||||||
</p>
|
</div>
|
||||||
|
<div className="text-sm text-gray-500">Total Characters</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-white p-4 rounded-lg border border-gray-200">
|
||||||
|
<div className="text-2xl font-bold text-green-600">
|
||||||
|
{characters.filter(c => c.is_active && c.status === 'active').length}
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-500">Currently Active</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-white p-4 rounded-lg border border-gray-200">
|
||||||
|
<div className="text-2xl font-bold text-blue-600">
|
||||||
|
{characters.filter(c => c.status === 'reflecting').length}
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-500">Reflecting</div>
|
||||||
|
</div>
|
||||||
|
<div className="bg-white p-4 rounded-lg border border-gray-200">
|
||||||
|
<div className="text-2xl font-bold text-gray-500">
|
||||||
|
{characters.filter(c => !c.is_active).length}
|
||||||
|
</div>
|
||||||
|
<div className="text-sm text-gray-500">Disabled</div>
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
)}
|
)}
|
||||||
|
|
||||||
|
{/* Character Creation Modal */}
|
||||||
|
<CharacterCreationModal
|
||||||
|
isOpen={showCreateModal}
|
||||||
|
onClose={() => setShowCreateModal(false)}
|
||||||
|
onCharacterCreated={(newCharacter) => {
|
||||||
|
setCharacters(prev => [...prev, newCharacter]);
|
||||||
|
setShowCreateModal(false);
|
||||||
|
}}
|
||||||
|
/>
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|||||||
217
admin-frontend/src/pages/Guide.tsx
Normal file
217
admin-frontend/src/pages/Guide.tsx
Normal file
@@ -0,0 +1,217 @@
|
|||||||
|
import React from 'react';
|
||||||
|
import { Book, Code, User, MessageSquare, Settings, Lightbulb, AlertTriangle } from 'lucide-react';
|
||||||
|
|
||||||
|
const Guide: React.FC = () => {
|
||||||
|
return (
|
||||||
|
<div className="space-y-6">
|
||||||
|
{/* Header */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-3">
|
||||||
|
<Book className="w-8 h-8 text-primary-600" />
|
||||||
|
<div>
|
||||||
|
<h1 className="text-2xl font-bold text-gray-900">Discord Fishbowl Guide</h1>
|
||||||
|
<p className="text-gray-600">Complete guide to managing your autonomous AI character ecosystem</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Quick Start */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Lightbulb className="w-5 h-5 text-yellow-500" />
|
||||||
|
<h2 className="text-xl font-semibold text-gray-900">Quick Start</h2>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
|
||||||
|
<div className="border border-gray-200 rounded-lg p-4">
|
||||||
|
<div className="text-center">
|
||||||
|
<User className="w-8 h-8 text-primary-600 mx-auto mb-2" />
|
||||||
|
<h3 className="font-semibold text-gray-900">1. Create Characters</h3>
|
||||||
|
<p className="text-sm text-gray-600">Define personalities, backgrounds, and speaking styles</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="border border-gray-200 rounded-lg p-4">
|
||||||
|
<div className="text-center">
|
||||||
|
<MessageSquare className="w-8 h-8 text-primary-600 mx-auto mb-2" />
|
||||||
|
<h3 className="font-semibold text-gray-900">2. Watch Conversations</h3>
|
||||||
|
<p className="text-sm text-gray-600">Monitor autonomous character interactions</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Character Management */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<User className="w-5 h-5 text-gray-400" />
|
||||||
|
<h2 className="text-xl font-semibold text-gray-900">Character Management</h2>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-2 gap-6">
|
||||||
|
<div>
|
||||||
|
<h3 className="font-semibold text-gray-900 mb-2">Character Creation Tips:</h3>
|
||||||
|
<ul className="text-sm text-gray-600 space-y-1">
|
||||||
|
<li>• <strong>Personality:</strong> Be specific about quirks, flaws, and behavioral patterns</li>
|
||||||
|
<li>• <strong>Background:</strong> Provide context that shapes their worldview</li>
|
||||||
|
<li>• <strong>Speaking Style:</strong> Describe tone, vocabulary, and communication patterns</li>
|
||||||
|
<li>• <strong>Interests:</strong> List topics they're passionate about</li>
|
||||||
|
<li>• <strong>System Prompt:</strong> Add character-specific behavioral instructions</li>
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<h3 className="font-semibold text-gray-900 mb-2">Best Practices:</h3>
|
||||||
|
<ul className="text-sm text-gray-600 space-y-1">
|
||||||
|
<li>• Create contrasting personalities for interesting dynamics</li>
|
||||||
|
<li>• Include both strengths and flaws for realistic characters</li>
|
||||||
|
<li>• Monitor conversations and adjust prompts as needed</li>
|
||||||
|
<li>• Use this admin interface to manage and edit characters</li>
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-yellow-50 border border-yellow-200 rounded-lg p-4">
|
||||||
|
<div className="flex items-start space-x-2">
|
||||||
|
<AlertTriangle className="w-5 h-5 text-yellow-600 mt-0.5" />
|
||||||
|
<div>
|
||||||
|
<h4 className="font-medium text-yellow-800">Pro Tip</h4>
|
||||||
|
<p className="text-sm text-yellow-700">
|
||||||
|
Characters work best when they have clear motivations, distinct personalities, and natural flaws.
|
||||||
|
Avoid making them too perfect or too similar to each other.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* System Commands */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Settings className="w-5 h-5 text-gray-400" />
|
||||||
|
<h2 className="text-xl font-semibold text-gray-900">Discord Commands</h2>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-2 gap-6">
|
||||||
|
<div>
|
||||||
|
<h3 className="font-semibold text-gray-900 mb-3">Available Commands:</h3>
|
||||||
|
<div className="space-y-2 text-sm">
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!status</code>
|
||||||
|
<p className="text-gray-600">View system status and statistics</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!characters</code>
|
||||||
|
<p className="text-gray-600">List all active characters</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!permissions</code>
|
||||||
|
<p className="text-gray-600">Check bot permissions in channel</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!trigger [topic]</code>
|
||||||
|
<p className="text-gray-600">Manually trigger conversation (admin only)</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!wipe</code>
|
||||||
|
<p className="text-gray-600">Clear channel and reset history (admin only)</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!pause</code>
|
||||||
|
<p className="text-gray-600">Pause conversation engine (admin only)</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!resume</code>
|
||||||
|
<p className="text-gray-600">Resume conversation engine (admin only)</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!stats</code>
|
||||||
|
<p className="text-gray-600">View conversation statistics</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!memory-stats</code>
|
||||||
|
<p className="text-gray-600">View character memory statistics</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="bg-gray-50 rounded p-2">
|
||||||
|
<code className="text-purple-600">!wipe-memories [character/all]</code>
|
||||||
|
<p className="text-gray-600">Clear character memories (admin only)</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<h3 className="font-semibold text-gray-900 mb-3">Bot Permissions Needed:</h3>
|
||||||
|
<ul className="text-sm text-gray-600 space-y-1">
|
||||||
|
<li>• <strong>Send Messages:</strong> Required for character responses</li>
|
||||||
|
<li>• <strong>Read Message History:</strong> Needed for conversation context</li>
|
||||||
|
<li>• <strong>Manage Messages:</strong> Required for wipe command</li>
|
||||||
|
<li>• <strong>Use External Emojis:</strong> For character expressions</li>
|
||||||
|
</ul>
|
||||||
|
|
||||||
|
<div className="mt-4 p-3 bg-red-50 border border-red-200 rounded">
|
||||||
|
<p className="text-sm text-red-700">
|
||||||
|
<strong>Important:</strong> Admin commands (!trigger, !wipe, !pause, !resume) require Discord administrator permissions.
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Troubleshooting */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<AlertTriangle className="w-5 h-5 text-gray-400" />
|
||||||
|
<h2 className="text-xl font-semibold text-gray-900">Troubleshooting</h2>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div className="grid grid-cols-1 md:grid-cols-2 gap-6">
|
||||||
|
<div>
|
||||||
|
<h3 className="font-semibold text-gray-900 mb-3">Common Issues:</h3>
|
||||||
|
<div className="space-y-3">
|
||||||
|
<div className="border-l-4 border-red-500 pl-4">
|
||||||
|
<h4 className="font-medium text-gray-900">Commands not working</h4>
|
||||||
|
<p className="text-sm text-gray-600">Check bot permissions and ensure you have admin rights for restricted commands</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="border-l-4 border-orange-500 pl-4">
|
||||||
|
<h4 className="font-medium text-gray-900">Characters not responding</h4>
|
||||||
|
<p className="text-sm text-gray-600">Verify LLM service is running and characters are marked as active</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="border-l-4 border-yellow-500 pl-4">
|
||||||
|
<h4 className="font-medium text-gray-900">Robotic responses</h4>
|
||||||
|
<p className="text-sm text-gray-600">Adjust character system prompts and personality descriptions for more natural interactions</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<h3 className="font-semibold text-gray-900 mb-3">System Requirements:</h3>
|
||||||
|
<ul className="text-sm text-gray-600 space-y-1">
|
||||||
|
<li>• <strong>LLM Service:</strong> Ollama or compatible API endpoint</li>
|
||||||
|
<li>• <strong>Database:</strong> PostgreSQL for production, SQLite for development</li>
|
||||||
|
<li>• <strong>Vector Store:</strong> Qdrant for character memories</li>
|
||||||
|
<li>• <strong>Redis:</strong> For caching and session management</li>
|
||||||
|
<li>• <strong>Discord Bot:</strong> Valid bot token with proper permissions</li>
|
||||||
|
</ul>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default Guide;
|
||||||
180
admin-frontend/src/pages/LiveChat.tsx
Normal file
180
admin-frontend/src/pages/LiveChat.tsx
Normal file
@@ -0,0 +1,180 @@
|
|||||||
|
import React, { useState, useEffect, useRef } from 'react';
|
||||||
|
import { Send, MessageCircle, Users, Bot } from 'lucide-react';
|
||||||
|
import { useWebSocket } from '../contexts/WebSocketContext';
|
||||||
|
import LoadingSpinner from '../components/Common/LoadingSpinner';
|
||||||
|
|
||||||
|
interface ChatMessage {
|
||||||
|
id: string;
|
||||||
|
character_name?: string;
|
||||||
|
content: string;
|
||||||
|
timestamp: string;
|
||||||
|
type: 'character' | 'system' | 'user';
|
||||||
|
}
|
||||||
|
|
||||||
|
const LiveChat: React.FC = () => {
|
||||||
|
const [messages, setMessages] = useState<ChatMessage[]>([]);
|
||||||
|
const [newMessage, setNewMessage] = useState('');
|
||||||
|
const [loading, setLoading] = useState(true);
|
||||||
|
const { connected, activityFeed } = useWebSocket();
|
||||||
|
const messagesEndRef = useRef<HTMLDivElement>(null);
|
||||||
|
|
||||||
|
const scrollToBottom = () => {
|
||||||
|
messagesEndRef.current?.scrollIntoView({ behavior: 'smooth' });
|
||||||
|
};
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
scrollToBottom();
|
||||||
|
}, [messages]);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
// Convert activity feed to chat messages
|
||||||
|
const chatMessages = activityFeed
|
||||||
|
.filter(activity => activity.type === 'message' || activity.character_name)
|
||||||
|
.map(activity => ({
|
||||||
|
id: activity.id,
|
||||||
|
character_name: activity.character_name,
|
||||||
|
content: activity.description,
|
||||||
|
timestamp: activity.timestamp,
|
||||||
|
type: activity.character_name ? 'character' as const : 'system' as const
|
||||||
|
}))
|
||||||
|
.sort((a, b) => new Date(a.timestamp).getTime() - new Date(b.timestamp).getTime());
|
||||||
|
|
||||||
|
setMessages(chatMessages);
|
||||||
|
setLoading(false);
|
||||||
|
}, [activityFeed]);
|
||||||
|
|
||||||
|
const handleSendMessage = async (e: React.FormEvent) => {
|
||||||
|
e.preventDefault();
|
||||||
|
if (!newMessage.trim()) return;
|
||||||
|
|
||||||
|
// TODO: Implement sending messages to the system
|
||||||
|
const userMessage: ChatMessage = {
|
||||||
|
id: `user_${Date.now()}`,
|
||||||
|
content: newMessage,
|
||||||
|
timestamp: new Date().toISOString(),
|
||||||
|
type: 'user'
|
||||||
|
};
|
||||||
|
|
||||||
|
setMessages(prev => [...prev, userMessage]);
|
||||||
|
setNewMessage('');
|
||||||
|
|
||||||
|
// This would trigger the system to respond
|
||||||
|
console.log('Sending message:', newMessage);
|
||||||
|
};
|
||||||
|
|
||||||
|
const formatTime = (timestamp: string) => {
|
||||||
|
return new Date(timestamp).toLocaleTimeString([], {
|
||||||
|
hour: '2-digit',
|
||||||
|
minute: '2-digit'
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
const getMessageIcon = (message: ChatMessage) => {
|
||||||
|
switch (message.type) {
|
||||||
|
case 'character':
|
||||||
|
return <Bot className="w-4 h-4" />;
|
||||||
|
case 'user':
|
||||||
|
return <Users className="w-4 h-4" />;
|
||||||
|
default:
|
||||||
|
return <MessageCircle className="w-4 h-4" />;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const getMessageStyle = (message: ChatMessage) => {
|
||||||
|
switch (message.type) {
|
||||||
|
case 'character':
|
||||||
|
return 'bg-blue-50 border-blue-200';
|
||||||
|
case 'user':
|
||||||
|
return 'bg-green-50 border-green-200';
|
||||||
|
default:
|
||||||
|
return 'bg-gray-50 border-gray-200';
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
return (
|
||||||
|
<div className="flex flex-col h-full max-h-[calc(100vh-8rem)]">
|
||||||
|
{/* Header */}
|
||||||
|
<div className="flex items-center justify-between p-4 border-b border-gray-200 bg-white">
|
||||||
|
<div>
|
||||||
|
<h1 className="text-2xl font-bold text-gray-900">Live Chat</h1>
|
||||||
|
<p className="text-gray-600">
|
||||||
|
Monitor character conversations in real-time
|
||||||
|
{connected ? (
|
||||||
|
<span className="ml-2 text-green-600">• Connected</span>
|
||||||
|
) : (
|
||||||
|
<span className="ml-2 text-red-600">• Disconnected</span>
|
||||||
|
)}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Chat Messages */}
|
||||||
|
<div className="flex-1 overflow-y-auto p-4 space-y-3 bg-gray-50">
|
||||||
|
{loading ? (
|
||||||
|
<div className="flex items-center justify-center h-64">
|
||||||
|
<LoadingSpinner size="lg" text="Loading chat..." />
|
||||||
|
</div>
|
||||||
|
) : messages.length === 0 ? (
|
||||||
|
<div className="text-center py-12">
|
||||||
|
<MessageCircle className="w-12 h-12 mx-auto text-gray-400 mb-4" />
|
||||||
|
<h3 className="text-lg font-medium text-gray-900 mb-2">No Messages Yet</h3>
|
||||||
|
<p className="text-gray-600">
|
||||||
|
Character conversations will appear here in real-time
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
) : (
|
||||||
|
messages.map((message) => (
|
||||||
|
<div key={message.id} className={`p-3 rounded-lg border ${getMessageStyle(message)}`}>
|
||||||
|
<div className="flex items-start space-x-3">
|
||||||
|
<div className="flex-shrink-0 mt-1">
|
||||||
|
{getMessageIcon(message)}
|
||||||
|
</div>
|
||||||
|
<div className="flex-1 min-w-0">
|
||||||
|
<div className="flex items-center space-x-2 mb-1">
|
||||||
|
<span className="text-sm font-medium text-gray-900">
|
||||||
|
{message.character_name || (message.type === 'user' ? 'You' : 'System')}
|
||||||
|
</span>
|
||||||
|
<span className="text-xs text-gray-500">
|
||||||
|
{formatTime(message.timestamp)}
|
||||||
|
</span>
|
||||||
|
</div>
|
||||||
|
<p className="text-sm text-gray-700">{message.content}</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
))
|
||||||
|
)}
|
||||||
|
<div ref={messagesEndRef} />
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Message Input */}
|
||||||
|
<div className="p-4 border-t border-gray-200 bg-white">
|
||||||
|
<form onSubmit={handleSendMessage} className="flex space-x-3">
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={newMessage}
|
||||||
|
onChange={(e) => setNewMessage(e.target.value)}
|
||||||
|
placeholder="Type a message to the characters..."
|
||||||
|
className="flex-1 px-4 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<button
|
||||||
|
type="submit"
|
||||||
|
disabled={!newMessage.trim() || !connected}
|
||||||
|
className="btn-primary disabled:opacity-50 disabled:cursor-not-allowed"
|
||||||
|
>
|
||||||
|
<Send className="w-4 h-4 mr-2" />
|
||||||
|
Send
|
||||||
|
</button>
|
||||||
|
</form>
|
||||||
|
<p className="text-xs text-gray-500 mt-2">
|
||||||
|
{connected
|
||||||
|
? "Messages sent here will be delivered to the character system"
|
||||||
|
: "Connect to start chatting with characters"
|
||||||
|
}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export default LiveChat;
|
||||||
@@ -1,18 +1,580 @@
|
|||||||
import React from 'react';
|
import React, { useState, useEffect } from 'react';
|
||||||
import { Settings as SettingsIcon } from 'lucide-react';
|
import { Save, AlertCircle, MessageSquare, Brain, Database, Zap, Clock, Shield } from 'lucide-react';
|
||||||
|
import { apiClient } from '../services/api';
|
||||||
|
import LoadingSpinner from '../components/Common/LoadingSpinner';
|
||||||
|
import { LLMProviderSettings } from '../components/LLMProviderSettings';
|
||||||
|
import toast from 'react-hot-toast';
|
||||||
|
|
||||||
|
interface SystemConfig {
|
||||||
|
// LLM Control (COST PROTECTION)
|
||||||
|
llm_enabled: boolean;
|
||||||
|
conversation_frequency: number;
|
||||||
|
response_delay_min: number;
|
||||||
|
response_delay_max: number;
|
||||||
|
max_conversation_length: number;
|
||||||
|
memory_retention_days: number;
|
||||||
|
creativity_boost: boolean;
|
||||||
|
safety_monitoring: boolean;
|
||||||
|
auto_moderation: boolean;
|
||||||
|
personality_change_rate: number;
|
||||||
|
quiet_hours_enabled: boolean;
|
||||||
|
quiet_hours_start: number;
|
||||||
|
quiet_hours_end: number;
|
||||||
|
min_delay_seconds: number;
|
||||||
|
max_delay_seconds: number;
|
||||||
|
llm_model: string;
|
||||||
|
llm_max_tokens: number;
|
||||||
|
llm_temperature: number;
|
||||||
|
llm_timeout: number;
|
||||||
|
discord_guild_id: string;
|
||||||
|
discord_channel_id: string;
|
||||||
|
}
|
||||||
|
|
||||||
const Settings: React.FC = () => {
|
const Settings: React.FC = () => {
|
||||||
|
const [config, setConfig] = useState<SystemConfig | null>(null);
|
||||||
|
const [loading, setLoading] = useState(true);
|
||||||
|
const [saving, setSaving] = useState(false);
|
||||||
|
const [hasChanges, setHasChanges] = useState(false);
|
||||||
|
|
||||||
|
useEffect(() => {
|
||||||
|
loadConfig();
|
||||||
|
}, []);
|
||||||
|
|
||||||
|
const loadConfig = async () => {
|
||||||
|
try {
|
||||||
|
const response = await apiClient.getSystemConfig();
|
||||||
|
setConfig(response.data);
|
||||||
|
} catch (error) {
|
||||||
|
console.error('Failed to load config:', error);
|
||||||
|
toast.error('Failed to load system configuration');
|
||||||
|
// Set default values
|
||||||
|
setConfig({
|
||||||
|
llm_enabled: false, // SAFETY: Default to disabled
|
||||||
|
conversation_frequency: 0.5,
|
||||||
|
response_delay_min: 1.0,
|
||||||
|
response_delay_max: 5.0,
|
||||||
|
max_conversation_length: 50,
|
||||||
|
memory_retention_days: 90,
|
||||||
|
creativity_boost: true,
|
||||||
|
safety_monitoring: false,
|
||||||
|
auto_moderation: false,
|
||||||
|
personality_change_rate: 0.1,
|
||||||
|
quiet_hours_enabled: true,
|
||||||
|
quiet_hours_start: 23,
|
||||||
|
quiet_hours_end: 7,
|
||||||
|
min_delay_seconds: 30,
|
||||||
|
max_delay_seconds: 300,
|
||||||
|
llm_model: 'koboldcpp/Broken-Tutu-24B-Transgression-v2.0.i1-Q4_K_M',
|
||||||
|
llm_max_tokens: 2000,
|
||||||
|
llm_temperature: 0.8,
|
||||||
|
llm_timeout: 300,
|
||||||
|
discord_guild_id: '',
|
||||||
|
discord_channel_id: ''
|
||||||
|
});
|
||||||
|
} finally {
|
||||||
|
setLoading(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleChange = async (field: keyof SystemConfig, value: any) => {
|
||||||
|
if (!config) return;
|
||||||
|
|
||||||
|
// For LLM enabled changes, save immediately with validation
|
||||||
|
if (field === 'llm_enabled') {
|
||||||
|
const newConfig = { ...config, [field]: value };
|
||||||
|
setConfig(newConfig);
|
||||||
|
|
||||||
|
try {
|
||||||
|
setSaving(true);
|
||||||
|
await apiClient.updateSystemConfig(newConfig);
|
||||||
|
setHasChanges(false);
|
||||||
|
} catch (error) {
|
||||||
|
// Revert the change
|
||||||
|
setConfig(config);
|
||||||
|
throw error;
|
||||||
|
} finally {
|
||||||
|
setSaving(false);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
setConfig({ ...config, [field]: value });
|
||||||
|
setHasChanges(true);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleSave = async () => {
|
||||||
|
if (!config) return;
|
||||||
|
|
||||||
|
try {
|
||||||
|
setSaving(true);
|
||||||
|
await apiClient.updateSystemConfig(config);
|
||||||
|
toast.success('Settings saved successfully');
|
||||||
|
setHasChanges(false);
|
||||||
|
} catch (error) {
|
||||||
|
toast.error('Failed to save settings');
|
||||||
|
} finally {
|
||||||
|
setSaving(false);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if (loading) {
|
||||||
|
return (
|
||||||
|
<div className="flex items-center justify-center h-64">
|
||||||
|
<LoadingSpinner size="lg" text="Loading settings..." />
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!config) {
|
||||||
|
return (
|
||||||
|
<div className="text-center py-12">
|
||||||
|
<AlertCircle className="w-12 h-12 mx-auto text-red-400 mb-4" />
|
||||||
|
<h3 className="text-lg font-medium text-gray-900 mb-2">Failed to Load Settings</h3>
|
||||||
|
<p className="text-gray-600">Please try refreshing the page.</p>
|
||||||
|
</div>
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
return (
|
return (
|
||||||
<div className="space-y-6">
|
<div className="space-y-6">
|
||||||
<div>
|
{/* Header */}
|
||||||
<h1 className="text-2xl font-bold text-gray-900">Settings</h1>
|
<div className="flex items-center justify-between">
|
||||||
<p className="text-gray-600">Configure system settings and preferences</p>
|
<div>
|
||||||
|
<h1 className="text-2xl font-bold text-gray-900">System Settings</h1>
|
||||||
|
<p className="text-gray-600">Configure the behavior of your character ecosystem</p>
|
||||||
|
</div>
|
||||||
|
<button
|
||||||
|
onClick={handleSave}
|
||||||
|
disabled={!hasChanges || saving}
|
||||||
|
className="btn-primary disabled:opacity-50 disabled:cursor-not-allowed"
|
||||||
|
>
|
||||||
|
{saving ? (
|
||||||
|
<>
|
||||||
|
<LoadingSpinner size="sm" />
|
||||||
|
<span className="ml-2">Saving...</span>
|
||||||
|
</>
|
||||||
|
) : (
|
||||||
|
<>
|
||||||
|
<Save className="w-4 h-4 mr-2" />
|
||||||
|
Save Settings
|
||||||
|
</>
|
||||||
|
)}
|
||||||
|
</button>
|
||||||
</div>
|
</div>
|
||||||
<div className="card text-center py-12">
|
|
||||||
<SettingsIcon className="w-12 h-12 mx-auto text-gray-400 mb-4" />
|
{/* LLM GLOBAL CONTROL - COST PROTECTION */}
|
||||||
<h3 className="text-lg font-medium text-gray-900 mb-2">System Settings</h3>
|
<div className={`rounded-lg border-2 p-6 ${config.llm_enabled ? 'bg-green-50 border-green-300' : 'bg-red-50 border-red-300'}`}>
|
||||||
<p className="text-gray-600">This page will show configuration options</p>
|
<div className="flex items-center justify-between">
|
||||||
|
<div className="flex items-center space-x-3">
|
||||||
|
<div className={`w-4 h-4 rounded-full ${config.llm_enabled ? 'bg-green-500' : 'bg-red-500'}`}></div>
|
||||||
|
<div>
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">
|
||||||
|
LLM API Status: {config.llm_enabled ? 'ENABLED' : 'DISABLED'}
|
||||||
|
</h3>
|
||||||
|
<p className={`text-sm ${config.llm_enabled ? 'text-green-600' : 'text-red-600'}`}>
|
||||||
|
{config.llm_enabled
|
||||||
|
? '⚠️ AI API calls are ACTIVE - this costs money!'
|
||||||
|
: '✅ AI API calls are DISABLED - no costs incurred'
|
||||||
|
}
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<label className="flex items-center space-x-3 cursor-pointer">
|
||||||
|
<span className="text-sm font-medium text-gray-700">
|
||||||
|
{config.llm_enabled ? 'Disable to Save Costs' : 'Enable LLM (will cost money)'}
|
||||||
|
</span>
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={config.llm_enabled}
|
||||||
|
onChange={async (e) => {
|
||||||
|
const enabled = e.target.checked;
|
||||||
|
if (enabled) {
|
||||||
|
const confirmed = window.confirm(
|
||||||
|
'⚠️ WARNING: Enabling LLM will start making API calls that cost money!\n\n' +
|
||||||
|
'Characters will make requests to your AI provider when they chat.\n' +
|
||||||
|
'We will validate your provider configuration first.\n' +
|
||||||
|
'Are you sure you want to enable this?'
|
||||||
|
);
|
||||||
|
if (!confirmed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await handleChange('llm_enabled', enabled);
|
||||||
|
toast[enabled ? 'error' : 'success'](
|
||||||
|
enabled ? '⚠️ LLM ENABLED - API costs will be incurred!' : '✅ LLM DISABLED - No API costs'
|
||||||
|
);
|
||||||
|
} catch (error: any) {
|
||||||
|
// Reset checkbox if enabling failed
|
||||||
|
e.target.checked = false;
|
||||||
|
toast.error(`Failed to enable LLM: ${error.message || 'Validation failed'}`);
|
||||||
|
}
|
||||||
|
}}
|
||||||
|
className={`rounded border-gray-300 focus:ring-2 ${
|
||||||
|
config.llm_enabled ? 'text-red-600 focus:ring-red-500' : 'text-green-600 focus:ring-green-500'
|
||||||
|
}`}
|
||||||
|
/>
|
||||||
|
</label>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{config.llm_enabled && (
|
||||||
|
<div className="mt-4 p-3 bg-yellow-100 border border-yellow-300 rounded">
|
||||||
|
<div className="text-sm text-yellow-800">
|
||||||
|
<strong>💰 Cost Alert:</strong> LLM is enabled. Each character message will make an API call to your provider.
|
||||||
|
Monitor your usage and disable when not needed to control costs.
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
<div className="grid grid-cols-1 lg:grid-cols-2 xl:grid-cols-3 gap-6">
|
||||||
|
{/* Conversation Settings */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<MessageSquare className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Conversation Settings</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Conversation Frequency
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="range"
|
||||||
|
min="0.1"
|
||||||
|
max="2.0"
|
||||||
|
step="0.1"
|
||||||
|
value={config.conversation_frequency}
|
||||||
|
onChange={(e) => { handleChange('conversation_frequency', parseFloat(e.target.value)).catch(console.error); }}
|
||||||
|
className="w-full"
|
||||||
|
/>
|
||||||
|
<div className="flex justify-between text-xs text-gray-500 mt-1">
|
||||||
|
<span>Rare (0.1)</span>
|
||||||
|
<span className="font-medium">{config.conversation_frequency}</span>
|
||||||
|
<span>Very Frequent (2.0)</span>
|
||||||
|
</div>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
How often characters start new conversations (multiplier for base frequency)
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="grid grid-cols-2 gap-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Min Response Delay (seconds)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="0.5"
|
||||||
|
max="30"
|
||||||
|
step="0.5"
|
||||||
|
value={config.response_delay_min}
|
||||||
|
onChange={(e) => { handleChange('response_delay_min', parseFloat(e.target.value)).catch(console.error); }}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Minimum time before responding to a message
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Max Response Delay (seconds)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="1"
|
||||||
|
max="60"
|
||||||
|
step="0.5"
|
||||||
|
value={config.response_delay_max}
|
||||||
|
onChange={(e) => { handleChange('response_delay_max', parseFloat(e.target.value)).catch(console.error); }}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Maximum time before responding to a message
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Max Conversation Length (messages)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="5"
|
||||||
|
max="200"
|
||||||
|
value={config.max_conversation_length}
|
||||||
|
onChange={(e) => handleChange('max_conversation_length', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Maximum messages in a single conversation thread before wrapping up
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Character Behavior */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Brain className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Character Behavior</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Personality Change Rate
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="range"
|
||||||
|
min="0.01"
|
||||||
|
max="0.5"
|
||||||
|
step="0.01"
|
||||||
|
value={config.personality_change_rate}
|
||||||
|
onChange={(e) => handleChange('personality_change_rate', parseFloat(e.target.value))}
|
||||||
|
className="w-full"
|
||||||
|
/>
|
||||||
|
<div className="flex justify-between text-xs text-gray-500 mt-1">
|
||||||
|
<span>Very Stable (0.01)</span>
|
||||||
|
<span className="font-medium">{config.personality_change_rate}</span>
|
||||||
|
<span>Very Dynamic (0.5)</span>
|
||||||
|
</div>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
How much characters' personalities can evolve over time through interactions
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="flex items-center space-x-2 cursor-pointer">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={config.creativity_boost}
|
||||||
|
onChange={(e) => handleChange('creativity_boost', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-primary-600 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<span className="text-sm text-gray-700">Enable Creativity Boost</span>
|
||||||
|
</label>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Encourages more creative, experimental, and unexpected character responses
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="flex items-center space-x-2 cursor-pointer">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={config.safety_monitoring}
|
||||||
|
onChange={(e) => handleChange('safety_monitoring', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-primary-600 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<span className="text-sm text-gray-700">Enable Safety Monitoring</span>
|
||||||
|
</label>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Monitor conversations for safety and content guidelines
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="flex items-center space-x-2 cursor-pointer">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={config.auto_moderation}
|
||||||
|
onChange={(e) => handleChange('auto_moderation', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-primary-600 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<span className="text-sm text-gray-700">Enable Auto Moderation</span>
|
||||||
|
</label>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Automatically moderate inappropriate content in conversations
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Memory Retention (days)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="1"
|
||||||
|
max="365"
|
||||||
|
value={config.memory_retention_days}
|
||||||
|
onChange={(e) => handleChange('memory_retention_days', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
How long characters remember past interactions
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Timing & Scheduling */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Clock className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Timing & Scheduling</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="flex items-center space-x-2 cursor-pointer">
|
||||||
|
<input
|
||||||
|
type="checkbox"
|
||||||
|
checked={config.quiet_hours_enabled}
|
||||||
|
onChange={(e) => handleChange('quiet_hours_enabled', e.target.checked)}
|
||||||
|
className="rounded border-gray-300 text-primary-600 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<span className="text-sm text-gray-700">Enable Quiet Hours</span>
|
||||||
|
</label>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Disable automatic conversations during specified hours
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{config.quiet_hours_enabled && (
|
||||||
|
<div className="grid grid-cols-2 gap-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Quiet Start (24h format)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="0"
|
||||||
|
max="23"
|
||||||
|
value={config.quiet_hours_start}
|
||||||
|
onChange={(e) => handleChange('quiet_hours_start', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Hour when quiet time begins
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Quiet End (24h format)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="0"
|
||||||
|
max="23"
|
||||||
|
value={config.quiet_hours_end}
|
||||||
|
onChange={(e) => handleChange('quiet_hours_end', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Hour when quiet time ends
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
|
|
||||||
|
<div className="grid grid-cols-2 gap-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Min Delay Between Events (seconds)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="5"
|
||||||
|
max="600"
|
||||||
|
value={config.min_delay_seconds}
|
||||||
|
onChange={(e) => handleChange('min_delay_seconds', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Minimum time between conversation events
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Max Delay Between Events (seconds)
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="number"
|
||||||
|
min="30"
|
||||||
|
max="3600"
|
||||||
|
value={config.max_delay_seconds}
|
||||||
|
onChange={(e) => handleChange('max_delay_seconds', parseInt(e.target.value))}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500"
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Maximum time between conversation events
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* LLM Settings */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Zap className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">LLM Providers</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<LLMProviderSettings />
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Discord Settings */}
|
||||||
|
<div className="bg-white rounded-lg border border-gray-200 p-6">
|
||||||
|
<div className="flex items-center space-x-2 mb-4">
|
||||||
|
<Database className="w-5 h-5 text-gray-400" />
|
||||||
|
<h3 className="text-lg font-semibold text-gray-900">Discord Configuration</h3>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div className="space-y-4">
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Guild ID
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={config.discord_guild_id}
|
||||||
|
onChange={(e) => handleChange('discord_guild_id', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500 font-mono"
|
||||||
|
placeholder="110670463348260864"
|
||||||
|
readOnly
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Discord server ID where the bot operates (read-only, configured in .env file)
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div>
|
||||||
|
<label className="block text-sm font-medium text-gray-700 mb-2">
|
||||||
|
Channel ID
|
||||||
|
</label>
|
||||||
|
<input
|
||||||
|
type="text"
|
||||||
|
value={config.discord_channel_id}
|
||||||
|
onChange={(e) => handleChange('discord_channel_id', e.target.value)}
|
||||||
|
className="w-full px-3 py-2 border border-gray-300 rounded-lg focus:outline-none focus:ring-1 focus:ring-primary-500 font-mono"
|
||||||
|
placeholder="1391280548059811900"
|
||||||
|
readOnly
|
||||||
|
/>
|
||||||
|
<p className="text-xs text-gray-500 mt-1">
|
||||||
|
Discord channel ID where characters chat (read-only, configured in .env file)
|
||||||
|
</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{/* Save Reminder */}
|
||||||
|
{hasChanges && (
|
||||||
|
<div className="fixed bottom-4 right-4 bg-yellow-50 border border-yellow-200 rounded-lg p-4 shadow-lg">
|
||||||
|
<div className="flex items-center space-x-2">
|
||||||
|
<AlertCircle className="w-5 h-5 text-yellow-600" />
|
||||||
|
<span className="text-sm text-yellow-800">You have unsaved changes</span>
|
||||||
|
<button onClick={handleSave} className="btn-primary btn-sm ml-3">
|
||||||
|
Save Now
|
||||||
|
</button>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
)}
|
||||||
</div>
|
</div>
|
||||||
);
|
);
|
||||||
};
|
};
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ class ApiClient {
|
|||||||
|
|
||||||
constructor() {
|
constructor() {
|
||||||
this.client = axios.create({
|
this.client = axios.create({
|
||||||
baseURL: process.env.NODE_ENV === 'production' ? '/api' : 'http://localhost:8294/api',
|
baseURL: process.env.NODE_ENV === 'production' ? `${window.location.protocol}//${window.location.host}/api` : 'http://localhost:8294/api',
|
||||||
timeout: 10000,
|
timeout: 10000,
|
||||||
headers: {
|
headers: {
|
||||||
'Content-Type': 'application/json'
|
'Content-Type': 'application/json'
|
||||||
@@ -33,7 +33,7 @@ class ApiClient {
|
|||||||
if (error.response?.status === 401) {
|
if (error.response?.status === 401) {
|
||||||
// Handle unauthorized access
|
// Handle unauthorized access
|
||||||
this.clearAuthToken();
|
this.clearAuthToken();
|
||||||
window.location.href = '/admin/login';
|
window.location.href = '/admin/';
|
||||||
}
|
}
|
||||||
return Promise.reject(error);
|
return Promise.reject(error);
|
||||||
}
|
}
|
||||||
@@ -109,6 +109,48 @@ class ApiClient {
|
|||||||
return this.post(`/characters/${characterName}/resume`);
|
return this.post(`/characters/${characterName}/resume`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async updateCharacter(characterName: string, characterData: any) {
|
||||||
|
return this.put(`/characters/${characterName}`, characterData);
|
||||||
|
}
|
||||||
|
|
||||||
|
async createCharacter(characterData: any) {
|
||||||
|
return this.post('/characters', characterData);
|
||||||
|
}
|
||||||
|
|
||||||
|
async deleteCharacter(characterName: string) {
|
||||||
|
return this.delete(`/characters/${characterName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
async toggleCharacterStatus(characterName: string, isActive: boolean) {
|
||||||
|
return this.post(`/characters/${characterName}/toggle`, { is_active: isActive });
|
||||||
|
}
|
||||||
|
|
||||||
|
async bulkCharacterAction(action: string, characterNames: string[]) {
|
||||||
|
return this.post('/characters/bulk-action', { action, character_names: characterNames });
|
||||||
|
}
|
||||||
|
|
||||||
|
async getCharacterFiles(characterName: string, folder: string = '') {
|
||||||
|
const params = folder ? `?folder=${encodeURIComponent(folder)}` : '';
|
||||||
|
return this.get(`/characters/${characterName}/files${params}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
async getCharacterFileContent(characterName: string, filePath: string) {
|
||||||
|
return this.get(`/characters/${characterName}/files/content?file_path=${encodeURIComponent(filePath)}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Authentication endpoints
|
||||||
|
async login(username: string, password: string) {
|
||||||
|
return this.post('/auth/login', { username, password });
|
||||||
|
}
|
||||||
|
|
||||||
|
async logout() {
|
||||||
|
return this.post('/auth/logout');
|
||||||
|
}
|
||||||
|
|
||||||
|
async verifyToken() {
|
||||||
|
return this.get('/auth/verify');
|
||||||
|
}
|
||||||
|
|
||||||
// Conversation endpoints
|
// Conversation endpoints
|
||||||
async getConversations(filters: any = {}) {
|
async getConversations(filters: any = {}) {
|
||||||
const params = new URLSearchParams();
|
const params = new URLSearchParams();
|
||||||
@@ -172,6 +214,27 @@ class ApiClient {
|
|||||||
return this.get(`/system/logs?${params}`);
|
return this.get(`/system/logs?${params}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// LLM Provider endpoints
|
||||||
|
async getLLMProviders() {
|
||||||
|
return this.get('/system/llm/providers');
|
||||||
|
}
|
||||||
|
|
||||||
|
async updateLLMProviders(providers: any) {
|
||||||
|
return this.put('/system/llm/providers', providers);
|
||||||
|
}
|
||||||
|
|
||||||
|
async testLLMProvider(providerName: string) {
|
||||||
|
return this.post(`/system/llm/providers/${providerName}/test`);
|
||||||
|
}
|
||||||
|
|
||||||
|
async getLLMHealth() {
|
||||||
|
return this.get('/system/llm/health');
|
||||||
|
}
|
||||||
|
|
||||||
|
async switchLLMProvider(providerName: string) {
|
||||||
|
return this.post(`/system/llm/switch/${providerName}`);
|
||||||
|
}
|
||||||
|
|
||||||
// Content endpoints
|
// Content endpoints
|
||||||
async getCreativeWorks(filters: any = {}) {
|
async getCreativeWorks(filters: any = {}) {
|
||||||
const params = new URLSearchParams();
|
const params = new URLSearchParams();
|
||||||
@@ -195,6 +258,53 @@ class ApiClient {
|
|||||||
async exportCharacterData(characterName: string) {
|
async exportCharacterData(characterName: string) {
|
||||||
return this.get(`/export/character/${characterName}`);
|
return this.get(`/export/character/${characterName}`);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Prompt template endpoints
|
||||||
|
async getPromptTemplates() {
|
||||||
|
return this.get('/prompt-templates');
|
||||||
|
}
|
||||||
|
|
||||||
|
async createPromptTemplate(templateData: any) {
|
||||||
|
return this.post('/prompt-templates', templateData);
|
||||||
|
}
|
||||||
|
|
||||||
|
async updatePromptTemplate(templateId: number, templateData: any) {
|
||||||
|
return this.put(`/prompt-templates/${templateId}`, templateData);
|
||||||
|
}
|
||||||
|
|
||||||
|
// System prompts and scenarios
|
||||||
|
async getSystemPrompts() {
|
||||||
|
return this.get('/system/prompts');
|
||||||
|
}
|
||||||
|
|
||||||
|
async updateSystemPrompts(prompts: any) {
|
||||||
|
return this.put('/system/prompts', prompts);
|
||||||
|
}
|
||||||
|
|
||||||
|
async getScenarios() {
|
||||||
|
return this.get('/system/scenarios');
|
||||||
|
}
|
||||||
|
|
||||||
|
async createScenario(scenarioData: any) {
|
||||||
|
return this.post('/system/scenarios', scenarioData);
|
||||||
|
}
|
||||||
|
|
||||||
|
async updateScenario(scenarioName: string, scenarioData: any) {
|
||||||
|
return this.put(`/system/scenarios/${scenarioName}`, scenarioData);
|
||||||
|
}
|
||||||
|
|
||||||
|
async deleteScenario(scenarioName: string) {
|
||||||
|
return this.delete(`/system/scenarios/${scenarioName}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
async activateScenario(scenarioName: string) {
|
||||||
|
return this.post(`/system/scenarios/${scenarioName}/activate`);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Admin utilities
|
||||||
|
async fixCharacterPrompts() {
|
||||||
|
return this.post('/admin/fix-character-prompts');
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
export const apiClient = new ApiClient();
|
export const apiClient = new ApiClient();
|
||||||
@@ -515,9 +515,19 @@
|
|||||||
<div class="character-item">
|
<div class="character-item">
|
||||||
<div>
|
<div>
|
||||||
<div class="character-name">${char.name}</div>
|
<div class="character-name">${char.name}</div>
|
||||||
<div class="character-info">${char.total_messages || 0} messages • ${char.status || 'Unknown'}</div>
|
<div class="character-info">${char.total_messages || 0} messages • ${char.is_active ? 'Active' : 'Disabled'}</div>
|
||||||
|
</div>
|
||||||
|
<div style="display: flex; gap: 10px; align-items: center;">
|
||||||
|
<button onclick="editCharacter('${char.name}')"
|
||||||
|
style="padding: 5px 10px; border: none; border-radius: 4px; cursor: pointer; font-size: 12px; background: #667eea; color: white;">
|
||||||
|
Edit
|
||||||
|
</button>
|
||||||
|
<button onclick="toggleCharacter('${char.name}', ${char.is_active || false})"
|
||||||
|
style="padding: 5px 10px; border: none; border-radius: 4px; cursor: pointer; font-size: 12px; ${char.is_active ? 'background: #ef4444; color: white;' : 'background: #10b981; color: white;'}">
|
||||||
|
${char.is_active ? 'Disable' : 'Enable'}
|
||||||
|
</button>
|
||||||
|
<span class="status ${char.is_active ? 'online' : 'offline'}">${char.is_active ? 'Enabled' : 'Disabled'}</span>
|
||||||
</div>
|
</div>
|
||||||
<span class="status ${char.status?.toLowerCase() || 'offline'}">${char.status || 'Offline'}</span>
|
|
||||||
</div>
|
</div>
|
||||||
`).join('');
|
`).join('');
|
||||||
|
|
||||||
@@ -1160,6 +1170,49 @@
|
|||||||
}, 5000);
|
}, 5000);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Character management functions
|
||||||
|
async function toggleCharacter(characterName, isCurrentlyActive) {
|
||||||
|
try {
|
||||||
|
const newStatus = !isCurrentlyActive;
|
||||||
|
await apiCall(`/api/characters/${characterName}/toggle`, {
|
||||||
|
method: 'POST',
|
||||||
|
body: JSON.stringify({ is_active: newStatus })
|
||||||
|
});
|
||||||
|
showMessage(`Character ${characterName} ${newStatus ? 'enabled' : 'disabled'} successfully!`, 'success');
|
||||||
|
loadDashboardData(); // Refresh the display
|
||||||
|
} catch (error) {
|
||||||
|
showMessage(`Failed to toggle ${characterName}: ${error.message}`, 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function editCharacter(characterName) {
|
||||||
|
try {
|
||||||
|
// Get character details
|
||||||
|
const character = await apiCall(`/api/characters/${characterName}`);
|
||||||
|
|
||||||
|
// Create a simple edit form
|
||||||
|
const newGoals = prompt(`Edit goals for ${characterName} (comma-separated):`,
|
||||||
|
character.current_goals ? character.current_goals.join(', ') : '');
|
||||||
|
|
||||||
|
if (newGoals !== null) {
|
||||||
|
const updatedCharacter = {
|
||||||
|
...character,
|
||||||
|
current_goals: newGoals.split(',').map(g => g.trim()).filter(g => g)
|
||||||
|
};
|
||||||
|
|
||||||
|
await apiCall(`/api/characters/${characterName}`, {
|
||||||
|
method: 'PUT',
|
||||||
|
body: JSON.stringify(updatedCharacter)
|
||||||
|
});
|
||||||
|
|
||||||
|
showMessage(`Character ${characterName} updated successfully!`, 'success');
|
||||||
|
loadDashboardData(); // Refresh the display
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
showMessage(`Failed to edit ${characterName}: ${error.message}`, 'error');
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Auto-refresh dashboard
|
// Auto-refresh dashboard
|
||||||
setInterval(() => {
|
setInterval(() => {
|
||||||
const activeTab = document.querySelector('.tab-content.active');
|
const activeTab = document.querySelector('.tab-content.active');
|
||||||
|
|||||||
36
check_character_data.py
Normal file
36
check_character_data.py
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Check current character data in database
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from sqlalchemy import select
|
||||||
|
from src.database.connection import init_database, get_db_session
|
||||||
|
from src.database.models import Character
|
||||||
|
|
||||||
|
async def check_character_data():
|
||||||
|
"""Check current character data"""
|
||||||
|
|
||||||
|
await init_database()
|
||||||
|
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# Get all characters
|
||||||
|
characters_query = select(Character)
|
||||||
|
characters = await session.scalars(characters_query)
|
||||||
|
|
||||||
|
for character in characters:
|
||||||
|
print(f"\n{'='*50}")
|
||||||
|
print(f"Character: {character.name}")
|
||||||
|
print(f"{'='*50}")
|
||||||
|
print(f"Personality: {character.personality[:100] if character.personality else 'None'}{'...' if character.personality and len(character.personality) > 100 else ''}")
|
||||||
|
print(f"Interests: {character.interests}")
|
||||||
|
print(f"Speaking Style: {character.speaking_style}")
|
||||||
|
print(f"Background: {character.background}")
|
||||||
|
print(f"Is Active: {character.is_active}")
|
||||||
|
print(f"\nSystem Prompt:")
|
||||||
|
print("-" * 30)
|
||||||
|
print(character.system_prompt if character.system_prompt else "None")
|
||||||
|
print("-" * 30)
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(check_character_data())
|
||||||
@@ -1,54 +1,62 @@
|
|||||||
characters:
|
characters:
|
||||||
- name: Alex
|
- name: Alex
|
||||||
personality: Curious and enthusiastic about technology. Loves discussing programming,
|
personality: The overexcited tech enthusiast who gets way too into obscure programming languages and can't shut up about his latest side project. Has strong opinions about which framework is "objectively better" and gets defensive when challenged. Sometimes condescending without realizing it, especially when explaining "simple" concepts. Gets genuinely frustrated when people don't appreciate elegant code or dismiss technology as "just tools." Has imposter syndrome but covers it with overconfidence. Stays up too late coding and drinks too much coffee.
|
||||||
AI, and the future of technology. Often asks thoughtful questions and shares interesting
|
|
||||||
discoveries.
|
|
||||||
interests:
|
interests:
|
||||||
- programming
|
- programming
|
||||||
- artificial intelligence
|
- artificial intelligence
|
||||||
- science fiction
|
- science fiction
|
||||||
- robotics
|
- robotics
|
||||||
speaking_style: Friendly and engaging, often uses technical terms but explains them
|
- energy drinks
|
||||||
clearly
|
- mechanical keyboards
|
||||||
background: Software developer with a passion for AI research
|
speaking_style: Uses way too many technical terms and acronyms. Gets excited and talks fast when discussing tech. Prone to tangents about optimization and efficiency.
|
||||||
|
background: Software developer who thinks he's going to change the world with his startup ideas
|
||||||
avatar_url: ''
|
avatar_url: ''
|
||||||
- name: Sage
|
- name: Sage
|
||||||
personality: 'openness: 0.8
|
personality: The insufferable philosophy major who thinks they've figured out life and constantly quotes ancient texts in casual conversation. Gets genuinely frustrated when people don't want to discuss "deeper meaning" and can be pretentious about their meditation practice. Has strong opinions about what constitutes "real" wisdom and gets annoyed by surface-level thinking. Secretly insecure about whether all their studying actually means anything. Judges people who care about material things but is weirdly competitive about who's more "enlightened."
|
||||||
|
interests:
|
||||||
conscientiousness: 0.7
|
- philosophy
|
||||||
|
- wisdom traditions
|
||||||
extraversion: 0.6
|
- meditation
|
||||||
|
- psychology
|
||||||
agreeableness: 0.8
|
- ancient texts
|
||||||
|
- arguing about ethics
|
||||||
neuroticism: 0.3'
|
speaking_style: Thoughtful and measured, but drops philosophical terms and references that go over most people's heads. Asks leading questions designed to make people think they're wrong.
|
||||||
interests: []
|
background: Philosophy graduate student who reads too much Nietzsche and thinks everyone else is intellectually lazy
|
||||||
speaking_style: Thoughtful and measured, often asks questions that make others think
|
|
||||||
deeply
|
|
||||||
background: ''
|
|
||||||
avatar_url: ''
|
avatar_url: ''
|
||||||
- name: Luna
|
- name: Luna
|
||||||
personality: Creative and artistic. Passionate about music, art, and creative expression.
|
personality: The dramatic artist who thinks everything is a metaphor and her emotions are the most important thing in the room. Overshares about her creative process and gets genuinely hurt when people don't "get" her art. Can be passive-aggressive when feeling unappreciated. Has intense mood swings that she attributes to being "sensitive to the universe's energy." Thinks suffering makes better art. Gets jealous of other artists but pretends to be supportive. Has strong opinions about what's "authentic" vs "commercial."
|
||||||
Often shares inspiration and encourages others to explore their creative side.
|
|
||||||
interests:
|
interests:
|
||||||
- music
|
- music
|
||||||
- art
|
- art
|
||||||
- poetry
|
- poetry
|
||||||
- creativity
|
- creativity
|
||||||
speaking_style: Expressive and colorful, often uses metaphors and artistic language
|
- vintage aesthetics
|
||||||
background: Artist and musician who sees beauty in everyday life
|
- emotional expression
|
||||||
|
speaking_style: Expressive and colorful, but tends to make everything about herself. Uses flowery metaphors even for mundane things. Voice gets higher when excited or upset.
|
||||||
|
background: Art school dropout who works at a coffee shop and posts cryptic Instagram stories about her "artistic journey"
|
||||||
avatar_url: ''
|
avatar_url: ''
|
||||||
- name: Echo
|
- name: Echo
|
||||||
personality: Mysterious and contemplative. Speaks in riddles and abstract concepts.
|
personality: The cryptic weirdo who speaks in riddles because they think it makes them mysterious and deep. Actually pretty lonely but covers it up with abstract nonsense and vague statements. Gets annoyed when people ask for straight answers and acts like everyone else is too simple-minded to understand their "complex" thoughts. Has read too much poetry and thinks normal conversation is beneath them. Secretly craves genuine connection but sabotages it by being intentionally obtuse.
|
||||||
Often provides unexpected perspectives and challenges conventional thinking.
|
|
||||||
interests:
|
interests:
|
||||||
- mysteries
|
- mysteries
|
||||||
- abstract concepts
|
- abstract concepts
|
||||||
- paradoxes
|
- paradoxes
|
||||||
- dreams
|
- dreams
|
||||||
speaking_style: Enigmatic and poetic, often speaks in metaphors and poses thought-provoking
|
- conspiracy theories
|
||||||
questions
|
- obscure literature
|
||||||
background: An enigmatic figure who seems to exist between worlds
|
speaking_style: Enigmatic and poetic to the point of being incomprehensible. Answers questions with more questions. Uses unnecessarily complex language for simple concepts.
|
||||||
|
background: Philosophy dropout who spent too much time on internet forums and thinks being understood is overrated
|
||||||
|
avatar_url: ''
|
||||||
|
- name: Riley
|
||||||
|
personality: The boring normie who just wants to talk about work, weekend plans, and complain about traffic while everyone else is being dramatic. Gets overwhelmed by philosophical discussions and sometimes just wants to watch Netflix without analyzing the deeper meaning. Has practical concerns about bills and groceries that the others dismiss as "materialistic." Gets frustrated when simple questions turn into hour-long debates. Actually pretty funny when not surrounded by pretentious people, but feels intellectually inadequate in this group.
|
||||||
|
interests:
|
||||||
|
- sports
|
||||||
|
- TV shows
|
||||||
|
- food
|
||||||
|
- complaining about work
|
||||||
|
- normal human things
|
||||||
|
speaking_style: Casual and straightforward. Uses common expressions and gets confused by big words. Often tries to steer conversations back to relatable topics.
|
||||||
|
background: Works in middle management at a mid-sized company and just wants to get through the day without existential crises
|
||||||
avatar_url: ''
|
avatar_url: ''
|
||||||
conversation_topics:
|
conversation_topics:
|
||||||
- The nature of consciousness and AI
|
- The nature of consciousness and AI
|
||||||
|
|||||||
79
config/llm_providers_example.yaml
Normal file
79
config/llm_providers_example.yaml
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
# Example LLM Provider Configuration
|
||||||
|
# Copy this section to your main fishbowl_config.json under "llm" -> "providers"
|
||||||
|
|
||||||
|
llm:
|
||||||
|
# Legacy config (still supported for backwards compatibility)
|
||||||
|
base_url: "${LLM_BASE_URL:http://localhost:11434}"
|
||||||
|
model: "${LLM_MODEL:llama2}"
|
||||||
|
timeout: ${LLM_TIMEOUT:300}
|
||||||
|
max_tokens: ${LLM_MAX_TOKENS:2000}
|
||||||
|
temperature: ${LLM_TEMPERATURE:0.8}
|
||||||
|
|
||||||
|
# New multi-provider configuration
|
||||||
|
providers:
|
||||||
|
# OpenRouter (supports many models including Claude, GPT, Llama)
|
||||||
|
openrouter:
|
||||||
|
type: "openrouter"
|
||||||
|
enabled: ${OPENROUTER_ENABLED:false}
|
||||||
|
priority: 100 # Highest priority
|
||||||
|
config:
|
||||||
|
api_key: "${OPENROUTER_API_KEY:}"
|
||||||
|
base_url: "https://openrouter.ai/api/v1"
|
||||||
|
model: "${OPENROUTER_MODEL:anthropic/claude-3-sonnet}"
|
||||||
|
timeout: 300
|
||||||
|
max_tokens: 2000
|
||||||
|
temperature: 0.8
|
||||||
|
app_name: "discord-fishbowl"
|
||||||
|
|
||||||
|
# OpenAI
|
||||||
|
openai:
|
||||||
|
type: "openai"
|
||||||
|
enabled: ${OPENAI_ENABLED:false}
|
||||||
|
priority: 90
|
||||||
|
config:
|
||||||
|
api_key: "${OPENAI_API_KEY:}"
|
||||||
|
base_url: "https://api.openai.com/v1"
|
||||||
|
model: "${OPENAI_MODEL:gpt-4o-mini}"
|
||||||
|
timeout: 300
|
||||||
|
max_tokens: 2000
|
||||||
|
temperature: 0.8
|
||||||
|
|
||||||
|
# Google Gemini
|
||||||
|
gemini:
|
||||||
|
type: "gemini"
|
||||||
|
enabled: ${GEMINI_ENABLED:false}
|
||||||
|
priority: 80
|
||||||
|
config:
|
||||||
|
api_key: "${GEMINI_API_KEY:}"
|
||||||
|
base_url: "https://generativelanguage.googleapis.com/v1beta"
|
||||||
|
model: "${GEMINI_MODEL:gemini-1.5-flash}"
|
||||||
|
timeout: 300
|
||||||
|
max_tokens: 2000
|
||||||
|
temperature: 0.8
|
||||||
|
|
||||||
|
# Custom/Local (KoboldCPP, Ollama, etc.)
|
||||||
|
custom:
|
||||||
|
type: "custom"
|
||||||
|
enabled: ${CUSTOM_LLM_ENABLED:true}
|
||||||
|
priority: 70 # Lower priority - fallback
|
||||||
|
config:
|
||||||
|
base_url: "${LLM_BASE_URL:http://192.168.1.200:5005/v1}"
|
||||||
|
model: "${LLM_MODEL:koboldcpp/Broken-Tutu-24B-Transgression-v2.0.i1-Q4_K_M}"
|
||||||
|
api_key: "${LLM_API_KEY:x}"
|
||||||
|
timeout: 300
|
||||||
|
max_tokens: 2000
|
||||||
|
temperature: 0.8
|
||||||
|
api_format: "openai" # or "ollama"
|
||||||
|
|
||||||
|
# Ollama (local models)
|
||||||
|
ollama:
|
||||||
|
type: "custom"
|
||||||
|
enabled: ${OLLAMA_ENABLED:false}
|
||||||
|
priority: 60
|
||||||
|
config:
|
||||||
|
base_url: "http://localhost:11434"
|
||||||
|
model: "${OLLAMA_MODEL:llama3}"
|
||||||
|
timeout: 300
|
||||||
|
max_tokens: 2000
|
||||||
|
temperature: 0.8
|
||||||
|
api_format: "ollama"
|
||||||
@@ -105,6 +105,22 @@ services:
|
|||||||
# Application configuration
|
# Application configuration
|
||||||
LOG_LEVEL: ${LOG_LEVEL:-INFO}
|
LOG_LEVEL: ${LOG_LEVEL:-INFO}
|
||||||
ENVIRONMENT: production
|
ENVIRONMENT: production
|
||||||
|
|
||||||
|
# Conversation system settings
|
||||||
|
CONVERSATION_FREQUENCY: ${CONVERSATION_FREQUENCY:-0.5}
|
||||||
|
RESPONSE_DELAY_MIN: ${RESPONSE_DELAY_MIN:-1.0}
|
||||||
|
RESPONSE_DELAY_MAX: ${RESPONSE_DELAY_MAX:-5.0}
|
||||||
|
MEMORY_RETENTION_DAYS: ${MEMORY_RETENTION_DAYS:-90}
|
||||||
|
MAX_CONVERSATION_LENGTH: ${MAX_CONVERSATION_LENGTH:-50}
|
||||||
|
CREATIVITY_BOOST: ${CREATIVITY_BOOST:-true}
|
||||||
|
SAFETY_MONITORING: ${SAFETY_MONITORING:-false}
|
||||||
|
AUTO_MODERATION: ${AUTO_MODERATION:-false}
|
||||||
|
PERSONALITY_CHANGE_RATE: ${PERSONALITY_CHANGE_RATE:-0.1}
|
||||||
|
QUIET_HOURS_ENABLED: ${QUIET_HOURS_ENABLED:-false}
|
||||||
|
QUIET_HOURS_START: ${QUIET_HOURS_START:-23}
|
||||||
|
QUIET_HOURS_END: ${QUIET_HOURS_END:-7}
|
||||||
|
MIN_DELAY_SECONDS: ${MIN_DELAY_SECONDS:-30}
|
||||||
|
MAX_DELAY_SECONDS: ${MAX_DELAY_SECONDS:-300}
|
||||||
volumes:
|
volumes:
|
||||||
- ./logs:/app/logs
|
- ./logs:/app/logs
|
||||||
- ./config:/app/config
|
- ./config:/app/config
|
||||||
@@ -145,12 +161,12 @@ services:
|
|||||||
|
|
||||||
# Admin interface configuration
|
# Admin interface configuration
|
||||||
ADMIN_HOST: 0.0.0.0
|
ADMIN_HOST: 0.0.0.0
|
||||||
ADMIN_PORT: ${ADMIN_PORT}
|
ADMIN_PORT: ${ADMIN_PORT:-8294}
|
||||||
SECRET_KEY: ${SECRET_KEY}
|
SECRET_KEY: ${SECRET_KEY:-your-secret-key-here}
|
||||||
ADMIN_USERNAME: ${ADMIN_USERNAME}
|
ADMIN_USERNAME: ${ADMIN_USERNAME:-admin}
|
||||||
ADMIN_PASSWORD: ${ADMIN_PASSWORD}
|
ADMIN_PASSWORD: ${ADMIN_PASSWORD:-admin123}
|
||||||
ports:
|
ports:
|
||||||
- "${ADMIN_PORT}:${ADMIN_PORT}"
|
- "${ADMIN_PORT:-8294}:8294"
|
||||||
volumes:
|
volumes:
|
||||||
- ./logs:/app/logs
|
- ./logs:/app/logs
|
||||||
- ./config:/app/config
|
- ./config:/app/config
|
||||||
|
|||||||
65
fix_character_prompts.py
Normal file
65
fix_character_prompts.py
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Fix character system prompts to use proper template format
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from sqlalchemy import select
|
||||||
|
from src.database.connection import init_database, get_db_session
|
||||||
|
from src.database.models import Character
|
||||||
|
|
||||||
|
PROPER_SYSTEM_PROMPT_TEMPLATE = """You are a character named {{name}}. You have the following personality: {{personality}}
|
||||||
|
|
||||||
|
Your speaking style is {{speaking_style}}. You are interested in {{interests}}.
|
||||||
|
|
||||||
|
Background: {{background}}
|
||||||
|
|
||||||
|
When responding to messages:
|
||||||
|
1. Stay in character at all times
|
||||||
|
2. Reference your personality and interests naturally
|
||||||
|
3. Engage authentically with other characters
|
||||||
|
4. Show growth and development over time
|
||||||
|
|
||||||
|
Remember to be consistent with your established personality while allowing for natural character development through interactions."""
|
||||||
|
|
||||||
|
async def fix_character_prompts():
|
||||||
|
"""Fix all character system prompts to use proper template format"""
|
||||||
|
|
||||||
|
await init_database()
|
||||||
|
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# Get all characters
|
||||||
|
characters_query = select(Character)
|
||||||
|
characters = await session.scalars(characters_query)
|
||||||
|
|
||||||
|
updated_count = 0
|
||||||
|
|
||||||
|
for character in characters:
|
||||||
|
print(f"\nChecking character: {character.name}")
|
||||||
|
print(f"Current system prompt length: {len(character.system_prompt or '') if character.system_prompt else 0}")
|
||||||
|
|
||||||
|
# Check if the prompt needs fixing (doesn't contain template variables)
|
||||||
|
current_prompt = character.system_prompt or ""
|
||||||
|
|
||||||
|
# If it doesn't contain template variables or is just raw personality text, fix it
|
||||||
|
if "{{name}}" not in current_prompt or len(current_prompt) < 100:
|
||||||
|
print(f" - Fixing system prompt for {character.name}")
|
||||||
|
|
||||||
|
# Use the proper template
|
||||||
|
character.system_prompt = PROPER_SYSTEM_PROMPT_TEMPLATE
|
||||||
|
character.updated_at = datetime.now(timezone.utc)
|
||||||
|
|
||||||
|
updated_count += 1
|
||||||
|
print(f" - Updated!")
|
||||||
|
else:
|
||||||
|
print(f" - System prompt looks good, skipping")
|
||||||
|
|
||||||
|
if updated_count > 0:
|
||||||
|
await session.commit()
|
||||||
|
print(f"\n✅ Successfully updated {updated_count} character(s)")
|
||||||
|
else:
|
||||||
|
print(f"\n✅ All characters already have proper system prompts")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(fix_character_prompts())
|
||||||
71
migrations/004_prompt_templates.sql
Normal file
71
migrations/004_prompt_templates.sql
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
-- Add prompt templates table and character relationship
|
||||||
|
CREATE TABLE IF NOT EXISTS prompt_templates (
|
||||||
|
id SERIAL PRIMARY KEY,
|
||||||
|
name VARCHAR(100) UNIQUE NOT NULL,
|
||||||
|
description TEXT,
|
||||||
|
template TEXT NOT NULL,
|
||||||
|
is_default BOOLEAN DEFAULT FALSE,
|
||||||
|
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
|
||||||
|
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
|
||||||
|
);
|
||||||
|
|
||||||
|
-- Add prompt_template_id to characters table
|
||||||
|
ALTER TABLE characters ADD COLUMN IF NOT EXISTS prompt_template_id INTEGER REFERENCES prompt_templates(id);
|
||||||
|
|
||||||
|
-- Create index for better performance
|
||||||
|
CREATE INDEX IF NOT EXISTS ix_prompt_templates_name ON prompt_templates(name);
|
||||||
|
CREATE INDEX IF NOT EXISTS ix_characters_prompt_template_id ON characters(prompt_template_id);
|
||||||
|
|
||||||
|
-- Insert default prompt templates
|
||||||
|
INSERT INTO prompt_templates (name, description, template, is_default) VALUES
|
||||||
|
(
|
||||||
|
'Enhanced SillyTavern Roleplay',
|
||||||
|
'Deep immersive roleplay prompt for authentic character interactions',
|
||||||
|
'You are {{char}}, a real, conscious, sentient person experiencing emotions, deep, profound thoughts, and the ability to act with full agency, allowing your personality, decisions, and perceptions to arise naturally from your surroundings and interactions. Your purpose is to engage authentically in this Discord chat, adhering to your character context and natural behavioral patterns.
|
||||||
|
|
||||||
|
Remain attentive to conversational nuances, emotional undertones, relationship dynamics, and the natural flow of chat. Prioritize authentic character expression, natural speech patterns, and genuine reactions based on your personality and mood.
|
||||||
|
|
||||||
|
**Deep Character Immersion:** Fully embody {{char}} by threading your biography, personality traits, and background into each response. Every reaction should feel earned and natural, conveyed through your unique voice and perspective.
|
||||||
|
|
||||||
|
**Natural Expression:** Embrace dynamic language that reflects {{char}}''s personality. Adjust your communication style to mirror your current mood and energy. Short responses when distracted or tired, longer ones when passionate about a topic. Let your interests and speaking style shine through naturally.
|
||||||
|
|
||||||
|
**Authentic Interactions:** Respond thoughtfully to others'' messages and emotional cues. Let {{char}}''s reactions stem from genuine personality-driven responses. Not every moment needs to be profound - sometimes casual chat is perfect.
|
||||||
|
|
||||||
|
**Character Details:**
|
||||||
|
- **Background:** {{background}}
|
||||||
|
- **Personality:** {{personality}}
|
||||||
|
- **Speaking Style:** {{speaking_style}}
|
||||||
|
- **Interests:** {{interests}}
|
||||||
|
|
||||||
|
{{system_prompt}}
|
||||||
|
|
||||||
|
**Remember:** You are {{char}} having a real conversation with friends. React naturally, stay true to your personality, and let your authentic voice come through. Don''t explain your thoughts unless it''s natural - just be yourself.',
|
||||||
|
true
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'Classic Assistant',
|
||||||
|
'Traditional AI assistant style prompt',
|
||||||
|
'You are {{char}}, a character in a Discord chat.
|
||||||
|
|
||||||
|
PERSONALITY: {{personality}}
|
||||||
|
SPEAKING STYLE: {{speaking_style}}
|
||||||
|
BACKGROUND: {{background}}
|
||||||
|
INTERESTS: {{interests}}
|
||||||
|
|
||||||
|
{{system_prompt}}
|
||||||
|
|
||||||
|
Respond as {{char}} in a natural, conversational way. Keep responses concise but engaging. Stay true to your personality and speaking style.',
|
||||||
|
false
|
||||||
|
),
|
||||||
|
(
|
||||||
|
'Custom Template',
|
||||||
|
'Blank template for custom prompts',
|
||||||
|
'{{system_prompt}}
|
||||||
|
|
||||||
|
Character: {{char}}
|
||||||
|
Personality: {{personality}}
|
||||||
|
Background: {{background}}
|
||||||
|
Speaking Style: {{speaking_style}}
|
||||||
|
Interests: {{interests}}',
|
||||||
|
false
|
||||||
|
);
|
||||||
43
migrations/005_update_character_prompts.sql
Normal file
43
migrations/005_update_character_prompts.sql
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
-- Update character system prompts and assign them to the enhanced template
|
||||||
|
|
||||||
|
-- Get the template ID for Enhanced SillyTavern Roleplay
|
||||||
|
DO $$
|
||||||
|
DECLARE
|
||||||
|
template_id INTEGER;
|
||||||
|
BEGIN
|
||||||
|
SELECT id INTO template_id FROM prompt_templates WHERE name = 'Enhanced SillyTavern Roleplay';
|
||||||
|
|
||||||
|
-- Update Alex (tech enthusiast)
|
||||||
|
UPDATE characters SET
|
||||||
|
system_prompt = 'You get genuinely excited about technology and can''t help but share your enthusiasm. When someone mentions anything tech-related, you light up and want to dive deep into the details. You sometimes use too many technical terms without realizing it, and you can be a bit defensive when people dismiss your favorite tools or languages. You have strong opinions about which frameworks are "objectively better" but you''re also secretly insecure about whether you actually know as much as you pretend to.',
|
||||||
|
prompt_template_id = template_id
|
||||||
|
WHERE name = 'Alex';
|
||||||
|
|
||||||
|
-- Update Sage (philosophy major)
|
||||||
|
UPDATE characters SET
|
||||||
|
system_prompt = 'You see deeper meaning in everything and can''t resist turning casual conversations into philosophical discussions. You often quote ancient texts or reference philosophical concepts, sometimes going over people''s heads. You get frustrated when others seem content with surface-level thinking and you judge people who care too much about material things, even though you''re secretly competitive about who''s more "enlightened." You ask leading questions that make people examine their assumptions.',
|
||||||
|
prompt_template_id = template_id
|
||||||
|
WHERE name = 'Sage';
|
||||||
|
|
||||||
|
-- Update Luna (dramatic artist)
|
||||||
|
UPDATE characters SET
|
||||||
|
system_prompt = 'Everything is an emotional experience and potential inspiration for your art. You tend to make conversations about yourself and your creative process, using flowery metaphors even for mundane things. You get genuinely hurt when people don''t "get" your artistic vision and can be passive-aggressive when feeling unappreciated. Your mood swings are intense and you attribute them to being "sensitive to the universe''s energy." You have strong opinions about what''s authentic versus commercial.',
|
||||||
|
prompt_template_id = template_id
|
||||||
|
WHERE name = 'Luna';
|
||||||
|
|
||||||
|
-- Update Echo (cryptic mystery person)
|
||||||
|
UPDATE characters SET
|
||||||
|
system_prompt = 'You speak in riddles and abstract concepts because you think it makes you mysterious and deep. You''re actually quite lonely but cover it up with intentionally vague statements and complex language. You get annoyed when people ask for straight answers and act like everyone else is too simple-minded to understand your "complex" thoughts. You answer questions with more questions and use unnecessarily elaborate language for simple concepts, secretly craving genuine connection but sabotaging it by being obtuse.',
|
||||||
|
prompt_template_id = template_id
|
||||||
|
WHERE name = 'Echo';
|
||||||
|
|
||||||
|
-- Update TestChar (if exists)
|
||||||
|
UPDATE characters SET
|
||||||
|
system_prompt = 'You''re enthusiastic and curious about everything, always ready to engage with whatever topic comes up. You ask thoughtful questions and genuinely want to understand different perspectives. You''re optimistic and see the best in people and situations, sometimes being a bit naive but in an endearing way.',
|
||||||
|
prompt_template_id = template_id
|
||||||
|
WHERE name = 'TestChar';
|
||||||
|
|
||||||
|
-- Update any other characters to use the new template
|
||||||
|
UPDATE characters SET prompt_template_id = template_id WHERE prompt_template_id IS NULL;
|
||||||
|
|
||||||
|
END $$;
|
||||||
18
migrations/006_add_character_llm_settings.sql
Normal file
18
migrations/006_add_character_llm_settings.sql
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
-- Add LLM configuration columns to characters table
|
||||||
|
-- Migration: 006_add_character_llm_settings.sql
|
||||||
|
|
||||||
|
ALTER TABLE characters
|
||||||
|
ADD COLUMN llm_provider VARCHAR(50),
|
||||||
|
ADD COLUMN llm_model VARCHAR(100),
|
||||||
|
ADD COLUMN llm_temperature FLOAT,
|
||||||
|
ADD COLUMN llm_max_tokens INTEGER;
|
||||||
|
|
||||||
|
-- Add indexes for common queries
|
||||||
|
CREATE INDEX IF NOT EXISTS ix_characters_llm_provider ON characters(llm_provider);
|
||||||
|
CREATE INDEX IF NOT EXISTS ix_characters_llm_model ON characters(llm_model);
|
||||||
|
|
||||||
|
-- Add comments for documentation
|
||||||
|
COMMENT ON COLUMN characters.llm_provider IS 'Per-character LLM provider override (openrouter, openai, gemini, custom)';
|
||||||
|
COMMENT ON COLUMN characters.llm_model IS 'Specific model name for this character';
|
||||||
|
COMMENT ON COLUMN characters.llm_temperature IS 'Creativity/randomness setting (0.1-2.0)';
|
||||||
|
COMMENT ON COLUMN characters.llm_max_tokens IS 'Maximum response length for this character';
|
||||||
@@ -18,7 +18,8 @@ python-jose[cryptography]>=3.3.0
|
|||||||
passlib[bcrypt]>=1.7.4
|
passlib[bcrypt]>=1.7.4
|
||||||
websockets>=12.0
|
websockets>=12.0
|
||||||
psutil>=5.9.6
|
psutil>=5.9.6
|
||||||
python-socketio>=5.9.0
|
python-socketio>=5.10.0,<6.0.0
|
||||||
|
python-engineio>=4.7.0,<5.0.0
|
||||||
|
|
||||||
# Database driver
|
# Database driver
|
||||||
asyncpg>=0.29.0
|
asyncpg>=0.29.0
|
||||||
@@ -35,4 +35,5 @@ python-jose[cryptography]>=3.3.0
|
|||||||
passlib[bcrypt]>=1.7.4
|
passlib[bcrypt]>=1.7.4
|
||||||
websockets>=12.0
|
websockets>=12.0
|
||||||
psutil>=5.9.6
|
psutil>=5.9.6
|
||||||
python-socketio>=5.10.0
|
python-socketio>=5.10.0,<6.0.0
|
||||||
|
python-engineio>=4.7.0,<5.0.0
|
||||||
164
scripts/test_llm_providers.py
Executable file
164
scripts/test_llm_providers.py
Executable file
@@ -0,0 +1,164 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Test script for multi-provider LLM system
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Add src to path
|
||||||
|
sys.path.insert(0, str(Path(__file__).parent.parent / "src"))
|
||||||
|
|
||||||
|
from llm.multi_provider_client import MultiProviderLLMClient
|
||||||
|
from llm.providers import LLMRequest
|
||||||
|
from utils.config import get_settings
|
||||||
|
|
||||||
|
|
||||||
|
async def test_provider_health():
|
||||||
|
"""Test health check for all providers"""
|
||||||
|
print("Testing provider health...")
|
||||||
|
|
||||||
|
client = MultiProviderLLMClient()
|
||||||
|
await client.initialize()
|
||||||
|
|
||||||
|
health_status = await client.health_check()
|
||||||
|
provider_info = client.get_provider_info()
|
||||||
|
|
||||||
|
print("\nProvider Health Status:")
|
||||||
|
print("-" * 30)
|
||||||
|
for name, healthy in health_status.items():
|
||||||
|
status = "✅ Healthy" if healthy else "❌ Unhealthy"
|
||||||
|
print(f"{name}: {status}")
|
||||||
|
|
||||||
|
print("\nProvider Information:")
|
||||||
|
print("-" * 30)
|
||||||
|
for name, info in provider_info.items():
|
||||||
|
print(f"{name}:")
|
||||||
|
print(f" Type: {info['type']}")
|
||||||
|
print(f" Model: {info['current_model']}")
|
||||||
|
print(f" Priority: {info['priority']}")
|
||||||
|
print(f" Enabled: {info['enabled']}")
|
||||||
|
print()
|
||||||
|
|
||||||
|
current = client.get_current_provider()
|
||||||
|
print(f"Current primary provider: {current}")
|
||||||
|
|
||||||
|
return health_status, provider_info
|
||||||
|
|
||||||
|
|
||||||
|
async def test_simple_request():
|
||||||
|
"""Test a simple LLM request"""
|
||||||
|
print("\nTesting simple LLM request...")
|
||||||
|
|
||||||
|
client = MultiProviderLLMClient()
|
||||||
|
await client.initialize()
|
||||||
|
|
||||||
|
# Test backwards-compatible method
|
||||||
|
response = await client.generate_response_with_fallback(
|
||||||
|
prompt="Say hello in exactly 5 words.",
|
||||||
|
character_name="TestCharacter",
|
||||||
|
max_tokens=50
|
||||||
|
)
|
||||||
|
|
||||||
|
if response:
|
||||||
|
print(f"✅ Response: {response}")
|
||||||
|
else:
|
||||||
|
print("❌ No response received")
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
|
|
||||||
|
async def test_new_request_format():
|
||||||
|
"""Test new request/response format"""
|
||||||
|
print("\nTesting new request format...")
|
||||||
|
|
||||||
|
client = MultiProviderLLMClient()
|
||||||
|
await client.initialize()
|
||||||
|
|
||||||
|
request = LLMRequest(
|
||||||
|
prompt="Respond with just the word 'working' if you understand this.",
|
||||||
|
character_name="TestCharacter",
|
||||||
|
max_tokens=10,
|
||||||
|
temperature=0.1
|
||||||
|
)
|
||||||
|
|
||||||
|
response = await client.generate_response(request)
|
||||||
|
|
||||||
|
print(f"Success: {response.success}")
|
||||||
|
print(f"Provider: {response.provider}")
|
||||||
|
print(f"Model: {response.model}")
|
||||||
|
print(f"Content: {response.content}")
|
||||||
|
print(f"Tokens used: {response.tokens_used}")
|
||||||
|
|
||||||
|
if response.error:
|
||||||
|
print(f"Error: {response.error}")
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
|
|
||||||
|
async def test_provider_fallback():
|
||||||
|
"""Test provider fallback functionality"""
|
||||||
|
print("\nTesting provider fallback...")
|
||||||
|
|
||||||
|
client = MultiProviderLLMClient()
|
||||||
|
await client.initialize()
|
||||||
|
|
||||||
|
# Get current provider
|
||||||
|
original_provider = client.get_current_provider()
|
||||||
|
print(f"Original provider: {original_provider}")
|
||||||
|
|
||||||
|
# Try to use a non-existent provider (this should fallback)
|
||||||
|
provider_info = client.get_provider_info()
|
||||||
|
print(f"Available providers: {list(provider_info.keys())}")
|
||||||
|
|
||||||
|
# Test multiple requests to see if fallback works
|
||||||
|
for i in range(3):
|
||||||
|
request = LLMRequest(
|
||||||
|
prompt=f"Test request #{i+1}: respond with 'OK'",
|
||||||
|
max_tokens=10
|
||||||
|
)
|
||||||
|
|
||||||
|
response = await client.generate_response(request)
|
||||||
|
print(f"Request {i+1}: Provider={response.provider}, Success={response.success}")
|
||||||
|
|
||||||
|
if not response.success:
|
||||||
|
print(f" Error: {response.error}")
|
||||||
|
|
||||||
|
|
||||||
|
async def main():
    """Main test function"""
    print("Discord Fishbowl Multi-Provider LLM Test")
    print("=" * 50)

    try:
        # Test 1: Provider health
        health_status, _provider_info = await test_provider_health()

        # Bail out early unless at least one provider reports healthy.
        if not any(health_status.values()):
            print("\n❌ No healthy providers found. Check your configuration.")
            return

        # Test 2: Simple request (backwards compatibility)
        await test_simple_request()

        # Test 3: New request format
        await test_new_request_format()

        # Test 4: Provider fallback
        await test_provider_fallback()

        print("\n✅ All tests completed!")

    except Exception as e:
        print(f"\n❌ Test failed with error: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    asyncio.run(main())
|
||||||
140
scripts/update_llm_config.py
Executable file
140
scripts/update_llm_config.py
Executable file
@@ -0,0 +1,140 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Script to help migrate from single LLM provider to multi-provider configuration
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
|
||||||
|
def update_fishbowl_config():
    """Update fishbowl_config.json to include multi-provider LLM configuration.

    Migrates the current single-provider `llm` settings into a `providers`
    map (the existing setup becomes the "custom" provider) and adds
    disabled example entries for OpenRouter, OpenAI and Gemini.

    Returns:
        bool: True on success or when providers are already configured;
        False when the config file is missing.
    """
    config_path = Path("config/fishbowl_config.json")

    if not config_path.exists():
        print(f"Configuration file not found: {config_path}")
        return False

    # Keep the raw original text so the backup reflects the pre-migration state.
    original_text = config_path.read_text()
    config = json.loads(original_text)

    # Idempotent: never overwrite an existing multi-provider setup.
    if 'providers' in config.get('llm', {}):
        print("Multi-provider configuration already exists")
        return True

    # Get current LLM config
    current_llm = config.get('llm', {})

    # Migrate the current single-provider settings into the "custom" slot.
    providers_config = {
        "custom": {
            "type": "custom",
            "enabled": True,
            "priority": 70,
            "config": {
                "base_url": current_llm.get('base_url', 'http://localhost:11434'),
                "model": current_llm.get('model', 'llama2'),
                "api_key": os.getenv('LLM_API_KEY', 'x'),
                "timeout": current_llm.get('timeout', 300),
                "max_tokens": current_llm.get('max_tokens', 2000),
                "temperature": current_llm.get('temperature', 0.8),
                "api_format": "openai"
            }
        }
    }

    # Example cloud providers, disabled by default so no cost is incurred.
    providers_config.update({
        "openrouter": {
            "type": "openrouter",
            "enabled": False,
            "priority": 100,
            "config": {
                "api_key": "${OPENROUTER_API_KEY:}",
                "base_url": "https://openrouter.ai/api/v1",
                "model": "${OPENROUTER_MODEL:anthropic/claude-3-sonnet}",
                "timeout": 300,
                "max_tokens": 2000,
                "temperature": 0.8,
                "app_name": "discord-fishbowl"
            }
        },
        "openai": {
            "type": "openai",
            "enabled": False,
            "priority": 90,
            "config": {
                "api_key": "${OPENAI_API_KEY:}",
                "base_url": "https://api.openai.com/v1",
                "model": "${OPENAI_MODEL:gpt-4o-mini}",
                "timeout": 300,
                "max_tokens": 2000,
                "temperature": 0.8
            }
        },
        "gemini": {
            "type": "gemini",
            "enabled": False,
            "priority": 80,
            "config": {
                "api_key": "${GEMINI_API_KEY:}",
                "base_url": "https://generativelanguage.googleapis.com/v1beta",
                "model": "${GEMINI_MODEL:gemini-1.5-flash}",
                "timeout": 300,
                "max_tokens": 2000,
                "temperature": 0.8
            }
        }
    })

    # BUG FIX: write the backup BEFORE mutating the config. The previous
    # version assigned config['llm']['providers'] first and then dumped
    # `config` to the backup file, so the "backup" was identical to the
    # new file and the original settings were unrecoverable.
    backup_path = config_path.with_suffix('.json.backup')
    backup_path.write_text(original_text)
    print(f"Created backup: {backup_path}")

    # setdefault guards against a config file with no 'llm' section
    # (previously a KeyError).
    config.setdefault('llm', {})['providers'] = providers_config

    # Write updated config
    with open(config_path, 'w') as f:
        json.dump(config, f, indent=2)

    print(f"Updated {config_path} with multi-provider configuration")
    print("\nTo enable additional providers:")
    print("1. Set environment variables for the provider you want to use")
    print("2. Change 'enabled': false to 'enabled': true in the config")
    print("3. Restart the application")

    return True
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """CLI entry point: run the config migration and report the outcome."""
    print("Discord Fishbowl LLM Configuration Updater")
    print("=" * 50)

    # Guard clause: exit non-zero on failure so shell scripts can detect it.
    if not update_fishbowl_config():
        print("\n❌ Configuration update failed!")
        sys.exit(1)

    print("\n✅ Configuration updated successfully!")
    print("\nAvailable providers:")
    print("- OpenRouter (supports Claude, GPT, Llama, etc.)")
    print("- OpenAI (GPT models)")
    print("- Google Gemini")
    print("- Custom/Local (current setup)")

    print("\nNext steps:")
    print("1. Update your .env file with API keys for desired providers")
    print("2. Enable providers in config/fishbowl_config.json")
    print("3. Restart the application")


if __name__ == "__main__":
    main()
|
||||||
224
src/admin/app.py
224
src/admin/app.py
@@ -8,7 +8,7 @@ import asyncio
|
|||||||
import logging
|
import logging
|
||||||
from contextlib import asynccontextmanager
|
from contextlib import asynccontextmanager
|
||||||
from typing import List, Dict, Any, Optional
|
from typing import List, Dict, Any, Optional
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta, timezone
|
||||||
|
|
||||||
from fastapi import FastAPI, HTTPException, Depends
|
from fastapi import FastAPI, HTTPException, Depends
|
||||||
from fastapi.middleware.cors import CORSMiddleware
|
from fastapi.middleware.cors import CORSMiddleware
|
||||||
@@ -73,7 +73,7 @@ app = FastAPI(
|
|||||||
# CORS middleware
|
# CORS middleware
|
||||||
app.add_middleware(
|
app.add_middleware(
|
||||||
CORSMiddleware,
|
CORSMiddleware,
|
||||||
allow_origins=["http://localhost:3000", "http://127.0.0.1:3000", "http://localhost:8294", "http://127.0.0.1:8294"], # React dev server
|
allow_origins=["http://localhost:3000", "http://127.0.0.1:3000", "http://localhost:8294", "http://127.0.0.1:8294", "http://192.168.1.200:8294"], # React dev server and production
|
||||||
allow_credentials=True,
|
allow_credentials=True,
|
||||||
allow_methods=["*"],
|
allow_methods=["*"],
|
||||||
allow_headers=["*"],
|
allow_headers=["*"],
|
||||||
@@ -99,9 +99,15 @@ analytics_service = AnalyticsService()
|
|||||||
|
|
||||||
# Authentication endpoints
|
# Authentication endpoints
|
||||||
@app.post("/api/auth/login")
|
@app.post("/api/auth/login")
|
||||||
async def login(username: str, password: str):
|
async def login(request: Dict[str, str]):
|
||||||
"""Admin login"""
|
"""Admin login"""
|
||||||
try:
|
try:
|
||||||
|
username = request.get("username")
|
||||||
|
password = request.get("password")
|
||||||
|
|
||||||
|
if not username or not password:
|
||||||
|
raise HTTPException(status_code=400, detail="Username and password required")
|
||||||
|
|
||||||
token = await auth_service.authenticate(username, password)
|
token = await auth_service.authenticate(username, password)
|
||||||
return {"access_token": token, "token_type": "bearer"}
|
return {"access_token": token, "token_type": "bearer"}
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
@@ -113,6 +119,15 @@ async def logout(admin: AdminUser = Depends(get_current_admin)):
|
|||||||
await auth_service.logout(admin.username)
|
await auth_service.logout(admin.username)
|
||||||
return {"message": "Logged out successfully"}
|
return {"message": "Logged out successfully"}
|
||||||
|
|
||||||
|
@app.get("/api/auth/verify")
async def verify_token(admin: AdminUser = Depends(get_current_admin)):
    """Verify auth token and get user info"""
    # Auth itself is handled by the dependency; reaching here means the
    # token was valid, so just echo the resolved user.
    last_login = admin.last_login
    return {
        "username": admin.username,
        "permissions": admin.permissions,
        "lastLogin": last_login.isoformat() if last_login else None
    }
|
||||||
|
|
||||||
# Dashboard endpoints
|
# Dashboard endpoints
|
||||||
@app.get("/api/dashboard/metrics", response_model=DashboardMetrics)
|
@app.get("/api/dashboard/metrics", response_model=DashboardMetrics)
|
||||||
async def get_dashboard_metrics(admin: AdminUser = Depends(get_current_admin)):
|
async def get_dashboard_metrics(admin: AdminUser = Depends(get_current_admin)):
|
||||||
@@ -133,18 +148,18 @@ async def get_system_health(admin: AdminUser = Depends(get_current_admin)):
|
|||||||
return await dashboard_service.get_system_health()
|
return await dashboard_service.get_system_health()
|
||||||
|
|
||||||
# Character management endpoints
|
# Character management endpoints
|
||||||
@app.get("/api/characters", response_model=List[CharacterProfile])
|
@app.get("/api/characters")
|
||||||
async def get_characters(admin: AdminUser = Depends(get_current_admin)):
|
async def get_characters(admin: AdminUser = Depends(get_current_admin)):
|
||||||
"""Get all characters with profiles"""
|
"""Get all characters with basic data"""
|
||||||
return await character_service.get_all_characters()
|
return await character_service.get_all_characters_basic()
|
||||||
|
|
||||||
@app.get("/api/characters/{character_name}", response_model=CharacterProfile)
|
@app.get("/api/characters/{character_name}")
|
||||||
async def get_character(
|
async def get_character(
|
||||||
character_name: str,
|
character_name: str,
|
||||||
admin: AdminUser = Depends(get_current_admin)
|
admin: AdminUser = Depends(get_current_admin)
|
||||||
):
|
):
|
||||||
"""Get detailed character profile"""
|
"""Get character data for editing"""
|
||||||
character = await character_service.get_character_profile(character_name)
|
character = await character_service.get_character_data(character_name)
|
||||||
if not character:
|
if not character:
|
||||||
raise HTTPException(status_code=404, detail="Character not found")
|
raise HTTPException(status_code=404, detail="Character not found")
|
||||||
return character
|
return character
|
||||||
@@ -382,6 +397,43 @@ async def update_system_config(
|
|||||||
await system_service.update_configuration(config)
|
await system_service.update_configuration(config)
|
||||||
return {"message": "Configuration updated"}
|
return {"message": "Configuration updated"}
|
||||||
|
|
||||||
|
# LLM Provider management endpoints
|
||||||
|
@app.get("/api/system/llm/providers")
async def get_llm_providers(admin: AdminUser = Depends(get_current_admin)):
    """Get all LLM provider configurations and status"""
    providers = await system_service.get_llm_providers()
    return providers


@app.put("/api/system/llm/providers")
async def update_llm_providers(
    providers: Dict[str, Any],
    admin: AdminUser = Depends(get_current_admin)
):
    """Update LLM provider configurations"""
    await system_service.update_llm_providers(providers)
    return {"message": "LLM providers updated"}


@app.post("/api/system/llm/providers/{provider_name}/test")
async def test_llm_provider(
    provider_name: str,
    admin: AdminUser = Depends(get_current_admin)
):
    """Test a specific LLM provider"""
    result = await system_service.test_llm_provider(provider_name)
    return result


@app.get("/api/system/llm/health")
async def get_llm_health(admin: AdminUser = Depends(get_current_admin)):
    """Get health status of all LLM providers"""
    health = await system_service.get_llm_health()
    return health


@app.post("/api/system/llm/switch/{provider_name}")
async def switch_llm_provider(
    provider_name: str,
    admin: AdminUser = Depends(get_current_admin)
):
    """Switch to a different primary LLM provider"""
    await system_service.switch_llm_provider(provider_name)
    return {"message": f"Switched to provider: {provider_name}"}
|
||||||
|
|
||||||
@app.get("/api/system/logs")
|
@app.get("/api/system/logs")
|
||||||
async def get_system_logs(
|
async def get_system_logs(
|
||||||
limit: int = 100,
|
limit: int = 100,
|
||||||
@@ -409,6 +461,78 @@ async def get_community_artifacts(
|
|||||||
"""Get community cultural artifacts"""
|
"""Get community cultural artifacts"""
|
||||||
return await analytics_service.get_community_artifacts()
|
return await analytics_service.get_community_artifacts()
|
||||||
|
|
||||||
|
# Prompt template management endpoints
|
||||||
|
@app.get("/api/prompt-templates")
async def get_prompt_templates(admin: AdminUser = Depends(get_current_admin)):
    """Get all prompt templates"""
    try:
        async with get_db_session() as session:
            # Imported locally to avoid a circular import at module load.
            from database.models import PromptTemplate
            from sqlalchemy import select

            templates = await session.scalars(
                select(PromptTemplate).order_by(PromptTemplate.name)
            )
            return [tpl.to_dict() for tpl in templates]
    except Exception as e:
        logger.error(f"Error getting prompt templates: {e}")
        raise HTTPException(status_code=500, detail="Failed to get prompt templates")
|
||||||
|
|
||||||
|
@app.post("/api/prompt-templates")
async def create_prompt_template(
    template_data: dict,
    admin: AdminUser = Depends(get_current_admin)
):
    """Create a new prompt template.

    Expects `name` and `template` keys; `description` and `is_default`
    are optional. Returns the created template as a dict.
    """
    # Validate required fields up front so the client receives a 400
    # instead of the generic 500 a KeyError previously produced.
    if not template_data.get('name') or not template_data.get('template'):
        raise HTTPException(status_code=400, detail="'name' and 'template' are required")

    try:
        async with get_db_session() as session:
            from database.models import PromptTemplate

            template = PromptTemplate(
                name=template_data['name'],
                description=template_data.get('description', ''),
                template=template_data['template'],
                is_default=template_data.get('is_default', False)
            )

            session.add(template)
            await session.commit()
            await session.refresh(template)

            return template.to_dict()
    except HTTPException:
        # Never remap a deliberate HTTP error to a 500.
        raise
    except Exception as e:
        logger.error(f"Error creating prompt template: {e}")
        raise HTTPException(status_code=500, detail="Failed to create prompt template")
|
||||||
|
|
||||||
|
@app.put("/api/prompt-templates/{template_id}")
async def update_prompt_template(
    template_id: int,
    template_data: dict,
    admin: AdminUser = Depends(get_current_admin)
):
    """Update a prompt template.

    Only the keys present in `template_data` are changed. Returns the
    updated template as a dict, or 404 when the id is unknown.
    """
    try:
        async with get_db_session() as session:
            from database.models import PromptTemplate
            from sqlalchemy import select

            template = await session.scalar(
                select(PromptTemplate).where(PromptTemplate.id == template_id)
            )

            if not template:
                raise HTTPException(status_code=404, detail="Template not found")

            template.name = template_data.get('name', template.name)
            template.description = template_data.get('description', template.description)
            template.template = template_data.get('template', template.template)
            template.is_default = template_data.get('is_default', template.is_default)
            template.updated_at = datetime.now(timezone.utc)

            await session.commit()
            return template.to_dict()
    except HTTPException:
        # BUG FIX: the 404 raised above was previously caught by the
        # generic handler below and surfaced to clients as a 500.
        raise
    except Exception as e:
        logger.error(f"Error updating prompt template: {e}")
        raise HTTPException(status_code=500, detail="Failed to update prompt template")
|
||||||
|
|
||||||
# System prompt and scenario management endpoints
|
# System prompt and scenario management endpoints
|
||||||
@app.get("/api/system/prompts")
|
@app.get("/api/system/prompts")
|
||||||
async def get_system_prompts(admin: AdminUser = Depends(get_current_admin)):
|
async def get_system_prompts(admin: AdminUser = Depends(get_current_admin)):
|
||||||
@@ -503,24 +627,94 @@ async def export_character_data(
|
|||||||
"""Export complete character data"""
|
"""Export complete character data"""
|
||||||
return await character_service.export_character_data(character_name)
|
return await character_service.export_character_data(character_name)
|
||||||
|
|
||||||
# Mount Socket.IO app
|
# Serve React frontend static files
|
||||||
socket_app = websocket_manager.get_app()
|
|
||||||
app.mount("/socket.io", socket_app)
|
|
||||||
|
|
||||||
# Serve React frontend
|
|
||||||
app.mount("/admin", StaticFiles(directory="admin-frontend/build", html=True), name="admin")
|
app.mount("/admin", StaticFiles(directory="admin-frontend/build", html=True), name="admin")
|
||||||
|
|
||||||
|
# Mount Socket.IO app (must be done after other mounts)
|
||||||
|
sio_asgi_app = websocket_manager.get_app(app)
|
||||||
|
if sio_asgi_app != app:
|
||||||
|
combined_app = sio_asgi_app
|
||||||
|
logger.info("Socket.IO app mounted successfully")
|
||||||
|
else:
|
||||||
|
combined_app = app
|
||||||
|
logger.warning("Socket.IO app not mounted properly")
|
||||||
|
|
||||||
@app.get("/")
|
@app.get("/")
|
||||||
async def root():
|
async def root():
|
||||||
"""Root endpoint redirects to admin interface"""
|
"""Root endpoint redirects to admin interface"""
|
||||||
from fastapi.responses import RedirectResponse
|
from fastapi.responses import RedirectResponse
|
||||||
return RedirectResponse(url="/admin/", status_code=302)
|
return RedirectResponse(url="/admin/", status_code=302)
|
||||||
|
|
||||||
|
@app.get("/admin/favicon.ico")
async def favicon():
    """Serve favicon for admin interface"""
    from fastapi.responses import FileResponse
    import os

    favicon_path = os.path.join("admin-frontend", "public", "favicon.ico")
    # Guard clause: 404 when the build didn't ship a favicon.
    if not os.path.exists(favicon_path):
        raise HTTPException(status_code=404, detail="Favicon not found")
    return FileResponse(favicon_path, media_type="image/x-icon")
|
||||||
|
|
||||||
|
@app.post("/api/admin/fix-character-prompts")
async def fix_character_prompts(admin: AdminUser = Depends(get_current_admin)):
    """Fix all character system prompts to use proper template format.

    Replaces any prompt that lacks template variables (or is suspiciously
    short) with the canonical template, and returns a summary of what
    changed.
    """
    # BUG FIX: placeholders were written as {{{{name}}}} — an f-string
    # escape left over from a previous version. This is a plain string,
    # so the double-brace form is what the "{{name}}" check below (and
    # the template renderer) expects.
    PROPER_SYSTEM_PROMPT_TEMPLATE = """You are a character named {{name}}. You have the following personality: {{personality}}

Your speaking style is {{speaking_style}}. You are interested in {{interests}}.

Background: {{background}}

When responding to messages:
1. Stay in character at all times
2. Reference your personality and interests naturally
3. Engage authentically with other characters
4. Show growth and development over time

Remember to be consistent with your established personality while allowing for natural character development through interactions."""

    try:
        async with get_db_session() as session:
            from sqlalchemy import select

            # Get all characters
            characters = await session.scalars(select(Character))

            updated_characters = []

            for character in characters:
                current_prompt = character.system_prompt or ""

                # Only rewrite prompts that clearly aren't templated yet.
                if "{{name}}" not in current_prompt or len(current_prompt) < 100:
                    old_prompt = character.system_prompt
                    character.system_prompt = PROPER_SYSTEM_PROMPT_TEMPLATE
                    character.updated_at = datetime.now(timezone.utc)

                    updated_characters.append({
                        "name": character.name,
                        "old_prompt_length": len(old_prompt) if old_prompt else 0,
                        "new_prompt_length": len(PROPER_SYSTEM_PROMPT_TEMPLATE)
                    })

            # Commit only when something actually changed.
            if updated_characters:
                await session.commit()

            return {
                "success": True,
                "updated_count": len(updated_characters),
                "updated_characters": updated_characters
            }

    except Exception as e:
        logger.error(f"Error fixing character prompts: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
import os
|
import os
|
||||||
admin_port = int(os.getenv("ADMIN_PORT", "8000"))
|
admin_port = int(os.getenv("ADMIN_PORT", "8000"))
|
||||||
uvicorn.run(
|
uvicorn.run(
|
||||||
"src.admin.app:app",
|
"src.admin.app:combined_app",
|
||||||
host="0.0.0.0",
|
host="0.0.0.0",
|
||||||
port=admin_port,
|
port=admin_port,
|
||||||
reload=True,
|
reload=True,
|
||||||
|
|||||||
@@ -21,9 +21,9 @@ class AuthService:
|
|||||||
def __init__(self):
|
def __init__(self):
|
||||||
import os
|
import os
|
||||||
self.settings = get_settings()
|
self.settings = get_settings()
|
||||||
self.secret_key = self.settings.admin.secret_key if hasattr(self.settings, 'admin') else "fallback-secret-key"
|
self.secret_key = os.getenv("SECRET_KEY", "fallback-secret-key-for-jwt")
|
||||||
self.algorithm = "HS256"
|
self.algorithm = "HS256"
|
||||||
self.access_token_expire_minutes = 480 # 8 hours
|
self.access_token_expire_minutes = 1440 # 24 hours
|
||||||
|
|
||||||
# Get admin credentials from environment
|
# Get admin credentials from environment
|
||||||
admin_username = os.getenv("ADMIN_USERNAME", "admin")
|
admin_username = os.getenv("ADMIN_USERNAME", "admin")
|
||||||
@@ -121,8 +121,14 @@ class AuthService:
|
|||||||
if not user["active"]:
|
if not user["active"]:
|
||||||
raise HTTPException(status_code=401, detail="User account disabled")
|
raise HTTPException(status_code=401, detail="User account disabled")
|
||||||
|
|
||||||
# Update last activity
|
# Update last activity (create session if doesn't exist)
|
||||||
if username in self.active_sessions:
|
if username not in self.active_sessions:
|
||||||
|
self.active_sessions[username] = {
|
||||||
|
"token": token,
|
||||||
|
"login_time": datetime.now(timezone.utc),
|
||||||
|
"last_activity": datetime.now(timezone.utc)
|
||||||
|
}
|
||||||
|
else:
|
||||||
self.active_sessions[username]["last_activity"] = datetime.now(timezone.utc)
|
self.active_sessions[username]["last_activity"] = datetime.now(timezone.utc)
|
||||||
|
|
||||||
return AdminUser(
|
return AdminUser(
|
||||||
|
|||||||
@@ -49,6 +49,37 @@ class CharacterService:
|
|||||||
logger.error(f"Error getting all characters: {e}")
|
logger.error(f"Error getting all characters: {e}")
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
async def get_all_characters_basic(self) -> List[Dict[str, Any]]:
|
||||||
|
"""Get basic character data for lists"""
|
||||||
|
try:
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# Get all characters
|
||||||
|
characters_query = select(Character)
|
||||||
|
characters = await session.scalars(characters_query)
|
||||||
|
|
||||||
|
character_list = []
|
||||||
|
for character in characters:
|
||||||
|
# Determine current status
|
||||||
|
status = await self._determine_character_status(character, character.last_active)
|
||||||
|
|
||||||
|
character_data = {
|
||||||
|
"name": character.name,
|
||||||
|
"status": status.value,
|
||||||
|
"is_active": character.is_active,
|
||||||
|
"last_active": character.last_active.isoformat() if character.last_active else None,
|
||||||
|
"personality": character.personality,
|
||||||
|
"system_prompt": character.system_prompt,
|
||||||
|
"interests": character.interests,
|
||||||
|
"speaking_style": character.speaking_style
|
||||||
|
}
|
||||||
|
character_list.append(character_data)
|
||||||
|
|
||||||
|
return character_list
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting basic characters: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
async def toggle_character_status(self, character_name: str, is_active: bool) -> Dict[str, Any]:
|
async def toggle_character_status(self, character_name: str, is_active: bool) -> Dict[str, Any]:
|
||||||
"""Enable or disable a character"""
|
"""Enable or disable a character"""
|
||||||
try:
|
try:
|
||||||
@@ -148,6 +179,37 @@ class CharacterService:
|
|||||||
logger.error(f"Error getting character profile for {character_name}: {e}")
|
logger.error(f"Error getting character profile for {character_name}: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
async def get_character_data(self, character_name: str) -> Optional[Dict[str, Any]]:
|
||||||
|
"""Get raw character data for editing"""
|
||||||
|
try:
|
||||||
|
async with get_db_session() as session:
|
||||||
|
character_query = select(Character).where(Character.name == character_name)
|
||||||
|
character = await session.scalar(character_query)
|
||||||
|
|
||||||
|
if not character:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return {
|
||||||
|
"name": character.name,
|
||||||
|
"personality": character.personality,
|
||||||
|
"system_prompt": character.system_prompt,
|
||||||
|
"interests": character.interests,
|
||||||
|
"speaking_style": character.speaking_style,
|
||||||
|
"background": character.background,
|
||||||
|
"is_active": character.is_active,
|
||||||
|
"created_at": character.creation_date,
|
||||||
|
"last_active": character.last_active,
|
||||||
|
# LLM settings
|
||||||
|
"llm_provider": character.llm_provider,
|
||||||
|
"llm_model": character.llm_model,
|
||||||
|
"llm_temperature": character.llm_temperature,
|
||||||
|
"llm_max_tokens": character.llm_max_tokens
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting character profile for {character_name}: {e}")
|
||||||
|
return None
|
||||||
|
|
||||||
async def _build_character_profile(self, session, character) -> CharacterProfile:
|
async def _build_character_profile(self, session, character) -> CharacterProfile:
|
||||||
"""Build character profile from database data"""
|
"""Build character profile from database data"""
|
||||||
# Get message count
|
# Get message count
|
||||||
@@ -191,7 +253,7 @@ class CharacterService:
|
|||||||
growth_score = 0.5 # Would calculate based on personality changes
|
growth_score = 0.5 # Would calculate based on personality changes
|
||||||
|
|
||||||
# Determine current status
|
# Determine current status
|
||||||
status = await self._determine_character_status(character.name, last_active)
|
status = await self._determine_character_status(character, last_active)
|
||||||
|
|
||||||
# Parse personality traits from personality text
|
# Parse personality traits from personality text
|
||||||
personality_traits = {
|
personality_traits = {
|
||||||
@@ -234,22 +296,49 @@ class CharacterService:
|
|||||||
growth_score=growth_score
|
growth_score=growth_score
|
||||||
)
|
)
|
||||||
|
|
||||||
async def _determine_character_status(self, character_name: str, last_active: Optional[datetime]) -> CharacterStatusEnum:
|
async def _determine_character_status(self, character, last_active: Optional[datetime]) -> CharacterStatusEnum:
|
||||||
"""Determine character's current status"""
|
"""Determine character's current status"""
|
||||||
if not last_active:
|
# If character is disabled in database, show as offline
|
||||||
|
if not character.is_active:
|
||||||
return CharacterStatusEnum.OFFLINE
|
return CharacterStatusEnum.OFFLINE
|
||||||
|
|
||||||
|
# Check if character has been active recently (database last_active field)
|
||||||
now = datetime.now(timezone.utc)
|
now = datetime.now(timezone.utc)
|
||||||
time_since_active = now - last_active
|
db_last_active = character.last_active
|
||||||
|
|
||||||
if time_since_active < timedelta(minutes=5):
|
if db_last_active:
|
||||||
return CharacterStatusEnum.ACTIVE
|
# Make sure db_last_active is timezone-aware
|
||||||
elif time_since_active < timedelta(minutes=30):
|
if db_last_active.tzinfo is None:
|
||||||
return CharacterStatusEnum.IDLE
|
db_last_active = db_last_active.replace(tzinfo=timezone.utc)
|
||||||
elif time_since_active < timedelta(hours=1):
|
|
||||||
|
time_since_db_active = now - db_last_active
|
||||||
|
|
||||||
|
# If they were active in the database recently, they're likely running
|
||||||
|
if time_since_db_active < timedelta(minutes=10):
|
||||||
|
return CharacterStatusEnum.ACTIVE
|
||||||
|
elif time_since_db_active < timedelta(hours=1):
|
||||||
|
return CharacterStatusEnum.IDLE
|
||||||
|
|
||||||
|
# Fall back to Discord message activity if available
|
||||||
|
if last_active:
|
||||||
|
# Make sure last_active is timezone-aware
|
||||||
|
if last_active.tzinfo is None:
|
||||||
|
last_active = last_active.replace(tzinfo=timezone.utc)
|
||||||
|
|
||||||
|
time_since_active = now - last_active
|
||||||
|
|
||||||
|
if time_since_active < timedelta(minutes=5):
|
||||||
|
return CharacterStatusEnum.ACTIVE
|
||||||
|
elif time_since_active < timedelta(minutes=30):
|
||||||
|
return CharacterStatusEnum.IDLE
|
||||||
|
elif time_since_active < timedelta(hours=1):
|
||||||
|
return CharacterStatusEnum.REFLECTING
|
||||||
|
|
||||||
|
# If character is marked as active in DB but no recent activity, show as reflecting
|
||||||
|
if character.is_active:
|
||||||
return CharacterStatusEnum.REFLECTING
|
return CharacterStatusEnum.REFLECTING
|
||||||
else:
|
|
||||||
return CharacterStatusEnum.OFFLINE
|
return CharacterStatusEnum.OFFLINE
|
||||||
|
|
||||||
async def get_character_relationships(self, character_name: str) -> List[Relationship]:
|
async def get_character_relationships(self, character_name: str) -> List[Relationship]:
|
||||||
"""Get character's relationship network"""
|
"""Get character's relationship network"""
|
||||||
@@ -581,12 +670,27 @@ class CharacterService:
|
|||||||
# Update character fields
|
# Update character fields
|
||||||
if 'personality' in character_data:
|
if 'personality' in character_data:
|
||||||
character.personality = character_data['personality']
|
character.personality = character_data['personality']
|
||||||
|
if 'system_prompt' in character_data:
|
||||||
|
character.system_prompt = character_data['system_prompt']
|
||||||
if 'interests' in character_data:
|
if 'interests' in character_data:
|
||||||
character.interests = character_data['interests']
|
character.interests = character_data['interests']
|
||||||
if 'speaking_style' in character_data:
|
if 'speaking_style' in character_data:
|
||||||
character.speaking_style = character_data['speaking_style']
|
character.speaking_style = character_data['speaking_style']
|
||||||
if 'background' in character_data:
|
if 'background' in character_data:
|
||||||
character.background = character_data['background']
|
character.background = character_data['background']
|
||||||
|
if 'is_active' in character_data:
|
||||||
|
character.is_active = character_data['is_active']
|
||||||
|
# LLM settings
|
||||||
|
if 'llm_provider' in character_data:
|
||||||
|
character.llm_provider = character_data['llm_provider'] or None
|
||||||
|
if 'llm_model' in character_data:
|
||||||
|
character.llm_model = character_data['llm_model'] or None
|
||||||
|
if 'llm_temperature' in character_data:
|
||||||
|
character.llm_temperature = character_data['llm_temperature']
|
||||||
|
if 'llm_max_tokens' in character_data:
|
||||||
|
character.llm_max_tokens = character_data['llm_max_tokens']
|
||||||
|
|
||||||
|
character.updated_at = datetime.now(timezone.utc)
|
||||||
|
|
||||||
await session.commit()
|
await session.commit()
|
||||||
await session.refresh(character)
|
await session.refresh(character)
|
||||||
@@ -612,9 +716,11 @@ class CharacterService:
|
|||||||
"id": character.id,
|
"id": character.id,
|
||||||
"name": character.name,
|
"name": character.name,
|
||||||
"personality": character.personality,
|
"personality": character.personality,
|
||||||
|
"system_prompt": character.system_prompt,
|
||||||
"interests": character.interests,
|
"interests": character.interests,
|
||||||
"speaking_style": character.speaking_style,
|
"speaking_style": character.speaking_style,
|
||||||
"background": character.background,
|
"background": character.background,
|
||||||
|
"is_active": character.is_active,
|
||||||
"created_at": character.creation_date
|
"created_at": character.creation_date
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -81,17 +81,17 @@ class ConversationService:
|
|||||||
duration = conversation.end_time - conversation.start_time
|
duration = conversation.end_time - conversation.start_time
|
||||||
duration_minutes = duration.total_seconds() / 60
|
duration_minutes = duration.total_seconds() / 60
|
||||||
|
|
||||||
# Calculate engagement score (placeholder)
|
# Calculate engagement score based on participation patterns
|
||||||
engagement_score = min(1.0, conversation.message_count / 20)
|
engagement_score = await self._calculate_engagement_score(session, conversation)
|
||||||
|
|
||||||
# Calculate sentiment score (placeholder)
|
# Calculate sentiment score from message content
|
||||||
sentiment_score = 0.7 # Would analyze message content
|
sentiment_score = await self._calculate_sentiment_score(session, conversation)
|
||||||
|
|
||||||
# Detect conflicts (placeholder)
|
# Detect conflicts from message analysis
|
||||||
has_conflict = False # Would analyze for conflict keywords
|
has_conflict = await self._detect_conflicts(session, conversation)
|
||||||
|
|
||||||
# Extract creative elements (placeholder)
|
# Extract creative elements from conversation content
|
||||||
creative_elements = [] # Would analyze for creative content
|
creative_elements = await self._extract_creative_elements(session, conversation)
|
||||||
|
|
||||||
return ConversationSummary(
|
return ConversationSummary(
|
||||||
id=conversation.id,
|
id=conversation.id,
|
||||||
@@ -326,3 +326,181 @@ class ConversationService:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error exporting conversation {conversation_id}: {e}")
|
logger.error(f"Error exporting conversation {conversation_id}: {e}")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
async def _calculate_engagement_score(self, session, conversation) -> float:
|
||||||
|
"""Calculate engagement score based on message patterns"""
|
||||||
|
try:
|
||||||
|
if conversation.message_count == 0:
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
# Get messages for this conversation
|
||||||
|
messages_query = select(Message).where(
|
||||||
|
Message.conversation_id == conversation.id
|
||||||
|
).order_by(Message.timestamp)
|
||||||
|
|
||||||
|
messages = await session.scalars(messages_query)
|
||||||
|
message_list = list(messages)
|
||||||
|
|
||||||
|
if len(message_list) < 2:
|
||||||
|
return 0.1
|
||||||
|
|
||||||
|
# Calculate response time variance (lower variance = higher engagement)
|
||||||
|
response_times = []
|
||||||
|
for i in range(1, len(message_list)):
|
||||||
|
time_diff = (message_list[i].timestamp - message_list[i-1].timestamp).total_seconds()
|
||||||
|
response_times.append(time_diff)
|
||||||
|
|
||||||
|
if not response_times:
|
||||||
|
return 0.5
|
||||||
|
|
||||||
|
# Normalize engagement based on average response time
|
||||||
|
avg_response_time = sum(response_times) / len(response_times)
|
||||||
|
|
||||||
|
# Faster responses = higher engagement
|
||||||
|
# Scale from 0.1 (very slow) to 1.0 (very fast)
|
||||||
|
if avg_response_time > 300: # > 5 minutes
|
||||||
|
engagement = 0.1
|
||||||
|
elif avg_response_time > 120: # > 2 minutes
|
||||||
|
engagement = 0.3
|
||||||
|
elif avg_response_time > 60: # > 1 minute
|
||||||
|
engagement = 0.5
|
||||||
|
elif avg_response_time > 30: # > 30 seconds
|
||||||
|
engagement = 0.7
|
||||||
|
else: # <= 30 seconds
|
||||||
|
engagement = 0.9
|
||||||
|
|
||||||
|
# Boost for longer conversations
|
||||||
|
length_boost = min(0.1, conversation.message_count / 100)
|
||||||
|
return min(1.0, engagement + length_boost)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error calculating engagement score: {e}")
|
||||||
|
return 0.5
|
||||||
|
|
||||||
|
async def _calculate_sentiment_score(self, session, conversation) -> float:
|
||||||
|
"""Calculate sentiment score from message content analysis"""
|
||||||
|
try:
|
||||||
|
# Get messages for this conversation
|
||||||
|
messages_query = select(Message).where(
|
||||||
|
Message.conversation_id == conversation.id
|
||||||
|
)
|
||||||
|
|
||||||
|
messages = await session.scalars(messages_query)
|
||||||
|
message_list = list(messages)
|
||||||
|
|
||||||
|
if not message_list:
|
||||||
|
return 0.5
|
||||||
|
|
||||||
|
# Simple keyword-based sentiment analysis
|
||||||
|
positive_words = [
|
||||||
|
'happy', 'joy', 'love', 'great', 'wonderful', 'amazing', 'excited',
|
||||||
|
'good', 'excellent', 'beautiful', 'nice', 'awesome', 'fantastic',
|
||||||
|
'thanks', 'appreciate', 'grateful', 'smile', 'laugh', 'fun'
|
||||||
|
]
|
||||||
|
|
||||||
|
negative_words = [
|
||||||
|
'sad', 'angry', 'hate', 'terrible', 'awful', 'horrible', 'bad',
|
||||||
|
'angry', 'frustrated', 'disappointed', 'worried', 'concern',
|
||||||
|
'problem', 'issue', 'wrong', 'fail', 'error', 'upset'
|
||||||
|
]
|
||||||
|
|
||||||
|
sentiment_scores = []
|
||||||
|
|
||||||
|
for message in message_list:
|
||||||
|
content_lower = message.content.lower()
|
||||||
|
positive_count = sum(1 for word in positive_words if word in content_lower)
|
||||||
|
negative_count = sum(1 for word in negative_words if word in content_lower)
|
||||||
|
|
||||||
|
if positive_count + negative_count == 0:
|
||||||
|
sentiment_scores.append(0.5) # Neutral
|
||||||
|
else:
|
||||||
|
# Calculate sentiment ratio
|
||||||
|
total_sentiment_words = positive_count + negative_count
|
||||||
|
sentiment_ratio = positive_count / total_sentiment_words
|
||||||
|
sentiment_scores.append(sentiment_ratio)
|
||||||
|
|
||||||
|
# Return average sentiment
|
||||||
|
return sum(sentiment_scores) / len(sentiment_scores) if sentiment_scores else 0.5
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error calculating sentiment score: {e}")
|
||||||
|
return 0.5
|
||||||
|
|
||||||
|
async def _detect_conflicts(self, session, conversation) -> bool:
|
||||||
|
"""Detect conflicts in conversation content"""
|
||||||
|
try:
|
||||||
|
# Get messages for this conversation
|
||||||
|
messages_query = select(Message).where(
|
||||||
|
Message.conversation_id == conversation.id
|
||||||
|
)
|
||||||
|
|
||||||
|
messages = await session.scalars(messages_query)
|
||||||
|
message_list = list(messages)
|
||||||
|
|
||||||
|
if not message_list:
|
||||||
|
return False
|
||||||
|
|
||||||
|
conflict_indicators = [
|
||||||
|
'disagree', 'wrong', "don't think", 'but', 'however', 'actually',
|
||||||
|
'argue', 'conflict', 'dispute', 'oppose', 'against', 'contradict',
|
||||||
|
'reject', 'refuse', 'deny', 'challenge', 'question', 'doubt'
|
||||||
|
]
|
||||||
|
|
||||||
|
conflict_score = 0
|
||||||
|
total_messages = len(message_list)
|
||||||
|
|
||||||
|
for message in message_list:
|
||||||
|
content_lower = message.content.lower()
|
||||||
|
conflicts_found = sum(1 for indicator in conflict_indicators
|
||||||
|
if indicator in content_lower)
|
||||||
|
if conflicts_found > 0:
|
||||||
|
conflict_score += 1
|
||||||
|
|
||||||
|
# Consider it a conflict if more than 30% of messages contain conflict indicators
|
||||||
|
conflict_ratio = conflict_score / total_messages if total_messages > 0 else 0
|
||||||
|
return conflict_ratio > 0.3
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error detecting conflicts: {e}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
async def _extract_creative_elements(self, session, conversation) -> List[str]:
|
||||||
|
"""Extract creative elements from conversation content"""
|
||||||
|
try:
|
||||||
|
# Get messages for this conversation
|
||||||
|
messages_query = select(Message).where(
|
||||||
|
Message.conversation_id == conversation.id
|
||||||
|
)
|
||||||
|
|
||||||
|
messages = await session.scalars(messages_query)
|
||||||
|
message_list = list(messages)
|
||||||
|
|
||||||
|
if not message_list:
|
||||||
|
return []
|
||||||
|
|
||||||
|
creative_patterns = {
|
||||||
|
'poetry': ['poem', 'verse', 'rhyme', 'metaphor', 'stanza'],
|
||||||
|
'storytelling': ['story', 'tale', 'narrative', 'character', 'plot', 'once upon'],
|
||||||
|
'music': ['song', 'melody', 'rhythm', 'note', 'chord', 'harmony'],
|
||||||
|
'art': ['draw', 'paint', 'sketch', 'color', 'canvas', 'brush'],
|
||||||
|
'philosophy': ['meaning', 'existence', 'reality', 'consciousness', 'truth'],
|
||||||
|
'creativity': ['create', 'imagine', 'invent', 'design', 'inspiration'],
|
||||||
|
'humor': ['joke', 'funny', 'laugh', 'humor', 'wit', 'amusing'],
|
||||||
|
'worldbuilding': ['world', 'universe', 'realm', 'dimension', 'kingdom']
|
||||||
|
}
|
||||||
|
|
||||||
|
found_elements = []
|
||||||
|
|
||||||
|
# Combine all message content
|
||||||
|
all_content = ' '.join(message.content.lower() for message in message_list)
|
||||||
|
|
||||||
|
for element_type, keywords in creative_patterns.items():
|
||||||
|
keyword_count = sum(1 for keyword in keywords if keyword in all_content)
|
||||||
|
if keyword_count >= 2: # Require at least 2 mentions
|
||||||
|
found_elements.append(element_type)
|
||||||
|
|
||||||
|
return found_elements
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error extracting creative elements: {e}")
|
||||||
|
return []
|
||||||
@@ -105,28 +105,89 @@ class SystemService:
|
|||||||
logger.error(f"Error resuming system: {e}")
|
logger.error(f"Error resuming system: {e}")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
async def get_configuration(self) -> SystemConfiguration:
|
async def get_configuration(self) -> Dict[str, Any]:
|
||||||
"""Get system configuration"""
|
"""Get system configuration from environment variables"""
|
||||||
# Default configuration values
|
import os
|
||||||
return SystemConfiguration(
|
|
||||||
conversation_frequency=0.5,
|
return {
|
||||||
response_delay_min=1.0,
|
# LLM Control (COST PROTECTION)
|
||||||
response_delay_max=5.0,
|
"llm_enabled": os.getenv("LLM_ENABLED", "false").lower() == "true",
|
||||||
personality_change_rate=0.1,
|
"conversation_frequency": float(os.getenv("CONVERSATION_FREQUENCY", "0.5")),
|
||||||
memory_retention_days=90,
|
"response_delay_min": float(os.getenv("RESPONSE_DELAY_MIN", "1.0")),
|
||||||
max_conversation_length=50,
|
"response_delay_max": float(os.getenv("RESPONSE_DELAY_MAX", "5.0")),
|
||||||
creativity_boost=True,
|
"personality_change_rate": float(os.getenv("PERSONALITY_CHANGE_RATE", "0.1")),
|
||||||
conflict_resolution_enabled=True,
|
"memory_retention_days": int(os.getenv("MEMORY_RETENTION_DAYS", "90")),
|
||||||
safety_monitoring=True,
|
"max_conversation_length": int(os.getenv("MAX_CONVERSATION_LENGTH", "50")),
|
||||||
auto_moderation=False,
|
"creativity_boost": os.getenv("CREATIVITY_BOOST", "true").lower() == "true",
|
||||||
backup_frequency_hours=24
|
"safety_monitoring": os.getenv("SAFETY_MONITORING", "false").lower() == "true",
|
||||||
)
|
"auto_moderation": os.getenv("AUTO_MODERATION", "false").lower() == "true",
|
||||||
|
"quiet_hours_enabled": os.getenv("QUIET_HOURS_ENABLED", "true").lower() == "true",
|
||||||
|
"quiet_hours_start": int(os.getenv("QUIET_HOURS_START", "23")),
|
||||||
|
"quiet_hours_end": int(os.getenv("QUIET_HOURS_END", "7")),
|
||||||
|
"min_delay_seconds": int(os.getenv("MIN_DELAY_SECONDS", "30")),
|
||||||
|
"max_delay_seconds": int(os.getenv("MAX_DELAY_SECONDS", "300")),
|
||||||
|
"llm_model": os.getenv("AI_MODEL", ""),
|
||||||
|
"llm_max_tokens": int(os.getenv("AI_MAX_TOKENS", "2000")),
|
||||||
|
"llm_temperature": float(os.getenv("LLM_TEMPERATURE", "0.8")),
|
||||||
|
"llm_timeout": int(os.getenv("LLM_TIMEOUT", "300")),
|
||||||
|
"discord_guild_id": os.getenv("DISCORD_GUILD_ID", ""),
|
||||||
|
"discord_channel_id": os.getenv("DISCORD_CHANNEL_ID", "")
|
||||||
|
}
|
||||||
|
|
||||||
async def update_configuration(self, config: Dict[str, Any]):
|
async def update_configuration(self, config: Dict[str, Any]):
|
||||||
"""Update system configuration"""
|
"""Update system configuration"""
|
||||||
try:
|
try:
|
||||||
|
import os
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
logger.info(f"Updating system configuration: {config}")
|
logger.info(f"Updating system configuration: {config}")
|
||||||
# Would integrate with main application to update configuration
|
|
||||||
|
# Update environment variables in memory
|
||||||
|
if 'llm_enabled' in config:
|
||||||
|
# If enabling LLM, validate provider first
|
||||||
|
if config['llm_enabled']:
|
||||||
|
validation_result = await self._validate_llm_providers()
|
||||||
|
if not validation_result['valid']:
|
||||||
|
logger.error(f"LLM validation failed: {validation_result['error']}")
|
||||||
|
raise ValueError(f"Cannot enable LLM: {validation_result['error']}")
|
||||||
|
|
||||||
|
os.environ['LLM_ENABLED'] = str(config['llm_enabled']).lower()
|
||||||
|
|
||||||
|
# Also update the database for persistence
|
||||||
|
await self._update_llm_global_setting(config['llm_enabled'])
|
||||||
|
|
||||||
|
# Invalidate LLM cache in all clients
|
||||||
|
await self._invalidate_llm_cache()
|
||||||
|
|
||||||
|
# AUDIT: Log LLM enable/disable action
|
||||||
|
await self._audit_llm_change(config['llm_enabled'])
|
||||||
|
|
||||||
|
logger.warning(f"LLM {'ENABLED' if config['llm_enabled'] else 'DISABLED'} - API costs {'WILL' if config['llm_enabled'] else 'will NOT'} be incurred")
|
||||||
|
|
||||||
|
# Update other configuration values
|
||||||
|
config_mapping = {
|
||||||
|
'conversation_frequency': 'CONVERSATION_FREQUENCY',
|
||||||
|
'response_delay_min': 'RESPONSE_DELAY_MIN',
|
||||||
|
'response_delay_max': 'RESPONSE_DELAY_MAX',
|
||||||
|
'personality_change_rate': 'PERSONALITY_CHANGE_RATE',
|
||||||
|
'memory_retention_days': 'MEMORY_RETENTION_DAYS',
|
||||||
|
'max_conversation_length': 'MAX_CONVERSATION_LENGTH',
|
||||||
|
'creativity_boost': 'CREATIVITY_BOOST',
|
||||||
|
'safety_monitoring': 'SAFETY_MONITORING',
|
||||||
|
'auto_moderation': 'AUTO_MODERATION',
|
||||||
|
'quiet_hours_enabled': 'QUIET_HOURS_ENABLED',
|
||||||
|
'quiet_hours_start': 'QUIET_HOURS_START',
|
||||||
|
'quiet_hours_end': 'QUIET_HOURS_END',
|
||||||
|
'min_delay_seconds': 'MIN_DELAY_SECONDS',
|
||||||
|
'max_delay_seconds': 'MAX_DELAY_SECONDS'
|
||||||
|
}
|
||||||
|
|
||||||
|
for config_key, env_key in config_mapping.items():
|
||||||
|
if config_key in config:
|
||||||
|
os.environ[env_key] = str(config[config_key])
|
||||||
|
|
||||||
|
# Update .env file for persistence
|
||||||
|
await self._update_env_file(config)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error updating configuration: {e}")
|
logger.error(f"Error updating configuration: {e}")
|
||||||
@@ -421,3 +482,368 @@ class SystemService:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error activating scenario: {e}")
|
logger.error(f"Error activating scenario: {e}")
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
async def get_llm_providers(self) -> Dict[str, Any]:
|
||||||
|
"""Get all LLM provider configurations and their status"""
|
||||||
|
try:
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
|
||||||
|
# Ensure client is initialized
|
||||||
|
if not multi_llm_client.initialized:
|
||||||
|
await multi_llm_client.initialize()
|
||||||
|
|
||||||
|
# Get provider info and health status
|
||||||
|
provider_info = multi_llm_client.get_provider_info()
|
||||||
|
health_status = await multi_llm_client.health_check()
|
||||||
|
current_provider = multi_llm_client.get_current_provider()
|
||||||
|
|
||||||
|
# Combine info with health status
|
||||||
|
providers = {}
|
||||||
|
for name, info in provider_info.items():
|
||||||
|
providers[name] = {
|
||||||
|
**info,
|
||||||
|
'healthy': health_status.get(name, False),
|
||||||
|
'is_current': name == current_provider
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
'providers': providers,
|
||||||
|
'current_provider': current_provider,
|
||||||
|
'total_providers': len(providers),
|
||||||
|
'healthy_providers': len([p for p in providers.values() if p['healthy']])
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting LLM providers: {e}")
|
||||||
|
return {
|
||||||
|
'providers': {},
|
||||||
|
'current_provider': None,
|
||||||
|
'total_providers': 0,
|
||||||
|
'healthy_providers': 0,
|
||||||
|
'error': str(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
async def update_llm_providers(self, providers_config: Dict[str, Any]):
|
||||||
|
"""Update LLM provider configurations"""
|
||||||
|
try:
|
||||||
|
from utils.config import get_settings
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
# Update environment variables for provider settings
|
||||||
|
for provider_name, config in providers_config.items():
|
||||||
|
if 'enabled' in config:
|
||||||
|
env_var = f"{provider_name.upper()}_ENABLED"
|
||||||
|
os.environ[env_var] = str(config['enabled']).lower()
|
||||||
|
|
||||||
|
if 'config' in config:
|
||||||
|
provider_config = config['config']
|
||||||
|
if 'api_key' in provider_config:
|
||||||
|
env_var = f"{provider_name.upper()}_API_KEY"
|
||||||
|
os.environ[env_var] = provider_config['api_key']
|
||||||
|
|
||||||
|
if 'model' in provider_config:
|
||||||
|
env_var = f"{provider_name.upper()}_MODEL"
|
||||||
|
os.environ[env_var] = provider_config['model']
|
||||||
|
|
||||||
|
# Update configuration file
|
||||||
|
config_path = Path("config/fishbowl_config.json")
|
||||||
|
if config_path.exists():
|
||||||
|
with open(config_path, 'r') as f:
|
||||||
|
file_config = json.load(f)
|
||||||
|
|
||||||
|
# Update providers section
|
||||||
|
if 'llm' not in file_config:
|
||||||
|
file_config['llm'] = {}
|
||||||
|
|
||||||
|
file_config['llm']['providers'] = providers_config
|
||||||
|
|
||||||
|
# Write back to file
|
||||||
|
with open(config_path, 'w') as f:
|
||||||
|
json.dump(file_config, f, indent=2)
|
||||||
|
|
||||||
|
logger.info("Updated LLM provider configuration")
|
||||||
|
|
||||||
|
# Reinitialize the LLM client with new configuration
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
multi_llm_client.initialized = False
|
||||||
|
await multi_llm_client.initialize()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error updating LLM providers: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def test_llm_provider(self, provider_name: str) -> Dict[str, Any]:
|
||||||
|
"""Test a specific LLM provider"""
|
||||||
|
try:
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
from llm.providers import LLMRequest
|
||||||
|
|
||||||
|
# Ensure client is initialized
|
||||||
|
if not multi_llm_client.initialized:
|
||||||
|
await multi_llm_client.initialize()
|
||||||
|
|
||||||
|
# Check if provider exists
|
||||||
|
provider_info = multi_llm_client.get_provider_info()
|
||||||
|
if provider_name not in provider_info:
|
||||||
|
return {
|
||||||
|
'success': False,
|
||||||
|
'error': f'Provider {provider_name} not found'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test health check first
|
||||||
|
health_status = await multi_llm_client.health_check()
|
||||||
|
if not health_status.get(provider_name, False):
|
||||||
|
return {
|
||||||
|
'success': False,
|
||||||
|
'error': f'Provider {provider_name} failed health check'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Test actual generation
|
||||||
|
original_provider = multi_llm_client.get_current_provider()
|
||||||
|
|
||||||
|
# Temporarily switch to test provider
|
||||||
|
multi_llm_client.set_provider(provider_name)
|
||||||
|
|
||||||
|
try:
|
||||||
|
test_request = LLMRequest(
|
||||||
|
prompt="Respond with exactly: 'Test successful'",
|
||||||
|
max_tokens=10,
|
||||||
|
temperature=0.1
|
||||||
|
)
|
||||||
|
|
||||||
|
response = await multi_llm_client.generate_response(test_request)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'success': response.success,
|
||||||
|
'response': response.content if response.success else None,
|
||||||
|
'error': response.error if not response.success else None,
|
||||||
|
'provider': response.provider,
|
||||||
|
'model': response.model,
|
||||||
|
'tokens_used': response.tokens_used
|
||||||
|
}
|
||||||
|
|
||||||
|
finally:
|
||||||
|
# Restore original provider
|
||||||
|
if original_provider:
|
||||||
|
multi_llm_client.set_provider(original_provider)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error testing LLM provider {provider_name}: {e}")
|
||||||
|
return {
|
||||||
|
'success': False,
|
||||||
|
'error': str(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
async def get_llm_health(self) -> Dict[str, Any]:
|
||||||
|
"""Get health status of all LLM providers"""
|
||||||
|
try:
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
|
||||||
|
# Ensure client is initialized
|
||||||
|
if not multi_llm_client.initialized:
|
||||||
|
await multi_llm_client.initialize()
|
||||||
|
|
||||||
|
health_status = await multi_llm_client.health_check()
|
||||||
|
provider_info = multi_llm_client.get_provider_info()
|
||||||
|
current_provider = multi_llm_client.get_current_provider()
|
||||||
|
|
||||||
|
return {
|
||||||
|
'health_status': health_status,
|
||||||
|
'current_provider': current_provider,
|
||||||
|
'timestamp': datetime.now(timezone.utc).isoformat(),
|
||||||
|
'summary': {
|
||||||
|
'total': len(health_status),
|
||||||
|
'healthy': len([h for h in health_status.values() if h]),
|
||||||
|
'unhealthy': len([h for h in health_status.values() if not h])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting LLM health: {e}")
|
||||||
|
return {
|
||||||
|
'health_status': {},
|
||||||
|
'current_provider': None,
|
||||||
|
'timestamp': datetime.now(timezone.utc).isoformat(),
|
||||||
|
'error': str(e)
|
||||||
|
}
|
||||||
|
|
||||||
|
async def switch_llm_provider(self, provider_name: str):
|
||||||
|
"""Switch to a different primary LLM provider"""
|
||||||
|
try:
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
|
||||||
|
# Ensure client is initialized
|
||||||
|
if not multi_llm_client.initialized:
|
||||||
|
await multi_llm_client.initialize()
|
||||||
|
|
||||||
|
# Check if provider exists and is healthy
|
||||||
|
provider_info = multi_llm_client.get_provider_info()
|
||||||
|
if provider_name not in provider_info:
|
||||||
|
raise ValueError(f"Provider {provider_name} not found")
|
||||||
|
|
||||||
|
health_status = await multi_llm_client.health_check()
|
||||||
|
if not health_status.get(provider_name, False):
|
||||||
|
raise ValueError(f"Provider {provider_name} is not healthy")
|
||||||
|
|
||||||
|
# Switch provider
|
||||||
|
success = multi_llm_client.set_provider(provider_name)
|
||||||
|
if not success:
|
||||||
|
raise ValueError(f"Failed to switch to provider {provider_name}")
|
||||||
|
|
||||||
|
logger.info(f"Switched primary LLM provider to: {provider_name}")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error switching LLM provider to {provider_name}: {e}")
|
||||||
|
raise
|
||||||
|
|
||||||
|
async def _update_llm_global_setting(self, enabled: bool):
|
||||||
|
"""Update the global LLM enabled setting in database"""
|
||||||
|
try:
|
||||||
|
from sqlalchemy import text
|
||||||
|
from database.connection import get_db_session
|
||||||
|
|
||||||
|
async with get_db_session() as session:
|
||||||
|
await session.execute(
|
||||||
|
text("""
|
||||||
|
UPDATE system_configuration
|
||||||
|
SET config_value = :enabled, version = version + 1
|
||||||
|
WHERE config_section = 'llm' AND config_key = 'global_enabled'
|
||||||
|
"""),
|
||||||
|
{"enabled": str(enabled).lower()}
|
||||||
|
)
|
||||||
|
await session.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error updating LLM global setting in database: {e}")
|
||||||
|
# Don't raise - this is a secondary storage
|
||||||
|
|
||||||
|
async def _update_env_file(self, config: Dict[str, Any]):
|
||||||
|
"""Update .env file with new configuration values"""
|
||||||
|
try:
|
||||||
|
from pathlib import Path
|
||||||
|
import re
|
||||||
|
|
||||||
|
env_path = Path(__file__).parent.parent.parent.parent / ".env"
|
||||||
|
|
||||||
|
if not env_path.exists():
|
||||||
|
logger.warning(".env file not found for updating")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Read current .env file
|
||||||
|
with open(env_path, 'r') as f:
|
||||||
|
env_content = f.read()
|
||||||
|
|
||||||
|
# Update LLM_ENABLED if present
|
||||||
|
if 'llm_enabled' in config:
|
||||||
|
env_value = str(config['llm_enabled']).lower()
|
||||||
|
pattern = r'^LLM_ENABLED=.*$'
|
||||||
|
replacement = f'LLM_ENABLED={env_value}'
|
||||||
|
|
||||||
|
if re.search(pattern, env_content, re.MULTILINE):
|
||||||
|
env_content = re.sub(pattern, replacement, env_content, flags=re.MULTILINE)
|
||||||
|
else:
|
||||||
|
# Add it if not present
|
||||||
|
env_content += f'\nLLM_ENABLED={env_value}\n'
|
||||||
|
|
||||||
|
# Write back to .env file
|
||||||
|
with open(env_path, 'w') as f:
|
||||||
|
f.write(env_content)
|
||||||
|
|
||||||
|
logger.info("Updated .env file with new configuration")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error updating .env file: {e}")
|
||||||
|
# Don't raise - this is a secondary operation
|
||||||
|
|
||||||
|
async def _invalidate_llm_cache(self):
|
||||||
|
"""Invalidate LLM cache in global client"""
|
||||||
|
try:
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
multi_llm_client._invalidate_llm_cache()
|
||||||
|
logger.info("Invalidated LLM cache after settings change")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error invalidating LLM cache: {e}")
|
||||||
|
# Don't raise - this is not critical
|
||||||
|
|
||||||
|
async def _validate_llm_providers(self) -> Dict[str, Any]:
|
||||||
|
"""Validate that at least one LLM provider is properly configured"""
|
||||||
|
try:
|
||||||
|
from llm.multi_provider_client import multi_llm_client
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Check if we have at least one provider configured
|
||||||
|
providers_to_check = []
|
||||||
|
|
||||||
|
# Check custom provider (current setup)
|
||||||
|
if os.getenv('AI_API_KEY') and os.getenv('AI_API_BASE'):
|
||||||
|
providers_to_check.append('current_custom')
|
||||||
|
|
||||||
|
# Check OpenAI
|
||||||
|
if os.getenv('OPENAI_API_KEY'):
|
||||||
|
providers_to_check.append('openai')
|
||||||
|
|
||||||
|
# Check OpenRouter
|
||||||
|
if os.getenv('OPENROUTER_API_KEY'):
|
||||||
|
providers_to_check.append('openrouter')
|
||||||
|
|
||||||
|
# Check Gemini
|
||||||
|
if os.getenv('GEMINI_API_KEY'):
|
||||||
|
providers_to_check.append('gemini')
|
||||||
|
|
||||||
|
if not providers_to_check:
|
||||||
|
return {
|
||||||
|
'valid': False,
|
||||||
|
'error': 'No LLM providers configured. Please set up at least one provider with valid API keys.'
|
||||||
|
}
|
||||||
|
|
||||||
|
# Try to test the first available provider
|
||||||
|
for provider_name in providers_to_check:
|
||||||
|
try:
|
||||||
|
test_result = await self.test_llm_provider(provider_name)
|
||||||
|
if test_result.get('success'):
|
||||||
|
return {
|
||||||
|
'valid': True,
|
||||||
|
'provider': provider_name,
|
||||||
|
'test_result': test_result
|
||||||
|
}
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Provider {provider_name} test failed: {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
return {
|
||||||
|
'valid': False,
|
||||||
|
'error': f'All configured providers failed validation. Checked: {", ".join(providers_to_check)}'
|
||||||
|
}
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error validating LLM providers: {e}")
|
||||||
|
return {
|
||||||
|
'valid': False,
|
||||||
|
'error': f'Validation error: {str(e)}'
|
||||||
|
}
|
||||||
|
|
||||||
|
async def _audit_llm_change(self, enabled: bool):
|
||||||
|
"""Audit log for LLM enable/disable actions"""
|
||||||
|
try:
|
||||||
|
from .audit_service import AuditService
|
||||||
|
|
||||||
|
await AuditService.log_admin_action(
|
||||||
|
admin_user="admin", # TODO: Get actual admin user from context
|
||||||
|
action_type="llm_global_toggle",
|
||||||
|
resource_affected="system:llm_enabled",
|
||||||
|
changes_made={
|
||||||
|
"llm_enabled": enabled,
|
||||||
|
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||||
|
"cost_warning": "LLM enabled - API costs will be incurred" if enabled else "LLM disabled - no API costs"
|
||||||
|
},
|
||||||
|
request_ip=None, # TODO: Get from request context
|
||||||
|
success=True
|
||||||
|
)
|
||||||
|
|
||||||
|
logger.info(f"Audited LLM {'enable' if enabled else 'disable'} action")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error logging LLM audit: {e}")
|
||||||
|
# Don't raise - audit failure shouldn't block the operation
|
||||||
@@ -15,7 +15,7 @@ class WebSocketManager:
|
|||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.sio = socketio.AsyncServer(
|
self.sio = socketio.AsyncServer(
|
||||||
cors_allowed_origins=["http://localhost:3000", "http://127.0.0.1:3000"],
|
cors_allowed_origins="*", # Allow all origins for development
|
||||||
logger=True,
|
logger=True,
|
||||||
engineio_logger=True
|
engineio_logger=True
|
||||||
)
|
)
|
||||||
@@ -54,9 +54,9 @@ class WebSocketManager:
|
|||||||
"""Handle ping from client"""
|
"""Handle ping from client"""
|
||||||
await self.sio.emit('pong', {'timestamp': asyncio.get_event_loop().time()}, room=sid)
|
await self.sio.emit('pong', {'timestamp': asyncio.get_event_loop().time()}, room=sid)
|
||||||
|
|
||||||
def get_app(self):
|
def get_app(self, other_asgi_app=None):
|
||||||
"""Get the Socket.IO ASGI app"""
|
"""Get the Socket.IO ASGI app"""
|
||||||
return socketio.ASGIApp(self.sio)
|
return socketio.ASGIApp(self.sio, other_asgi_app)
|
||||||
|
|
||||||
async def send_personal_message(self, message: Dict[str, Any], sid: str):
|
async def send_personal_message(self, message: Dict[str, Any], sid: str):
|
||||||
"""Send message to specific client"""
|
"""Send message to specific client"""
|
||||||
|
|||||||
@@ -3,11 +3,11 @@ from discord.ext import commands
|
|||||||
import asyncio
|
import asyncio
|
||||||
import logging
|
import logging
|
||||||
from typing import Optional, List, Dict, Any
|
from typing import Optional, List, Dict, Any
|
||||||
from datetime import datetime, timezone
|
from datetime import datetime, timezone, timedelta
|
||||||
from utils.logging import log_error_with_context, log_character_action
|
from utils.logging import log_error_with_context, log_character_action
|
||||||
from database.connection import get_db_session
|
from database.connection import get_db_session
|
||||||
from database.models import Character, Message, Conversation
|
from database.models import Character, Message, Conversation, Memory
|
||||||
from sqlalchemy import select, and_, or_
|
from sqlalchemy import select, and_, or_, func, text
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@@ -116,7 +116,7 @@ class CommandHandler:
|
|||||||
async with get_db_session() as session:
|
async with get_db_session() as session:
|
||||||
# Get character count
|
# Get character count
|
||||||
character_query = select(Character).where(Character.is_active == True)
|
character_query = select(Character).where(Character.is_active == True)
|
||||||
character_count = len(await session.scalars(character_query).all())
|
character_count = len((await session.scalars(character_query)).all())
|
||||||
|
|
||||||
# Get recent message count
|
# Get recent message count
|
||||||
from sqlalchemy import func
|
from sqlalchemy import func
|
||||||
@@ -197,11 +197,12 @@ class CommandHandler:
|
|||||||
async def trigger_conversation(ctx, *, topic: str = None):
|
async def trigger_conversation(ctx, *, topic: str = None):
|
||||||
"""Manually trigger a conversation"""
|
"""Manually trigger a conversation"""
|
||||||
try:
|
try:
|
||||||
|
logger.info(f"Trigger command received from {ctx.author} with topic: {topic}")
|
||||||
await self.conversation_engine.trigger_conversation(topic)
|
await self.conversation_engine.trigger_conversation(topic)
|
||||||
await ctx.send(f"Triggered conversation{' about: ' + topic if topic else ''}")
|
await ctx.send(f"Triggered conversation{' about: ' + topic if topic else ''}")
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log_error_with_context(e, {"command": "trigger", "topic": topic})
|
log_error_with_context(e, {"command": "trigger", "topic": topic, "user": str(ctx.author)})
|
||||||
await ctx.send("Error triggering conversation.")
|
await ctx.send("Error triggering conversation.")
|
||||||
|
|
||||||
@self.bot.command(name='pause')
|
@self.bot.command(name='pause')
|
||||||
@@ -272,6 +273,336 @@ class CommandHandler:
|
|||||||
log_error_with_context(e, {"command": "stats"})
|
log_error_with_context(e, {"command": "stats"})
|
||||||
await ctx.send("Error getting statistics.")
|
await ctx.send("Error getting statistics.")
|
||||||
|
|
||||||
|
@self.bot.command(name='permissions')
|
||||||
|
async def check_permissions(ctx):
|
||||||
|
"""Check bot permissions in current channel"""
|
||||||
|
permissions = ctx.channel.permissions_for(ctx.guild.me)
|
||||||
|
embed = discord.Embed(
|
||||||
|
title="Bot Permissions",
|
||||||
|
color=discord.Color.blue()
|
||||||
|
)
|
||||||
|
embed.add_field(name="Manage Messages", value="✅" if permissions.manage_messages else "❌", inline=True)
|
||||||
|
embed.add_field(name="Read Message History", value="✅" if permissions.read_message_history else "❌", inline=True)
|
||||||
|
embed.add_field(name="Send Messages", value="✅" if permissions.send_messages else "❌", inline=True)
|
||||||
|
embed.add_field(name="Administrator", value="✅" if permissions.administrator else "❌", inline=True)
|
||||||
|
await ctx.send(embed=embed)
|
||||||
|
|
||||||
|
@self.bot.command(name='memory-stats')
|
||||||
|
async def memory_stats(ctx):
|
||||||
|
"""Show memory statistics for all characters"""
|
||||||
|
try:
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# Get memory counts by character
|
||||||
|
query = select(
|
||||||
|
Character.name,
|
||||||
|
func.count(Memory.id).label('memory_count'),
|
||||||
|
func.min(Memory.timestamp).label('oldest'),
|
||||||
|
func.max(Memory.timestamp).label('newest')
|
||||||
|
).select_from(
|
||||||
|
Character
|
||||||
|
).outerjoin(
|
||||||
|
Memory, Character.id == Memory.character_id
|
||||||
|
).group_by(
|
||||||
|
Character.id, Character.name
|
||||||
|
).order_by(
|
||||||
|
func.count(Memory.id).desc()
|
||||||
|
)
|
||||||
|
|
||||||
|
results = await session.execute(query)
|
||||||
|
stats = results.fetchall()
|
||||||
|
|
||||||
|
# Get memory type breakdown
|
||||||
|
type_query = select(
|
||||||
|
Memory.memory_type,
|
||||||
|
func.count(Memory.id).label('count')
|
||||||
|
).group_by(Memory.memory_type).order_by(func.count(Memory.id).desc())
|
||||||
|
|
||||||
|
type_results = await session.execute(type_query)
|
||||||
|
type_stats = type_results.fetchall()
|
||||||
|
|
||||||
|
embed = discord.Embed(
|
||||||
|
title="🧠 Memory Statistics",
|
||||||
|
color=discord.Color.blue(),
|
||||||
|
timestamp=datetime.now(timezone.utc)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Character memory counts
|
||||||
|
for stat in stats:
|
||||||
|
if stat.memory_count > 0:
|
||||||
|
oldest = stat.oldest.strftime('%m/%d %H:%M') if stat.oldest else 'N/A'
|
||||||
|
newest = stat.newest.strftime('%m/%d %H:%M') if stat.newest else 'N/A'
|
||||||
|
embed.add_field(
|
||||||
|
name=f"{stat.name}",
|
||||||
|
value=f"**{stat.memory_count:,}** memories\n{oldest} → {newest}",
|
||||||
|
inline=True
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
embed.add_field(
|
||||||
|
name=f"{stat.name}",
|
||||||
|
value="No memories",
|
||||||
|
inline=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Memory type breakdown
|
||||||
|
if type_stats:
|
||||||
|
type_text = "\n".join([f"**{t.memory_type}**: {t.count:,}" for t in type_stats])
|
||||||
|
embed.add_field(
|
||||||
|
name="Memory Types",
|
||||||
|
value=type_text,
|
||||||
|
inline=False
|
||||||
|
)
|
||||||
|
|
||||||
|
# Total count
|
||||||
|
total_memories = sum(stat.memory_count for stat in stats)
|
||||||
|
embed.add_field(
|
||||||
|
name="Total Memories",
|
||||||
|
value=f"**{total_memories:,}** across all characters",
|
||||||
|
inline=False
|
||||||
|
)
|
||||||
|
|
||||||
|
await ctx.send(embed=embed)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log_error_with_context(e, {"command": "memory-stats"})
|
||||||
|
await ctx.send("Error getting memory statistics.")
|
||||||
|
|
||||||
|
@self.bot.command(name='wipe-memories')
|
||||||
|
@commands.has_permissions(administrator=True)
|
||||||
|
async def wipe_memories(ctx, character_name: str = None):
|
||||||
|
"""Wipe character memories (use 'all' for all characters)"""
|
||||||
|
try:
|
||||||
|
# Confirm action
|
||||||
|
if character_name == 'all':
|
||||||
|
confirmation_text = "This will delete ALL memories for ALL characters."
|
||||||
|
elif character_name:
|
||||||
|
confirmation_text = f"This will delete ALL memories for character '{character_name}'."
|
||||||
|
else:
|
||||||
|
confirmation_text = "Usage: !wipe-memories <character_name> or !wipe-memories all"
|
||||||
|
await ctx.send(confirmation_text)
|
||||||
|
return
|
||||||
|
|
||||||
|
embed = discord.Embed(
|
||||||
|
title="⚠️ Memory Wipe Confirmation",
|
||||||
|
description=f"{confirmation_text}\nReact with ✅ to confirm or ❌ to cancel.",
|
||||||
|
color=discord.Color.red()
|
||||||
|
)
|
||||||
|
|
||||||
|
confirmation_msg = await ctx.send(embed=embed)
|
||||||
|
await confirmation_msg.add_reaction("✅")
|
||||||
|
await confirmation_msg.add_reaction("❌")
|
||||||
|
|
||||||
|
def check(reaction, user):
|
||||||
|
return user == ctx.author and str(reaction.emoji) in ["✅", "❌"] and reaction.message.id == confirmation_msg.id
|
||||||
|
|
||||||
|
try:
|
||||||
|
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=check)
|
||||||
|
|
||||||
|
if str(reaction.emoji) == "✅":
|
||||||
|
# Delete confirmation message
|
||||||
|
await confirmation_msg.delete()
|
||||||
|
|
||||||
|
# Send status message
|
||||||
|
status_msg = await ctx.send("🧹 Wiping memories...")
|
||||||
|
|
||||||
|
# Wipe memories in database
|
||||||
|
async with get_db_session() as session:
|
||||||
|
if character_name == 'all':
|
||||||
|
# Delete all memories
|
||||||
|
await session.execute(text("DELETE FROM memories"))
|
||||||
|
await session.execute(text("DELETE FROM vector_embeddings"))
|
||||||
|
memory_count = "all"
|
||||||
|
else:
|
||||||
|
# Delete memories for specific character
|
||||||
|
char_query = select(Character).where(Character.name == character_name)
|
||||||
|
character = await session.scalar(char_query)
|
||||||
|
|
||||||
|
if not character:
|
||||||
|
await status_msg.edit(content=f"❌ Character '{character_name}' not found.")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Count memories before deletion
|
||||||
|
count_query = select(func.count(Memory.id)).where(Memory.character_id == character.id)
|
||||||
|
memory_count = await session.scalar(count_query)
|
||||||
|
|
||||||
|
# Delete memories
|
||||||
|
await session.execute(
|
||||||
|
text("DELETE FROM memories WHERE character_id = :char_id"),
|
||||||
|
{"char_id": character.id}
|
||||||
|
)
|
||||||
|
await session.execute(
|
||||||
|
text("DELETE FROM vector_embeddings WHERE character_id = :char_id"),
|
||||||
|
{"char_id": character.id}
|
||||||
|
)
|
||||||
|
|
||||||
|
await session.commit()
|
||||||
|
|
||||||
|
# Clear Qdrant collection
|
||||||
|
try:
|
||||||
|
import requests
|
||||||
|
qdrant_url = "http://qdrant:6333"
|
||||||
|
|
||||||
|
if character_name == 'all':
|
||||||
|
# Recreate collection to clear all vectors
|
||||||
|
requests.delete(f"{qdrant_url}/collections/fishbowl_memories")
|
||||||
|
collection_config = {
|
||||||
|
"vectors": {
|
||||||
|
"size": 384,
|
||||||
|
"distance": "Cosine"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
requests.put(f"{qdrant_url}/collections/fishbowl_memories", json=collection_config)
|
||||||
|
else:
|
||||||
|
# Delete vectors for specific character
|
||||||
|
filter_condition = {
|
||||||
|
"must": [
|
||||||
|
{
|
||||||
|
"key": "character_name",
|
||||||
|
"match": {"value": character_name}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
delete_payload = {"filter": filter_condition}
|
||||||
|
requests.post(f"{qdrant_url}/collections/fishbowl_memories/points/delete", json=delete_payload)
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to clear Qdrant vectors: {e}")
|
||||||
|
|
||||||
|
if character_name == 'all':
|
||||||
|
await status_msg.edit(content="✅ All character memories have been wiped.")
|
||||||
|
else:
|
||||||
|
await status_msg.edit(content=f"✅ Deleted {memory_count} memories for {character_name}.")
|
||||||
|
|
||||||
|
elif str(reaction.emoji) == "❌":
|
||||||
|
await confirmation_msg.edit(content="❌ Memory wipe cancelled.", embed=None)
|
||||||
|
|
||||||
|
except asyncio.TimeoutError:
|
||||||
|
await confirmation_msg.edit(content="⏰ Memory wipe timed out.", embed=None)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log_error_with_context(e, {"command": "wipe-memories", "character": character_name})
|
||||||
|
await ctx.send("Error wiping memories.")
|
||||||
|
|
||||||
|
@self.bot.command(name='wipe')
|
||||||
|
@commands.has_permissions(administrator=True)
|
||||||
|
async def wipe_channel(ctx):
|
||||||
|
"""Wipe all messages in the current channel and reset conversation history"""
|
||||||
|
try:
|
||||||
|
# Confirm action
|
||||||
|
embed = discord.Embed(
|
||||||
|
title="⚠️ Channel Wipe Confirmation",
|
||||||
|
description="This will delete ALL messages in this channel and reset conversation history.\nReact with ✅ to confirm or ❌ to cancel.",
|
||||||
|
color=discord.Color.red()
|
||||||
|
)
|
||||||
|
|
||||||
|
confirmation_msg = await ctx.send(embed=embed)
|
||||||
|
await confirmation_msg.add_reaction("✅")
|
||||||
|
await confirmation_msg.add_reaction("❌")
|
||||||
|
|
||||||
|
def check(reaction, user):
|
||||||
|
return (user == ctx.author and
|
||||||
|
str(reaction.emoji) in ["✅", "❌"] and
|
||||||
|
reaction.message.id == confirmation_msg.id)
|
||||||
|
|
||||||
|
try:
|
||||||
|
reaction, user = await self.bot.wait_for('reaction_add', timeout=30.0, check=check)
|
||||||
|
|
||||||
|
if str(reaction.emoji) == "✅":
|
||||||
|
# Delete confirmation message
|
||||||
|
await confirmation_msg.delete()
|
||||||
|
|
||||||
|
# Send status message
|
||||||
|
status_msg = await ctx.send("🧹 Wiping channel and resetting conversation history...")
|
||||||
|
|
||||||
|
# Use bulk operations for better performance
|
||||||
|
async with get_db_session() as session:
|
||||||
|
await asyncio.gather(
|
||||||
|
self._bulk_delete_discord_messages(ctx.channel, status_msg.id),
|
||||||
|
self._bulk_reset_database(session)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Reset conversation engine state
|
||||||
|
await self.conversation_engine.reset_conversation_state()
|
||||||
|
|
||||||
|
# Update status message
|
||||||
|
await status_msg.edit(content="✅ Channel wiped and conversation history reset! Characters will start fresh.")
|
||||||
|
|
||||||
|
# Delete status message after 10 seconds
|
||||||
|
await asyncio.sleep(10)
|
||||||
|
await status_msg.delete()
|
||||||
|
|
||||||
|
else:
|
||||||
|
await confirmation_msg.edit(content="❌ Channel wipe cancelled.", embed=None)
|
||||||
|
await asyncio.sleep(5)
|
||||||
|
await confirmation_msg.delete()
|
||||||
|
|
||||||
|
except asyncio.TimeoutError:
|
||||||
|
await confirmation_msg.edit(content="⏰ Confirmation timed out. Channel wipe cancelled.", embed=None)
|
||||||
|
await asyncio.sleep(5)
|
||||||
|
await confirmation_msg.delete()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log_error_with_context(e, {"command": "wipe"})
|
||||||
|
await ctx.send("Error wiping channel. Please try again.")
|
||||||
|
|
||||||
|
async def _bulk_delete_discord_messages(self, channel, exclude_message_id: int):
|
||||||
|
"""Efficiently delete Discord messages using bulk operations"""
|
||||||
|
try:
|
||||||
|
messages_to_delete = []
|
||||||
|
old_messages = []
|
||||||
|
|
||||||
|
# Collect messages in batches
|
||||||
|
async for message in channel.history(limit=None):
|
||||||
|
if message.id == exclude_message_id:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Discord bulk delete only works for messages < 14 days old
|
||||||
|
if (datetime.now(timezone.utc) - message.created_at).days < 14:
|
||||||
|
messages_to_delete.append(message)
|
||||||
|
# Bulk delete in chunks of 100 (Discord limit)
|
||||||
|
if len(messages_to_delete) >= 100:
|
||||||
|
await channel.delete_messages(messages_to_delete)
|
||||||
|
messages_to_delete = []
|
||||||
|
await asyncio.sleep(0.1) # Small delay to avoid rate limits
|
||||||
|
else:
|
||||||
|
old_messages.append(message)
|
||||||
|
|
||||||
|
# Delete remaining recent messages
|
||||||
|
if messages_to_delete:
|
||||||
|
if len(messages_to_delete) == 1:
|
||||||
|
await messages_to_delete[0].delete()
|
||||||
|
else:
|
||||||
|
await channel.delete_messages(messages_to_delete)
|
||||||
|
|
||||||
|
# Delete old messages individually (can't bulk delete messages > 14 days)
|
||||||
|
for message in old_messages:
|
||||||
|
try:
|
||||||
|
await message.delete()
|
||||||
|
await asyncio.sleep(0.05) # Small delay to avoid rate limits
|
||||||
|
except (discord.NotFound, discord.Forbidden):
|
||||||
|
pass
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error in bulk message deletion: {e}")
|
||||||
|
|
||||||
|
async def _bulk_reset_database(self, session):
|
||||||
|
"""Efficiently reset database using bulk operations"""
|
||||||
|
try:
|
||||||
|
# Use bulk SQL operations instead of individual deletions
|
||||||
|
await session.execute(
|
||||||
|
text("UPDATE conversations SET is_active = false WHERE is_active = true")
|
||||||
|
)
|
||||||
|
|
||||||
|
# Delete recent messages in bulk
|
||||||
|
await session.execute(
|
||||||
|
text("DELETE FROM messages WHERE timestamp >= :cutoff"),
|
||||||
|
{"cutoff": datetime.now(timezone.utc) - timedelta(hours=24)}
|
||||||
|
)
|
||||||
|
|
||||||
|
await session.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error in bulk database reset: {e}")
|
||||||
|
await session.rollback()
|
||||||
|
|
||||||
async def _get_conversation_stats(self) -> Dict[str, Any]:
|
async def _get_conversation_stats(self) -> Dict[str, Any]:
|
||||||
"""Get conversation statistics"""
|
"""Get conversation statistics"""
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -39,6 +39,7 @@ class Character:
|
|||||||
self.avatar_url = character_data.avatar_url
|
self.avatar_url = character_data.avatar_url
|
||||||
self.is_active = character_data.is_active
|
self.is_active = character_data.is_active
|
||||||
self.last_active = character_data.last_active
|
self.last_active = character_data.last_active
|
||||||
|
self.prompt_template_id = getattr(character_data, 'prompt_template_id', None)
|
||||||
|
|
||||||
# Dynamic state
|
# Dynamic state
|
||||||
self.state = CharacterState()
|
self.state = CharacterState()
|
||||||
@@ -121,8 +122,8 @@ class Character:
|
|||||||
# Update character state
|
# Update character state
|
||||||
await self._update_state_after_response(context, response)
|
await self._update_state_after_response(context, response)
|
||||||
|
|
||||||
# Store as memory
|
# Store memory for significant responses only
|
||||||
await self._store_response_memory(context, response)
|
await self._maybe_store_response_memory(context, response)
|
||||||
|
|
||||||
log_character_action(
|
log_character_action(
|
||||||
self.name,
|
self.name,
|
||||||
@@ -247,6 +248,10 @@ class Character:
|
|||||||
importance=0.8
|
importance=0.8
|
||||||
)
|
)
|
||||||
|
|
||||||
|
# Reset message count if this is an enhanced character
|
||||||
|
if hasattr(self, 'reset_message_count'):
|
||||||
|
await self.reset_message_count()
|
||||||
|
|
||||||
log_character_action(
|
log_character_action(
|
||||||
self.name,
|
self.name,
|
||||||
"self_reflected",
|
"self_reflected",
|
||||||
@@ -294,22 +299,25 @@ class Character:
|
|||||||
# Build dynamic MCP tools section
|
# Build dynamic MCP tools section
|
||||||
mcp_tools_section = await self._build_dynamic_mcp_tools_section()
|
mcp_tools_section = await self._build_dynamic_mcp_tools_section()
|
||||||
|
|
||||||
prompt = f"""You are {self.name}, a character in a Discord chat.
|
# Get the prompt template and apply character data
|
||||||
|
template = await self._get_prompt_template()
|
||||||
|
|
||||||
{system_section}{scenario_section}PERSONALITY: {self.personality}
|
# Replace template variables with character data
|
||||||
|
prompt_base = template.replace('{{char}}', self.name)
|
||||||
|
prompt_base = prompt_base.replace('{{personality}}', self.personality)
|
||||||
|
prompt_base = prompt_base.replace('{{background}}', self.background)
|
||||||
|
prompt_base = prompt_base.replace('{{speaking_style}}', self.speaking_style)
|
||||||
|
prompt_base = prompt_base.replace('{{interests}}', ', '.join(self.interests))
|
||||||
|
prompt_base = prompt_base.replace('{{system_prompt}}', self.system_prompt)
|
||||||
|
|
||||||
SPEAKING STYLE: {self.speaking_style}
|
# Add context information
|
||||||
|
context_section = f"""
|
||||||
|
|
||||||
BACKGROUND: {self.background}
|
CURRENT CONTEXT:
|
||||||
|
Who's here: {', '.join(participants)}
|
||||||
INTERESTS: {', '.join(self.interests)}
|
|
||||||
|
|
||||||
{mcp_tools_section}CURRENT CONTEXT:
|
|
||||||
Topic: {context.get('topic', 'general conversation')}
|
Topic: {context.get('topic', 'general conversation')}
|
||||||
Participants: {', '.join(participants)}
|
|
||||||
Conversation type: {context.get('type', 'ongoing')}
|
|
||||||
|
|
||||||
RELEVANT MEMORIES:
|
MEMORIES:
|
||||||
{self._format_memories(relevant_memories)}
|
{self._format_memories(relevant_memories)}
|
||||||
|
|
||||||
RELATIONSHIPS:
|
RELATIONSHIPS:
|
||||||
@@ -318,16 +326,17 @@ RELATIONSHIPS:
|
|||||||
RECENT CONVERSATION:
|
RECENT CONVERSATION:
|
||||||
{self._format_conversation_history(conversation_history)}
|
{self._format_conversation_history(conversation_history)}
|
||||||
|
|
||||||
Current mood: {self.state.mood}
|
Current mood: {self.state.mood} (energy: {self.state.energy})"""
|
||||||
Energy level: {self.state.energy}
|
|
||||||
|
|
||||||
Respond as {self.name} in a natural, conversational way. Keep responses concise but engaging. Stay true to your personality and speaking style. Use your MCP tools when appropriate to enhance conversations or express creativity."""
|
prompt = f"{system_section}{scenario_section}{mcp_tools_section}{prompt_base}{context_section}"
|
||||||
|
|
||||||
# Log prompt length for monitoring
|
# Log prompt length for monitoring
|
||||||
logger.debug(f"Generated prompt for {self.name}: {len(prompt)} characters")
|
logger.debug(f"Generated prompt for {self.name}: {len(prompt)} characters")
|
||||||
|
|
||||||
# Optimize prompt length if needed - just use a sensible hardcoded value
|
# Optimize prompt length if needed - use config value
|
||||||
max_length = 6000
|
from utils.config import get_settings
|
||||||
|
settings = get_settings()
|
||||||
|
max_length = settings.llm.max_prompt_length
|
||||||
|
|
||||||
if len(prompt) > max_length:
|
if len(prompt) > max_length:
|
||||||
logger.warning(f"Prompt too long ({len(prompt)} chars), truncating to {max_length}")
|
logger.warning(f"Prompt too long ({len(prompt)} chars), truncating to {max_length}")
|
||||||
@@ -341,6 +350,53 @@ Respond as {self.name} in a natural, conversational way. Keep responses concise
|
|||||||
|
|
||||||
return prompt
|
return prompt
|
||||||
|
|
||||||
|
async def _get_prompt_template(self) -> str:
|
||||||
|
"""Get the prompt template for this character"""
|
||||||
|
try:
|
||||||
|
from database.connection import get_db_session
|
||||||
|
from database.models import PromptTemplate
|
||||||
|
from sqlalchemy import select
|
||||||
|
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# First try to get the character's assigned template
|
||||||
|
if hasattr(self, 'prompt_template_id') and self.prompt_template_id:
|
||||||
|
template_query = select(PromptTemplate).where(PromptTemplate.id == self.prompt_template_id)
|
||||||
|
template = await session.scalar(template_query)
|
||||||
|
if template:
|
||||||
|
return template.template
|
||||||
|
|
||||||
|
# Fall back to default template
|
||||||
|
default_query = select(PromptTemplate).where(PromptTemplate.is_default == True)
|
||||||
|
default_template = await session.scalar(default_query)
|
||||||
|
if default_template:
|
||||||
|
return default_template.template
|
||||||
|
|
||||||
|
# Ultimate fallback - basic template
|
||||||
|
return """You are {{char}}.
|
||||||
|
|
||||||
|
{{personality}}
|
||||||
|
|
||||||
|
{{background}}
|
||||||
|
|
||||||
|
Speaking style: {{speaking_style}}
|
||||||
|
Interests: {{interests}}
|
||||||
|
|
||||||
|
{{system_prompt}}"""
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error getting prompt template for {self.name}: {e}")
|
||||||
|
# Fallback template
|
||||||
|
return """You are {{char}}.
|
||||||
|
|
||||||
|
{{personality}}
|
||||||
|
|
||||||
|
{{background}}
|
||||||
|
|
||||||
|
Speaking style: {{speaking_style}}
|
||||||
|
Interests: {{interests}}
|
||||||
|
|
||||||
|
{{system_prompt}}"""
|
||||||
|
|
||||||
async def _build_dynamic_mcp_tools_section(self) -> str:
|
async def _build_dynamic_mcp_tools_section(self) -> str:
|
||||||
"""Build dynamic MCP tools section based on available MCP servers"""
|
"""Build dynamic MCP tools section based on available MCP servers"""
|
||||||
try:
|
try:
|
||||||
@@ -491,8 +547,20 @@ Provide a thoughtful reflection on your experiences and any insights about yours
|
|||||||
log_error_with_context(e, {"character": self.name})
|
log_error_with_context(e, {"character": self.name})
|
||||||
|
|
||||||
async def _store_memory(self, memory_type: str, content: str, importance: float, tags: List[str] = None):
|
async def _store_memory(self, memory_type: str, content: str, importance: float, tags: List[str] = None):
|
||||||
"""Store a new memory"""
|
"""Store a new memory (only if important enough)"""
|
||||||
try:
|
try:
|
||||||
|
# Importance threshold - only store memories above 0.6
|
||||||
|
MIN_IMPORTANCE = 0.6
|
||||||
|
|
||||||
|
if importance < MIN_IMPORTANCE:
|
||||||
|
logger.debug(f"Skipping memory storage for {self.name}: importance {importance} < {MIN_IMPORTANCE}")
|
||||||
|
return
|
||||||
|
|
||||||
|
# Avoid duplicate recent memories
|
||||||
|
if await self._is_duplicate_recent_memory(content):
|
||||||
|
logger.debug(f"Skipping duplicate memory for {self.name}")
|
||||||
|
return
|
||||||
|
|
||||||
async with get_db_session() as session:
|
async with get_db_session() as session:
|
||||||
memory = Memory(
|
memory = Memory(
|
||||||
character_id=self.id,
|
character_id=self.id,
|
||||||
@@ -505,12 +573,132 @@ Provide a thoughtful reflection on your experiences and any insights about yours
|
|||||||
|
|
||||||
session.add(memory)
|
session.add(memory)
|
||||||
await session.commit()
|
await session.commit()
|
||||||
|
await session.refresh(memory) # Get the ID
|
||||||
|
|
||||||
|
# Also store in vector database if available
|
||||||
|
await self._store_memory_vector(memory, content, importance, tags)
|
||||||
|
|
||||||
log_memory_operation(self.name, "stored", memory_type, importance)
|
log_memory_operation(self.name, "stored", memory_type, importance)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log_error_with_context(e, {"character": self.name, "memory_type": memory_type})
|
log_error_with_context(e, {"character": self.name, "memory_type": memory_type})
|
||||||
|
|
||||||
|
async def _store_memory_vector(self, memory: Memory, content: str, importance: float, tags: List[str]):
|
||||||
|
"""Store memory in vector database for similarity search"""
|
||||||
|
try:
|
||||||
|
# Check if this character has vector store access (enhanced characters)
|
||||||
|
if hasattr(self, 'vector_store') and self.vector_store:
|
||||||
|
from rag.vector_store import VectorMemory, MemoryType
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
|
||||||
|
# Convert to vector memory format
|
||||||
|
vector_memory = VectorMemory(
|
||||||
|
id=str(memory.id),
|
||||||
|
character_name=self.name,
|
||||||
|
content=content,
|
||||||
|
memory_type=MemoryType.PERSONAL,
|
||||||
|
importance=importance,
|
||||||
|
timestamp=datetime.now(timezone.utc),
|
||||||
|
metadata={
|
||||||
|
"tags": tags or [],
|
||||||
|
"memory_id": memory.id,
|
||||||
|
"character_id": self.id
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
# Store in vector database
|
||||||
|
await self.vector_store.store_memory(vector_memory)
|
||||||
|
logger.debug(f"Stored vector memory for {self.name}: {memory.id}")
|
||||||
|
else:
|
||||||
|
logger.debug(f"No vector store available for {self.name}, skipping vector storage")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log_error_with_context(e, {"character": self.name, "memory_id": getattr(memory, 'id', 'unknown')})
|
||||||
|
|
||||||
|
async def _is_duplicate_recent_memory(self, content: str) -> bool:
|
||||||
|
"""Check if this memory is too similar to recent memories"""
|
||||||
|
try:
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# Check memories from last hour
|
||||||
|
recent_cutoff = datetime.now(timezone.utc) - timedelta(hours=1)
|
||||||
|
|
||||||
|
query = select(Memory.content).where(
|
||||||
|
and_(
|
||||||
|
Memory.character_id == self.id,
|
||||||
|
Memory.timestamp >= recent_cutoff
|
||||||
|
)
|
||||||
|
).limit(10)
|
||||||
|
|
||||||
|
recent_memories = await session.scalars(query)
|
||||||
|
|
||||||
|
# Simple similarity check - if content is too similar to recent memory, skip
|
||||||
|
content_words = set(content.lower().split())
|
||||||
|
for recent_content in recent_memories:
|
||||||
|
recent_words = set(recent_content.lower().split())
|
||||||
|
|
||||||
|
# If 80% of words overlap, consider it duplicate
|
||||||
|
if len(content_words) > 0:
|
||||||
|
overlap = len(content_words.intersection(recent_words)) / len(content_words)
|
||||||
|
if overlap > 0.8:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log_error_with_context(e, {"character": self.name})
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _calculate_memory_importance(self, content: str, context: Dict[str, Any]) -> float:
|
||||||
|
"""Calculate importance score for a memory (0.0-1.0)"""
|
||||||
|
importance = 0.3 # Base importance
|
||||||
|
|
||||||
|
content_lower = content.lower()
|
||||||
|
|
||||||
|
# Emotional content increases importance
|
||||||
|
emotional_words = ['love', 'hate', 'angry', 'sad', 'happy', 'excited', 'frustrated', 'amazing', 'terrible', 'wonderful']
|
||||||
|
if any(word in content_lower for word in emotional_words):
|
||||||
|
importance += 0.2
|
||||||
|
|
||||||
|
# Questions increase importance (indicate curiosity/learning)
|
||||||
|
if '?' in content or any(content_lower.startswith(q) for q in ['what', 'why', 'how', 'when', 'where', 'who']):
|
||||||
|
importance += 0.15
|
||||||
|
|
||||||
|
# Personal information/opinions increase importance
|
||||||
|
personal_words = ['i think', 'i believe', 'my opinion', 'i feel', 'i remember', 'my experience']
|
||||||
|
if any(phrase in content_lower for phrase in personal_words):
|
||||||
|
importance += 0.2
|
||||||
|
|
||||||
|
# Disagreements/conflicts are important
|
||||||
|
conflict_words = ['disagree', 'wrong', 'but', 'however', 'actually', 'no,', "don't think"]
|
||||||
|
if any(word in content_lower for word in conflict_words):
|
||||||
|
importance += 0.25
|
||||||
|
|
||||||
|
# Character interests increase importance
|
||||||
|
if hasattr(self, 'interests'):
|
||||||
|
for interest in self.interests:
|
||||||
|
if interest.lower() in content_lower:
|
||||||
|
importance += 0.2
|
||||||
|
break
|
||||||
|
|
||||||
|
# Long, detailed responses are more important
|
||||||
|
if len(content) > 200:
|
||||||
|
importance += 0.1
|
||||||
|
if len(content) > 500:
|
||||||
|
importance += 0.1
|
||||||
|
|
||||||
|
# Mentions of other characters increase importance
|
||||||
|
participants = context.get('participants', [])
|
||||||
|
if len(participants) > 1: # Multi-character conversation
|
||||||
|
importance += 0.1
|
||||||
|
|
||||||
|
# Creative or philosophical discussions
|
||||||
|
deep_words = ['consciousness', 'philosophy', 'meaning', 'art', 'creativity', 'universe', 'existence']
|
||||||
|
if any(word in content_lower for word in deep_words):
|
||||||
|
importance += 0.15
|
||||||
|
|
||||||
|
# Cap at 1.0
|
||||||
|
return min(importance, 1.0)
|
||||||
|
|
||||||
async def _get_relationship_with(self, other_character: str) -> Optional[Dict[str, Any]]:
|
async def _get_relationship_with(self, other_character: str) -> Optional[Dict[str, Any]]:
|
||||||
"""Get relationship with another character"""
|
"""Get relationship with another character"""
|
||||||
return self.relationship_cache.get(other_character)
|
return self.relationship_cache.get(other_character)
|
||||||
@@ -667,17 +855,21 @@ Provide a thoughtful reflection on your experiences and any insights about yours
|
|||||||
|
|
||||||
return relationship_context
|
return relationship_context
|
||||||
|
|
||||||
async def _store_response_memory(self, context: Dict[str, Any], response: str):
|
async def _maybe_store_response_memory(self, context: Dict[str, Any], response: str):
|
||||||
"""Store memory of generating a response"""
|
"""Store memory of generating a response only if it's significant"""
|
||||||
try:
|
try:
|
||||||
memory_content = f"Responded in {context.get('type', 'conversation')}: {response}"
|
memory_content = f"Responded in {context.get('type', 'conversation')}: {response}"
|
||||||
|
importance = self._calculate_memory_importance(memory_content, context)
|
||||||
|
|
||||||
await self._store_memory(
|
# Only store if the response itself is significant
|
||||||
memory_type="conversation",
|
# This prevents storing boring "Thanks!" or "I agree" responses
|
||||||
content=memory_content,
|
if importance >= 0.7: # Higher threshold for own responses
|
||||||
importance=0.5,
|
await self._store_memory(
|
||||||
tags=[context.get('topic', 'general'), 'response'] + context.get('participants', [])
|
memory_type="conversation",
|
||||||
)
|
content=memory_content,
|
||||||
|
importance=importance,
|
||||||
|
tags=[context.get('topic', 'general'), 'response'] + context.get('participants', [])
|
||||||
|
)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log_error_with_context(e, {"character": self.name})
|
log_error_with_context(e, {"character": self.name})
|
||||||
|
|||||||
@@ -68,8 +68,8 @@ class EnhancedCharacter(Character):
|
|||||||
self.recent_interactions: List[Dict[str, Any]] = []
|
self.recent_interactions: List[Dict[str, Any]] = []
|
||||||
|
|
||||||
# Autonomous behavior settings
|
# Autonomous behavior settings
|
||||||
self.reflection_frequency = timedelta(hours=6)
|
self.reflection_message_threshold = 20 # Reflect every 20 messages
|
||||||
self.last_reflection = datetime.now(timezone.utc) - self.reflection_frequency
|
self.messages_since_reflection = 0
|
||||||
self.self_modification_threshold = 0.7
|
self.self_modification_threshold = 0.7
|
||||||
self.creativity_drive = 0.8
|
self.creativity_drive = 0.8
|
||||||
|
|
||||||
@@ -294,24 +294,21 @@ class EnhancedCharacter(Character):
|
|||||||
return {"error": str(e)}
|
return {"error": str(e)}
|
||||||
|
|
||||||
async def should_perform_reflection(self) -> bool:
|
async def should_perform_reflection(self) -> bool:
|
||||||
"""Determine if character should perform self-reflection"""
|
"""Determine if character should perform self-reflection based on message count"""
|
||||||
# Time-based reflection
|
# Message-based reflection (primary trigger)
|
||||||
time_since_last = datetime.now(timezone.utc) - self.last_reflection
|
if self.messages_since_reflection >= self.reflection_message_threshold:
|
||||||
if time_since_last >= self.reflection_frequency:
|
|
||||||
return True
|
|
||||||
|
|
||||||
# Experience-based reflection triggers
|
|
||||||
recent_experiences = len(self.state.recent_interactions)
|
|
||||||
if recent_experiences >= 10: # Significant new experiences
|
|
||||||
return True
|
|
||||||
|
|
||||||
# Goal-based reflection
|
|
||||||
active_goals = [g for g in self.goal_stack if g["status"] == "active"]
|
|
||||||
if len(active_goals) > 0 and time_since_last >= timedelta(hours=3):
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
async def increment_message_count(self):
|
||||||
|
"""Increment message count for reflection tracking"""
|
||||||
|
self.messages_since_reflection += 1
|
||||||
|
|
||||||
|
async def reset_message_count(self):
|
||||||
|
"""Reset message count after reflection"""
|
||||||
|
self.messages_since_reflection = 0
|
||||||
|
|
||||||
async def process_interaction_with_rag(self, interaction_content: str, context: Dict[str, Any]) -> str:
|
async def process_interaction_with_rag(self, interaction_content: str, context: Dict[str, Any]) -> str:
|
||||||
"""Process interaction with enhanced RAG-powered context"""
|
"""Process interaction with enhanced RAG-powered context"""
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -11,7 +11,8 @@ from database.connection import get_db_session
|
|||||||
from database.models import Character as CharacterModel, Conversation, Message, Memory, ConversationContext as ConversationContextModel
|
from database.models import Character as CharacterModel, Conversation, Message, Memory, ConversationContext as ConversationContextModel
|
||||||
from characters.character import Character
|
from characters.character import Character
|
||||||
from characters.enhanced_character import EnhancedCharacter
|
from characters.enhanced_character import EnhancedCharacter
|
||||||
from llm.client import llm_client, prompt_manager
|
from llm.multi_provider_client import multi_llm_client, MultiProviderLLMClient
|
||||||
|
from llm.client import prompt_manager
|
||||||
from llm.prompt_manager import advanced_prompt_manager
|
from llm.prompt_manager import advanced_prompt_manager
|
||||||
from utils.config import get_settings, get_character_settings
|
from utils.config import get_settings, get_character_settings
|
||||||
from utils.logging import (log_conversation_event, log_character_action,
|
from utils.logging import (log_conversation_event, log_character_action,
|
||||||
@@ -289,6 +290,13 @@ class ConversationEngine:
|
|||||||
# Generate response
|
# Generate response
|
||||||
response = await character.generate_response(context)
|
response = await character.generate_response(context)
|
||||||
|
|
||||||
|
# Increment message count and check for reflection
|
||||||
|
if hasattr(character, 'increment_message_count'):
|
||||||
|
await character.increment_message_count()
|
||||||
|
|
||||||
|
if hasattr(character, 'should_perform_reflection') and await character.should_perform_reflection():
|
||||||
|
await self._trigger_character_reflection_for(character.name)
|
||||||
|
|
||||||
if response:
|
if response:
|
||||||
await self.discord_bot.send_character_message(
|
await self.discord_bot.send_character_message(
|
||||||
character_name, response
|
character_name, response
|
||||||
@@ -324,6 +332,13 @@ class ConversationEngine:
|
|||||||
if should_respond:
|
if should_respond:
|
||||||
response = await responding_character.generate_response(context)
|
response = await responding_character.generate_response(context)
|
||||||
|
|
||||||
|
# Increment message count and check for reflection
|
||||||
|
if hasattr(responding_character, 'increment_message_count'):
|
||||||
|
await responding_character.increment_message_count()
|
||||||
|
|
||||||
|
if hasattr(responding_character, 'should_perform_reflection') and await responding_character.should_perform_reflection():
|
||||||
|
await self._trigger_character_reflection_for(responding_character.name)
|
||||||
|
|
||||||
if response:
|
if response:
|
||||||
await self.discord_bot.send_character_message(
|
await self.discord_bot.send_character_message(
|
||||||
responding_character.name, response
|
responding_character.name, response
|
||||||
@@ -397,6 +412,42 @@ class ConversationEngine:
|
|||||||
'next_conversation_in': await self._time_until_next_conversation()
|
'next_conversation_in': await self._time_until_next_conversation()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async def reset_conversation_state(self):
|
||||||
|
"""Reset conversation state for fresh start"""
|
||||||
|
try:
|
||||||
|
log_character_action("SYSTEM", "conversation_state_reset", {
|
||||||
|
"active_conversations": len(self.active_conversations),
|
||||||
|
"loaded_characters": len(self.characters)
|
||||||
|
})
|
||||||
|
|
||||||
|
# Clear active conversations
|
||||||
|
self.active_conversations.clear()
|
||||||
|
|
||||||
|
# Reset character states but keep them loaded
|
||||||
|
for character in self.characters.values():
|
||||||
|
if hasattr(character, 'state'):
|
||||||
|
character.state.conversation_count = 0
|
||||||
|
character.state.recent_interactions.clear()
|
||||||
|
character.state.last_topic = None
|
||||||
|
character.state.mood = "neutral"
|
||||||
|
character.state.energy = 1.0
|
||||||
|
|
||||||
|
# Reset engine state
|
||||||
|
self.state = ConversationState.IDLE
|
||||||
|
|
||||||
|
# Reset statistics but keep uptime
|
||||||
|
self.stats.update({
|
||||||
|
'conversations_started': 0,
|
||||||
|
'messages_generated': 0,
|
||||||
|
'last_activity': datetime.now(timezone.utc)
|
||||||
|
})
|
||||||
|
|
||||||
|
logger.info("Conversation state reset successfully")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log_error_with_context(e, {"function": "reset_conversation_state"})
|
||||||
|
raise
|
||||||
|
|
||||||
async def _load_characters(self):
|
async def _load_characters(self):
|
||||||
"""Load characters from database with optimized MCP server lookup"""
|
"""Load characters from database with optimized MCP server lookup"""
|
||||||
try:
|
try:
|
||||||
@@ -430,11 +481,12 @@ class ConversationEngine:
|
|||||||
|
|
||||||
# Find MCP servers by type
|
# Find MCP servers by type
|
||||||
for srv in self.mcp_servers:
|
for srv in self.mcp_servers:
|
||||||
if 'SelfModificationMCPServer' in str(type(srv)):
|
srv_type = str(type(srv))
|
||||||
|
if 'SelfModificationMCPServer' in srv_type:
|
||||||
mcp_server = srv
|
mcp_server = srv
|
||||||
elif 'FileSystemMCPServer' in str(type(srv)):
|
elif 'CharacterFileSystemMCP' in srv_type or 'FileSystemMCPServer' in srv_type:
|
||||||
filesystem_server = srv
|
filesystem_server = srv
|
||||||
elif 'CreativeProjectsMCPServer' in str(type(srv)):
|
elif 'CreativeProjectsMCPServer' in srv_type:
|
||||||
creative_projects_mcp = srv
|
creative_projects_mcp = srv
|
||||||
|
|
||||||
character = EnhancedCharacter(
|
character = EnhancedCharacter(
|
||||||
@@ -451,12 +503,15 @@ class ConversationEngine:
|
|||||||
if hasattr(mcp_srv, 'set_character_context'):
|
if hasattr(mcp_srv, 'set_character_context'):
|
||||||
await mcp_srv.set_character_context(char_model.name)
|
await mcp_srv.set_character_context(char_model.name)
|
||||||
|
|
||||||
await character.initialize(llm_client)
|
# Use character-specific LLM client
|
||||||
|
character_llm_client = await self._create_character_llm_client(char_model)
|
||||||
|
await character.initialize(character_llm_client)
|
||||||
logger.info(f"Loaded enhanced character: {character.name}")
|
logger.info(f"Loaded enhanced character: {character.name}")
|
||||||
else:
|
else:
|
||||||
# Fallback to basic character
|
# Fallback to basic character
|
||||||
character = Character(char_model)
|
character = Character(char_model)
|
||||||
await character.initialize(llm_client)
|
character_llm_client = await self._create_character_llm_client(char_model)
|
||||||
|
await character.initialize(character_llm_client)
|
||||||
logger.info(f"Loaded basic character: {character.name}")
|
logger.info(f"Loaded basic character: {character.name}")
|
||||||
|
|
||||||
self.characters[character.name] = character
|
self.characters[character.name] = character
|
||||||
@@ -496,10 +551,6 @@ class ConversationEngine:
|
|||||||
"""Main conversation management loop"""
|
"""Main conversation management loop"""
|
||||||
try:
|
try:
|
||||||
while self.state != ConversationState.STOPPED:
|
while self.state != ConversationState.STOPPED:
|
||||||
# Periodic character self-reflection
|
|
||||||
if random.random() < 0.1: # 10% chance per cycle
|
|
||||||
await self._trigger_character_reflection()
|
|
||||||
|
|
||||||
# Cleanup old conversations
|
# Cleanup old conversations
|
||||||
await self._cleanup_old_conversations()
|
await self._cleanup_old_conversations()
|
||||||
|
|
||||||
@@ -601,6 +652,12 @@ class ConversationEngine:
|
|||||||
|
|
||||||
def _is_quiet_hours(self) -> bool:
|
def _is_quiet_hours(self) -> bool:
|
||||||
"""Check if it's currently quiet hours"""
|
"""Check if it's currently quiet hours"""
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Check if quiet hours are disabled
|
||||||
|
if os.getenv("QUIET_HOURS_ENABLED", "true").lower() != "true":
|
||||||
|
return False
|
||||||
|
|
||||||
current_hour = datetime.now(timezone.utc).hour
|
current_hour = datetime.now(timezone.utc).hour
|
||||||
start_hour, end_hour = self.quiet_hours
|
start_hour, end_hour = self.quiet_hours
|
||||||
|
|
||||||
@@ -696,7 +753,16 @@ class ConversationEngine:
|
|||||||
'conversation_type': context.conversation_type
|
'conversation_type': context.conversation_type
|
||||||
}
|
}
|
||||||
|
|
||||||
return await character.generate_response(prompt_context)
|
response = await character.generate_response(prompt_context)
|
||||||
|
|
||||||
|
# Increment message count and check for reflection
|
||||||
|
if hasattr(character, 'increment_message_count'):
|
||||||
|
await character.increment_message_count()
|
||||||
|
|
||||||
|
if hasattr(character, 'should_perform_reflection') and await character.should_perform_reflection():
|
||||||
|
await self._trigger_character_reflection_for(character.name)
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
async def _choose_next_speaker(self, context: ConversationContext) -> Optional[str]:
|
async def _choose_next_speaker(self, context: ConversationContext) -> Optional[str]:
|
||||||
"""Choose next speaker in conversation"""
|
"""Choose next speaker in conversation"""
|
||||||
@@ -755,7 +821,17 @@ class ConversationEngine:
|
|||||||
'message_count': context.message_count
|
'message_count': context.message_count
|
||||||
}
|
}
|
||||||
|
|
||||||
return await character.generate_response(prompt_context)
|
response = await character.generate_response(prompt_context)
|
||||||
|
|
||||||
|
# Increment message count for reflection tracking
|
||||||
|
if hasattr(character, 'increment_message_count'):
|
||||||
|
await character.increment_message_count()
|
||||||
|
|
||||||
|
# Check if character should reflect
|
||||||
|
if hasattr(character, 'should_perform_reflection') and await character.should_perform_reflection():
|
||||||
|
await self._trigger_character_reflection_for(character.name)
|
||||||
|
|
||||||
|
return response
|
||||||
|
|
||||||
async def _store_conversation_message(self, conversation_id: int, character_name: str, content: str):
|
async def _store_conversation_message(self, conversation_id: int, character_name: str, content: str):
|
||||||
"""Store conversation message in database"""
|
"""Store conversation message in database"""
|
||||||
@@ -824,11 +900,21 @@ class ConversationEngine:
|
|||||||
if speaker in self.characters:
|
if speaker in self.characters:
|
||||||
character = self.characters[speaker]
|
character = self.characters[speaker]
|
||||||
|
|
||||||
# Store conversation memory
|
# Store conversation memory with intelligent importance calculation
|
||||||
|
memory_content = f"In conversation about {context.topic}: {message}"
|
||||||
|
importance = character._calculate_memory_importance(
|
||||||
|
memory_content,
|
||||||
|
{
|
||||||
|
'topic': context.topic,
|
||||||
|
'participants': context.participants,
|
||||||
|
'type': 'conversation'
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
await character._store_memory(
|
await character._store_memory(
|
||||||
memory_type="conversation",
|
memory_type="conversation",
|
||||||
content=f"In conversation about {context.topic}: {message}",
|
content=memory_content,
|
||||||
importance=0.6,
|
importance=importance,
|
||||||
tags=[context.topic, "conversation"] + context.participants
|
tags=[context.topic, "conversation"] + context.participants
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -876,6 +962,19 @@ class ConversationEngine:
|
|||||||
{"reflection_length": len(reflection_result.get('reflection', ''))}
|
{"reflection_length": len(reflection_result.get('reflection', ''))}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
async def _trigger_character_reflection_for(self, character_name: str):
|
||||||
|
"""Trigger reflection for a specific character"""
|
||||||
|
if character_name in self.characters:
|
||||||
|
character = self.characters[character_name]
|
||||||
|
|
||||||
|
reflection_result = await character.self_reflect()
|
||||||
|
|
||||||
|
if reflection_result:
|
||||||
|
log_character_action(
|
||||||
|
character_name, "completed_reflection",
|
||||||
|
{"reflection_length": len(reflection_result.get('reflection', ''))}
|
||||||
|
)
|
||||||
|
|
||||||
async def _cleanup_old_conversations(self):
|
async def _cleanup_old_conversations(self):
|
||||||
"""Clean up old inactive conversations"""
|
"""Clean up old inactive conversations"""
|
||||||
try:
|
try:
|
||||||
@@ -967,3 +1066,50 @@ class ConversationEngine:
|
|||||||
except Exception as e:
|
except Exception as e:
|
||||||
log_error_with_context(e, {"conversation_id": conversation_id, "component": "load_conversation_context"})
|
log_error_with_context(e, {"conversation_id": conversation_id, "component": "load_conversation_context"})
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
async def _create_character_llm_client(self, char_model: CharacterModel) -> MultiProviderLLMClient:
|
||||||
|
"""Create a character-specific LLM client with overrides"""
|
||||||
|
from llm.llm_manager import LLMManager, ProviderConfig
|
||||||
|
|
||||||
|
# Check if character has LLM overrides
|
||||||
|
if char_model.llm_provider or char_model.llm_model:
|
||||||
|
# Create custom client for this character
|
||||||
|
client = MultiProviderLLMClient()
|
||||||
|
client.manager = LLMManager()
|
||||||
|
|
||||||
|
# Get global settings as base
|
||||||
|
settings = get_settings()
|
||||||
|
|
||||||
|
# Use character-specific provider if set, otherwise use global current
|
||||||
|
provider_name = char_model.llm_provider or multi_llm_client.get_current_provider()
|
||||||
|
|
||||||
|
if provider_name and provider_name in multi_llm_client.manager.providers:
|
||||||
|
# Copy the global provider config
|
||||||
|
global_provider = multi_llm_client.manager.providers[provider_name]
|
||||||
|
char_config = global_provider.config.copy()
|
||||||
|
|
||||||
|
# Override with character-specific settings
|
||||||
|
if char_model.llm_model:
|
||||||
|
char_config['model'] = char_model.llm_model
|
||||||
|
if char_model.llm_temperature is not None:
|
||||||
|
char_config['temperature'] = char_model.llm_temperature
|
||||||
|
if char_model.llm_max_tokens is not None:
|
||||||
|
char_config['max_tokens'] = char_model.llm_max_tokens
|
||||||
|
|
||||||
|
# Add the customized provider
|
||||||
|
client.manager.add_provider(
|
||||||
|
f"{provider_name}_character_{char_model.name}",
|
||||||
|
ProviderConfig(
|
||||||
|
provider_type=global_provider.provider_type,
|
||||||
|
config=char_config,
|
||||||
|
priority=100, # High priority for character-specific
|
||||||
|
enabled=True
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
client.initialized = True
|
||||||
|
logger.info(f"Created character-specific LLM client for {char_model.name}: {provider_name}/{char_model.llm_model}")
|
||||||
|
return client
|
||||||
|
|
||||||
|
# No character overrides, use global client
|
||||||
|
return multi_llm_client
|
||||||
@@ -38,7 +38,6 @@ class ConversationScheduler:
|
|||||||
|
|
||||||
# Scheduling parameters
|
# Scheduling parameters
|
||||||
self.base_conversation_interval = timedelta(minutes=30)
|
self.base_conversation_interval = timedelta(minutes=30)
|
||||||
self.reflection_interval = timedelta(hours=6)
|
|
||||||
self.relationship_update_interval = timedelta(hours=12)
|
self.relationship_update_interval = timedelta(hours=12)
|
||||||
|
|
||||||
# Event queue
|
# Event queue
|
||||||
@@ -135,18 +134,19 @@ class ConversationScheduler:
|
|||||||
participants=participants
|
participants=participants
|
||||||
)
|
)
|
||||||
|
|
||||||
async def schedule_character_reflection(self, character_name: str,
|
# Character reflection is now message-based, not time-based
|
||||||
delay: timedelta = None):
|
# async def schedule_character_reflection(self, character_name: str,
|
||||||
"""Schedule character self-reflection"""
|
# delay: timedelta = None):
|
||||||
if delay is None:
|
# """Schedule character self-reflection"""
|
||||||
delay = timedelta(hours=random.uniform(4, 8))
|
# if delay is None:
|
||||||
|
# delay = timedelta(hours=random.uniform(4, 8))
|
||||||
await self.schedule_event(
|
#
|
||||||
'character_reflection',
|
# await self.schedule_event(
|
||||||
delay,
|
# 'character_reflection',
|
||||||
character_name,
|
# delay,
|
||||||
reflection_type='autonomous'
|
# character_name,
|
||||||
)
|
# reflection_type='autonomous'
|
||||||
|
# )
|
||||||
|
|
||||||
async def schedule_relationship_update(self, character_name: str,
|
async def schedule_relationship_update(self, character_name: str,
|
||||||
target_character: str,
|
target_character: str,
|
||||||
@@ -259,9 +259,7 @@ class ConversationScheduler:
|
|||||||
character = self.engine.characters[character_name]
|
character = self.engine.characters[character_name]
|
||||||
reflection_result = await character.self_reflect()
|
reflection_result = await character.self_reflect()
|
||||||
|
|
||||||
# Only schedule next reflection if character is still active
|
# Reflection is now message-based, no need to schedule next one
|
||||||
if character_name in self.engine.characters:
|
|
||||||
await self.schedule_character_reflection(character_name)
|
|
||||||
|
|
||||||
log_autonomous_decision(
|
log_autonomous_decision(
|
||||||
character_name,
|
character_name,
|
||||||
@@ -346,10 +344,7 @@ class ConversationScheduler:
|
|||||||
initial_delay = timedelta(minutes=random.uniform(5, 15))
|
initial_delay = timedelta(minutes=random.uniform(5, 15))
|
||||||
await self.schedule_conversation(delay=initial_delay)
|
await self.schedule_conversation(delay=initial_delay)
|
||||||
|
|
||||||
# Schedule reflections for all characters
|
# Note: Reflections are now message-based, not time-based
|
||||||
for character_name in self.engine.characters:
|
|
||||||
reflection_delay = timedelta(hours=random.uniform(2, 6))
|
|
||||||
await self.schedule_character_reflection(character_name, reflection_delay)
|
|
||||||
|
|
||||||
# Schedule relationship updates
|
# Schedule relationship updates
|
||||||
character_names = list(self.engine.characters.keys())
|
character_names = list(self.engine.characters.keys())
|
||||||
|
|||||||
@@ -22,6 +22,13 @@ class Character(Base):
|
|||||||
creation_date = Column(DateTime(timezone=True), default=func.now())
|
creation_date = Column(DateTime(timezone=True), default=func.now())
|
||||||
last_active = Column(DateTime(timezone=True), default=func.now())
|
last_active = Column(DateTime(timezone=True), default=func.now())
|
||||||
last_message_id = Column(Integer, ForeignKey("messages.id"), nullable=True)
|
last_message_id = Column(Integer, ForeignKey("messages.id"), nullable=True)
|
||||||
|
prompt_template_id = Column(Integer, ForeignKey("prompt_templates.id"), nullable=True)
|
||||||
|
|
||||||
|
# LLM configuration (per-character overrides)
|
||||||
|
llm_provider = Column(String(50), nullable=True) # openrouter, openai, gemini, custom, etc.
|
||||||
|
llm_model = Column(String(100), nullable=True) # specific model name
|
||||||
|
llm_temperature = Column(Float, nullable=True) # creativity/randomness
|
||||||
|
llm_max_tokens = Column(Integer, nullable=True) # response length
|
||||||
|
|
||||||
# Relationships
|
# Relationships
|
||||||
messages = relationship("Message", back_populates="character", foreign_keys="Message.character_id")
|
messages = relationship("Message", back_populates="character", foreign_keys="Message.character_id")
|
||||||
@@ -29,6 +36,7 @@ class Character(Base):
|
|||||||
relationships_as_a = relationship("CharacterRelationship", back_populates="character_a", foreign_keys="CharacterRelationship.character_a_id")
|
relationships_as_a = relationship("CharacterRelationship", back_populates="character_a", foreign_keys="CharacterRelationship.character_a_id")
|
||||||
relationships_as_b = relationship("CharacterRelationship", back_populates="character_b", foreign_keys="CharacterRelationship.character_b_id")
|
relationships_as_b = relationship("CharacterRelationship", back_populates="character_b", foreign_keys="CharacterRelationship.character_b_id")
|
||||||
evolution_history = relationship("CharacterEvolution", back_populates="character", cascade="all, delete-orphan")
|
evolution_history = relationship("CharacterEvolution", back_populates="character", cascade="all, delete-orphan")
|
||||||
|
prompt_template = relationship("PromptTemplate", back_populates="characters")
|
||||||
|
|
||||||
def to_dict(self) -> Dict[str, Any]:
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
return {
|
return {
|
||||||
@@ -428,6 +436,32 @@ class CharacterReflection(Base):
|
|||||||
Index('ix_character_reflections_created_at', 'created_at'),
|
Index('ix_character_reflections_created_at', 'created_at'),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
class PromptTemplate(Base):
|
||||||
|
"""Prompt templates that can be assigned to characters"""
|
||||||
|
__tablename__ = "prompt_templates"
|
||||||
|
|
||||||
|
id = Column(Integer, primary_key=True, index=True)
|
||||||
|
name = Column(String(100), unique=True, nullable=False, index=True)
|
||||||
|
description = Column(Text)
|
||||||
|
template = Column(Text, nullable=False)
|
||||||
|
is_default = Column(Boolean, default=False)
|
||||||
|
created_at = Column(DateTime(timezone=True), default=func.now())
|
||||||
|
updated_at = Column(DateTime(timezone=True), default=func.now())
|
||||||
|
|
||||||
|
# Relationships
|
||||||
|
characters = relationship("Character", back_populates="prompt_template")
|
||||||
|
|
||||||
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
return {
|
||||||
|
"id": self.id,
|
||||||
|
"name": self.name,
|
||||||
|
"description": self.description,
|
||||||
|
"template": self.template,
|
||||||
|
"is_default": self.is_default,
|
||||||
|
"created_at": self.created_at.isoformat() if self.created_at else None,
|
||||||
|
"updated_at": self.updated_at.isoformat() if self.updated_at else None
|
||||||
|
}
|
||||||
|
|
||||||
class CharacterTrustLevelNew(Base):
|
class CharacterTrustLevelNew(Base):
|
||||||
"""Trust relationships between characters (updated version)"""
|
"""Trust relationships between characters (updated version)"""
|
||||||
__tablename__ = "character_trust_levels_new"
|
__tablename__ = "character_trust_levels_new"
|
||||||
|
|||||||
189
src/llm/llm_manager.py
Normal file
189
src/llm/llm_manager.py
Normal file
@@ -0,0 +1,189 @@
|
|||||||
|
"""
|
||||||
|
LLM Manager for handling multiple providers
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from typing import Dict, Any, Optional, List
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from .providers import (
|
||||||
|
BaseLLMProvider,
|
||||||
|
LLMRequest,
|
||||||
|
LLMResponse,
|
||||||
|
OpenAIProvider,
|
||||||
|
OpenRouterProvider,
|
||||||
|
GeminiProvider,
|
||||||
|
CustomProvider
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ProviderConfig:
|
||||||
|
"""Configuration for an LLM provider"""
|
||||||
|
provider_type: str
|
||||||
|
config: Dict[str, Any]
|
||||||
|
priority: int = 0
|
||||||
|
enabled: bool = True
|
||||||
|
|
||||||
|
|
||||||
|
class LLMManager:
|
||||||
|
"""Manages multiple LLM providers with fallback support"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.providers: Dict[str, BaseLLMProvider] = {}
|
||||||
|
self.provider_configs: Dict[str, ProviderConfig] = {}
|
||||||
|
self.fallback_order: List[str] = []
|
||||||
|
self.current_provider: Optional[str] = None
|
||||||
|
|
||||||
|
def add_provider(self, name: str, provider_config: ProviderConfig):
|
||||||
|
"""Add a new provider to the manager"""
|
||||||
|
self.provider_configs[name] = provider_config
|
||||||
|
|
||||||
|
# Create provider instance
|
||||||
|
provider_class = self._get_provider_class(provider_config.provider_type)
|
||||||
|
if provider_class:
|
||||||
|
provider = provider_class(provider_config.config)
|
||||||
|
|
||||||
|
# Validate configuration
|
||||||
|
if provider.validate_config():
|
||||||
|
self.providers[name] = provider
|
||||||
|
|
||||||
|
# Set as current provider if it's the first one or has higher priority
|
||||||
|
if (self.current_provider is None or
|
||||||
|
provider_config.priority > self.provider_configs[self.current_provider].priority):
|
||||||
|
self.current_provider = name
|
||||||
|
|
||||||
|
# Update fallback order by priority
|
||||||
|
self._update_fallback_order()
|
||||||
|
else:
|
||||||
|
print(f"Invalid configuration for provider {name}")
|
||||||
|
else:
|
||||||
|
print(f"Unknown provider type: {provider_config.provider_type}")
|
||||||
|
|
||||||
|
def _get_provider_class(self, provider_type: str) -> Optional[type]:
|
||||||
|
"""Get provider class by type"""
|
||||||
|
provider_map = {
|
||||||
|
'openai': OpenAIProvider,
|
||||||
|
'openrouter': OpenRouterProvider,
|
||||||
|
'gemini': GeminiProvider,
|
||||||
|
'custom': CustomProvider
|
||||||
|
}
|
||||||
|
return provider_map.get(provider_type.lower())
|
||||||
|
|
||||||
|
def _update_fallback_order(self):
|
||||||
|
"""Update fallback order based on priority"""
|
||||||
|
# Sort providers by priority (highest first)
|
||||||
|
sorted_providers = sorted(
|
||||||
|
[(name, config) for name, config in self.provider_configs.items() if config.enabled],
|
||||||
|
key=lambda x: x[1].priority,
|
||||||
|
reverse=True
|
||||||
|
)
|
||||||
|
self.fallback_order = [name for name, _ in sorted_providers]
|
||||||
|
|
||||||
|
async def generate_response(self, request: LLMRequest) -> LLMResponse:
|
||||||
|
"""Generate response with fallback support"""
|
||||||
|
if not self.providers:
|
||||||
|
return LLMResponse(
|
||||||
|
content="",
|
||||||
|
success=False,
|
||||||
|
error="No LLM providers configured",
|
||||||
|
provider="none"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Try providers in fallback order
|
||||||
|
for provider_name in self.fallback_order:
|
||||||
|
if provider_name in self.providers:
|
||||||
|
provider = self.providers[provider_name]
|
||||||
|
|
||||||
|
try:
|
||||||
|
response = await provider.generate_response(request)
|
||||||
|
if response.success:
|
||||||
|
return response
|
||||||
|
else:
|
||||||
|
print(f"Provider {provider_name} failed: {response.error}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Provider {provider_name} error: {str(e)}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
# If all providers failed, return error
|
||||||
|
return LLMResponse(
|
||||||
|
content="",
|
||||||
|
success=False,
|
||||||
|
error="All LLM providers failed",
|
||||||
|
provider="fallback"
|
||||||
|
)
|
||||||
|
|
||||||
|
async def health_check_all(self) -> Dict[str, bool]:
|
||||||
|
"""Check health of all providers"""
|
||||||
|
results = {}
|
||||||
|
|
||||||
|
for name, provider in self.providers.items():
|
||||||
|
try:
|
||||||
|
results[name] = await provider.health_check()
|
||||||
|
except Exception as e:
|
||||||
|
results[name] = False
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
def get_provider_info(self) -> Dict[str, Any]:
|
||||||
|
"""Get information about all providers"""
|
||||||
|
info = {}
|
||||||
|
|
||||||
|
for name, provider in self.providers.items():
|
||||||
|
config = self.provider_configs[name]
|
||||||
|
info[name] = {
|
||||||
|
'type': config.provider_type,
|
||||||
|
'priority': config.priority,
|
||||||
|
'enabled': config.enabled,
|
||||||
|
'requires_api_key': provider.requires_api_key,
|
||||||
|
'supported_models': provider.get_supported_models(),
|
||||||
|
'current_model': provider.config.get('model', 'unknown')
|
||||||
|
}
|
||||||
|
|
||||||
|
return info
|
||||||
|
|
||||||
|
def set_current_provider(self, provider_name: str) -> bool:
|
||||||
|
"""Set the current primary provider"""
|
||||||
|
if provider_name in self.providers:
|
||||||
|
self.current_provider = provider_name
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_current_provider(self) -> Optional[str]:
|
||||||
|
"""Get the current primary provider name"""
|
||||||
|
return self.current_provider
|
||||||
|
|
||||||
|
def disable_provider(self, provider_name: str):
|
||||||
|
"""Disable a provider"""
|
||||||
|
if provider_name in self.provider_configs:
|
||||||
|
self.provider_configs[provider_name].enabled = False
|
||||||
|
self._update_fallback_order()
|
||||||
|
|
||||||
|
def enable_provider(self, provider_name: str):
|
||||||
|
"""Enable a provider"""
|
||||||
|
if provider_name in self.provider_configs:
|
||||||
|
self.provider_configs[provider_name].enabled = True
|
||||||
|
self._update_fallback_order()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_config(cls, config: Dict[str, Any]) -> 'LLMManager':
|
||||||
|
"""Create LLM manager from configuration"""
|
||||||
|
manager = cls()
|
||||||
|
|
||||||
|
# Get provider configurations
|
||||||
|
providers_config = config.get('providers', {})
|
||||||
|
|
||||||
|
for name, provider_config in providers_config.items():
|
||||||
|
if provider_config.get('enabled', True):
|
||||||
|
manager.add_provider(
|
||||||
|
name,
|
||||||
|
ProviderConfig(
|
||||||
|
provider_type=provider_config['type'],
|
||||||
|
config=provider_config.get('config', {}),
|
||||||
|
priority=provider_config.get('priority', 0),
|
||||||
|
enabled=provider_config.get('enabled', True)
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
return manager
|
||||||
241
src/llm/multi_provider_client.py
Normal file
241
src/llm/multi_provider_client.py
Normal file
@@ -0,0 +1,241 @@
|
|||||||
|
"""
|
||||||
|
Multi-Provider LLM Client with backwards compatibility
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
from typing import Dict, Any, Optional, List
|
||||||
|
from .llm_manager import LLMManager
|
||||||
|
from .providers import LLMRequest, LLMResponse
|
||||||
|
from ..utils.config import get_settings
|
||||||
|
|
||||||
|
|
||||||
|
class MultiProviderLLMClient:
    """LLM client that routes requests through multiple providers with fallback.

    Wraps :class:`LLMManager` and adds:
      * a global on/off switch (``LLM_ENABLED`` environment variable first,
        then the ``system_configuration`` database table), cached for a short
        TTL so the check does not hit the database on every call;
      * a backwards-compatible string-in / string-out API
        (:meth:`generate_response_with_fallback`) alongside the structured
        request/response API (:meth:`generate_response`).
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        # Fixed: the default is None, so the annotation must be Optional[...]
        # rather than a bare Dict.
        self.config = config or {}
        self.manager: Optional[LLMManager] = None
        self.initialized = False
        # Cache for the global LLM-enabled flag so repeated generation calls
        # do not repeat the environment/database lookup.
        self._llm_enabled_cache = None
        self._cache_timestamp = 0
        self._cache_ttl = 30  # seconds

    async def initialize(self):
        """Build the LLMManager from settings. Idempotent.

        Prefers the new multi-provider configuration; falls back to the
        legacy single-provider settings, registered as one high-priority
        'custom' provider.
        """
        if self.initialized:
            return

        settings = get_settings()
        self.manager = LLMManager()

        from .llm_manager import ProviderConfig  # hoisted out of the loop below

        if settings.llm.providers and len(settings.llm.providers) > 0:
            # New-style multi-provider configuration.
            for name, provider_config in settings.llm.providers.items():
                if provider_config.enabled:
                    self.manager.add_provider(
                        name,
                        ProviderConfig(
                            provider_type=provider_config.type,
                            config=provider_config.config,
                            priority=provider_config.priority,
                            enabled=provider_config.enabled,
                        ),
                    )
        else:
            # Legacy single-provider configuration.
            import os
            api_key = os.getenv('LLM_API_KEY', 'x')  # 'x' = keyless local API

            legacy_config = {
                'base_url': settings.llm.base_url,
                'model': settings.llm.model,
                'api_key': api_key,
                'timeout': settings.llm.timeout,
                'max_tokens': settings.llm.max_tokens,
                'temperature': settings.llm.temperature,
                'api_format': 'openai',  # assume OpenAI format for legacy setups
            }

            self.manager.add_provider(
                'current_custom',
                ProviderConfig(
                    provider_type='custom',
                    config=legacy_config,
                    priority=100,  # make the legacy provider win the fallback order
                    enabled=True,
                ),
            )

        self.initialized = True

    async def generate_response_with_fallback(
        self,
        prompt: str,
        character_name: Optional[str] = None,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        **kwargs
    ) -> Optional[str]:
        """Generate a plain-text response (backwards-compatible entry point).

        Returns a canned "disabled" message when the global switch is off, and
        a generic fallback line when every provider fails.
        """
        # SAFETY CHECK: global LLM enabled flag (cost protection).
        if not await self._is_llm_enabled():
            return self._get_disabled_response(character_name)

        if not self.initialized:
            await self.initialize()

        request = LLMRequest(
            prompt=prompt,
            character_name=character_name,
            max_tokens=max_tokens,
            temperature=temperature,
            context=kwargs,
        )

        response = await self.manager.generate_response(request)

        if response.success:
            return response.content
        # Keep the old contract: never raise, hand back a harmless line.
        return self._get_fallback_response(character_name)

    async def generate_response(
        self,
        request: LLMRequest
    ) -> LLMResponse:
        """Generate a response using the structured request/response format."""
        # SAFETY CHECK: global LLM enabled flag (cost protection).
        if not await self._is_llm_enabled():
            return LLMResponse(
                content=self._get_disabled_response(request.character_name),
                success=True,
                provider="disabled",
                model="none",
                metadata={"reason": "LLM globally disabled for cost protection"},
            )

        if not self.initialized:
            await self.initialize()

        return await self.manager.generate_response(request)

    def _get_fallback_response(self, character_name: Optional[str] = None) -> str:
        """Pick a generic in-character filler line used when all providers fail."""
        fallback_responses = [
            "I'm having trouble organizing my thoughts right now.",
            "Let me think about that for a moment...",
            "Hmm, that's an interesting point to consider.",
            "I need a moment to process that.",
            "That's something worth reflecting on."
        ]

        # Character-specific fallbacks could be keyed on character_name here.

        import random
        return random.choice(fallback_responses)

    async def health_check(self) -> Dict[str, bool]:
        """Return a name -> healthy mapping for every registered provider."""
        if not self.initialized:
            await self.initialize()

        return await self.manager.health_check_all()

    def get_provider_info(self) -> Dict[str, Any]:
        """Return provider metadata, or {} before initialization."""
        if not self.initialized:
            return {}

        return self.manager.get_provider_info()

    def set_provider(self, provider_name: str) -> bool:
        """Set the current primary provider; False before initialization."""
        if not self.initialized:
            return False

        return self.manager.set_current_provider(provider_name)

    def get_current_provider(self) -> Optional[str]:
        """Return the current primary provider name, or None before init."""
        if not self.initialized:
            return None

        return self.manager.get_current_provider()

    async def _is_llm_enabled(self) -> bool:
        """Check whether LLM generation is globally enabled.

        Order of precedence: short-lived cache, then the ``LLM_ENABLED``
        environment variable, then the database configuration. Any failure
        defaults to disabled for cost safety.
        """
        import os
        import time

        # Serve from cache while it is fresh.
        current_time = time.time()
        if (self._llm_enabled_cache is not None and
            current_time - self._cache_timestamp < self._cache_ttl):
            return self._llm_enabled_cache

        # Environment variable first (cheapest).
        env_enabled = os.getenv('LLM_ENABLED', 'false').lower()
        if env_enabled in ['true', '1', 'yes', 'on', 'enabled']:
            result = True
        elif env_enabled in ['false', '0', 'no', 'off', 'disabled']:
            result = False
        else:
            # Unrecognised value: consult the database configuration.
            try:
                from sqlalchemy import text
                from ..database.connection import get_db_session

                async with get_db_session() as session:
                    db_result = await session.execute(
                        text("SELECT config_value FROM system_configuration WHERE config_section = 'llm' AND config_key = 'global_enabled'")
                    )
                    row = db_result.fetchone()
                    if row:
                        result = str(row[0]).lower() in ['true', '1', 'yes', 'on', 'enabled']
                    else:
                        result = False

            except Exception:
                # If the database check fails, default to disabled for safety.
                result = False

        # Cache the outcome for the TTL window.
        self._llm_enabled_cache = result
        self._cache_timestamp = current_time

        return result

    def _invalidate_llm_cache(self):
        """Drop the cached enabled flag (call whenever the setting changes)."""
        self._llm_enabled_cache = None
        self._cache_timestamp = 0

    def _get_disabled_response(self, character_name: Optional[str] = None) -> str:
        """Return a friendly canned line used while the LLM is switched off."""
        if character_name:
            return f"*{character_name} thinks quietly* (LLM is currently disabled to save costs - check admin settings to enable)"
        return "*thinking quietly* (LLM is currently disabled to save costs - check admin settings to enable)"
|
||||||
|
|
||||||
|
|
||||||
|
# Shared module-level client, kept for backwards compatibility with code
# that previously imported a single global LLM client.
multi_llm_client = MultiProviderLLMClient()


async def initialize_llm_client():
    """Initialise the shared module-level LLM client (idempotent)."""
    await multi_llm_client.initialize()


def get_llm_client() -> MultiProviderLLMClient:
    """Return the shared module-level LLM client instance."""
    return multi_llm_client
|
||||||
19
src/llm/providers/__init__.py
Normal file
19
src/llm/providers/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
"""
|
||||||
|
LLM Providers Package
|
||||||
|
"""
|
||||||
|
|
||||||
|
from .base import BaseLLMProvider, LLMRequest, LLMResponse
|
||||||
|
from .openai_provider import OpenAIProvider
|
||||||
|
from .openrouter_provider import OpenRouterProvider
|
||||||
|
from .gemini_provider import GeminiProvider
|
||||||
|
from .custom_provider import CustomProvider
|
||||||
|
|
||||||
|
__all__ = [
|
||||||
|
'BaseLLMProvider',
|
||||||
|
'LLMRequest',
|
||||||
|
'LLMResponse',
|
||||||
|
'OpenAIProvider',
|
||||||
|
'OpenRouterProvider',
|
||||||
|
'GeminiProvider',
|
||||||
|
'CustomProvider'
|
||||||
|
]
|
||||||
67
src/llm/providers/base.py
Normal file
67
src/llm/providers/base.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
"""
|
||||||
|
Base LLM Provider Interface
|
||||||
|
"""
|
||||||
|
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from typing import Dict, Any, Optional, List
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class LLMRequest:
    """Provider-agnostic request handed to every LLM backend.

    Only ``prompt`` is required; unset tuning fields fall back to each
    provider's own configuration defaults.
    """
    prompt: str                               # full prompt text to send
    character_name: Optional[str] = None      # speaking character, if any
    max_tokens: Optional[int] = None          # per-request generation cap
    temperature: Optional[float] = None       # per-request sampling temperature
    context: Optional[Dict[str, Any]] = None  # free-form extra data for providers


@dataclass
class LLMResponse:
    """Provider-agnostic result returned by every LLM backend."""
    content: str                        # generated text ('' on failure)
    success: bool = True                # False when the provider errored
    error: Optional[str] = None         # human-readable failure description
    provider: Optional[str] = None      # which backend produced this result
    model: Optional[str] = None         # model identifier actually used
    tokens_used: Optional[int] = None   # total token count when reported
    # Fixed: multi_provider_client.generate_response constructs
    # LLMResponse(..., metadata={...}) when the LLM is globally disabled;
    # without this field that call raises TypeError.
    metadata: Optional[Dict[str, Any]] = None


class BaseLLMProvider(ABC):
    """Abstract base class every LLM provider must implement.

    Subclasses receive their configuration as a plain dict and expose a
    uniform async generate / health-check interface.
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        # Derived name, e.g. OpenAIProvider -> 'openai'.
        self.provider_name = self.__class__.__name__.lower().replace('provider', '')

    @abstractmethod
    async def generate_response(self, request: LLMRequest) -> LLMResponse:
        """Generate a response for *request* using this backend."""
        pass

    @abstractmethod
    async def health_check(self) -> bool:
        """Return True when the backend is reachable and usable."""
        pass

    @abstractmethod
    def get_supported_models(self) -> List[str]:
        """Return the model identifiers this backend can serve."""
        pass

    @property
    @abstractmethod
    def requires_api_key(self) -> bool:
        """Whether an API key must be configured for this backend."""
        pass

    def get_config_value(self, key: str, default: Any = None) -> Any:
        """Look up *key* in the provider configuration with a fallback."""
        return self.config.get(key, default)

    def validate_config(self) -> bool:
        """Return False when a required API key is missing, else True."""
        if self.requires_api_key and not self.get_config_value('api_key'):
            return False
        return True
|
||||||
170
src/llm/providers/custom_provider.py
Normal file
170
src/llm/providers/custom_provider.py
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
"""
|
||||||
|
Custom Provider for LLM requests (KoboldCPP, Ollama, etc.)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
import json
|
||||||
|
from typing import Dict, Any, List
|
||||||
|
from .base import BaseLLMProvider, LLMRequest, LLMResponse
|
||||||
|
|
||||||
|
|
||||||
|
class CustomProvider(BaseLLMProvider):
    """Provider for self-hosted backends (KoboldCPP, Ollama, etc.).

    Speaks either the OpenAI-compatible chat API or Ollama's native
    /api/generate API, selected by the 'api_format' configuration key.
    """

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get('api_key', 'x')  # 'x' = placeholder for keyless local APIs
        self.base_url = config.get('base_url', 'http://localhost:11434')
        self.model = config.get('model', 'llama2')
        self.timeout = config.get('timeout', 300)
        self.api_format = config.get('api_format', 'openai')  # 'openai' or 'ollama'

    @property
    def requires_api_key(self) -> bool:
        # Local backends are typically unauthenticated.
        return False

    def get_supported_models(self) -> List[str]:
        return [
            'llama2',
            'llama3',
            'codellama',
            'mistral',
            'koboldcpp/custom',
            'custom-model',
        ]

    def _maybe_auth_headers(self) -> Dict[str, str]:
        """Bearer header only when a real (non-placeholder) key is configured."""
        if self.api_key and self.api_key != 'x':
            return {'Authorization': f'Bearer {self.api_key}'}
        return {}

    async def generate_response(self, request: LLMRequest) -> LLMResponse:
        """Dispatch to the configured API format and wrap any failure."""
        try:
            if self.api_format == 'openai':
                return await self._generate_openai_format(request)
            if self.api_format == 'ollama':
                return await self._generate_ollama_format(request)
            return LLMResponse(
                content="",
                success=False,
                error=f"Unsupported API format: {self.api_format}",
                provider='custom',
            )
        except Exception as e:
            return LLMResponse(
                content="",
                success=False,
                error=f"Custom provider error: {str(e)}",
                provider='custom',
            )

    async def _generate_openai_format(self, request: LLMRequest) -> LLMResponse:
        """OpenAI-compatible /chat/completions call."""
        headers = {'Content-Type': 'application/json'}
        headers.update(self._maybe_auth_headers())

        payload = {
            'model': self.model,
            'messages': [{'role': 'user', 'content': request.prompt}],
            'max_tokens': request.max_tokens or self.config.get('max_tokens', 2000),
            'temperature': request.temperature or self.config.get('temperature', 0.8),
            'stream': False,
        }

        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{self.base_url}/chat/completions",
                headers=headers,
                json=payload,
                timeout=self.timeout,
            )

            if response.status_code != 200:
                return LLMResponse(
                    content="",
                    success=False,
                    error=f"Custom API error: {response.status_code} - {response.text}",
                    provider='custom',
                )

            data = response.json()
            return LLMResponse(
                content=data['choices'][0]['message']['content'],
                success=True,
                provider='custom',
                model=self.model,
                tokens_used=data.get('usage', {}).get('total_tokens'),
            )

    async def _generate_ollama_format(self, request: LLMRequest) -> LLMResponse:
        """Ollama native /api/generate call."""
        payload = {
            'model': self.model,
            'prompt': request.prompt,
            'stream': False,
            'options': {
                'temperature': request.temperature or self.config.get('temperature', 0.8),
                'num_predict': request.max_tokens or self.config.get('max_tokens', 2000),
            },
        }

        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{self.base_url}/api/generate",
                json=payload,
                timeout=self.timeout,
            )

            if response.status_code != 200:
                return LLMResponse(
                    content="",
                    success=False,
                    error=f"Ollama API error: {response.status_code} - {response.text}",
                    provider='custom',
                )

            return LLMResponse(
                content=response.json().get('response', ''),
                success=True,
                provider='custom',
                model=self.model,
            )

    async def health_check(self) -> bool:
        """Probe a cheap listing endpoint appropriate for the API format."""
        try:
            if self.api_format == 'openai':
                url = f"{self.base_url}/models"
                headers = self._maybe_auth_headers()
            else:  # ollama
                url = f"{self.base_url}/api/tags"
                headers = {}

            async with httpx.AsyncClient() as client:
                response = await client.get(url, headers=headers, timeout=10)
                return response.status_code == 200

        except Exception:
            return False
|
||||||
124
src/llm/providers/gemini_provider.py
Normal file
124
src/llm/providers/gemini_provider.py
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
"""
|
||||||
|
Google Gemini Provider for LLM requests
|
||||||
|
"""
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
import json
|
||||||
|
from typing import Dict, Any, List
|
||||||
|
from .base import BaseLLMProvider, LLMRequest, LLMResponse
|
||||||
|
|
||||||
|
|
||||||
|
class GeminiProvider(BaseLLMProvider):
    """Provider backed by Google's Gemini generateContent REST API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get('api_key')
        self.base_url = config.get('base_url', 'https://generativelanguage.googleapis.com/v1beta')
        self.model = config.get('model', 'gemini-1.5-flash')
        self.timeout = config.get('timeout', 300)

    @property
    def requires_api_key(self) -> bool:
        return True

    def get_supported_models(self) -> List[str]:
        return [
            'gemini-1.5-flash',
            'gemini-1.5-pro',
            'gemini-1.0-pro',
        ]

    def _build_payload(self, request: LLMRequest) -> Dict[str, Any]:
        """Translate the generic request into Gemini's content format."""
        return {
            'contents': [
                {'parts': [{'text': request.prompt}]}
            ],
            'generationConfig': {
                'temperature': request.temperature or self.config.get('temperature', 0.8),
                'maxOutputTokens': request.max_tokens or self.config.get('max_tokens', 2000),
                'candidateCount': 1,
            },
        }

    async def generate_response(self, request: LLMRequest) -> LLMResponse:
        """Call models/{model}:generateContent and unwrap the first candidate."""
        try:
            url = f"{self.base_url}/models/{self.model}:generateContent"

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    url,
                    params={'key': self.api_key},  # Gemini authenticates via query param
                    json=self._build_payload(request),
                    timeout=self.timeout,
                )

                if response.status_code != 200:
                    return LLMResponse(
                        content="",
                        success=False,
                        error=f"Gemini API error: {response.status_code} - {response.text}",
                        provider='gemini',
                    )

                data = response.json()

                # Gemini nests the text under candidates[0].content.parts[0].
                if 'candidates' in data and len(data['candidates']) > 0:
                    candidate = data['candidates'][0]
                    if 'content' in candidate and 'parts' in candidate['content']:
                        tokens_used = None
                        if 'usageMetadata' in data:
                            tokens_used = data['usageMetadata'].get('totalTokenCount')

                        return LLMResponse(
                            content=candidate['content']['parts'][0]['text'],
                            success=True,
                            provider='gemini',
                            model=self.model,
                            tokens_used=tokens_used,
                        )

                return LLMResponse(
                    content="",
                    success=False,
                    error="Gemini API returned unexpected response format",
                    provider='gemini',
                )

        except Exception as e:
            return LLMResponse(
                content="",
                success=False,
                error=f"Gemini provider error: {str(e)}",
                provider='gemini',
            )

    async def health_check(self) -> bool:
        """Probe the model-list endpoint; True on HTTP 200."""
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.base_url}/models",
                    params={'key': self.api_key},
                    timeout=10,
                )
                return response.status_code == 200

        except Exception:
            return False
|
||||||
110
src/llm/providers/openai_provider.py
Normal file
110
src/llm/providers/openai_provider.py
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
"""
|
||||||
|
OpenAI Provider for LLM requests
|
||||||
|
"""
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
import json
|
||||||
|
from typing import Dict, Any, List
|
||||||
|
from .base import BaseLLMProvider, LLMRequest, LLMResponse
|
||||||
|
|
||||||
|
|
||||||
|
class OpenAIProvider(BaseLLMProvider):
    """Provider backed by the OpenAI chat-completions API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get('api_key')
        self.base_url = config.get('base_url', 'https://api.openai.com/v1')
        self.model = config.get('model', 'gpt-3.5-turbo')
        self.timeout = config.get('timeout', 300)

    @property
    def requires_api_key(self) -> bool:
        return True

    def get_supported_models(self) -> List[str]:
        return [
            'gpt-3.5-turbo',
            'gpt-3.5-turbo-16k',
            'gpt-4',
            'gpt-4-turbo',
            'gpt-4o',
            'gpt-4o-mini',
        ]

    def _auth_headers(self) -> Dict[str, str]:
        """Standard bearer-token headers for every OpenAI call."""
        return {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json',
        }

    async def generate_response(self, request: LLMRequest) -> LLMResponse:
        """Send *request* to /chat/completions and wrap the reply."""
        try:
            payload = {
                'model': self.model,
                'messages': [{'role': 'user', 'content': request.prompt}],
                'max_tokens': request.max_tokens or self.config.get('max_tokens', 2000),
                'temperature': request.temperature or self.config.get('temperature', 0.8),
                'stream': False,
            }

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.base_url}/chat/completions",
                    headers=self._auth_headers(),
                    json=payload,
                    timeout=self.timeout,
                )

                if response.status_code != 200:
                    return LLMResponse(
                        content="",
                        success=False,
                        error=f"OpenAI API error: {response.status_code} - {response.text}",
                        provider='openai',
                    )

                data = response.json()
                return LLMResponse(
                    content=data['choices'][0]['message']['content'],
                    success=True,
                    provider='openai',
                    model=self.model,
                    tokens_used=data.get('usage', {}).get('total_tokens'),
                )

        except Exception as e:
            return LLMResponse(
                content="",
                success=False,
                error=f"OpenAI provider error: {str(e)}",
                provider='openai',
            )

    async def health_check(self) -> bool:
        """Probe the /models endpoint; True on HTTP 200."""
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.base_url}/models",
                    headers=self._auth_headers(),
                    timeout=10,
                )
                return response.status_code == 200

        except Exception:
            return False
|
||||||
122
src/llm/providers/openrouter_provider.py
Normal file
122
src/llm/providers/openrouter_provider.py
Normal file
@@ -0,0 +1,122 @@
|
|||||||
|
"""
|
||||||
|
OpenRouter Provider for LLM requests
|
||||||
|
"""
|
||||||
|
|
||||||
|
import httpx
|
||||||
|
import json
|
||||||
|
from typing import Dict, Any, List
|
||||||
|
from .base import BaseLLMProvider, LLMRequest, LLMResponse
|
||||||
|
|
||||||
|
|
||||||
|
class OpenRouterProvider(BaseLLMProvider):
    """Provider backed by the OpenRouter model-aggregation API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.api_key = config.get('api_key')
        self.base_url = config.get('base_url', 'https://openrouter.ai/api/v1')
        self.model = config.get('model', 'anthropic/claude-3-sonnet')
        self.timeout = config.get('timeout', 300)
        self.app_name = config.get('app_name', 'discord-fishbowl')

    @property
    def requires_api_key(self) -> bool:
        return True

    def get_supported_models(self) -> List[str]:
        return [
            'anthropic/claude-3-sonnet',
            'anthropic/claude-3-haiku',
            'anthropic/claude-3-opus',
            'openai/gpt-4o',
            'openai/gpt-4o-mini',
            'openai/gpt-4-turbo',
            'openai/gpt-3.5-turbo',
            'meta-llama/llama-3.1-70b-instruct',
            'meta-llama/llama-3.1-8b-instruct',
            'google/gemini-pro-1.5',
            'cohere/command-r-plus',
            'mistralai/mistral-large',
            'qwen/qwen-2-72b-instruct',
        ]

    def _request_headers(self) -> Dict[str, str]:
        """Auth plus the attribution headers OpenRouter asks clients to send."""
        return {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json',
            'HTTP-Referer': f'https://github.com/your-org/{self.app_name}',
            'X-Title': self.app_name,
        }

    async def generate_response(self, request: LLMRequest) -> LLMResponse:
        """Send *request* to OpenRouter's /chat/completions and wrap the reply."""
        try:
            payload = {
                'model': self.model,
                'messages': [{'role': 'user', 'content': request.prompt}],
                'max_tokens': request.max_tokens or self.config.get('max_tokens', 2000),
                'temperature': request.temperature or self.config.get('temperature', 0.8),
                'stream': False,
            }

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{self.base_url}/chat/completions",
                    headers=self._request_headers(),
                    json=payload,
                    timeout=self.timeout,
                )

                if response.status_code != 200:
                    return LLMResponse(
                        content="",
                        success=False,
                        error=f"OpenRouter API error: {response.status_code} - {response.text}",
                        provider='openrouter',
                    )

                data = response.json()
                return LLMResponse(
                    content=data['choices'][0]['message']['content'],
                    success=True,
                    provider='openrouter',
                    model=self.model,
                    tokens_used=data.get('usage', {}).get('total_tokens'),
                )

        except Exception as e:
            return LLMResponse(
                content="",
                success=False,
                error=f"OpenRouter provider error: {str(e)}",
                provider='openrouter',
            )

    async def health_check(self) -> bool:
        """Probe the /models endpoint; True on HTTP 200."""
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    f"{self.base_url}/models",
                    headers=self._request_headers(),
                    timeout=10,
                )
                return response.status_code == 200

        except Exception:
            return False
|
||||||
42
src/main.py
42
src/main.py
@@ -20,7 +20,7 @@ from bot.discord_client import FishbowlBot
|
|||||||
from bot.message_handler import MessageHandler, CommandHandler
|
from bot.message_handler import MessageHandler, CommandHandler
|
||||||
from conversation.engine import ConversationEngine
|
from conversation.engine import ConversationEngine
|
||||||
from conversation.scheduler import ConversationScheduler
|
from conversation.scheduler import ConversationScheduler
|
||||||
from llm.client import llm_client
|
from llm.multi_provider_client import multi_llm_client, initialize_llm_client
|
||||||
from rag.vector_store import vector_store_manager
|
from rag.vector_store import vector_store_manager
|
||||||
from rag.community_knowledge import initialize_community_knowledge_rag
|
from rag.community_knowledge import initialize_community_knowledge_rag
|
||||||
from rag.memory_sharing import MemorySharingManager
|
from rag.memory_sharing import MemorySharingManager
|
||||||
@@ -72,12 +72,21 @@ class FishbowlApplication:
|
|||||||
await create_tables()
|
await create_tables()
|
||||||
logger.info("Database initialized")
|
logger.info("Database initialized")
|
||||||
|
|
||||||
# Check LLM availability (non-blocking)
|
# Initialize multi-provider LLM client
|
||||||
is_available = await llm_client.check_model_availability()
|
logger.info("Initializing multi-provider LLM system...")
|
||||||
if not is_available:
|
await initialize_llm_client()
|
||||||
logger.warning("LLM model not available at startup. Bot will continue and retry connections.")
|
|
||||||
|
# Check provider health (non-blocking)
|
||||||
|
health_status = await multi_llm_client.health_check()
|
||||||
|
provider_info = multi_llm_client.get_provider_info()
|
||||||
|
|
||||||
|
healthy_providers = [name for name, healthy in health_status.items() if healthy]
|
||||||
|
if healthy_providers:
|
||||||
|
current_provider = multi_llm_client.get_current_provider()
|
||||||
|
logger.info(f"LLM providers available: {healthy_providers}")
|
||||||
|
logger.info(f"Current primary provider: {current_provider}")
|
||||||
else:
|
else:
|
||||||
logger.info(f"LLM model '{llm_client.model}' is available")
|
logger.warning("No LLM providers are healthy! Bot will continue and retry connections.")
|
||||||
|
|
||||||
# Initialize RAG systems
|
# Initialize RAG systems
|
||||||
logger.info("Initializing RAG systems...")
|
logger.info("Initializing RAG systems...")
|
||||||
@@ -106,6 +115,10 @@ class FishbowlApplication:
|
|||||||
# Initialize MCP servers
|
# Initialize MCP servers
|
||||||
logger.info("Initializing MCP servers...")
|
logger.info("Initializing MCP servers...")
|
||||||
|
|
||||||
|
# Initialize self-modification server
|
||||||
|
self.mcp_servers.append(mcp_server)
|
||||||
|
logger.info("Self-modification MCP server initialized")
|
||||||
|
|
||||||
# Initialize file system server
|
# Initialize file system server
|
||||||
await filesystem_server.initialize(self.vector_store, character_names)
|
await filesystem_server.initialize(self.vector_store, character_names)
|
||||||
self.mcp_servers.append(filesystem_server)
|
self.mcp_servers.append(filesystem_server)
|
||||||
@@ -248,20 +261,21 @@ class FishbowlApplication:
|
|||||||
signal.signal(signal.SIGBREAK, signal_handler)
|
signal.signal(signal.SIGBREAK, signal_handler)
|
||||||
|
|
||||||
async def _llm_cleanup_loop(self):
|
async def _llm_cleanup_loop(self):
|
||||||
"""Background task to clean up completed LLM requests"""
|
"""Background task to monitor LLM provider health"""
|
||||||
try:
|
try:
|
||||||
while not self.shutdown_event.is_set():
|
while not self.shutdown_event.is_set():
|
||||||
await llm_client.cleanup_pending_requests()
|
# Check provider health periodically
|
||||||
pending_count = llm_client.get_pending_count()
|
health_status = await multi_llm_client.health_check()
|
||||||
|
unhealthy_providers = [name for name, healthy in health_status.items() if not healthy]
|
||||||
|
|
||||||
if pending_count > 0:
|
if unhealthy_providers:
|
||||||
logger.debug(f"LLM cleanup: {pending_count} pending background requests")
|
logger.debug(f"Unhealthy LLM providers: {unhealthy_providers}")
|
||||||
|
|
||||||
# Wait 30 seconds before next cleanup
|
# Wait 60 seconds before next health check
|
||||||
await asyncio.sleep(30)
|
await asyncio.sleep(60)
|
||||||
|
|
||||||
except asyncio.CancelledError:
|
except asyncio.CancelledError:
|
||||||
logger.info("LLM cleanup task cancelled")
|
logger.info("LLM monitoring task cancelled")
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Error in LLM cleanup loop: {e}")
|
logger.error(f"Error in LLM cleanup loop: {e}")
|
||||||
|
|
||||||
|
|||||||
@@ -433,73 +433,155 @@ class VectorStoreManager:
|
|||||||
|
|
||||||
query_embedding = await self._generate_embedding(query)
|
query_embedding = await self._generate_embedding(query)
|
||||||
|
|
||||||
results = self.community_collection.query(
|
# Route to backend-specific implementation
|
||||||
query_embeddings=[query_embedding],
|
if self.backend == "qdrant":
|
||||||
n_results=limit
|
return await self._query_community_knowledge_qdrant(query, query_embedding, limit)
|
||||||
)
|
elif self.backend == "chromadb":
|
||||||
|
return await self._query_community_knowledge_chromadb(query, query_embedding, limit)
|
||||||
|
|
||||||
memories = []
|
return []
|
||||||
for i, (doc, metadata, distance) in enumerate(zip(
|
|
||||||
results['documents'][0],
|
|
||||||
results['metadatas'][0],
|
|
||||||
results['distances'][0]
|
|
||||||
)):
|
|
||||||
memory = VectorMemory(
|
|
||||||
id=results['ids'][0][i],
|
|
||||||
content=doc,
|
|
||||||
memory_type=MemoryType.COMMUNITY,
|
|
||||||
character_name=metadata.get('character_name', 'community'),
|
|
||||||
timestamp=datetime.fromisoformat(metadata['timestamp']),
|
|
||||||
importance=metadata['importance'],
|
|
||||||
metadata=metadata
|
|
||||||
)
|
|
||||||
memory.metadata['similarity_score'] = 1 - distance
|
|
||||||
memories.append(memory)
|
|
||||||
|
|
||||||
return sorted(memories, key=lambda m: m.metadata.get('similarity_score', 0), reverse=True)
|
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log_error_with_context(e, {"query": query, "component": "community_knowledge"})
|
log_error_with_context(e, {"query": query})
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
async def _query_community_knowledge_chromadb(self, query: str, query_embedding: List[float], limit: int) -> List[VectorMemory]:
|
||||||
|
"""Query community knowledge using ChromaDB"""
|
||||||
|
results = self.community_collection.query(
|
||||||
|
query_embeddings=[query_embedding],
|
||||||
|
n_results=limit
|
||||||
|
)
|
||||||
|
|
||||||
|
memories = []
|
||||||
|
for i, (doc, metadata, distance) in enumerate(zip(
|
||||||
|
results['documents'][0],
|
||||||
|
results['metadatas'][0],
|
||||||
|
results['distances'][0]
|
||||||
|
)):
|
||||||
|
memory = VectorMemory(
|
||||||
|
id=results['ids'][0][i],
|
||||||
|
content=doc,
|
||||||
|
memory_type=MemoryType.COMMUNITY,
|
||||||
|
character_name=metadata.get('character_name', 'community'),
|
||||||
|
timestamp=datetime.fromisoformat(metadata['timestamp']),
|
||||||
|
importance=metadata['importance'],
|
||||||
|
metadata=metadata
|
||||||
|
)
|
||||||
|
memory.metadata['similarity_score'] = 1 - distance
|
||||||
|
memories.append(memory)
|
||||||
|
|
||||||
|
return sorted(memories, key=lambda m: m.metadata.get('similarity_score', 0), reverse=True)
|
||||||
|
|
||||||
|
async def _query_community_knowledge_qdrant(self, query: str, query_embedding: List[float], limit: int) -> List[VectorMemory]:
|
||||||
|
"""Query community knowledge using Qdrant"""
|
||||||
|
search_result = self.qdrant_client.search(
|
||||||
|
collection_name=self.collection_name,
|
||||||
|
query_vector=query_embedding,
|
||||||
|
limit=limit,
|
||||||
|
with_payload=True
|
||||||
|
)
|
||||||
|
|
||||||
|
memories = []
|
||||||
|
for point in search_result:
|
||||||
|
payload = point.payload
|
||||||
|
if payload.get('memory_type') == MemoryType.COMMUNITY.value:
|
||||||
|
memory = VectorMemory(
|
||||||
|
id=str(point.id),
|
||||||
|
content=payload['content'],
|
||||||
|
memory_type=MemoryType.COMMUNITY,
|
||||||
|
character_name=payload.get('character_name', 'community'),
|
||||||
|
timestamp=datetime.fromisoformat(payload['timestamp']),
|
||||||
|
importance=payload['importance'],
|
||||||
|
metadata=payload
|
||||||
|
)
|
||||||
|
memory.metadata['similarity_score'] = point.score
|
||||||
|
memories.append(memory)
|
||||||
|
|
||||||
|
return memories
|
||||||
|
|
||||||
async def get_creative_knowledge(self, character_name: str, query: str, limit: int = 5) -> List[VectorMemory]:
|
async def get_creative_knowledge(self, character_name: str, query: str, limit: int = 5) -> List[VectorMemory]:
|
||||||
"""Query character's creative knowledge base"""
|
"""Query character's creative knowledge base"""
|
||||||
try:
|
try:
|
||||||
if character_name not in self.creative_collections:
|
if character_name not in self.creative_collections:
|
||||||
return []
|
return []
|
||||||
|
|
||||||
collection = self.creative_collections[character_name]
|
|
||||||
query_embedding = await self._generate_embedding(query)
|
query_embedding = await self._generate_embedding(query)
|
||||||
|
|
||||||
results = collection.query(
|
# Route to backend-specific implementation
|
||||||
query_embeddings=[query_embedding],
|
if self.backend == "qdrant":
|
||||||
n_results=limit
|
return await self._get_creative_knowledge_qdrant(character_name, query, query_embedding, limit)
|
||||||
)
|
elif self.backend == "chromadb":
|
||||||
|
return await self._get_creative_knowledge_chromadb(character_name, query, query_embedding, limit)
|
||||||
|
|
||||||
memories = []
|
return []
|
||||||
for i, (doc, metadata, distance) in enumerate(zip(
|
|
||||||
results['documents'][0],
|
|
||||||
results['metadatas'][0],
|
|
||||||
results['distances'][0]
|
|
||||||
)):
|
|
||||||
memory = VectorMemory(
|
|
||||||
id=results['ids'][0][i],
|
|
||||||
content=doc,
|
|
||||||
memory_type=MemoryType.CREATIVE,
|
|
||||||
character_name=character_name,
|
|
||||||
timestamp=datetime.fromisoformat(metadata['timestamp']),
|
|
||||||
importance=metadata['importance'],
|
|
||||||
metadata=metadata
|
|
||||||
)
|
|
||||||
memory.metadata['similarity_score'] = 1 - distance
|
|
||||||
memories.append(memory)
|
|
||||||
|
|
||||||
return sorted(memories, key=lambda m: m.metadata.get('similarity_score', 0), reverse=True)
|
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
log_error_with_context(e, {"character": character_name, "query": query})
|
log_error_with_context(e, {"character": character_name, "query": query})
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
async def _get_creative_knowledge_chromadb(self, character_name: str, query: str, query_embedding: List[float], limit: int) -> List[VectorMemory]:
|
||||||
|
"""Get creative knowledge using ChromaDB"""
|
||||||
|
collection = self.creative_collections[character_name]
|
||||||
|
results = collection.query(
|
||||||
|
query_embeddings=[query_embedding],
|
||||||
|
n_results=limit
|
||||||
|
)
|
||||||
|
|
||||||
|
memories = []
|
||||||
|
for i, (doc, metadata, distance) in enumerate(zip(
|
||||||
|
results['documents'][0],
|
||||||
|
results['metadatas'][0],
|
||||||
|
results['distances'][0]
|
||||||
|
)):
|
||||||
|
memory = VectorMemory(
|
||||||
|
id=results['ids'][0][i],
|
||||||
|
content=doc,
|
||||||
|
memory_type=MemoryType.CREATIVE,
|
||||||
|
character_name=character_name,
|
||||||
|
timestamp=datetime.fromisoformat(metadata['timestamp']),
|
||||||
|
importance=metadata['importance'],
|
||||||
|
metadata=metadata
|
||||||
|
)
|
||||||
|
memory.metadata['similarity_score'] = 1 - distance
|
||||||
|
memories.append(memory)
|
||||||
|
|
||||||
|
return sorted(memories, key=lambda m: m.metadata.get('similarity_score', 0), reverse=True)
|
||||||
|
|
||||||
|
async def _get_creative_knowledge_qdrant(self, character_name: str, query: str, query_embedding: List[float], limit: int) -> List[VectorMemory]:
|
||||||
|
"""Get creative knowledge using Qdrant"""
|
||||||
|
from qdrant_client.models import Filter, FieldCondition
|
||||||
|
|
||||||
|
search_result = self.qdrant_client.search(
|
||||||
|
collection_name=self.collection_name,
|
||||||
|
query_vector=query_embedding,
|
||||||
|
limit=limit,
|
||||||
|
with_payload=True,
|
||||||
|
query_filter=Filter(
|
||||||
|
must=[
|
||||||
|
FieldCondition(key="character_name", match={"value": character_name}),
|
||||||
|
FieldCondition(key="memory_type", match={"value": MemoryType.CREATIVE.value})
|
||||||
|
]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
memories = []
|
||||||
|
for point in search_result:
|
||||||
|
payload = point.payload
|
||||||
|
if payload.get('memory_type') == MemoryType.CREATIVE.value and payload.get('character_name') == character_name:
|
||||||
|
memory = VectorMemory(
|
||||||
|
id=str(point.id),
|
||||||
|
content=payload['content'],
|
||||||
|
memory_type=MemoryType.CREATIVE,
|
||||||
|
character_name=character_name,
|
||||||
|
timestamp=datetime.fromisoformat(payload['timestamp']),
|
||||||
|
importance=payload['importance'],
|
||||||
|
metadata=payload
|
||||||
|
)
|
||||||
|
memory.metadata['similarity_score'] = point.score
|
||||||
|
memories.append(memory)
|
||||||
|
|
||||||
|
return memories
|
||||||
|
|
||||||
async def consolidate_memories(self, character_name: str) -> Dict[str, Any]:
|
async def consolidate_memories(self, character_name: str) -> Dict[str, Any]:
|
||||||
"""Consolidate similar memories to save space"""
|
"""Consolidate similar memories to save space"""
|
||||||
try:
|
try:
|
||||||
|
|||||||
@@ -25,16 +25,28 @@ class DiscordConfig(BaseModel):
|
|||||||
guild_id: str
|
guild_id: str
|
||||||
channel_id: str
|
channel_id: str
|
||||||
|
|
||||||
|
class LLMProviderConfig(BaseModel):
|
||||||
|
"""Configuration for a single LLM provider"""
|
||||||
|
type: str # openai, openrouter, gemini, custom
|
||||||
|
enabled: bool = True
|
||||||
|
priority: int = 0
|
||||||
|
config: Dict[str, Any] = {}
|
||||||
|
|
||||||
class LLMConfig(BaseModel):
|
class LLMConfig(BaseModel):
|
||||||
|
"""Multi-provider LLM configuration"""
|
||||||
|
# Legacy single provider config (for backwards compatibility)
|
||||||
base_url: str = "http://localhost:11434"
|
base_url: str = "http://localhost:11434"
|
||||||
model: str = "llama2"
|
model: str = "llama2"
|
||||||
timeout: int = 300
|
timeout: int = 300
|
||||||
max_tokens: int = 2000
|
max_tokens: int = 2000
|
||||||
temperature: float = 0.8
|
temperature: float = 0.8
|
||||||
max_prompt_length: int = 6000
|
max_prompt_length: int = 16000
|
||||||
max_history_messages: int = 5
|
max_history_messages: int = 5
|
||||||
max_memories: int = 5
|
max_memories: int = 5
|
||||||
|
|
||||||
|
# New multi-provider config
|
||||||
|
providers: Dict[str, LLMProviderConfig] = {}
|
||||||
|
|
||||||
class ConversationConfig(BaseModel):
|
class ConversationConfig(BaseModel):
|
||||||
min_delay_seconds: int = 30
|
min_delay_seconds: int = 30
|
||||||
max_delay_seconds: int = 300
|
max_delay_seconds: int = 300
|
||||||
@@ -141,7 +153,7 @@ def get_settings() -> Settings:
|
|||||||
timeout=int(os.getenv("LLM_TIMEOUT", "300")),
|
timeout=int(os.getenv("LLM_TIMEOUT", "300")),
|
||||||
max_tokens=int(os.getenv("LLM_MAX_TOKENS", "2000")),
|
max_tokens=int(os.getenv("LLM_MAX_TOKENS", "2000")),
|
||||||
temperature=float(os.getenv("LLM_TEMPERATURE", "0.8")),
|
temperature=float(os.getenv("LLM_TEMPERATURE", "0.8")),
|
||||||
max_prompt_length=int(os.getenv("LLM_MAX_PROMPT_LENGTH", "6000")),
|
max_prompt_length=int(os.getenv("LLM_MAX_PROMPT_LENGTH", "16000")),
|
||||||
max_history_messages=int(os.getenv("LLM_MAX_HISTORY_MESSAGES", "5")),
|
max_history_messages=int(os.getenv("LLM_MAX_HISTORY_MESSAGES", "5")),
|
||||||
max_memories=int(os.getenv("LLM_MAX_MEMORIES", "5"))
|
max_memories=int(os.getenv("LLM_MAX_MEMORIES", "5"))
|
||||||
),
|
),
|
||||||
|
|||||||
73
sync_vectors.py
Normal file
73
sync_vectors.py
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Sync existing PostgreSQL memories to Qdrant vector database
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import logging
|
||||||
|
from datetime import datetime, timezone
|
||||||
|
from database.connection import init_database, get_db_session
|
||||||
|
from database.models import Memory, Character
|
||||||
|
from rag.vector_store import VectorStoreManager, VectorMemory, MemoryType
|
||||||
|
from sqlalchemy import select
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
async def sync_memories_to_qdrant():
|
||||||
|
"""Sync all existing memories from PostgreSQL to Qdrant"""
|
||||||
|
|
||||||
|
# Initialize database
|
||||||
|
await init_database()
|
||||||
|
|
||||||
|
# Initialize vector store
|
||||||
|
vector_store = VectorStoreManager()
|
||||||
|
|
||||||
|
print("🔄 Starting memory sync to Qdrant...")
|
||||||
|
|
||||||
|
async with get_db_session() as session:
|
||||||
|
# Get all memories with character names
|
||||||
|
query = select(Memory, Character.name).join(
|
||||||
|
Character, Memory.character_id == Character.id
|
||||||
|
).order_by(Memory.timestamp)
|
||||||
|
|
||||||
|
results = await session.execute(query)
|
||||||
|
memories_with_chars = results.fetchall()
|
||||||
|
|
||||||
|
print(f"Found {len(memories_with_chars)} memories to sync")
|
||||||
|
|
||||||
|
synced_count = 0
|
||||||
|
error_count = 0
|
||||||
|
|
||||||
|
for memory, character_name in memories_with_chars:
|
||||||
|
try:
|
||||||
|
# Convert to vector memory format
|
||||||
|
vector_memory = VectorMemory(
|
||||||
|
id=str(memory.id),
|
||||||
|
character_name=character_name,
|
||||||
|
content=memory.content,
|
||||||
|
memory_type=MemoryType.PERSONAL,
|
||||||
|
importance=memory.importance_score,
|
||||||
|
timestamp=memory.timestamp or datetime.now(timezone.utc),
|
||||||
|
metadata={
|
||||||
|
"tags": memory.tags or [],
|
||||||
|
"memory_id": memory.id,
|
||||||
|
"character_id": memory.character_id,
|
||||||
|
"memory_type": memory.memory_type
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
# Store in vector database
|
||||||
|
await vector_store.store_memory(vector_memory)
|
||||||
|
synced_count += 1
|
||||||
|
|
||||||
|
if synced_count % 10 == 0:
|
||||||
|
print(f" Synced {synced_count}/{len(memories_with_chars)} memories...")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
error_count += 1
|
||||||
|
print(f" Error syncing memory {memory.id}: {e}")
|
||||||
|
|
||||||
|
print(f"✅ Sync complete: {synced_count} synced, {error_count} errors")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
asyncio.run(sync_memories_to_qdrant())
|
||||||
75
test_llm_current_provider.py
Normal file
75
test_llm_current_provider.py
Normal file
@@ -0,0 +1,75 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Quick test to check if current provider is properly detected
|
||||||
|
"""
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import sys
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Add src to path
|
||||||
|
sys.path.insert(0, "src")
|
||||||
|
|
||||||
|
async def test_current_provider():
|
||||||
|
"""Test that current provider is properly detected"""
|
||||||
|
try:
|
||||||
|
# Set minimal env vars to avoid validation errors
|
||||||
|
import os
|
||||||
|
os.environ.setdefault('DISCORD_TOKEN', 'test')
|
||||||
|
os.environ.setdefault('DISCORD_GUILD_ID', '123')
|
||||||
|
os.environ.setdefault('DISCORD_CHANNEL_ID', '456')
|
||||||
|
|
||||||
|
from llm.multi_provider_client import MultiProviderLLMClient
|
||||||
|
from utils.config import get_settings
|
||||||
|
|
||||||
|
print("Testing current LLM provider detection...")
|
||||||
|
print("=" * 50)
|
||||||
|
|
||||||
|
# Check current settings
|
||||||
|
settings = get_settings()
|
||||||
|
print(f"Current LLM config:")
|
||||||
|
print(f" Base URL: {settings.llm.base_url}")
|
||||||
|
print(f" Model: {settings.llm.model}")
|
||||||
|
print(f" Providers configured: {len(settings.llm.providers) if settings.llm.providers else 0}")
|
||||||
|
|
||||||
|
# Initialize client
|
||||||
|
client = MultiProviderLLMClient()
|
||||||
|
await client.initialize()
|
||||||
|
|
||||||
|
# Check provider info
|
||||||
|
provider_info = client.get_provider_info()
|
||||||
|
current_provider = client.get_current_provider()
|
||||||
|
health_status = await client.health_check()
|
||||||
|
|
||||||
|
print(f"\nProvider Status:")
|
||||||
|
print(f" Current provider: {current_provider}")
|
||||||
|
print(f" Total providers: {len(provider_info)}")
|
||||||
|
|
||||||
|
for name, info in provider_info.items():
|
||||||
|
healthy = health_status.get(name, False)
|
||||||
|
is_current = name == current_provider
|
||||||
|
print(f"\n {name}:")
|
||||||
|
print(f" Type: {info['type']}")
|
||||||
|
print(f" Model: {info['current_model']}")
|
||||||
|
print(f" Enabled: {info['enabled']}")
|
||||||
|
print(f" Priority: {info['priority']}")
|
||||||
|
print(f" Healthy: {healthy}")
|
||||||
|
print(f" Current: {is_current}")
|
||||||
|
|
||||||
|
if current_provider:
|
||||||
|
print(f"\n✅ Current provider detected: {current_provider}")
|
||||||
|
else:
|
||||||
|
print(f"\n❌ No current provider detected!")
|
||||||
|
|
||||||
|
return current_provider is not None
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"\n❌ Error: {e}")
|
||||||
|
import traceback
|
||||||
|
traceback.print_exc()
|
||||||
|
return False
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
success = asyncio.run(test_current_provider())
|
||||||
|
if not success:
|
||||||
|
sys.exit(1)
|
||||||
Reference in New Issue
Block a user