# Changelog — multi-provider LLM support:
# - Multi-provider LLM architecture: OpenRouter, OpenAI, Gemini, and custom providers
# - Global LLM on/off switch with default DISABLED state for cost protection
# - Per-character LLM configuration with provider-specific models and settings
# - Performance-optimized caching system for LLM enabled-status checks
# - API key validation before enabling LLM providers to prevent broken configurations
# - Audit logging for all LLM enable/disable actions for cost accountability
# - Comprehensive admin UI with prominent cost warnings and confirmation dialogs
# - Visual indicators in character list for custom AI model configurations
# - Character-specific LLM client system with global fallback mechanism
# - Database schema support for per-character LLM settings
# - Graceful fallback responses when LLM is globally disabled
# - Provider testing and validation system for reliable connections
# Discord Fishbowl Environment Configuration
# Copy this file to .env and fill in your actual values
# NEVER commit .env files to version control

# Discord Bot Configuration
DISCORD_BOT_TOKEN=your_discord_bot_token_here
DISCORD_GUILD_ID=your_guild_id_here
DISCORD_CHANNEL_ID=your_channel_id_here

|
# Database Configuration (matches current working setup)
DB_TYPE=postgresql
DB_HOST=localhost
DB_PORT=15432
DB_NAME=discord_fishbowl
DB_USER=postgres
DB_PASSWORD=fishbowl_password
# NOTE: ${VAR} expansion below requires a loader that performs interpolation
# (e.g. docker-compose or python-dotenv); a plain environment file will not expand it.
DATABASE_URL=postgresql+asyncpg://postgres:${DB_PASSWORD}@${DB_HOST}:${DB_PORT}/${DB_NAME}

|
# Redis Configuration (matches current working setup)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=redis_password
REDIS_DB=0

|
# Vector Database Configuration
VECTOR_DB_TYPE=qdrant
QDRANT_HOST=localhost
QDRANT_PORT=6333
QDRANT_COLLECTION=fishbowl_memories

|
# LLM Configuration
LLM_BASE_URL=http://192.168.1.200:5005/v1
LLM_MODEL=koboldcpp/Broken-Tutu-24B-Transgression-v2.0.i1-Q4_K_M
LLM_API_KEY=x
# Timeout in seconds
LLM_TIMEOUT=300
LLM_MAX_TOKENS=2000
LLM_TEMPERATURE=0.8
LLM_MAX_PROMPT_LENGTH=16000
LLM_MAX_HISTORY_MESSAGES=5
LLM_MAX_MEMORIES=5

|
# Admin Interface Configuration (matches current working setup)
ADMIN_HOST=0.0.0.0
ADMIN_PORT=8294
ADMIN_USERNAME=admin
# Set a strong password in your local .env; never commit the real value
ADMIN_PASSWORD=generate_secure_admin_password_here
# Generate with: python -c "import secrets; print(secrets.token_urlsafe(32))"
SECRET_KEY=generate_secure_secret_key_here

|
# LLM Provider Configuration
# OpenRouter (supports Claude, GPT, Llama, etc.)
OPENROUTER_ENABLED=false
OPENROUTER_API_KEY=
OPENROUTER_MODEL=anthropic/claude-3-sonnet

# OpenAI
OPENAI_ENABLED=false
OPENAI_API_KEY=
OPENAI_MODEL=gpt-4o-mini

# Google Gemini
GEMINI_ENABLED=false
GEMINI_API_KEY=
GEMINI_MODEL=gemini-1.5-flash

# Custom/Local LLM (current setup)
CUSTOM_LLM_ENABLED=true

# Ollama
OLLAMA_ENABLED=false
OLLAMA_MODEL=llama3

|
# System Configuration
CONVERSATION_FREQUENCY=0.5
# Response delay bounds in seconds
RESPONSE_DELAY_MIN=1.0
RESPONSE_DELAY_MAX=5.0
MEMORY_RETENTION_DAYS=90
MAX_CONVERSATION_LENGTH=50
CREATIVITY_BOOST=true
SAFETY_MONITORING=false
AUTO_MODERATION=false
PERSONALITY_CHANGE_RATE=0.1

|
# Logging Configuration
LOG_LEVEL=INFO
ENVIRONMENT=development

# Optional Services (for development)
PGADMIN_PASSWORD=generate_secure_pgadmin_password_here