Implement comprehensive LLM provider system with global cost protection

- Add multi-provider LLM architecture supporting OpenRouter, OpenAI, Gemini, and custom providers
- Implement global LLM on/off switch with default DISABLED state for cost protection
- Add per-character LLM configuration with provider-specific models and settings
- Create performance-optimized caching system for LLM enabled-status checks (switch and cache sketched after this list)
- Add API key validation before enabling LLM providers to prevent broken configurations
- Implement audit logging for all LLM enable/disable actions for cost accountability (key validation and audit flow sketched below)
- Create comprehensive admin UI with prominent cost warnings and confirmation dialogs
- Add visual indicators in character list for custom AI model configurations
- Build character-specific LLM client system with global fallback mechanism
- Add database schema support for per-character LLM settings
- Implement graceful fallback responses when LLM is globally disabled (request routing sketched below)
- Create provider testing and validation system for reliable connections
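
The cost-protection path can be summarized as follows. A minimal sketch of the global on/off switch plus the cached enabled-status check; the names here (`store`, `is_llm_enabled`, the 30-second TTL) are illustrative assumptions, not the actual API:

    import time

    CACHE_TTL = 30.0  # seconds; avoids hitting the settings store on every message

    _cache = {"value": None, "expires_at": 0.0}

    def is_llm_enabled(store) -> bool:
        """Return the global LLM flag, defaulting to DISABLED for cost protection."""
        now = time.monotonic()
        if _cache["value"] is None or now >= _cache["expires_at"]:
            # An unset flag reads as False: LLM calls stay off until explicitly enabled
            _cache["value"] = bool(store.get("llm_enabled", False))
            _cache["expires_at"] = now + CACHE_TTL
        return _cache["value"]

    def invalidate_llm_cache() -> None:
        """Drop the cached flag so an admin toggle takes effect immediately."""
        _cache["value"] = None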
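Key validation and audit logging before the switch flips, again as a sketch: `provider.validate_api_key()`, `provider.name`, and the `llm.audit` logger are assumed names, and `invalidate_llm_cache` is reused from the sketch above:

    import logging

    audit_log = logging.getLogger("llm.audit")

    async def enable_llm(store, provider, actor: str) -> bool:
        """Flip the global switch only after the provider's API key validates."""
        if not await provider.validate_api_key():  # e.g. one cheap authenticated request
            audit_log.warning("LLM enable REJECTED for %s: key validation failed", actor)
            return False
        store.set("llm_enabled", True)
        invalidate_llm_cache()  # defined in the sketch above
        audit_log.info("LLM globally ENABLED by %s (provider=%s)", actor, provider.name)
        return True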
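Per-character routing with the global fallback and the graceful disabled response, with assumed names (`character.llm_client`, `client.complete`); `is_llm_enabled` comes from the first sketch:

    async def get_character_response(character, prompt: str, store, global_client) -> str:
        """Route to the character's own client when configured, else the global one."""
        if not is_llm_enabled(store):  # from the first sketch
            # Graceful canned reply instead of an API call while globally disabled
            return "AI responses are currently disabled."
        client = character.llm_client or global_client  # per-character override, global fallback
        return await client.complete(prompt)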

commit 10563900a3 (parent 004f0325ec)
Author: root
Date:   2025-07-08 07:35:48 -07:00
59 changed files with 6686 additions and 791 deletions

@@ -0,0 +1,75 @@
#!/usr/bin/env python3
"""
Quick test to check that the current LLM provider is properly detected.
"""
import asyncio
import os
import sys

# Add src to path so the project packages are importable
sys.path.insert(0, "src")


async def test_current_provider():
    """Test that the current provider is properly detected."""
    try:
        # Set minimal env vars before importing config to avoid validation errors
        os.environ.setdefault("DISCORD_TOKEN", "test")
        os.environ.setdefault("DISCORD_GUILD_ID", "123")
        os.environ.setdefault("DISCORD_CHANNEL_ID", "456")

        from llm.multi_provider_client import MultiProviderLLMClient
        from utils.config import get_settings

        print("Testing current LLM provider detection...")
        print("=" * 50)

        # Check current settings
        settings = get_settings()
        print("Current LLM config:")
        print(f"  Base URL: {settings.llm.base_url}")
        print(f"  Model: {settings.llm.model}")
        print(f"  Providers configured: {len(settings.llm.providers) if settings.llm.providers else 0}")

        # Initialize the multi-provider client
        client = MultiProviderLLMClient()
        await client.initialize()

        # Query provider info, the active provider, and per-provider health
        provider_info = client.get_provider_info()
        current_provider = client.get_current_provider()
        health_status = await client.health_check()

        print("\nProvider Status:")
        print(f"  Current provider: {current_provider}")
        print(f"  Total providers: {len(provider_info)}")
        for name, info in provider_info.items():
            healthy = health_status.get(name, False)
            is_current = name == current_provider
            print(f"\n  {name}:")
            print(f"    Type: {info['type']}")
            print(f"    Model: {info['current_model']}")
            print(f"    Enabled: {info['enabled']}")
            print(f"    Priority: {info['priority']}")
            print(f"    Healthy: {healthy}")
            print(f"    Current: {is_current}")

        if current_provider:
            print(f"\n✅ Current provider detected: {current_provider}")
        else:
            print("\n❌ No current provider detected!")

        return current_provider is not None

    except Exception as e:
        print(f"\n❌ Error: {e}")
        import traceback
        traceback.print_exc()
        return False


if __name__ == "__main__":
    success = asyncio.run(test_current_provider())
    if not success:
        sys.exit(1)