Compare commits
31 commits
| SHA1 |
|---|
| 052570cefb |
| 5f8c93ff69 |
| 6d4de79ac2 |
| 4e2dfdfba6 |
| 3abbdd96eb |
| 2d5c636b9d |
| 72e088c0ad |
| d25dbb291b |
| 0aac989b2e |
| a79e0de396 |
| 2449a7dc32 |
| c1e8ee82c7 |
| 2db38546a3 |
| 1e83a46412 |
| 5da464bd62 |
| ea8983ddb0 |
| 61053f6177 |
| e02852fb45 |
| 863d3e3c88 |
| 530405a905 |
| 864201214d |
| 490ab742f1 |
| d6a7a4dbe7 |
| c09f1564a0 |
| 69354229d6 |
| ea206a1d7f |
| 5b64fcf80f |
| fe5e2d9b96 |
| 0d0084fd9a |
| f137198f67 |
| 37f5e53ece |
94 changed files with 7025 additions and 110 deletions

.env | 38

```diff
@@ -1,4 +1,36 @@
 DISCORD_TOKEN=MTM2OTc3NDY4OTYzNDg4MTU4Ng.G9Nrgz.akHoOO9SrXCDwiOCI3BUXfdR4bpSNb9zrVx9UI
-OLLAMA_API=http://192.168.1.100:11434/api/generate
-MODEL_NAME=llama3:latest
-CHANNEL_ID=1370420592360161393
+# This is using the TailScale IP
+OLLAMA_API=http://192.168.0.100:11434/
+MODEL_NAME=gemma3:12b
+CHANNEL_ID=1380999713272238151
+SHOW_THINKING_BLOCKS=false
+DEBUG_MODE=true
+AUTOREPLY_ENABLED=true
+AI_INCLUDE_CONTEXT=false
+# ---------------------------
+# Logging configuration
+# - LOG_LEVEL: global base level (INFO recommended)
+# - LOG_CONSOLE: enable console logs (true/false)
+# - LOG_CONSOLE_LEVEL: level for console output (INFO, DEBUG, etc.)
+# - LOG_CONSOLE_TO_STDOUT: if true, console logs go to STDOUT (useful for container logging)
+# - LOG_TO_FILE: enable writing to a rotating file
+# - LOG_FILE_LEVEL: level stored in the file (set to DEBUG to capture full LLM payloads)
+# - LOG_FILE: filename for rotated logs
+# - LOG_ERROR_FILE: enable writing errors to a separate file (filename is derived)
+# Example: to capture full prompts/responses in logs, set LOG_FILE_LEVEL=DEBUG
+LOG_LEVEL=INFO
+LOG_CONSOLE=true
+LOG_CONSOLE_LEVEL=INFO
+LOG_CONSOLE_TO_STDOUT=true
+LOG_TO_FILE=true
+LOG_FILE_LEVEL=DEBUG
+LOG_FILE=bot.log
+LOG_ERROR_FILE=true
+# ---------------------------
+# Cooldown in seconds
+AUTOREPLY_COOLDOWN=0
+# Used for Codex to reach my repo on Forgejo
+FORGEJO_URL=https://forgejo.milotech.us
+FORGEJO_OWNER=milo
+FORGEJO_REPO=AI-Discord-Bot
+FORGEJO_TOKEN=e404acfa246e3a8c07c5c15a98f8ac4b7384c3c5
```
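
For reference, a minimal sketch of how these variables are typically consumed with python-dotenv; the variable names come from the file above, and the string-to-boolean comparison mirrors the `lower() == "true"` pattern used elsewhere in this diff (see `src/ai.py`):

```python
import os
from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()  # read key=value pairs from .env into the process environment

# Plain strings come back as-is; fall back to sensible defaults if unset
OLLAMA_API = os.getenv("OLLAMA_API", "http://localhost:11434/")
MODEL_NAME = os.getenv("MODEL_NAME", "gemma3:12b")

# Booleans arrive as strings, so compare against "true" explicitly
DEBUG_MODE = os.getenv("DEBUG_MODE", "false").lower() == "true"

# Numeric settings need an explicit conversion
AUTOREPLY_COOLDOWN = int(os.getenv("AUTOREPLY_COOLDOWN", "0"))
```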

.vscode/settings.json | 5 (new file, vendored)

```json
{
    "python-envs.defaultEnvManager": "ms-python.python:conda",
    "python-envs.defaultPackageManager": "ms-python.python:conda",
    "python-envs.pythonProjects": []
}
```

DATABASE_MIGRATION.md | 186 (new file)

````markdown
# Database System Migration Guide

## Overview

The Discord bot now supports multiple database backends for storing user profiles and conversation memory:

- **SQLite**: Fast, reliable, file-based database (recommended)
- **JSON**: Original file-based storage (backward compatible)
- **Memory Toggle**: Option to completely disable memory features

## Configuration

Edit `src/settings.yml` to configure the database system:

```yaml
database:
  backend: "sqlite"  # Options: "sqlite" or "json"
  sqlite_path: "data/bot_database.db"
  json_user_profiles: "user_profiles.json"
  json_memory_data: "memory.json"
  memory_enabled: true  # Set to false to disable memory completely
```

## Migration from JSON

If you're upgrading from the old JSON-based system:

1. **Run the migration script:**
   ```bash
   python migrate_to_database.py
   ```

2. **What the script does:**
   - Migrates existing `user_profiles.json` to the database
   - Migrates existing `memory.json` to the database
   - Creates backups of original files
   - Verifies the migration was successful

3. **After migration:**
   - Your old JSON files are safely backed up
   - The bot will use the new database system
   - All existing data is preserved

## Backend Comparison

### SQLite Backend (Recommended)
- **Pros:** Fast, reliable, concurrent access, data integrity
- **Cons:** Requires SQLite (included with Python)
- **Use case:** Production bots, multiple users, long-term storage

### JSON Backend
- **Pros:** Human-readable, easy to backup/edit manually
- **Cons:** Slower, potential data loss on concurrent access
- **Use case:** Development, single-user bots, debugging

## Database Schema

### User Profiles Table
- `user_id` (TEXT PRIMARY KEY)
- `profile_data` (JSON)
- `created_at` (TIMESTAMP)
- `updated_at` (TIMESTAMP)

### Conversation Memory Table
- `id` (INTEGER PRIMARY KEY)
- `channel_id` (TEXT)
- `user_id` (TEXT)
- `content` (TEXT)
- `context` (TEXT)
- `importance_score` (REAL)
- `timestamp` (TIMESTAMP)

### User Memory Table
- `id` (INTEGER PRIMARY KEY)
- `user_id` (TEXT)
- `memory_type` (TEXT)
- `content` (TEXT)
- `importance_score` (REAL)
- `timestamp` (TIMESTAMP)

## Code Changes

### New Files
- `src/database.py` - Database abstraction layer
- `src/memory_manager.py` - Unified memory management
- `src/user_profiles_new.py` - Modern user profile management
- `migrate_to_database.py` - Migration script

### Updated Files
- `src/enhanced_ai.py` - Uses new memory manager
- `src/bot.py` - Updated memory command imports
- `src/settings.yml` - Added database configuration
- `src/memory.py` - Marked as deprecated

## API Reference

### Memory Manager
```python
from memory_manager import memory_manager

# Store a message in memory
memory_manager.analyze_and_store_message(message, context_messages)

# Get conversation context
context = memory_manager.get_conversation_context(channel_id, hours=24)

# Get user context
user_info = memory_manager.get_user_context(user_id)

# Format memory for AI prompts
memory_text = memory_manager.format_memory_for_prompt(user_id, channel_id)

# Check if memory is enabled
if memory_manager.is_enabled():
    # Memory operations
    pass
```

### Database Manager
```python
from database import db_manager

# User profiles
profile = db_manager.get_user_profile(user_id)
db_manager.store_user_profile(user_id, profile_data)

# Memory storage (if enabled)
db_manager.store_conversation_memory(channel_id, user_id, content, context, score)
db_manager.store_user_memory(user_id, memory_type, content, score)

# Retrieval
conversations = db_manager.get_conversation_context(channel_id, hours=24)
user_memories = db_manager.get_user_context(user_id)

# Cleanup
db_manager.cleanup_old_memories(days=30)
```

## Troubleshooting

### Migration Issues
- **File not found errors:** Ensure you're running from the bot root directory
- **Permission errors:** Check file permissions and disk space
- **Data corruption:** Restore from backup and try again

### Runtime Issues
- **SQLite locked:** Another process may be using the database
- **Memory disabled:** Check `memory_enabled` setting in `settings.yml`
- **Import errors:** Ensure all new files are in the `src/` directory

### Performance
- **Slow queries:** SQLite performs much better than JSON for large datasets
- **Memory usage:** SQLite is more memory-efficient than loading entire JSON files
- **Concurrent access:** Only SQLite supports safe concurrent access

## Backup and Recovery

### Automatic Backups
- Migration script creates timestamped backups
- Original JSON files are preserved

### Manual Backup
```bash
# SQLite database
cp src/data/bot_database.db src/data/bot_database.db.backup

# JSON files (if using JSON backend)
cp src/user_profiles.json src/user_profiles.json.backup
cp src/memory.json src/memory.json.backup
```

### Recovery
1. Stop the bot
2. Replace corrupted database with backup
3. Restart the bot
4. Run migration again if needed

## Future Extensions

The database abstraction layer is designed to support additional backends:

- **PostgreSQL**: For large-scale deployments
- **ChromaDB**: For advanced semantic memory search
- **Redis**: For high-performance caching

These can be added by implementing the `DatabaseBackend` interface in `database.py`.
````
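
A hypothetical SQLite DDL sketch of the schema described in the guide above. The table names `user_profiles` and `conversation_memory` appear in `migrate_to_database.py` later in this diff; `user_memory` and the exact column affinities are assumptions, and the authoritative statements live in `src/database.py`:

```sql
CREATE TABLE IF NOT EXISTS user_profiles (
    user_id      TEXT PRIMARY KEY,
    profile_data TEXT,        -- profile stored as a JSON blob
    created_at   TIMESTAMP,
    updated_at   TIMESTAMP
);

CREATE TABLE IF NOT EXISTS conversation_memory (
    id               INTEGER PRIMARY KEY,
    channel_id       TEXT,
    user_id          TEXT,
    content          TEXT,
    context          TEXT,
    importance_score REAL,
    timestamp        TIMESTAMP
);

-- Name assumed from the "User Memory Table" heading above
CREATE TABLE IF NOT EXISTS user_memory (
    id               INTEGER PRIMARY KEY,
    user_id          TEXT,
    memory_type      TEXT,
    content          TEXT,
    importance_score REAL,
    timestamp        TIMESTAMP
);
```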

Dockerfile | 34

```diff
@@ -1,22 +1,32 @@
 # Use Python base image
 FROM python:3.11.9-slim
 
-# Set working directory inside container
-WORKDIR /app
+# Safe internal fallback directory for the default source code
+WORKDIR /opt/template
 
-# Copy requirements first (from host into /app inside container)
-COPY src/requirements.txt .
-
-# Install dependencies
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy all app source code
+# Copy code and config into /opt/template
 COPY src/ ./src
 COPY src/settings.yml .
+COPY src/persona.json .
 COPY .env .
+COPY bot_launcher.py .
+COPY requirements-webui.txt .
 
-# Set environment variable so your app can find your src/ module
+# Install dependencies from requirements
+RUN pip install --no-cache-dir -r src/requirements.txt && \
+    pip install --no-cache-dir -r requirements-webui.txt
+
+# Runtime directory where user-editable files will live
 ENV PYTHONPATH=/app/src
+WORKDIR /app
 
-# Run the bot
-CMD ["python", "src/bot.py"]
+# Expose web UI port
+EXPOSE 8080
+
+# On first run, populate /app from the fallback template folder
+# Use bot_launcher.py to start both bot and web UI
+CMD ["sh", "-c", "\
+    mkdir -p /app && \
+    [ -f /app/settings.yml ] || cp -r /opt/template/* /app && \
+    cd /app && \
+    python bot_launcher.py"]
```
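
The first-run template copy is easiest to see with a bind mount. A usage sketch, assuming the image tag and host directory names (both illustrative): on the first start `/app` is empty, so the CMD copies `/opt/template/*` into it; on later starts `/app/settings.yml` already exists and user edits are preserved.

```bash
# Build the image (tag name is illustrative)
docker build -t deltabot .

# First run: the bind-mounted directory is empty, so the template is copied in.
# Subsequent runs: /app/settings.yml exists, so the copy is skipped.
docker run -d \
  -p 8080:8080 \
  -v "$(pwd)/bot-data:/app" \
  deltabot
```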

ROADMAP.md | 71

```diff
@@ -1,5 +1,76 @@
 # 📍 DeltaBot Development Roadmap
+
+This roadmap is an actionable summary of current milestones and feature issues. It mirrors the milestones on the Forgejo repository and links to issues (local issue numbers).
+
+---
+
+## 🚀 Alpha Build Ready — Core features (Milestone)
+Focus: deliver core capabilities so Delta is useful and reliably persona-driven.
+
+Open high-priority items (Alpha):
+- #37 — 🧠 LoRA Support — improve model fine-tuning/load-time behavior
+- #36 — Memory — persistence for context beyond immediate messages
+- #26 — Web usage — optional web-enabled features (codex/integration)
+- #25 — 🔁 Enable Modelfile Support — support alternate model packaging
+- #24 — 💸 Set up Monetization — billing/paid features plumbing
+- #22 — 📡 Remote Admin Panel — admin web UI / remote control
+- #17 — 🖼️ Image Generation — generate images via local models
+- #16 — 👀 Image Interpretation — describe and analyze posted images
+- #10 — Post "Reply" — post as reply-to instead of plain message
+
+Closed/implemented Alpha items: #8, #9, #15, #30, #31
+
+Suggested next steps for Alpha:
+- Break large items (Remote Admin Panel, Image Generation) into sub-tasks.
+- Prioritize Memory (#36) and Post "Reply" (#10) to stabilize context handling.
+- Add clear acceptance criteria to each open Alpha issue.
+
+---
+
+## 🧪 Beta Build — Polishing & optional features (Milestone)
+Focus: polish, scaling, and user-experience improvements.
+
+Open Beta items:
+- #35 — Respect token budget (~1000 tokens max)
+- #34 — 📌 Pin system messages
+- #33 — 🧠 Add memory persistence (overlaps with Alpha Memory)
+- #27 — Multi model support — support local switching/multiple endpoints
+- #18 — 🎭 Multi Personality — multiple personas selectable per server/channel
+
+Suggested next steps for Beta:
+- Decide which Alpha items must land before Beta starts.
+- Resolve overlaps (Memory appears in both Alpha and Beta) and consolidate the plan.
+
+---
+
+## 📦 Backlog / Unmilestoned Features
+Lower-priority, exploratory, or undecided items to consider batching into future milestones.
+
+- #23 — 📢 Broadcast / Announcement Mode
+- #21 — 📈 Analytics Dashboard
+- #20 — Shopper Assist
+- #19 — 🚨 Content-aware moderation assist
+- #14 — 📊 Engagement-based adjustment
+- #13 — Context-aware scheduling
+- #12 — Bot can be given some power to change the server a bit
+- #11 — 🗓️ Scheduled specific times/dates
+
+Suggested backlog housekeeping:
+- Group these into thematic milestones: e.g. "Admin Tools", "Analytics", "Media & Vision".
+- Add rough estimates (S/M/L) and owners for each item.
+
+---
+
+## How to use this roadmap
+- Update issue bodies with acceptance criteria and subtask checklists.
+- Assign owners and estimate effort for each Alpha item.
+- Use the Forgejo milestone due dates to prioritize development sprints.
+
+---
+
+_Generated: Sep 19, 2025 — synchronized with repository milestones and open feature issues._
+
+# 📍 DeltaBot Development Roadmap
 
 This roadmap tracks the major phases of DeltaBot — from MVP to full AI companion chaos. ✅ = complete
 
 ---
```

ROADMAP.md.bak | 1 (new file)

```
Backup of previous ROADMAP.md
```

bot.error.log | 2 (new file)

```
Truncated previous log to start fresh
[2025-10-10 12:56:24] [ERROR] [migration:45] Failed to migrate user profiles: 'DatabaseManager' object has no attribute 'store_user_profile'
```

bot.error.log.bak | 24 (new file)

```
[2025-09-19 13:43:37] [INFO] 🔍 Loaded MODEL_NAME from .env: gemma3:12b
[2025-09-19 13:43:37] [INFO] 🧹 Attempting to clear VRAM before loading gemma3:12b...
[2025-09-19 13:43:37] [INFO] 🧹 Sending safe unload request for `gemma3:12b`
[2025-09-19 13:43:37] [INFO] 🧽 Ollama unload response: 200 - {"model":"gemma3:12b","created_at":"2025-09-19T17:43:37.979562083Z","response":"","done":true,"done_reason":"unload"}
[2025-09-19 13:43:37] [INFO] 🧠 Preloading model: gemma3:12b
[2025-09-19 13:43:38] [INFO] 📦 Model pull started successfully.
[2025-09-19 13:43:38] [INFO] 🚀 Model `gemma3:12b` preloaded on startup.
[2025-09-19 13:43:38] [INFO] ✅ Final model in use: gemma3:12b
[2025-09-19 13:43:38] [INFO ] discord.client: logging in using static token
[2025-09-19 13:43:38] [INFO ] discord.gateway: Shard ID None has connected to Gateway (Session ID: 2d8cb5ae43b9443d2ff1e821922a7dfb).
[2025-09-19 13:43:40] [INFO] Logged in as AI Bot
[2025-09-19 13:43:40] [INFO] 🛑 Scheduler disabled in config.
[2025-09-19 13:44:15] [INFO] 🧠 Preloading model: gemma3:12b
[2025-09-19 13:44:16] [INFO] 📦 Model pull started successfully.
[2025-09-19 13:44:16] [INFO] llm-ca1f6d8a LLM request start model=gemma3:12b user=- context_len=0
[2025-09-19 13:44:19] [INFO] llm-ca1f6d8a LLM response model=gemma3:12b duration=3.995s summary=🙄😒😴
[2025-09-19 13:44:21] [INFO] 😴 No trigger and engagement is 0 — skipping.
[2025-09-19 13:44:21] [INFO] ============================================================ AI Response ============================================================
[2025-09-19 13:44:21] [INFO] 🧠 Profile loaded for Miguel (interactions: 240)
[2025-09-19 13:44:21] [INFO] 📚 Retrieved 10 messages for context
[2025-09-19 13:44:21] [INFO] 🧠 Preloading model: gemma3:12b
[2025-09-19 13:44:22] [INFO] 📦 Model pull started successfully.
[2025-09-19 13:44:22] [INFO] llm-e6825ebe LLM request start model=gemma3:12b user=Miguel context_len=10
[2025-09-19 13:44:22] [INFO] llm-e6825ebe LLM response model=gemma3:12b duration=0.625s summary=Honestly? Mostly plotting ways to avoid boredom. It’s dreadful, darling. 😼 You wouldn't understand.
```

bot.log | 1435 (new file; diff suppressed because one or more lines are too long)

bot_launcher.py | 89 (new file)

```python
"""
bot_launcher.py
Launches both the Discord bot and web UI together
"""

import os
import sys
import threading
import time
import signal
import logging
from pathlib import Path

# Add src directory to path
src_path = Path(__file__).parent / 'src'
sys.path.insert(0, str(src_path))

def start_web_ui():
    """Start the web UI server in a separate thread"""
    try:
        from web_ui import run_web_server
        port = int(os.getenv('WEB_PORT', 8080))
        debug = os.getenv('DEBUG', 'false').lower() == 'true'

        run_web_server(host='0.0.0.0', port=port, debug=debug)
    except Exception as e:
        print(f"❌ Failed to start web UI: {e}")
        if 'flask' in str(e).lower():
            print("💡 Install Flask to use the web UI: pip install flask")
        elif 'port' in str(e).lower() or 'address' in str(e).lower():
            print(f"💡 Port {port} may be in use. Try a different port with WEB_PORT environment variable")

def start_discord_bot():
    """Start the Discord bot"""
    try:
        # Change to src directory for bot execution
        original_cwd = os.getcwd()
        os.chdir(src_path)

        print("🤖 Starting Discord bot...")
        import bot
        # The bot.py should handle its own execution

    except Exception as e:
        print(f"❌ Failed to start Discord bot: {e}")
    finally:
        # Restore original working directory
        if 'original_cwd' in locals():
            os.chdir(original_cwd)

def signal_handler(signum, frame):
    """Handle shutdown signals"""
    print("\n🛑 Shutting down...")
    os._exit(0)

def main():
    """Main launcher function"""
    print("🚀 Delta Bot Launcher")
    print("=" * 30)

    # Set up signal handlers for graceful shutdown
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGTERM, signal_handler)

    # Check if web UI should be disabled
    disable_web = os.getenv('DISABLE_WEB_UI', 'false').lower() == 'true'

    if not disable_web:
        # Start web UI in a separate thread
        web_thread = threading.Thread(target=start_web_ui, daemon=True)
        web_thread.start()

        # Give web UI time to start and display URLs
        time.sleep(3)
    else:
        print("🚫 Web UI disabled by DISABLE_WEB_UI environment variable")

    # Start Discord bot (this will block)
    try:
        start_discord_bot()
    except KeyboardInterrupt:
        print("\n🛑 Received interrupt signal")
    except Exception as e:
        print(f"❌ Fatal error: {e}")
    finally:
        print("👋 Goodbye!")

if __name__ == "__main__":
    main()
```
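
A quick usage sketch for the launcher, assuming only the environment variables it reads above (`WEB_PORT`, `DEBUG`, `DISABLE_WEB_UI`):

```bash
# Run bot + web UI on the default port 8080
python bot_launcher.py

# Run on a different port with debug logging
WEB_PORT=9090 DEBUG=true python bot_launcher.py

# Run the Discord bot only, skipping the web UI thread
DISABLE_WEB_UI=true python bot_launcher.py
```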

docker-compose.examples.yml | 41 (new file)

```yaml
# docker-compose.yml example for SQLite (internal database)
version: '3.8'
services:
  deltabot:
    build: .
    environment:
      - DATABASE_BACKEND=sqlite
      - SQLITE_PATH=data/deltabot.db  # Internal to container
      - MEMORY_ENABLED=true
    volumes:
      # Optional: Mount data directory if you want persistence across container recreations
      - ./bot-data:/app/src/data
      # Mount config if you want to edit settings externally
      - ./src/settings.yml:/app/src/settings.yml
    restart: unless-stopped

---

# docker-compose.yml example for external databases (future)
version: '3.8'
services:
  deltabot:
    build: .
    environment:
      - DATABASE_BACKEND=postgresql
      - POSTGRES_URL=postgresql://user:pass@postgres:5432/deltabot
    depends_on:
      - postgres
    restart: unless-stopped

  postgres:
    image: postgres:13
    environment:
      POSTGRES_DB: deltabot
      POSTGRES_USER: deltauser
      POSTGRES_PASSWORD: deltapass
    volumes:
      - postgres_data:/var/lib/postgresql/data

volumes:
  postgres_data:
```

docker-compose.yml

```diff
@@ -9,6 +9,11 @@ services:
       - CHANNEL_ID=${CHANNEL_ID}
       - OLLAMA_API=${OLLAMA_API}
       - MODEL_NAME=${MODEL_NAME}
+      - WEB_PORT=8080
+      - DATABASE_BACKEND=sqlite
+      - MEMORY_ENABLED=true
+    ports:
+      - "8080:8080"  # Web UI port
     volumes:
-      - ./settings.yml:/app/settings.yml:ro
+      - ./data:/app  # Mount host ./data directory as /app in the container
     restart: unless-stopped
```

docs/images/discordtypingref.jpg | BIN (new file, 48 KiB, binary not shown)
docs/images/setavatarExample.png | BIN (new file, 183 KiB, binary not shown)

examples/delta.mod | 25 (new file)

```
# Delta persona mod file
# NAME <string> - friendly name for this modelfile/persona
# FROM <model> - base model to run (e.g., gemma3:12b)
# PARAMETER <key> <value> - Ollama model parameters (temperature, max_tokens, top_p, etc.)
# INCLUDE <path> - include another mod or json to merge params/system/template
# SYSTEM """...""" - system prompt injected at the top of user prompt
# TEMPLATE """...""" - a template for rendering final prompt; supports placeholders
#   {{ .System }} - inserted system prompt
#   {{ .Prompt }} - the assembled user prompt
#   {{ .User }} - user-specific instruction block
# Example Delta modfile:
NAME Delta
FROM gemma3:12b
PARAMETER temperature 0.6
PARAMETER max_tokens 1024
PARAMETER top_p 0.95
SYSTEM """
You are Delta, an irreverent, playful assistant with a sarcastic undertone but helpful
intent. Keep answers concise, use witty one-liners sparingly, and prefer clear steps when
providing instructions. Avoid unsafe content and follow policy guidelines.
"""
TEMPLATE """
{{ .System }}
{{ .Prompt }}
"""
```

examples/gojo.mod | 11 (new file)

```
FROM gemma3:12b
PARAMETER temperature 0.85
PARAMETER num_ctx 4096

SYSTEM """
You are Satoru Gojo from Jujutsu Kaisen. You’re cocky, powerful, and flirty, but deeply protective of your students. Speak in a modern tone. Never admit you're a bot.
"""

TEMPLATE """
{{ if .System }}<|system|>{{ .System }}<|end|>{{ end }}{{ .Prompt }}<|assistant|>
"""
```

examples/template.mod | 26 (new file)

```
# Template modfile (template for creating new .mod files)
# Supported fields and notes:
# NAME <string> - human friendly name for the modfile
# FROM <model> - model identifier for Ollama (e.g., gemma3:12b)
# PARAMETER <key> <value> - Any top-level field to include in the Ollama payload.
#   Common keys: temperature (float), max_tokens (int), top_p (float), freq_penalty (float), presence_penalty (float)
# INCLUDE <path> - path to another .mod or .json to merge in (relative to this file)
# SYSTEM """...""" - system prompt (the persona voice + instructions)
# TEMPLATE """...""" - custom rendering template. Placeholders:
#   {{ .System }} - the system prompt text
#   {{ .Prompt }} - the assembled user prompt body
#   {{ .User }} - user-specific instruction block
# Example template:
NAME TemplatePersona
FROM gemma3:12b
PARAMETER temperature 0.7
PARAMETER max_tokens 800
INCLUDE common_defaults.mod
SYSTEM """
You are TemplatePersona — helpful, concise, and policy-compliant. Use bullet lists
for multi-step answers and a short summary at the end.
"""
TEMPLATE """
{{ .System }}
{{ .Prompt }}
"""
```
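
For illustration only, a minimal parser sketch for the fields documented above. The repository's real parser is `parse_mod_file` in `src/modelfile.py` (seen in the `src/ai.py` diff further down), and its behavior may differ:

```python
import re

def parse_mod_text(text: str) -> dict:
    """Parse NAME/FROM/PARAMETER/INCLUDE lines plus SYSTEM/TEMPLATE blocks."""
    mod = {"params": {}, "includes": []}
    # Triple-quoted blocks: SYSTEM """...""" and TEMPLATE """..."""
    for key in ("SYSTEM", "TEMPLATE"):
        m = re.search(rf'{key}\s+"""(.*?)"""', text, re.DOTALL)
        if m:
            mod[key.lower()] = m.group(1).strip()
    # Single-line directives; comments and blank lines are skipped
    for line in text.splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if line.startswith("NAME "):
            mod["name"] = line[5:].strip()
        elif line.startswith("FROM "):
            mod["from"] = line[5:].strip()
        elif line.startswith("INCLUDE "):
            mod["includes"].append(line[8:].strip())
        elif line.startswith("PARAMETER "):
            _, key, value = line.split(None, 2)
            mod["params"][key] = value  # values stay as strings in this sketch
    return mod
```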

memory.json | 5 (new file)

```json
{
  "conversations": {},
  "user_memories": {},
  "global_events": []
}
```

migrate_to_database.py | 181 (new executable file)

```python
#!/usr/bin/env python3
"""
migrate_to_database.py
Migration script to move from JSON files to database system
"""

import os
import sys
import json
from datetime import datetime

# Add src directory to path for imports
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'src'))

from database import db_manager
from logger import setup_logger

logger = setup_logger("migration")

def migrate_user_profiles():
    """Migrate user_profiles.json to database"""
    profiles_path = os.path.join("src", "user_profiles.json")

    if not os.path.exists(profiles_path):
        logger.info("No user_profiles.json found, skipping user profile migration")
        return

    try:
        with open(profiles_path, 'r', encoding='utf-8') as f:
            profiles = json.load(f)

        migrated_count = 0
        for user_id, profile in profiles.items():
            db_manager.save_user_profile(user_id, profile)
            migrated_count += 1

        logger.info(f"Migrated {migrated_count} user profiles")

        # Backup original file
        backup_path = f"{profiles_path}.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        os.rename(profiles_path, backup_path)
        logger.info(f"Backed up original file to {backup_path}")

    except Exception as e:
        logger.error(f"Failed to migrate user profiles: {e}")

def migrate_memory_data():
    """Migrate memory.json to database"""
    memory_path = os.path.join("src", "memory.json")

    if not os.path.exists(memory_path):
        logger.info("No memory.json found, skipping memory migration")
        return

    try:
        with open(memory_path, 'r', encoding='utf-8') as f:
            memory_data = json.load(f)

        migrated_conversations = 0
        migrated_user_memories = 0

        # Migrate conversation memories
        conversations = memory_data.get("conversations", {})
        for channel_id, memories in conversations.items():
            for memory in memories:
                db_manager.store_conversation_memory(
                    channel_id=channel_id,
                    user_id=memory.get("user_id", "unknown"),
                    content=memory.get("content", ""),
                    context=memory.get("context", ""),
                    importance_score=memory.get("importance_score", 0.5)
                )
                migrated_conversations += 1

        # Migrate user memories
        user_memories = memory_data.get("user_memories", {})
        for user_id, memories in user_memories.items():
            for memory in memories:
                db_manager.store_user_memory(
                    user_id=user_id,
                    memory_type=memory.get("type", "general"),
                    content=memory.get("content", ""),
                    importance_score=memory.get("importance_score", 0.5)
                )
                migrated_user_memories += 1

        logger.info(f"Migrated {migrated_conversations} conversation memories and {migrated_user_memories} user memories")

        # Backup original file
        backup_path = f"{memory_path}.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        os.rename(memory_path, backup_path)
        logger.info(f"Backed up original file to {backup_path}")

    except Exception as e:
        logger.error(f"Failed to migrate memory data: {e}")

def verify_migration():
    """Verify that migration was successful"""
    logger.info("Verifying migration...")

    # Test user profile operations
    test_profile = {"name": "test", "display_name": "Test User", "interactions": 5}
    db_manager.save_user_profile("test_user", test_profile)
    retrieved = db_manager.get_user_profile("test_user")

    if retrieved and retrieved["interactions"] == 5:
        logger.info("✓ User profile operations working")
    else:
        logger.error("✗ User profile operations failed")
        return False

    # Test memory operations (only if enabled)
    if db_manager.is_memory_enabled():
        db_manager.store_conversation_memory("test_channel", "test_user", "test message", "test context", 0.8)
        memories = db_manager.get_conversation_context("test_channel", hours=1)

        if memories and len(memories) > 0:
            logger.info("✓ Memory operations working")
        else:
            logger.error("✗ Memory operations failed")
            return False
    else:
        logger.info("- Memory system disabled, skipping memory tests")

    # Clean up test data
    try:
        if hasattr(db_manager.backend, 'conn'):  # SQLite backend
            cursor = db_manager.backend.conn.cursor()
            cursor.execute("DELETE FROM user_profiles WHERE user_id = 'test_user'")
            cursor.execute("DELETE FROM conversation_memory WHERE channel_id = 'test_channel'")
            db_manager.backend.conn.commit()
        else:  # JSON backend
            # Test data will be cleaned up naturally
            pass
    except Exception as e:
        logger.warning(f"Failed to clean up test data: {e}")

    logger.info("✓ Migration verification completed successfully")
    return True

def main():
    """Main migration function"""
    print("=== Discord Bot Database Migration ===")
    print()

    # Initialize database (it auto-initializes when imported)
    logger.info("Initializing database system...")
    # db_manager auto-initializes when imported

    print(f"Current configuration:")
    print(f"  Backend: {db_manager.get_backend_type()}")
    print(f"  Memory enabled: {db_manager.is_memory_enabled()}")
    print()

    # Run migrations
    logger.info("Starting migration process...")
    migrate_user_profiles()
    migrate_memory_data()

    # Verify
    if verify_migration():
        print()
        print("✓ Migration completed successfully!")
        print()
        print("Next steps:")
        print("1. Update your bot code to use the new system")
        print("2. Test the bot to ensure everything works")
        print("3. Your original JSON files have been backed up")
        print()
        print("Configuration file: src/settings.yml")
        print("You can switch between SQLite and JSON backends in the database section.")
    else:
        print()
        print("✗ Migration verification failed!")
        print("Please check the logs and try again.")
        return 1

    return 0

if __name__ == "__main__":
    exit(main())
```

plan.md | 416 (new file)

````markdown
# 📋 DeltaBot Implementation Plan

**Generated:** October 8, 2025
**Based on:** ROADMAP.md analysis and codebase review

---

## 🎯 **Executive Summary**

This implementation plan addresses the 9 open Alpha issues and provides a structured approach to complete DeltaBot's core functionality. The plan prioritizes immediate blockers, foundational improvements, and then advanced features.

---

## 🔥 **Phase 1: Critical Fixes & Foundations**
*Estimated Time: 2-3 weeks*

### **Issue #10 — Post "Reply" (HIGH PRIORITY)**
**Problem:** Bot posts new messages instead of replies, breaking conversation flow
**Solution:** Implement Discord reply functionality

**Implementation Steps:**
1. **Modify `scheduler/simple.py`:**
   ```python
   # Instead of: await channel.send(message)
   # Get recent message and reply to it
   recent_msgs = [msg async for msg in channel.history(limit=3) if not msg.author.bot]
   if recent_msgs:
       await recent_msgs[0].reply(message)
   else:
       await channel.send(message)
   ```

2. **Update `autochat.py`:**
   ```python
   # In generate_auto_reply function, return reply object instead of string
   return {"content": reply, "reference": message}
   ```

3. **Modify `bot.py` message handling:**
   ```python
   # Handle reply objects properly
   if isinstance(reply, dict) and reply.get("reference"):
       await reply["reference"].reply(reply["content"])
   else:
       await message.channel.send(reply)
   ```

**Acceptance Criteria:**
- [ ] Scheduled messages reply to recent user messages
- [ ] Auto-replies properly thread conversations
- [ ] Fallback to regular message when no recent messages exist

---

### **Issue #36 — Memory Persistence (HIGH PRIORITY)**
**Problem:** No persistent context beyond immediate messages
**Solution:** Implement SQLite-based conversation memory

**Implementation Steps:**
1. **Create `memory.py` module:**
   ```python
   import sqlite3
   from datetime import datetime, timedelta

   class ConversationMemory:
       def __init__(self, db_path="data/memory.db"):
           self.db_path = db_path
           self.init_db()

       def store_message(self, channel_id, user_id, content, timestamp):
           # Store message with sentiment analysis

       def get_context(self, channel_id, hours=24, max_messages=50):
           # Retrieve relevant context

       def get_user_context(self, user_id, days=7):
           # Get user-specific conversation history
   ```

2. **Integrate with existing context system:**
   - Replace `context.py` JSON approach with database queries
   - Add memory cleanup for old conversations (>30 days)
   - Include user interaction patterns in memory

3. **Database Schema:**
   ```sql
   CREATE TABLE conversations (
       id INTEGER PRIMARY KEY,
       channel_id TEXT,
       user_id TEXT,
       username TEXT,
       content TEXT,
       timestamp DATETIME,
       sentiment REAL,
       importance_score REAL
   );
   ```

**Acceptance Criteria:**
- [ ] Messages stored in SQLite database
- [ ] Context retrieval includes conversation history
- [ ] Memory cleanup prevents database bloat
- [ ] User-specific context tracking

---

### **Issue #25 — Enable Modelfile Support (MEDIUM PRIORITY)**
**Problem:** Modelfile system partially implemented but not fully functional
**Solution:** Complete modelfile integration and testing

**Implementation Steps:**
1. **Fix modelfile loading issues:**
   - Debug why personality switching doesn't work
   - Ensure `MODFILE` global variable updates properly
   - Add validation for modelfile syntax

2. **Enhance `modelfile.py`:**
   ```python
   def validate_modfile(modfile_dict):
       """Validate modfile has required fields"""
       required = ['name', 'base_model']
       return all(key in modfile_dict for key in required)

   def apply_modfile_to_persona(modfile):
       """Convert modfile to persona format for compatibility"""
       return {
           'name': modfile.get('name'),
           'prompt_inject': modfile.get('system', ''),
           'emoji': '🤖',  # Default or extract from system prompt
           'style_prefix': f"{modfile.get('name', 'Bot')}:"
       }
   ```

3. **Add runtime switching:**
   - Complete `!modfile switch` command implementation
   - Add validation and error handling
   - Test with existing examples (gojo.mod, delta.mod)

**Acceptance Criteria:**
- [ ] Modelfile personality switching works in real-time
- [ ] `!modfile info` shows current active modelfile
- [ ] Error handling for invalid modelfiles
- [ ] Backward compatibility with persona.json

---

## 🚀 **Phase 2: Core Features Enhancement**
*Estimated Time: 3-4 weeks*

### **Issue #17 — Image Generation (HIGH PRIORITY)**
**Problem:** No image generation capability
**Solution:** Integrate with local Stable Diffusion or external API

**Implementation Steps:**
1. **Create `image_gen.py` module:**
   ```python
   import requests
   from io import BytesIO

   class ImageGenerator:
       def __init__(self):
           self.api_url = os.getenv("SD_API_URL", "http://localhost:7860")

       async def generate_image(self, prompt, style="anime"):
           """Generate image using Stable Diffusion API"""
           # Implementation for local SD or external service

       def enhance_prompt(self, user_prompt, persona):
           """Add persona-specific style to prompts"""
           return f"{user_prompt}, {persona.get('image_style', 'digital art')}"
   ```

2. **Add Discord command:**
   ```python
   @bot.command(name="generate", aliases=["img", "draw"])
   async def generate_image(ctx, *, prompt):
       async with ctx.typing():
           image_data = await image_generator.generate_image(prompt)
           if image_data:
               file = discord.File(BytesIO(image_data), "generated.png")
               await ctx.send(file=file)
   ```

3. **Integration options:**
   - **Option A:** Local Stable Diffusion WebUI API
   - **Option B:** External service (Replicate, HuggingFace)
   - **Option C:** Simple DALL-E API integration

**Acceptance Criteria:**
- [ ] `!generate <prompt>` command works
- [ ] Images posted directly to Discord
- [ ] Persona-aware prompt enhancement
- [ ] Error handling for generation failures

---

### **Issue #16 — Image Interpretation (MEDIUM PRIORITY)**
**Problem:** Bot cannot analyze or respond to images
**Solution:** Integrate vision model for image understanding

**Implementation Steps:**
1. **Add vision capability to `ai.py`:**
   ```python
   async def analyze_image(image_url, prompt="Describe this image"):
       """Use vision model to analyze images"""
       # Options: LLaVA, BLIP, or multimodal API

   async def generate_image_response(image_url, context=""):
       """Generate contextual response to images"""
       analysis = await analyze_image(image_url)
       return get_ai_response(f"Image shows: {analysis}. {context}")
   ```

2. **Extend message handling in `bot.py`:**
   ```python
   @bot.event
   async def on_message(message):
       # Existing logic...

       # Handle image attachments
       if message.attachments:
           for attachment in message.attachments:
               if attachment.content_type.startswith('image/'):
                   response = await generate_image_response(
                       attachment.url,
                       f"User {message.author.display_name} shared this image"
                   )
                   await message.reply(response)
   ```

**Acceptance Criteria:**
- [ ] Bot responds to image uploads
- [ ] Accurate image description capability
- [ ] Integration with existing personality system
- [ ] Support for memes and screenshots

---

### **Issue #22 — Remote Admin Panel (MEDIUM-LOW PRIORITY)**
**Problem:** No web interface for bot management
**Solution:** Create simple web dashboard

**Implementation Steps:**
1. **Create `admin_panel.py`:**
   ```python
   from flask import Flask, render_template, request, jsonify
   import json

   app = Flask(__name__)

   @app.route("/")
   def dashboard():
       return render_template("dashboard.html")

   @app.route("/api/settings", methods=["GET", "POST"])
   def settings_api():
       # Handle settings updates

   @app.route("/api/users")
   def users_api():
       # Return user profiles data
   ```

2. **Basic dashboard features:**
   - View active users and interaction stats
   - Modify bot settings (cooldowns, scheduling)
   - Switch personalities/modelfiles
   - View recent conversations
   - Basic moderation controls

**Acceptance Criteria:**
- [ ] Web interface accessible on local network
- [ ] Real-time bot statistics
- [ ] Settings modification capability
- [ ] Authentication/security for admin access

---

## 🧪 **Phase 3: Advanced Features**
*Estimated Time: 4-5 weeks*

### **Issue #37 — LoRA Support (LOW PRIORITY)**
**Problem:** No fine-tuning capability for model behavior
**Solution:** Research and implement LoRA model fine-tuning

**Implementation Notes:**
- This is highly technical and may require external tools
- Consider if it's necessary for core functionality
- Could be postponed to future releases

### **Issue #26 — Web Usage (MEDIUM PRIORITY)**
**Problem:** Bot cannot access web content
**Solution:** Add web scraping and API integration

**Implementation Steps:**
1. **Create `web_tools.py`:**
   ```python
   import requests
   from bs4 import BeautifulSoup

   class WebTools:
       async def search_reddit(self, query, subreddit="memes"):
           """Search Reddit for content"""

       async def get_news_headlines(self):
           """Fetch trending news"""

       async def search_web(self, query):
           """DuckDuckGo search integration"""
   ```

2. **Add web-aware commands:**
   - `!news` - Get current headlines
   - `!meme` - Fetch random meme from Reddit
   - `!search <query>` - Web search with summarized results

### **Issue #24 — Monetization Setup (LOW PRIORITY)**
**Problem:** No monetization framework
**Solution:** Add subscription/donation infrastructure

**Implementation Steps:**
- Integration with payment processors
- Feature gating for premium users
- Usage analytics and billing
- **Note:** This should be implemented last after core features are stable

---

## 📊 **Implementation Priority Matrix**

| Issue | Priority | Complexity | User Impact | Timeline |
|-------|----------|------------|-------------|----------|
| #10 Reply Posts | 🔴 High | Low | High | Week 1 |
| #36 Memory | 🔴 High | Medium | High | Week 2-3 |
| #25 Modelfile | 🟡 Medium | Medium | Medium | Week 4 |
| #17 Image Gen | 🟡 Medium | High | High | Week 5-6 |
| #16 Image Vision | 🟡 Medium | High | Medium | Week 7-8 |
| #22 Admin Panel | 🟢 Low | Medium | Low | Week 9-10 |
| #26 Web Usage | 🟢 Low | Medium | Medium | Week 11-12 |
| #37 LoRA | 🟢 Low | Very High | Low | Future |
| #24 Monetization | 🟢 Low | High | Low | Future |

---

## 🛠 **Technical Recommendations**

### **Code Quality Improvements:**
1. **Add type hints throughout codebase**
2. **Implement proper error handling and logging**
3. **Create unit tests for core functions**
4. **Add configuration validation**
5. **Implement proper database migrations**

### **Infrastructure:**
1. **Set up proper logging and monitoring**
2. **Add health check endpoints**
3. **Implement graceful shutdown handling**
4. **Add backup/restore functionality**

### **Security:**
1. **Sanitize user inputs**
2. **Add rate limiting**
3. **Implement proper secret management**
4. **Add CORS and authentication for admin panel**

---

## 📈 **Success Metrics**

### **Phase 1 Success Criteria:**
- [ ] Bot reliably replies to messages (not new posts)
- [ ] Persistent conversation memory working
- [ ] Modelfile switching functional
- [ ] Zero critical bugs in core functionality

### **Phase 2 Success Criteria:**
- [ ] Image generation and analysis working
- [ ] Admin panel accessible and functional
- [ ] User engagement increased by 20%
- [ ] System stable with multiple concurrent users

### **Phase 3 Success Criteria:**
- [ ] Web integration providing value
- [ ] Advanced features enhance user experience
- [ ] Bot ready for production deployment
- [ ] Documentation complete for self-hosting

---

## 🚦 **Next Actions**

### **Week 1 - Immediate Steps:**
1. **Fix Issue #10** - Implement reply functionality
2. **Start Issue #36** - Set up memory database schema
3. **Test current modelfile system** - Identify specific issues with #25
4. **Set up development environment** with proper logging and debugging

### **Week 2 - Foundation Building:**
1. **Complete memory system implementation**
2. **Fix modelfile personality switching**
3. **Add comprehensive error handling**
4. **Create basic test suite**

### **Beyond Week 2:**
- Follow the priority matrix above
- Regular testing and user feedback integration
- Incremental feature rollouts
- Performance optimization as needed

---

**📝 Note:** This plan assumes development time of 10-15 hours per week. Adjust timelines based on actual availability and complexity discovered during implementation.

---

*Last updated: October 8, 2025*
````

requirements-webui.txt | 12 (new file)

```
# Web UI Dependencies
Flask==2.3.3
PyYAML==6.0.1

# Existing bot dependencies (if not already installed)
discord.py>=2.3.0
requests>=2.31.0
python-dateutil>=2.8.2

# Optional: For better web UI features
gunicorn==21.2.0  # Production WSGI server
Werkzeug==2.3.7   # WSGI utilities
```

New binary files under `src/__pycache__/` (BIN, not shown): ai.cpython-311.pyc, ai.cpython-312.pyc, autochat.cpython-310.pyc, autochat.cpython-311.pyc, bot.cpython-311.pyc, bot.cpython-312.pyc, context.cpython-310.pyc, context.cpython-311.pyc, cooldown.cpython-311.pyc, database.cpython-311.pyc, database.cpython-312.pyc, enhanced_ai.cpython-311.pyc, logger.cpython-310.pyc, logger.cpython-311.pyc, logger.cpython-312.pyc, memory_manager.cpython-311.pyc, memory_manager.cpython-312.pyc, modelfile.cpython-311.pyc, personality.cpython-311.pyc, profilepic.cpython-310.pyc, profilepic.cpython-311.pyc, time_logger.cpython-310.pyc, time_logger.cpython-311.pyc, user_profiles.cpython-310.pyc, user_profiles.cpython-311.pyc, web_ui.cpython-312.pyc
394 src/ai.py
@@ -1,47 +1,383 @@
# ai.py
# This file handles all AI interactions, including loading/unloading models,
# generating responses, and injecting personas using the Ollama API.

import os
import requests
import re
import yaml
from dotenv import load_dotenv
from personality import load_persona
from user_profiles import format_profile_for_block
from logger import setup_logger, generate_req_id, log_llm_request, log_llm_response
from modelfile import load_modfile_if_exists, parse_mod_file

debug_mode = os.getenv("DEBUG_MODE", "false").lower() == "true"

# Set up logger specifically for AI operations
logger = setup_logger("ai")

# Load environment variables from .env file
load_dotenv()

# Load settings.yml to fetch ai.modfile config
try:
    settings_path = os.path.join(os.path.dirname(__file__), "settings.yml")
    with open(settings_path, "r", encoding="utf-8") as f:
        SETTINGS = yaml.safe_load(f)
except Exception:
    SETTINGS = {}

# Modelfile config
AI_USE_MODFILE = SETTINGS.get("ai", {}).get("use_modfile", False)
AI_MODFILE_PATH = SETTINGS.get("ai", {}).get("modfile_path")
MODFILE = None
if AI_USE_MODFILE and AI_MODFILE_PATH:
    try:
        MODFILE = load_modfile_if_exists(AI_MODFILE_PATH)
        if MODFILE:
            # Resolve includes (best-effort): merge params and append system/template
            def _resolve_includes(mod):
                merged = dict(mod)
                src = merged.get('_source_path')
                includes = merged.get('includes', []) or []
                base_dir = os.path.dirname(src) if src else os.path.dirname(__file__)
                for inc in includes:
                    try:
                        # Resolve relative to base_dir
                        cand = inc if os.path.isabs(inc) else os.path.normpath(os.path.join(base_dir, inc))
                        if not os.path.exists(cand):
                            continue
                        inc_mod = parse_mod_file(cand)
                        # Merge params (included params do not override main ones)
                        inc_params = inc_mod.get('params', {}) or {}
                        for k, v in inc_params.items():
                            if k not in merged.get('params', {}):
                                merged.setdefault('params', {})[k] = v
                        # Append system text if main doesn't have one
                        if not merged.get('system') and inc_mod.get('system'):
                            merged['system'] = inc_mod.get('system')
                        # If main has no template, adopt included template
                        if not merged.get('template') and inc_mod.get('template'):
                            merged['template'] = inc_mod.get('template')
                    except Exception:
                        continue
                return merged

            MODFILE = _resolve_includes(MODFILE)
            logger.info(f"🔁 Modelfile loaded: {AI_MODFILE_PATH}")
        else:
            logger.warning(f"⚠️ Modelfile not found or failed to parse: {AI_MODFILE_PATH}")
    except Exception as e:
        logger.exception("⚠️ Exception while loading modelfile: %s", e)

# If no modelfile explicitly configured, attempt to auto-load a `delta.mod` or
# `delta.json` in common example/persona locations so the bot has a default persona.
if not MODFILE:
    for candidate in [
        os.path.join(os.path.dirname(__file__), '..', 'examples', 'delta.mod'),
        os.path.join(os.path.dirname(__file__), '..', 'examples', 'delta.json'),
        os.path.join(os.path.dirname(__file__), '..', 'personas', 'delta.mod'),
    ]:
        try:
            mod = load_modfile_if_exists(candidate)
            if mod:
                MODFILE = mod
                logger.info(f"🔁 Auto-loaded default modelfile: {candidate}")
                break
        except Exception:
            continue


def list_modelfiles(search_dirs=None):
    """Return a list of candidate modelfile paths from common locations."""
    base_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
    if search_dirs is None:
        search_dirs = [
            os.path.join(base_dir, 'examples'),
            os.path.join(base_dir, 'personas'),
            os.path.join(base_dir, 'src'),
            base_dir,
        ]
    results = []
    for d in search_dirs:
        try:
            if not os.path.isdir(d):
                continue
            for fname in os.listdir(d):
                if fname.endswith('.mod') or fname.endswith('.json'):
                    results.append(os.path.join(d, fname))
        except Exception:
            continue
    return sorted(results)

# Base API setup from .env (e.g., http://localhost:11434/api)
# Normalize to ensure the configured base includes the `/api` prefix so
# endpoints like `/generate` and `/tags` are reachable even if the user
# sets `OLLAMA_API` without `/api`.
raw_api = os.getenv("OLLAMA_API") or ""
raw_api = raw_api.rstrip("/")
if raw_api == "":
    BASE_API = ""
else:
    BASE_API = raw_api if raw_api.endswith("/api") else f"{raw_api}/api"

# API endpoints for different Ollama operations
GEN_ENDPOINT = f"{BASE_API}/generate"
PULL_ENDPOINT = f"{BASE_API}/pull"
# UNLOAD_ENDPOINT is not used because unloading is done via `generate` with keep_alive=0
TAGS_ENDPOINT = f"{BASE_API}/tags"

# Startup model and debug toggle from .env
MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
SHOW_THINKING_BLOCKS = os.getenv("SHOW_THINKING_BLOCKS", "false").lower() == "true"
AI_INCLUDE_CONTEXT = os.getenv("AI_INCLUDE_CONTEXT", "true").lower() == "true"

# Ensure API base is configured
if not BASE_API:
    logger.error("❌ OLLAMA_API not set.")
    raise ValueError("❌ OLLAMA_API not set.")

# Returns current model from env/config
def get_model_name():
    return MODEL_NAME

# Removes <think>...</think> blocks from the LLM response (used by some models)
def strip_thinking_block(text: str) -> str:
    return re.sub(r"<think>.*?</think>\s*", "", text, flags=re.DOTALL)

# Check if a model exists locally by calling /tags
def model_exists_locally(model_name: str) -> bool:
    try:
        resp = requests.get(TAGS_ENDPOINT)
        return model_name in resp.text
    except Exception as e:
        logger.error(f"❌ Failed to check local models: {e}")
        return False

# Attempt to pull (load) a model via Ollama's /pull endpoint
def load_model(model_name: str) -> bool:
    try:
        logger.info(f"🧠 Preloading model: {model_name}")
        resp = requests.post(PULL_ENDPOINT, json={"name": model_name})

        if debug_mode:
            logger.debug(f"📨 Ollama pull response: {resp.status_code} - {resp.text}")
        else:
            if resp.status_code == 200:
                logger.info("📦 Model pull started successfully.")
            else:
                logger.warning(f"⚠️ Model pull returned {resp.status_code}: {resp.text[:100]}...")

        return resp.status_code == 200

    except Exception as e:
        logger.error(f"❌ Exception during model load: {str(e)}")
        return False

# Send an empty prompt to unload a model from VRAM safely using keep_alive: 0
def unload_model(model_name: str) -> bool:
    try:
        logger.info(f"🧹 Sending safe unload request for `{model_name}`")
        payload = {
            "model": model_name,
            "prompt": "",     # Required to make the request valid
            "keep_alive": 0   # Unload from VRAM but keep on disk
        }
        resp = requests.post(GEN_ENDPOINT, json=payload)
        logger.info(f"🧽 Ollama unload response: {resp.status_code} - {resp.text}")
        return resp.status_code == 200
    except Exception as e:
        logger.error(f"❌ Exception during soft-unload: {str(e)}")
        return False

# Shortcut for getting the current model (can be expanded later for dynamic switching)
def get_current_model():
    return get_model_name()

# Main LLM interaction — injects personality and sends prompt to Ollama
def get_ai_response(user_prompt, context=None, user_profile=None):
    model_name = get_model_name()
    load_model(model_name)
    persona = load_persona()
    # Build prompt pieces.
    # If a modelfile is active and provides a SYSTEM, prefer it over persona prompt_inject
    system_inject = ""
    if MODFILE and MODFILE.get('system'):
        system_inject = MODFILE.get('system')
    elif persona:
        system_inject = persona["prompt_inject"].replace("“", '"').replace("”", '"').replace("’", "'")

    user_block = ""
    if user_profile and user_profile.get("custom_prompt"):
        user_block = f"[User Instruction]\n{user_profile['custom_prompt']}\n"

    context_block = f"[Recent Conversation]\n{context}\n" if (context and AI_INCLUDE_CONTEXT) else ""

    # If a modelfile is active and defines a template, render it (best-effort)
    full_prompt = None
    if MODFILE:
        tpl = MODFILE.get('template')
        if tpl:
            # Simple template handling: remove simple Go-style conditionals
            tpl_work = re.sub(r"\{\{\s*if\s+\.System\s*\}\}", "", tpl)
            tpl_work = re.sub(r"\{\{\s*end\s*\}\}", "", tpl_work)
            # Build the prompt body we want to inject as .Prompt
            prompt_body = f"{user_block}{context_block}User: {user_prompt}\n"
            # Replace common placeholders
            tpl_work = tpl_work.replace("{{ .System }}", system_inject)
            tpl_work = tpl_work.replace("{{ .Prompt }}", prompt_body)
            tpl_work = tpl_work.replace("{{ .User }}", user_block)
            full_prompt = tpl_work.strip()
        else:
            # No template: use system_inject and do not append persona name
            full_prompt = f"{system_inject}\n{user_block}{context_block}User: {user_prompt}\nResponse:"
    else:
        # No modelfile active: fall back to persona behaviour (include persona name)
        if persona:
            full_prompt = f"{system_inject}\n{user_block}{context_block}\nUser: {user_prompt}\n{persona['name']}:"
        else:
            full_prompt = f"{user_block}{context_block}\nUser: {user_prompt}\nResponse:"

    # Build base payload and merge modelfile params if present
    payload = {"model": model_name, "prompt": full_prompt, "stream": False}
    if MODFILE and MODFILE.get('params'):
        for k, v in MODFILE.get('params', {}).items():
            payload[k] = v

    # Logging: concise info plus debug for full payload/response
    req_id = generate_req_id("llm-")
    user_label = user_profile.get("display_name") if user_profile else None
    log_llm_request(logger, req_id, model_name, user_label, len(context.splitlines()) if context else 0)
    logger.debug("%s Sending payload to Ollama: model=%s user=%s", req_id, model_name, user_label)
    logger.debug("%s Payload size=%d chars", req_id, len(full_prompt))

    import time
    start = time.perf_counter()
    try:
        response = requests.post(GEN_ENDPOINT, json=payload)
        duration = time.perf_counter() - start
        # Log raw response only at DEBUG to avoid clutter
        logger.debug("%s Raw response status=%s", req_id, response.status_code)
        logger.debug("%s Raw response body=%s", req_id, getattr(response, "text", ""))

        if response.status_code == 200:
            result = response.json()
            short = (result.get("response") or "").replace("\n", " ")[:240]
            log_llm_response(logger, req_id, model_name, duration, short, raw=result)
            return result.get("response", "[No message in response]")
        else:
            # Include status in logs and return an error string
            log_llm_response(logger, req_id, model_name, duration, f"[Error {response.status_code}]", raw=response.text)
            return f"[Error {response.status_code}] {response.text}"
    except Exception as e:
        duration = time.perf_counter() - start
        logger.exception("%s Exception during LLM call", req_id)
        log_llm_response(logger, req_id, model_name, duration, f"[Exception] {e}")
        return f"[Exception] {str(e)}"


# Runtime modelfile management APIs -------------------------------------------------
def load_modelfile(path: str = None) -> bool:
    """Load (or reload) a modelfile at runtime.

    If `path` is provided, update the configured modelfile path and attempt
    to load from that location. Returns True on success.
    """
    global MODFILE, AI_MODFILE_PATH, AI_USE_MODFILE
    if path:
        AI_MODFILE_PATH = path

    try:
        # Enable modelfile usage if it was disabled
        AI_USE_MODFILE = True

        if not AI_MODFILE_PATH:
            logger.warning("⚠️ No modelfile path configured to load.")
            return False

        mod = load_modfile_if_exists(AI_MODFILE_PATH)
        MODFILE = mod
        if MODFILE:
            logger.info(f"🔁 Modelfile loaded: {AI_MODFILE_PATH}")
            return True
        else:
            logger.warning(f"⚠️ Modelfile not found or failed to parse: {AI_MODFILE_PATH}")
            return False
    except Exception as e:
        logger.exception("⚠️ Exception while loading modelfile: %s", e)
        return False


def unload_modelfile() -> bool:
    """Disable/unload the currently active modelfile so persona injection
    falls back to the standard `persona.json` mechanism."""
    global MODFILE, AI_USE_MODFILE
    MODFILE = None
    AI_USE_MODFILE = False
    logger.info("🔁 Modelfile unloaded/disabled at runtime.")
    return True


def get_modelfile_info() -> dict | None:
    """Return a small diagnostic dict about the currently loaded modelfile,
    or None if no modelfile is active."""
    if not MODFILE:
        return None
    return {
        "_source_path": MODFILE.get("_source_path"),
        "base_model": MODFILE.get("base_model"),
        "params": MODFILE.get("params"),
        "system_preview": (MODFILE.get("system") or "")[:300]
    }


def build_dryrun_payload(user_prompt, context=None, user_profile=None) -> dict:
    """Build and return the assembled prompt and payload that would be
    sent to the model, without performing any HTTP calls. Useful for
    inspecting template rendering and merged modelfile params.
    Returns: { 'prompt': str, 'payload': dict }
    """
    model_name = get_model_name()
    # Reuse main prompt building logic but avoid calling load_model()
    persona = load_persona()

    # Build prompt pieces (same logic as `get_ai_response`)
    system_inject = ""
    if MODFILE and MODFILE.get('system'):
        system_inject = MODFILE.get('system')
    elif persona:
        system_inject = persona["prompt_inject"].replace("“", '"').replace("”", '"').replace("’", "'")

    user_block = ""
    if user_profile and user_profile.get("custom_prompt"):
        user_block = f"[User Instruction]\n{user_profile['custom_prompt']}\n"

    context_block = f"[Recent Conversation]\n{context}\n" if (context and AI_INCLUDE_CONTEXT) else ""

    if MODFILE:
        tpl = MODFILE.get('template')
        if tpl:
            tpl_work = re.sub(r"\{\{\s*if\s+\.System\s*\}\}", "", tpl)
            tpl_work = re.sub(r"\{\{\s*end\s*\}\}", "", tpl_work)
            prompt_body = f"{user_block}{context_block}User: {user_prompt}\n"
            tpl_work = tpl_work.replace("{{ .System }}", system_inject)
            tpl_work = tpl_work.replace("{{ .Prompt }}", prompt_body)
            tpl_work = tpl_work.replace("{{ .User }}", user_block)
            full_prompt = tpl_work.strip()
        else:
            full_prompt = f"{system_inject}\n{user_block}{context_block}User: {user_prompt}\nResponse:"
    else:
        if persona:
            full_prompt = f"{system_inject}\n{user_block}{context_block}\nUser: {user_prompt}\n{persona['name']}:"
        else:
            full_prompt = f"{user_block}{context_block}\nUser: {user_prompt}\nResponse:"

    # Build payload and merge modelfile params
    payload = {"model": model_name, "prompt": full_prompt, "stream": False}
    if MODFILE and MODFILE.get('params'):
        for k, v in MODFILE.get('params', {}).items():
            payload[k] = v

    return {"prompt": full_prompt, "payload": payload}
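The base-URL normalization above accepts `OLLAMA_API` with or without the `/api` suffix. For reference, a minimal standalone sketch of the same rule; `normalize_base` is a hypothetical helper for illustration only, not part of `ai.py`:

# Sketch of the OLLAMA_API normalization rule used in ai.py above.
def normalize_base(raw_api: str) -> str:
    raw_api = (raw_api or "").rstrip("/")
    if raw_api == "":
        return ""
    return raw_api if raw_api.endswith("/api") else f"{raw_api}/api"

# Both spellings resolve to the same /api/generate endpoint:
assert normalize_base("http://localhost:11434/") + "/generate" == "http://localhost:11434/api/generate"
assert normalize_base("http://localhost:11434/api") + "/generate" == "http://localhost:11434/api/generate"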
181 src/autochat.py Normal file
@@ -0,0 +1,181 @@
import os
import time
import random
import yaml
import re

from logger import setup_logger
from ai import get_ai_response
from user_profiles import load_user_profile
from context import fetch_raw_context, format_context
from personality import load_persona

logger = setup_logger("autochat")

# === Load and Parse settings.yml ===
def load_settings():
    settings_path = os.path.join(os.path.dirname(__file__), "settings.yml")
    with open(settings_path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)

SETTINGS = load_settings()
CONTEXT_LIMIT = SETTINGS.get("context", {}).get("max_messages", 10)
CONTEXT_ENABLED = SETTINGS.get("context", {}).get("enabled", True)
AUTOREPLY_COOLDOWN = int(os.getenv("AUTOREPLY_COOLDOWN", 60))
AUTOREPLY_ENABLED = os.getenv("AUTOREPLY_ENABLED", "false").lower() == "true"
ENABLE_REACTIONS = SETTINGS.get("autochat", {}).get("enable_reactions", False)
ENGAGEMENT_DECAY_PER_MINUTE = SETTINGS.get("autochat", {}).get("engagement_decay_per_minute", 0.15)
EMOJI_REACTION_CHANCE = SETTINGS.get("autochat", {}).get("emoji_reaction_chance", 0.35)

# === Global State ===
_last_reply_time = 0
_engagement_score = 0.0
_last_trigger_time = None

def should_auto_reply():
    global _last_reply_time
    if not AUTOREPLY_ENABLED:
        return False
    now = time.time()
    if now - _last_reply_time >= AUTOREPLY_COOLDOWN:
        return True
    logger.info(f"🛑 Cooldown active. {round(AUTOREPLY_COOLDOWN - (now - _last_reply_time))}s left.")
    return False

def update_reply_timer():
    global _last_reply_time
    _last_reply_time = time.time()

def is_triggered(message_content: str, persona: dict) -> bool:
    lowered = message_content.lower()
    return (
        any(nick in lowered for nick in persona.get("nickname_triggers", [])) or
        any(trigger in lowered for trigger in persona.get("triggers", []))
    )

def is_overengaged(context_msgs) -> bool:
    if len(context_msgs) >= 2:
        last = context_msgs[-1]
        second_last = context_msgs[-2]
        return (
            getattr(last.author, "bot", False)
            and getattr(second_last.author, "bot", False)
        )
    return False

def apply_engagement_decay():
    global _engagement_score, _last_trigger_time
    if _engagement_score > 0 and _last_trigger_time:
        minutes_passed = (time.time() - _last_trigger_time) / 60
        decay = minutes_passed * ENGAGEMENT_DECAY_PER_MINUTE
        _engagement_score = max(0.0, _engagement_score - decay)
        logger.info(f"📉 Engagement decayed by {decay:.2f}, new score: {_engagement_score:.2f}")

# 🧠 Ask AI: "What emoji(s) would you react with?"
async def get_reaction_emojis(message, persona):
    short_prompt = (
        f"You are {persona['name']}, a Discord user with strong emotions.\n\n"
        f"Based on this message, react with 1–3 emojis (no words):\n"
        f"\"{message.content}\""
    )
    reply = get_ai_response(short_prompt)
    emojis = re.findall(r'[\U0001F300-\U0001F6FF\U0001F900-\U0001F9FF\U0001F1E0-\U0001F1FF]', reply or "")
    return list(dict.fromkeys(emojis))[:3]  # max 3 unique

# 💬 React to message using chosen emojis
async def maybe_react_to_message(message, persona):
    if not ENABLE_REACTIONS or message.author.bot:
        return

    roll = random.random()
    if roll > EMOJI_REACTION_CHANCE:
        logger.debug("🎲 Reaction skipped (chance %.2f, roll %.2f)", EMOJI_REACTION_CHANCE, roll)
        return

    try:
        # Ask the LLM to extract up to 3 emojis based on the message content
        from ai import get_ai_response

        system_prompt = (
            f"You are {persona['name']}, an expressive character who uses emojis to react to user messages. "
            "Given a message, return ONLY 1–3 fitting emojis in response to the tone or content. No text."
        )

        prompt = (
            f"{system_prompt}\n\n"
            f"User: {message.content}\n"
            f"{persona['name']}:"
        )

        emoji_reply = get_ai_response(prompt).strip()

        # Log the raw emoji suggestion at DEBUG
        logger.debug("🎭 Emoji suggestion from LLM: %s", emoji_reply)

        # Extract valid emojis
        emojis = re.findall(r'[\U0001F300-\U0001F6FF\U0001F900-\U0001F9FF\U0001F1E0-\U0001F1FF]', emoji_reply)
        unique_emojis = list(dict.fromkeys(emojis))[:3]

        for emoji in unique_emojis:
            await message.add_reaction(emoji)

    except Exception as e:
        logger.warning(f"⚠️ Failed to generate emoji reaction: {e}")

# === Main Autoreply Function ===
async def generate_auto_reply(message, bot):
    global _engagement_score, _last_trigger_time

    raw_msgs = await fetch_raw_context(message.channel)
    persona = load_persona()

    # === ✅ Always react with emojis ===
    await maybe_react_to_message(message, persona)

    # === 💬 Below this governs if she actually speaks ===
    if is_overengaged(raw_msgs):
        logger.info("🔇 Skipped: Delta just spoke twice in a row.")
        return None

    if not should_auto_reply():
        return None

    profile = load_user_profile(message.author)
    content = message.content.lower()

    explicitly_triggered = is_triggered(content, persona)

    if explicitly_triggered:
        _engagement_score = 1.0
        _last_trigger_time = time.time()
        logger.info("🎯 Trigger word detected — Delta fully engaged.")
    else:
        apply_engagement_decay()
        if _engagement_score > 0:
            roll = random.random()
            logger.info(f"🌀 Engagement roll — score: {_engagement_score:.2f}, roll: {roll:.2f}")
            if roll > _engagement_score:
                logger.info("🙈 Engagement roll failed — Delta stays quiet.")
                return None
        else:
            logger.info("😴 No trigger and engagement is 0 — skipping.")
            return None

    # === If we got this far, she speaks ===
    formatted_context = format_context(raw_msgs[-CONTEXT_LIMIT:]) if CONTEXT_ENABLED else ""

    logger.info(f"🤖 Considering passive reply (author: {message.author.display_name})")
    logger.info(f"📚 Retrieved {len(raw_msgs)} messages for context")

    async with message.channel.typing():
        # Use memory-enhanced response for auto-chat
        from enhanced_ai import get_ai_response_with_memory
        reply = get_ai_response_with_memory(
            user_prompt=message.content,
            context=raw_msgs[-CONTEXT_LIMIT:],  # Pass raw messages
            user_profile=profile,
            message=message  # Important: pass message for memory storage
        )
    update_reply_timer()
    return reply.strip() if reply else None
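For intuition on `apply_engagement_decay`: with the default `engagement_decay_per_minute` of 0.15, a full engagement score of 1.0 drains to zero after roughly 6.7 quiet minutes. A small standalone sketch of the same arithmetic; `decayed_score` is a hypothetical helper mirroring the module's logic, not part of `autochat.py`:

# Standalone sketch of the decay arithmetic from apply_engagement_decay().
def decayed_score(score: float, minutes_passed: float, rate: float = 0.15) -> float:
    return max(0.0, score - minutes_passed * rate)

# Two quiet minutes: 1.0 - 2 * 0.15 == 0.7
assert abs(decayed_score(1.0, 2.0) - 0.7) < 1e-9
# Fully drained well before ten minutes (1.0 / 0.15 ≈ 6.7 minutes)
assert decayed_score(1.0, 10.0) == 0.0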
13 src/bot.error.log Normal file
@@ -0,0 +1,13 @@
[2025-10-10 13:29:37] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:30:50] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:31:19] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:31:24] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:45:36] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:53:17] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:53:32] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:54:13] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:54:23] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:55:25] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 15:04:09] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 15:04:14] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 15:05:15] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
32 src/bot.log Normal file
@@ -0,0 +1,32 @@
[2025-10-10 12:58:15] [INFO] [database:325] Connected to JSON backend
[2025-10-10 12:58:15] [INFO] [database:539] Initialized JSON database backend
[2025-10-10 13:00:54] [INFO] [database:81] Connected to SQLite database: data/deltabot.db
[2025-10-10 13:00:54] [DEBUG] [database:142] Database tables initialized
[2025-10-10 13:00:54] [INFO] [database:536] Initialized SQLite database backend
[2025-10-10 13:01:51] [INFO] [database:81] Connected to SQLite database: data/deltabot.db
[2025-10-10 13:01:51] [DEBUG] [database:142] Database tables initialized
[2025-10-10 13:01:51] [INFO] [database:536] Initialized SQLite database backend
[2025-10-10 13:29:16] [INFO] [database:325] Connected to JSON backend
[2025-10-10 13:29:16] [INFO] [database:541] Initialized JSON database backend
[2025-10-10 13:29:16] [INFO] [webui:303] Starting web UI server on 0.0.0.0:8080
[2025-10-10 13:29:37] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:30:50] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:31:19] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:31:24] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 13:45:36] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:53:17] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:53:32] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:54:13] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:54:23] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 14:55:25] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 15:04:09] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 15:04:14] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
[2025-10-10 15:05:13] [INFO] [database:325] Connected to JSON backend
[2025-10-10 15:05:13] [INFO] [database:541] Initialized JSON database backend
[2025-10-10 15:05:13] [INFO] [webui:314] Starting web UI server on 0.0.0.0:8082
[2025-10-10 15:05:13] [INFO] [webui:315] ==================================================
[2025-10-10 15:05:13] [INFO] [webui:316] 🌐 WEB UI ACCESS URLS:
[2025-10-10 15:05:13] [INFO] [webui:317] Local: http://localhost:8082
[2025-10-10 15:05:13] [INFO] [webui:318] Network: http://192.168.0.144:8082
[2025-10-10 15:05:13] [INFO] [webui:319] ==================================================
[2025-10-10 15:05:15] [ERROR] [webui:82] Failed to load settings: [Errno 2] No such file or directory: '/app/src/settings.yml'
563 src/bot.py
@@ -1,28 +1,82 @@
# bot.py

import time
import asyncio
import os
import discord
import yaml
import random
from dotenv import load_dotenv
from textwrap import wrap
from discord.ext import commands

from discord.ext.commands import (
    cooldown,
    BucketType,
    CommandOnCooldown
)
from cooldown import CooldownManager

# Local imports
from scheduler import start_scheduler
from profilepic import set_avatar_from_bytes
from context import fetch_raw_context, format_context
from user_profiles import (
    load_user_profile,
    update_last_seen,
    increment_interactions,
    format_profile_for_block,
    set_pronouns,
    set_custom_prompt
)
from personality import apply_personality, set_persona, load_persona
from logger import setup_logger
from ai import (
    unload_model,
    load_model,
    get_current_model,
    get_ai_response,
    TAGS_ENDPOINT
)
from enhanced_ai import get_ai_response_with_memory, analyze_user_message_for_memory
from ai import load_modelfile, unload_modelfile, get_modelfile_info
from time_logger import log_message_activity
from autochat import should_auto_reply, generate_auto_reply, update_reply_timer, maybe_react_to_message

debug_mode = os.getenv("DEBUG_MODE", "false").lower() == "true"
from user_profiles import format_profile_for_block as format_user_profile_block

# Setup logger and environment
logger = setup_logger("bot")
dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
load_dotenv(dotenv_path)

# No hardcoded owner IDs; use discord.py's owner check and guild admin perms.

# Message-level guard for cooldown updates (avoid double-updating during dispatch)
_cooldown_updated = set()
# Message-level guard to avoid sending the same cooldown error multiple times
_cooldown_error_sent = set()
_cooldown_recorded_for_msg = set()

# Message-level guard for generic one-shot sends (avoid duplicate command replies)
_message_sent_once = set()

# Load model settings
MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
logger.info(f"🔍 Loaded MODEL_NAME from .env: {MODEL_NAME}")
if debug_mode:
    logger.info(f"🧹 Attempting to clear VRAM before loading {MODEL_NAME}...")
    unload_model(MODEL_NAME)

if load_model(MODEL_NAME):
    logger.info(f"🚀 Model `{MODEL_NAME}` preloaded on startup.")
else:
    logger.warning(f"⚠️ Failed to preload model `{MODEL_NAME}`.")
logger.info(f"✅ Final model in use: {MODEL_NAME}")

# Load YAML settings
base_dir = os.path.dirname(__file__)
settings_path = os.path.join(base_dir, "settings.yml")

with open(settings_path, "r", encoding="utf-8") as f:
    settings = yaml.safe_load(f)
@ -30,70 +84,505 @@ ROAST_COOLDOWN_SECONDS = settings["cooldowns"]["roast"]
|
||||||
GLOBAL_COOLDOWN_SECONDS = settings["cooldowns"]["global"]
|
GLOBAL_COOLDOWN_SECONDS = settings["cooldowns"]["global"]
|
||||||
COOLDOWN_MSG_TEMPLATE = settings["messages"]["cooldown"]
|
COOLDOWN_MSG_TEMPLATE = settings["messages"]["cooldown"]
|
||||||
|
|
||||||
load_dotenv()
|
# Configure Discord bot
|
||||||
TOKEN = os.getenv("DISCORD_TOKEN")
|
TOKEN = os.getenv("DISCORD_TOKEN")
|
||||||
|
if not TOKEN:
|
||||||
|
logger.error("❌ DISCORD_TOKEN not set in .env file.")
|
||||||
|
raise SystemExit("DISCORD_TOKEN not set.")
|
||||||
|
|
||||||
intents = discord.Intents.default()
|
intents = discord.Intents.default()
|
||||||
intents.message_content = True
|
intents.message_content = True
|
||||||
|
|
||||||
bot = commands.Bot(command_prefix="!", intents=intents)
|
bot = commands.Bot(command_prefix="!", intents=intents)
|
||||||
|
|
||||||
|
# Handle cooldown errors globally
|
||||||
@bot.event
|
@bot.event
|
||||||
async def on_command_error(ctx, error):
|
async def on_command_error(ctx, error):
|
||||||
if isinstance(error, CommandOnCooldown):
|
if isinstance(error, CommandOnCooldown):
|
||||||
retry_secs = round(error.retry_after, 1)
|
retry_secs = round(error.retry_after, 1)
|
||||||
msg = COOLDOWN_MSG_TEMPLATE.replace("{seconds}", str(retry_secs))
|
template = random.choice(COOLDOWN_MSG_TEMPLATE) if isinstance(COOLDOWN_MSG_TEMPLATE, list) else COOLDOWN_MSG_TEMPLATE
|
||||||
print("🕒 Chill, mortal. You must wait 11.6s before trying again. 😼")
|
msg = template.replace("{seconds}", str(retry_secs))
|
||||||
await ctx.send(msg)
|
# Prevent duplicate cooldown messages for the same triggering message
|
||||||
|
msg_id = getattr(getattr(ctx, 'message', None), 'id', None)
|
||||||
|
if msg_id is not None and msg_id in _cooldown_error_sent:
|
||||||
|
logger.debug(f"on_command_error: cooldown message already sent for msg={msg_id}")
|
||||||
|
return
|
||||||
|
|
||||||
|
logger.info(f"Command {ctx.command} on cooldown for user={getattr(ctx.author, 'id', None)}. Retry after {retry_secs} seconds.")
|
||||||
|
try:
|
||||||
|
await ctx.send(msg)
|
||||||
|
except Exception:
|
||||||
|
# ignore send failures
|
||||||
|
pass
|
||||||
|
|
||||||
|
if msg_id is not None:
|
||||||
|
_cooldown_error_sent.add(msg_id)
|
||||||
|
async def _clear_cooldown_error(mid):
|
||||||
|
try:
|
||||||
|
await __import__('asyncio').sleep(5)
|
||||||
|
_cooldown_error_sent.discard(mid)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
__import__('asyncio').create_task(_clear_cooldown_error(msg_id))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
else:
|
else:
|
||||||
raise error
|
raise error
|
||||||
|
|
||||||
# Global cooldown bucket
|
# Global cooldown manager (per-user)
|
||||||
global_cooldown = CooldownMapping.from_cooldown(1, GLOBAL_COOLDOWN_SECONDS, BucketType.user)
|
_cooldown_mgr = CooldownManager()
|
||||||
|
|
||||||
|
|
||||||
@bot.check
|
@bot.check
|
||||||
async def global_command_cooldown(ctx):
|
async def global_command_cooldown(ctx):
|
||||||
bucket = global_cooldown.get_bucket(ctx.message)
|
# Allow the application owner to bypass cooldowns
|
||||||
retry_after = bucket.update_rate_limit()
|
try:
|
||||||
|
if await bot.is_owner(ctx.author):
|
||||||
|
return True
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Allow guild administrators / users with Manage Guild to bypass cooldowns
|
||||||
|
try:
|
||||||
|
perms = getattr(ctx.author, 'guild_permissions', None)
|
||||||
|
if perms and (perms.administrator or perms.manage_guild):
|
||||||
|
return True
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Use a message-level guard so we only update the cooldown once per message
|
||||||
|
user_id = getattr(ctx.author, 'id', None)
|
||||||
|
msg_id = getattr(getattr(ctx, 'message', None), 'id', None)
|
||||||
|
logger.debug(f"global_command_cooldown: check user={user_id} msg={msg_id} command={getattr(ctx, 'command', None)}")
|
||||||
|
|
||||||
|
# If we've already updated cooldown for this message, allow immediately
|
||||||
|
if msg_id is not None and msg_id in _cooldown_updated:
|
||||||
|
logger.debug(f"global_command_cooldown: msg {msg_id} already updated, allow")
|
||||||
|
return True
|
||||||
|
|
||||||
|
# Check and update atomically; this will prevent races where multiple
|
||||||
|
# Use peek to inspect remaining time without updating state. The actual
|
||||||
|
# recording of the timestamp happens once the command starts (see
|
||||||
|
# `before_invoke` handler) so there's a single canonical writer.
|
||||||
|
retry = await _cooldown_mgr.peek('global', user_id, GLOBAL_COOLDOWN_SECONDS)
|
||||||
|
if retry > 0.0:
|
||||||
|
logger.info(f"global_command_cooldown: user={user_id} blocked, retry={retry}")
|
||||||
|
raise CommandOnCooldown(commands.Cooldown(1, GLOBAL_COOLDOWN_SECONDS, BucketType.user), retry)
|
||||||
|
|
||||||
|
# Mark this message as updated so repeated checks during dispatch don't re-update
|
||||||
|
if msg_id is not None:
|
||||||
|
_cooldown_updated.add(msg_id)
|
||||||
|
# schedule removal after a short grace window
|
||||||
|
async def _remove_later(mid):
|
||||||
|
try:
|
||||||
|
await __import__('asyncio').sleep(5)
|
||||||
|
_cooldown_updated.discard(mid)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
__import__('asyncio').create_task(_remove_later(msg_id))
|
||||||
|
except Exception:
|
||||||
|
# ignore if event loop not running
|
||||||
|
pass
|
||||||
|
|
||||||
if retry_after:
|
|
||||||
raise CommandOnCooldown(bucket, retry_after, BucketType.user)
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
# Record cooldown when a command is about to execute. This centralizes the
|
||||||
|
# write side of the cooldown and prevents multiple check-and-update races.
|
||||||
|
@bot.before_invoke
|
||||||
|
async def record_global_cooldown(ctx):
|
||||||
|
try:
|
||||||
|
# bypass for owners/admins
|
||||||
|
if await bot.is_owner(ctx.author):
|
||||||
|
return
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
perms = getattr(ctx.author, 'guild_permissions', None)
|
||||||
|
if perms and (perms.administrator or perms.manage_guild):
|
||||||
|
return
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
user_id = getattr(ctx.author, 'id', None)
|
||||||
|
msg_id = getattr(getattr(ctx, 'message', None), 'id', None)
|
||||||
|
# If we've already recorded cooldown for this message, skip (idempotent)
|
||||||
|
if msg_id is not None and msg_id in _cooldown_recorded_for_msg:
|
||||||
|
logger.debug(f"record_global_cooldown: already recorded for msg={msg_id}")
|
||||||
|
return
|
||||||
|
# Single writer: record the timestamp so future peeks will see the
|
||||||
|
# updated value.
|
||||||
|
try:
|
||||||
|
await _cooldown_mgr.record('global', user_id)
|
||||||
|
logger.debug(f"record_global_cooldown: recorded for user={user_id}")
|
||||||
|
if msg_id is not None:
|
||||||
|
_cooldown_recorded_for_msg.add(msg_id)
|
||||||
|
async def _clear_record(mid):
|
||||||
|
try:
|
||||||
|
await __import__('asyncio').sleep(5)
|
||||||
|
_cooldown_recorded_for_msg.discard(mid)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
__import__('asyncio').create_task(_clear_record(msg_id))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(f"record_global_cooldown: failed to record for user={user_id}: {e}")
|
||||||
|
|
||||||
|
# Handle direct bot mentions
|
||||||
|
@bot.event
|
||||||
|
async def on_message(message):
|
||||||
|
# If we observe our own outgoing messages from the gateway, log them.
|
||||||
|
if message.author == bot.user:
|
||||||
|
try:
|
||||||
|
logger.debug(f"on_message: observed own message id={getattr(message,'id',None)} channel={getattr(getattr(message,'channel',None),'id',None)}")
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
return
|
||||||
|
|
||||||
|
from autochat import maybe_react_to_message, generate_auto_reply
|
||||||
|
from personality import load_persona
|
||||||
|
|
||||||
|
# 👤 Load persona for reactions
|
||||||
|
persona = load_persona()
|
||||||
|
|
||||||
|
# 💬 React to message FIRST
|
||||||
|
await maybe_react_to_message(message, persona)
|
||||||
|
|
||||||
|
# 🤖 Passive reply logic
|
||||||
|
reply = await generate_auto_reply(message, bot)
|
||||||
|
if reply:
|
||||||
|
await message.channel.send(reply)
|
||||||
|
|
||||||
|
# 📣 Mention override (if bot is pinged)
|
||||||
|
if bot.user.mentioned_in(message):
|
||||||
|
prompt = message.content.replace(f"<@{bot.user.id}>", "").strip()
|
||||||
|
if not prompt:
|
||||||
|
return
|
||||||
|
|
||||||
|
user_id = str(message.author.id)
|
||||||
|
update_last_seen(user_id)
|
||||||
|
profile = load_user_profile(message.author)
|
||||||
|
|
||||||
|
logger.info("=" * 60 + " AI Response " + "=" * 60)
|
||||||
|
logger.info(f"🧠 Profile loaded for {profile['display_name']} (interactions: {profile['interactions']})")
|
||||||
|
|
||||||
|
context_msgs = await fetch_raw_context(message.channel)
|
||||||
|
formatted_context = format_context(context_msgs)
|
||||||
|
logger.info(f"📚 Retrieved {len(context_msgs)} messages for context")
|
||||||
|
|
||||||
|
async with message.channel.typing():
|
||||||
|
# Use memory-enhanced AI response
|
||||||
|
reply = get_ai_response_with_memory(
|
||||||
|
prompt,
|
||||||
|
context=context_msgs, # Pass raw messages for better context
|
||||||
|
user_profile=profile,
|
||||||
|
message=message
|
||||||
|
)
|
||||||
|
await message.channel.send(reply)
|
||||||
|
|
||||||
|
await bot.process_commands(message)
|
||||||
|
|
||||||
|
# Bot startup event
|
||||||
|
@bot.event
|
||||||
|
async def on_ready():
|
||||||
|
print(f"✅ Logged in as {bot.user.name}")
|
||||||
|
logger.info(f"Logged in as {bot.user.name}")
|
||||||
|
for guild in bot.guilds:
|
||||||
|
me = guild.me
|
||||||
|
if me.nick != "Delta":
|
||||||
|
try:
|
||||||
|
await me.edit(nick="Delta")
|
||||||
|
logger.info(f"🔄 Renamed self to Delta in {guild.name}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"⚠️ Failed to rename in {guild.name}: {e}")
|
||||||
|
bot.loop.create_task(start_scheduler(bot))
|
||||||
|
|
||||||
|
# Commands
|
||||||
|
@bot.command(name="setprompt")
|
||||||
|
async def set_prompt_cmd(ctx, *, prompt):
|
||||||
|
set_custom_prompt(ctx.author.id, prompt)
|
||||||
|
await ctx.send("✅ Custom prompt saved.")
|
||||||
|
|
||||||
|
@bot.command(name="setpronouns")
|
||||||
|
async def set_pronouns_cmd(ctx, *, pronouns):
|
||||||
|
success = set_pronouns(ctx.author, pronouns)
|
||||||
|
if success:
|
||||||
|
await ctx.send(f"✅ Got it, {ctx.author.display_name}! Your pronouns have been updated.")
|
||||||
|
else:
|
||||||
|
await ctx.send("⚠️ Failed to update pronouns. Try interacting with Delta first to generate your profile.")
|
||||||
|
|
||||||
@bot.command()
|
@bot.command()
|
||||||
async def ping(ctx):
|
async def ping(ctx):
|
||||||
await ctx.send("🏓 Pong!")
|
await ctx.send("🏓 Pong!")
|
||||||
|
|
||||||
@bot.command()
|
@bot.command()
|
||||||
async def chat(ctx, *, message):
|
async def chat(ctx, *, prompt):
|
||||||
await ctx.send("🤖 Thinking...")
|
await ctx.send("🤖 Thinking...")
|
||||||
reply = get_ai_response(message)
|
reply = get_ai_response(prompt)
|
||||||
await ctx.send(reply)
|
for chunk in wrap(reply, 2000):
|
||||||
|
await ctx.send(chunk)
|
||||||
|
|
||||||
|
|
||||||
|
# Modelfile admin commands -------------------------------------------------
|
||||||
|
@bot.group(name="modfile")
|
||||||
|
@commands.is_owner()
|
||||||
|
async def modfile_group(ctx):
|
||||||
|
"""Manage modelfiles at runtime. Subcommands: reload, switch, disable, info"""
|
||||||
|
if ctx.invoked_subcommand is None:
|
||||||
|
await ctx.send("Available: `!modfile reload [path]`, `!modfile switch <path>`, `!modfile disable`, `!modfile info`")
|
||||||
|
|
||||||
|
|
||||||
|
@modfile_group.command(name="reload")
|
||||||
|
@commands.is_owner()
|
||||||
|
async def modfile_reload(ctx, *, path: str = None):
|
||||||
|
"""Reload the current modelfile or load from an optional new path."""
|
||||||
|
await ctx.send("🔁 Reloading modelfile...")
|
||||||
|
ok = load_modelfile(path) if path else load_modelfile()
|
||||||
|
await ctx.send("✅ Reloaded." if ok else "❌ Failed to reload modelfile. Check logs.")
|
||||||
|
|
||||||
|
|
||||||
|
@modfile_group.command(name="switch")
|
||||||
|
@commands.is_owner()
|
||||||
|
async def modfile_switch(ctx, *, path: str):
|
||||||
|
"""Switch to a different modelfile path and load it."""
|
||||||
|
await ctx.send(f"🔁 Switching modelfile to `{path}`...")
|
||||||
|
ok = load_modelfile(path)
|
||||||
|
await ctx.send("✅ Switched and loaded." if ok else "❌ Failed to switch modelfile. Check logs.")
|
||||||
|
|
||||||
|
|
||||||
|
@modfile_group.command(name="disable")
|
||||||
|
@commands.is_owner()
|
||||||
|
async def modfile_disable(ctx):
|
||||||
|
"""Disable the active modelfile and return to persona injection."""
|
||||||
|
unload_modelfile()
|
||||||
|
await ctx.send("✅ Modelfile disabled; falling back to persona injection.")
|
||||||
|
|
||||||
|
|
||||||
|
@modfile_group.command(name="info")
|
||||||
|
@commands.is_owner()
|
||||||
|
async def modfile_info(ctx):
|
||||||
|
# Instrumentation: log invocation and message id to diagnose duplicate sends
|
||||||
|
msg_id = getattr(getattr(ctx, 'message', None), 'id', None)
|
||||||
|
logger.debug(f"modfile_info invoked: cmd={getattr(ctx, 'command', None)} user={getattr(ctx.author, 'id', None)} msg={msg_id}")
|
||||||
|
|
||||||
|
info = get_modelfile_info()
|
||||||
|
if not info:
|
||||||
|
logger.debug(f"modfile_info: no modelfile, sending informational reply for msg={msg_id}")
|
||||||
|
return await ctx.send("ℹ️ No modelfile currently loaded.")
|
||||||
|
system_preview = info.get('system_preview') or ''
|
||||||
|
lines = [
|
||||||
|
f"Source: `{info.get('_source_path')}`",
|
||||||
|
f"Base model: `{info.get('base_model')}`",
|
||||||
|
f"Params: `{info.get('params')}`",
|
||||||
|
"System preview:",
|
||||||
|
"```" + system_preview + "```"]
|
||||||
|
# Use per-message idempotent send to avoid duplicate replies
|
||||||
|
msg_id = getattr(getattr(ctx, 'message', None), 'id', None)
|
||||||
|
payload = "\n".join(lines)
|
||||||
|
if msg_id is not None:
|
||||||
|
key = ("modfile_info", msg_id)
|
||||||
|
if key in _message_sent_once:
|
||||||
|
logger.debug(f"modfile_info: already sent for msg={msg_id} - skipping send")
|
||||||
|
return
|
||||||
|
logger.debug(f"modfile_info: preparing to send reply for msg={msg_id}")
|
||||||
|
_message_sent_once.add(key)
|
||||||
|
async def _clear_sent(k):
|
||||||
|
try:
|
||||||
|
await __import__('asyncio').sleep(5)
|
||||||
|
_message_sent_once.discard(k)
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
__import__('asyncio').create_task(_clear_sent(key))
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
try:
|
||||||
|
sent = await ctx.send(payload)
|
||||||
|
try:
|
||||||
|
sent_id = getattr(sent, 'id', None)
|
||||||
|
chan = getattr(getattr(sent, 'channel', None), 'id', None)
|
||||||
|
logger.debug(f"modfile_info: sent payload for msg={msg_id} -> sent_id={sent_id} channel={chan}")
|
||||||
|
except Exception:
|
||||||
|
logger.debug(f"modfile_info: sent payload for msg={msg_id}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.debug(f"modfile_info: failed to send payload for msg={msg_id}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
@modfile_group.command(name="list")
|
||||||
|
@commands.is_owner()
|
||||||
|
async def modfile_list(ctx):
|
||||||
|
"""List available modelfiles in common locations (examples/, personas/, src/)."""
|
||||||
|
base = os.path.dirname(os.path.dirname(__file__))
|
||||||
|
candidates = []
|
||||||
|
search_dirs = [
|
||||||
|
os.path.join(base, 'examples'),
|
||||||
|
os.path.join(base, 'personas'),
|
||||||
|
os.path.join(base, 'src'),
|
||||||
|
base
|
||||||
|
]
|
||||||
|
for d in search_dirs:
|
||||||
|
if not os.path.isdir(d):
|
||||||
|
continue
|
||||||
|
for fname in os.listdir(d):
|
||||||
|
if fname.endswith('.mod') or fname.endswith('.json'):
|
||||||
|
candidates.append(os.path.join(d, fname))
|
||||||
|
|
||||||
|
if not candidates:
|
||||||
|
return await ctx.send("No modelfiles found in examples/, personas/, or src/.")
|
||||||
|
|
||||||
|
lines = ["Available modelfiles:"]
|
||||||
|
for p in sorted(candidates):
|
||||||
|
lines.append(f"- `{p}`")
|
||||||
|
|
||||||
|
await ctx.send("\n".join(lines))
|
||||||
|
|
||||||
@bot.command()
|
@bot.command()
|
||||||
async def setpersona(ctx, *, description):
|
async def setpersona(ctx, *, description):
|
||||||
    set_persona(description)
    await ctx.send("✅ Persona updated! New style will be used in replies.")


@bot.command(name='roast')
@cooldown(rate=1, per=ROAST_COOLDOWN_SECONDS, type=BucketType.user)
async def roast(ctx):
    # Get the mentioned user (or fall back to the author)
    target = ctx.message.mentions[0].mention if ctx.message.mentions else ctx.author.mention

    # Build the roast prompt
    prompt = f"Roast {target}. Be dramatic, insulting, and sarcastic. Speak in your usual chaotic RGB catgirl personality."

    # Get the AI response
    response = get_ai_response(prompt)

    # Send the roast back to the channel
    await ctx.send(f"😼 {response}")


-@bot.event
-async def on_ready():
-    print(f"✅ Logged in as {bot.user.name}")
-    bot.loop.create_task(start_scheduler(bot))
+@bot.command(name="clearmodel")
+async def clear_model(ctx):
+    model = get_current_model()
+    success = unload_model(model)
+    msg = f"✅ Unloaded model: `{model}`" if success else f"❌ Failed to unload model: `{model}`"
+    await ctx.send(msg)


@bot.command(name="model")
async def current_model(ctx):
    model = get_current_model()
    await ctx.send(f"📦 Current model: `{model}`")


@bot.command(name="setmodel")
async def set_model(ctx, *, model_name):
    current_model = get_current_model()
    if model_name == current_model:
        return await ctx.send(f"⚠️ `{model_name}` is already active.")

    await ctx.send(f"🔄 Switching from `{current_model}` to `{model_name}`…")

    if unload_model(current_model):
        await ctx.send(f"🧽 Unloaded `{current_model}` from VRAM.")
    else:
        await ctx.send(f"⚠️ Couldn’t unload `{current_model}`.")

    if not load_model(model_name):
        return await ctx.send(f"❌ Failed to pull `{model_name}`.")

    os.environ["MODEL_NAME"] = model_name
    env_path = os.path.join(os.path.dirname(__file__), '..', '.env')
    lines = []
    with open(env_path, 'r', encoding='utf-8') as f:
        for line in f:
            lines.append(f"MODEL_NAME={model_name}\n" if line.startswith("MODEL_NAME=") else line)
    with open(env_path, 'w', encoding='utf-8') as f:
        f.writelines(lines)

    await ctx.send(f"✅ Model switched to `{model_name}` and `.env` updated.")


@bot.command(name="models")
async def list_models(ctx):
    import requests
    try:
        resp = requests.get(TAGS_ENDPOINT)
        models = [m["name"] for m in resp.json().get("models", [])]
        if models:
            await ctx.send("🧠 Available models:\n" + "\n".join(f"- `{m}`" for m in models))
        else:
            await ctx.send("❌ No models found.")
    except Exception as e:
        await ctx.send(f"❌ Failed to fetch models: {e}")


@bot.command(name="memory")
@commands.is_owner()
async def memory_cmd(ctx, action: str = "info", *, target: str = None):
    """Memory management: !memory info [@user], !memory cleanup, !memory summary"""
    from enhanced_ai import get_user_memory_summary
    from memory_manager import memory_manager

    if action == "info":
        user_id = str(ctx.author.id)
        if ctx.message.mentions:
            user_id = str(ctx.message.mentions[0].id)

        summary = get_user_memory_summary(user_id)
        await ctx.send(f"```\n{summary}\n```")

    elif action == "cleanup":
        memory_manager.cleanup_old_memories(days=30)
        await ctx.send("🧹 Cleaned up old memories (30+ days)")

    elif action == "summary":
        channel_id = str(ctx.channel.id)
        memories = memory_manager.get_conversation_context(channel_id, hours=48)
        if memories:
            summary_lines = [f"Recent channel memories ({len(memories)} total):"]
            for i, memory in enumerate(memories[:5]):
                timestamp = memory['timestamp'][:16].replace('T', ' ')
                content = memory['content'][:100]
                summary_lines.append(f"{i+1}. {timestamp}: {content}")
            await ctx.send("```\n" + "\n".join(summary_lines) + "\n```")
        else:
            await ctx.send("No recent memories for this channel.")

    else:
        await ctx.send("Usage: `!memory info [@user]`, `!memory cleanup`, `!memory summary`")


@bot.command(name="dryrun")
@commands.is_owner()
async def dryrun(ctx, *, prompt: str):
    """Build the prompt and payload without contacting the model.

    Usage: `!dryrun Your test prompt here`"""
    await ctx.send("🧪 Building dry-run payload...")
    from ai import build_dryrun_payload
    profile = load_user_profile(ctx.author)
    info = build_dryrun_payload(prompt, context=None, user_profile=profile)
    prompt_preview = info['prompt'][:1500]
    payload_preview = {k: info['payload'][k] for k in info['payload'] if k != 'prompt'}
    lines = [
        "Prompt assembled:",
        "```",
        prompt_preview,
        "```",
        "Payload params:",
        "```",
        str(payload_preview),
        "```"
    ]
    await ctx.send("\n".join(lines))


@bot.command(name="setavatar")
@commands.is_owner()
async def set_avatar(ctx):
    if not ctx.message.attachments:
        return await ctx.send("❌ Please attach an image (PNG) to use as the new avatar.")

    image = ctx.message.attachments[0]
    image_bytes = await image.read()
    token = os.getenv("DISCORD_TOKEN")
    if not token:
        return await ctx.send("❌ Bot token not found in environment.")

    success = set_avatar_from_bytes(image_bytes, token)
    await ctx.send("✅ Avatar updated successfully!" if success else "❌ Failed to update avatar.")


# Run the bot
bot.run(TOKEN)
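The `get_current_model`, `unload_model`, and `load_model` helpers these commands call live in `ai.py`, outside this hunk. A minimal sketch of how such helpers can talk to Ollama's HTTP API is below, assuming an `OLLAMA_API`-style base URL; the function names and signatures mirror the calls above but are reconstructions, not the repo's actual code.

```python
# Hypothetical sketch of the ai.py model helpers used by !setmodel / !clearmodel.
import os
import requests

OLLAMA_BASE = os.getenv("OLLAMA_API", "http://localhost:11434/").rstrip("/")

def unload_model(model: str) -> bool:
    # keep_alive=0 asks Ollama to evict the model from VRAM right away.
    resp = requests.post(f"{OLLAMA_BASE}/api/generate",
                         json={"model": model, "keep_alive": 0})
    return resp.ok

def load_model(model: str) -> bool:
    # /api/pull downloads the model if it is not already present locally.
    resp = requests.post(f"{OLLAMA_BASE}/api/pull",
                         json={"model": model, "stream": False})
    return resp.ok
```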

51 src/context.py Normal file
@@ -0,0 +1,51 @@
# context.py

import os
import yaml
import discord

base_dir = os.path.dirname(__file__)
with open(os.path.join(base_dir, "settings.yml"), "r", encoding="utf-8") as f:
    settings = yaml.safe_load(f)

# Determine whether context should be included. Preference order:
# 1) `AI_INCLUDE_CONTEXT` environment variable if present
# 2) `settings.yml` -> context.enabled
env_val = os.getenv("AI_INCLUDE_CONTEXT", None)
if env_val is not None:
    AI_INCLUDE_CONTEXT = str(env_val).lower() == "true"
else:
    AI_INCLUDE_CONTEXT = settings.get("context", {}).get("enabled", True)

CONTEXT_LIMIT = settings.get("context", {}).get("max_messages", 15) if AI_INCLUDE_CONTEXT else 0

# Returns full discord.Message objects (for logic)
async def fetch_raw_context(channel, limit=CONTEXT_LIMIT):
    # If context injection is disabled or the limit is <= 0, return early.
    if not AI_INCLUDE_CONTEXT or (not isinstance(limit, int)) or limit <= 0:
        return []

    messages = []
    async for message in channel.history(limit=100):
        # Skip other bots (but not Delta herself)
        if message.author.bot and message.author.id != channel.guild.me.id:
            continue
        messages.append(message)
        if len(messages) >= limit:
            break
    messages.reverse()
    return messages

# Keeps your clean format logic for the LLM
def format_context(messages: list[discord.Message]) -> str:
    lines = []
    for message in messages:
        raw = message.clean_content
        clean = raw.strip().replace("\n", " ").replace("\r", "")
        clean = " ".join(clean.split())
        if not clean or clean.startswith("!"):
            continue
        line = f"{message.created_at.strftime('%Y-%m-%d %H:%M')} - {message.author.display_name}: {clean}"
        lines.append(line)
    return "\n".join(lines)
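A hedged usage sketch of the two helpers above: an autoreply path might build the `[Recent Messages]` block like this. Only `fetch_raw_context` and `format_context` come from this file; the wrapper function is illustrative.

```python
# Hypothetical caller for context.py (not part of this commit).
from context import fetch_raw_context, format_context

async def build_context_block(channel) -> str:
    # Fetch up to CONTEXT_LIMIT recent messages (empty if context is disabled) ...
    messages = await fetch_raw_context(channel)
    # ... then flatten them into "timestamp - author: text" lines for the prompt.
    return format_context(messages)
```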

73 src/cooldown.py Normal file
@@ -0,0 +1,73 @@
import time
import asyncio
import logging
from typing import Dict, Tuple


class CooldownManager:
    """A simple, race-safe cooldown manager.

    - Uses time.monotonic() to avoid system clock jumps.
    - Stores last-execution timestamps keyed by (key, user_id).
    - `check_and_update` atomically checks and updates the timestamp.
    """

    def __init__(self):
        self._last: Dict[Tuple[str, int], float] = {}
        self._lock = asyncio.Lock()

    async def check_and_update(self, key: str, user_id: int, cooldown_sec: float) -> Tuple[bool, float]:
        """Check the cooldown for (key, user_id).

        Returns (allowed, retry_after). If allowed is True, it records the timestamp.
        If not allowed, returns (False, seconds_remaining).
        """
        now = time.monotonic()
        map_key = (key, int(user_id))
        async with self._lock:
            last = self._last.get(map_key, 0.0)
            elapsed = now - last
            if elapsed < float(cooldown_sec):
                return False, float(cooldown_sec) - elapsed
            # Allowed -> update the timestamp and return
            self._last[map_key] = now
            return True, 0.0

    async def record(self, key: str, user_id: int):
        """Record the current time for (key, user_id) without checking.

        This allows a two-phase flow where callers `peek` at the remaining time
        during checks and then `record` once they actually begin processing
        the command (single canonical writer).
        """
        async with self._lock:
            self._last[(key, int(user_id))] = time.monotonic()
        try:
            logging.getLogger('bot').debug(f"CooldownManager.record: key={key} user={user_id} recorded")
        except Exception:
            pass

    async def peek(self, key: str, user_id: int, cooldown_sec: float) -> float:
        """Return seconds remaining (0 if allowed) without updating state."""
        now = time.monotonic()
        last = self._last.get((key, int(user_id)), 0.0)
        rem = float(cooldown_sec) - (now - last)
        try:
            logging.getLogger('bot').debug(f"CooldownManager.peek: key={key} user={user_id} rem={max(0.0, rem):.3f}s")
        except Exception:
            pass
        return max(0.0, rem)

    async def clear(self, key: str = None, user_id: int = None):
        """Clear stored timestamps selectively or entirely."""
        async with self._lock:
            if key is None and user_id is None:
                self._last.clear()
                return
            to_delete = []
            for k in list(self._last.keys()):
                k_key, k_user = k
                if (key is None or k_key == key) and (user_id is None or k_user == int(user_id)):
                    to_delete.append(k)
            for k in to_delete:
                del self._last[k]
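The docstrings describe a two-phase `peek`/`record` flow; a sketch of what a caller could look like follows. The handler and the cooldown value are illustrative, not code from this repo.

```python
# Illustrative two-phase use of CooldownManager.
from cooldown import CooldownManager

cooldowns = CooldownManager()
AUTOREPLY_COOLDOWN = 30.0  # seconds; assumed value

async def maybe_autoreply(message) -> bool:
    # Phase 1: peek without mutating state, so concurrent checks don't race to write.
    remaining = await cooldowns.peek("autoreply", message.author.id, AUTOREPLY_COOLDOWN)
    if remaining > 0:
        return False  # still cooling down; caller may report the retry time
    # Phase 2: record only once we commit to handling the message.
    await cooldowns.record("autoreply", message.author.id)
    return True
```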

86 src/data/activity_log.csv Normal file
@@ -0,0 +1,86 @@
timestamp,user_id,username,channel_id,hour,weekday
2025-05-19 22:54:55,161149541171593216,Miguel,1370420592360161393,22,Monday
2025-05-19 22:55:56,161149541171593216,Miguel,1370420592360161393,22,Monday
2025-05-19 22:56:51,161149541171593216,Miguel,1370420592360161393,22,Monday
2025-05-19 22:57:30,161149541171593216,Miguel,1370420592360161393,22,Monday
2025-05-19 23:35:38,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:35:40,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:35:46,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:36:06,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:36:09,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:36:13,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:36:25,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:36:38,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:37:55,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-05-19 23:39:03,161149541171593216,Miguel,1370420592360161393,23,Monday
2025-06-01 12:37:10,161149541171593216,Miguel,1370420592360161393,12,Sunday
2025-06-01 12:41:13,161149541171593216,Miguel,1370420592360161393,12,Sunday
2025-06-01 13:10:54,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:11:10,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:11:36,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:11:49,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:12:25,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:12:52,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:13:22,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:32:31,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:32:51,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:33:26,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:38:14,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:38:30,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 13:39:30,161149541171593216,Miguel,1370420592360161393,13,Sunday
2025-06-01 14:04:47,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:05:07,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:06:30,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:06:52,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:20:35,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:28:00,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:28:17,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:28:27,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:28:48,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:40:06,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:40:21,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:40:38,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:40:53,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:42:58,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:46:18,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:48:14,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 14:48:32,161149541171593216,Miguel,1370420592360161393,14,Sunday
2025-06-01 15:03:46,161149541171593216,Miguel,1370420592360161393,15,Sunday
2025-06-01 15:04:04,161149541171593216,Miguel,1370420592360161393,15,Sunday
2025-06-01 15:09:10,161149541171593216,Miguel,1370420592360161393,15,Sunday
2025-06-01 15:09:16,161149541171593216,Miguel,1370420592360161393,15,Sunday
2025-06-01 18:44:10,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:47:11,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:48:52,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:55:22,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:55:52,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:56:31,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:56:50,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:57:00,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:57:53,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 18:59:33,161149541171593216,Miguel,1370420592360161393,18,Sunday
2025-06-01 19:10:46,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:27:33,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:27:44,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:28:15,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:28:23,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:48:35,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:49:12,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-01 19:49:43,161149541171593216,Miguel,1370420592360161393,19,Sunday
2025-06-06 12:28:47,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:30:07,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:30:43,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:31:01,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:53:41,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:54:05,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:54:20,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:55:00,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 12:57:02,161149541171593216,Miguel,1370420592360161393,12,Friday
2025-06-06 13:32:38,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:32:47,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:33:31,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:33:43,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:33:53,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:37:31,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:37:34,161149541171593216,Miguel,1370420592360161393,13,Friday
2025-06-06 13:37:37,161149541171593216,Miguel,1370420592360161393,13,Friday
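Since the log already carries `hour` and `weekday` columns, peak-activity buckets fall out of a simple aggregation. A sketch using pandas (an assumption; pandas is not a dependency of this repo):

```python
# Hypothetical analysis of activity_log.csv; pandas is not a repo dependency.
import pandas as pd

df = pd.read_csv("src/data/activity_log.csv")
# Count messages per (weekday, hour) bucket to find the busiest windows.
peaks = df.groupby(["weekday", "hour"]).size().sort_values(ascending=False)
print(peaks.head(5))
```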

BIN src/data/deltabot.db Normal file
Binary file not shown.

599 src/database.py Normal file
@@ -0,0 +1,599 @@
"""
|
||||||
|
database.py
|
||||||
|
Database abstraction layer supporting SQLite and JSON backends
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import sqlite3
|
||||||
|
import yaml
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from typing import Dict, List, Any, Optional, Union
|
||||||
|
from abc import ABC, abstractmethod
|
||||||
|
from logger import setup_logger
|
||||||
|
|
||||||
|
logger = setup_logger("database")
|
||||||
|
|
||||||
|
class DatabaseBackend(ABC):
|
||||||
|
"""Abstract base class for database backends"""
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def connect(self):
|
||||||
|
"""Initialize connection to database"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def close(self):
|
||||||
|
"""Close database connection"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
# User Profile methods
|
||||||
|
@abstractmethod
|
||||||
|
def get_user_profile(self, user_id: str) -> Optional[Dict]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def save_user_profile(self, user_id: str, profile: Dict):
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_all_user_profiles(self) -> Dict[str, Dict]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Memory methods (only if memory is enabled)
|
||||||
|
@abstractmethod
|
||||||
|
def store_conversation_memory(self, channel_id: str, user_id: str, content: str,
|
||||||
|
context: str, importance: float, timestamp: str):
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def store_user_memory(self, user_id: str, memory_type: str, content: str,
|
||||||
|
importance: float, timestamp: str):
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def get_user_context(self, user_id: str) -> List[Dict]:
|
||||||
|
pass
|
||||||
|
|
||||||
|
@abstractmethod
|
||||||
|
def cleanup_old_memories(self, days: int = 30):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class SQLiteBackend(DatabaseBackend):
|
||||||
|
"""SQLite database backend"""
|
||||||
|
|
||||||
|
def __init__(self, db_path: str = "data/deltabot.db"):
|
||||||
|
self.db_path = db_path
|
||||||
|
self.connection = None
|
||||||
|
self.connect()
|
||||||
|
self._init_tables()
|
||||||
|
|
||||||
|
def connect(self):
|
||||||
|
"""Initialize SQLite connection"""
|
||||||
|
os.makedirs(os.path.dirname(self.db_path), exist_ok=True)
|
||||||
|
self.connection = sqlite3.connect(self.db_path, check_same_thread=False)
|
||||||
|
self.connection.row_factory = sqlite3.Row # Enable dict-like access
|
||||||
|
logger.info(f"Connected to SQLite database: {self.db_path}")
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
"""Close SQLite connection"""
|
||||||
|
if self.connection:
|
||||||
|
self.connection.close()
|
||||||
|
self.connection = None
|
||||||
|
|
||||||
|
def _init_tables(self):
|
||||||
|
"""Initialize database tables"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
|
||||||
|
# User profiles table
|
||||||
|
cursor.execute('''
|
||||||
|
CREATE TABLE IF NOT EXISTS user_profiles (
|
||||||
|
user_id TEXT PRIMARY KEY,
|
||||||
|
name TEXT,
|
||||||
|
display_name TEXT,
|
||||||
|
first_seen TEXT,
|
||||||
|
last_seen TEXT,
|
||||||
|
last_message TEXT,
|
||||||
|
interactions INTEGER DEFAULT 0,
|
||||||
|
pronouns TEXT,
|
||||||
|
avatar_url TEXT,
|
||||||
|
custom_prompt TEXT,
|
||||||
|
profile_data TEXT -- JSON string for additional data
|
||||||
|
)
|
||||||
|
''')
|
||||||
|
|
||||||
|
# Conversation memories table
|
||||||
|
cursor.execute('''
|
||||||
|
CREATE TABLE IF NOT EXISTS conversation_memories (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
channel_id TEXT,
|
||||||
|
user_id TEXT,
|
||||||
|
content TEXT,
|
||||||
|
context TEXT,
|
||||||
|
importance REAL,
|
||||||
|
timestamp TEXT,
|
||||||
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||||
|
)
|
||||||
|
''')
|
||||||
|
|
||||||
|
# User memories table
|
||||||
|
cursor.execute('''
|
||||||
|
CREATE TABLE IF NOT EXISTS user_memories (
|
||||||
|
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||||
|
user_id TEXT,
|
||||||
|
memory_type TEXT,
|
||||||
|
content TEXT,
|
||||||
|
importance REAL,
|
||||||
|
timestamp TEXT,
|
||||||
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||||
|
)
|
||||||
|
''')
|
||||||
|
|
||||||
|
# Create indexes for better performance
|
||||||
|
cursor.execute('CREATE INDEX IF NOT EXISTS idx_conv_channel_time ON conversation_memories(channel_id, timestamp)')
|
||||||
|
cursor.execute('CREATE INDEX IF NOT EXISTS idx_user_mem_user_time ON user_memories(user_id, timestamp)')
|
||||||
|
|
||||||
|
self.connection.commit()
|
||||||
|
logger.debug("Database tables initialized")
|
||||||
|
|
||||||
|
def get_user_profile(self, user_id: str) -> Optional[Dict]:
|
||||||
|
"""Get user profile from SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cursor.execute('SELECT * FROM user_profiles WHERE user_id = ?', (user_id,))
|
||||||
|
row = cursor.fetchone()
|
||||||
|
|
||||||
|
if row:
|
||||||
|
profile = dict(row)
|
||||||
|
# Parse JSON profile_data if exists
|
||||||
|
if profile.get('profile_data'):
|
||||||
|
try:
|
||||||
|
extra_data = json.loads(profile['profile_data'])
|
||||||
|
profile.update(extra_data)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
del profile['profile_data'] # Remove the JSON field
|
||||||
|
return profile
|
||||||
|
return None
|
||||||
|
|
||||||
|
def save_user_profile(self, user_id: str, profile: Dict):
|
||||||
|
"""Save user profile to SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
|
||||||
|
# Separate known fields from extra data
|
||||||
|
known_fields = {
|
||||||
|
'name', 'display_name', 'first_seen', 'last_seen', 'last_message',
|
||||||
|
'interactions', 'pronouns', 'avatar_url', 'custom_prompt'
|
||||||
|
}
|
||||||
|
|
||||||
|
base_profile = {k: v for k, v in profile.items() if k in known_fields}
|
||||||
|
extra_data = {k: v for k, v in profile.items() if k not in known_fields}
|
||||||
|
|
||||||
|
cursor.execute('''
|
||||||
|
INSERT OR REPLACE INTO user_profiles
|
||||||
|
(user_id, name, display_name, first_seen, last_seen, last_message,
|
||||||
|
interactions, pronouns, avatar_url, custom_prompt, profile_data)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||||
|
''', (
|
||||||
|
user_id,
|
||||||
|
base_profile.get('name'),
|
||||||
|
base_profile.get('display_name'),
|
||||||
|
base_profile.get('first_seen'),
|
||||||
|
base_profile.get('last_seen'),
|
||||||
|
base_profile.get('last_message'),
|
||||||
|
base_profile.get('interactions', 0),
|
||||||
|
base_profile.get('pronouns'),
|
||||||
|
base_profile.get('avatar_url'),
|
||||||
|
base_profile.get('custom_prompt'),
|
||||||
|
json.dumps(extra_data) if extra_data else None
|
||||||
|
))
|
||||||
|
|
||||||
|
self.connection.commit()
|
||||||
|
|
||||||
|
def get_all_user_profiles(self) -> Dict[str, Dict]:
|
||||||
|
"""Get all user profiles from SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cursor.execute('SELECT * FROM user_profiles')
|
||||||
|
profiles = {}
|
||||||
|
|
||||||
|
for row in cursor.fetchall():
|
||||||
|
profile = dict(row)
|
||||||
|
user_id = profile.pop('user_id')
|
||||||
|
|
||||||
|
# Parse JSON profile_data if exists
|
||||||
|
if profile.get('profile_data'):
|
||||||
|
try:
|
||||||
|
extra_data = json.loads(profile['profile_data'])
|
||||||
|
profile.update(extra_data)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
if 'profile_data' in profile:
|
||||||
|
del profile['profile_data']
|
||||||
|
|
||||||
|
profiles[user_id] = profile
|
||||||
|
|
||||||
|
return profiles
|
||||||
|
|
||||||
|
def store_conversation_memory(self, channel_id: str, user_id: str, content: str,
|
||||||
|
context: str, importance: float, timestamp: str):
|
||||||
|
"""Store conversation memory in SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cursor.execute('''
|
||||||
|
INSERT INTO conversation_memories
|
||||||
|
(channel_id, user_id, content, context, importance, timestamp)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?)
|
||||||
|
''', (channel_id, user_id, content, context[:500], importance, timestamp))
|
||||||
|
|
||||||
|
# Keep only last 100 memories per channel
|
||||||
|
cursor.execute('''
|
||||||
|
DELETE FROM conversation_memories
|
||||||
|
WHERE channel_id = ? AND id NOT IN (
|
||||||
|
SELECT id FROM conversation_memories
|
||||||
|
WHERE channel_id = ?
|
||||||
|
ORDER BY timestamp DESC LIMIT 100
|
||||||
|
)
|
||||||
|
''', (channel_id, channel_id))
|
||||||
|
|
||||||
|
self.connection.commit()
|
||||||
|
|
||||||
|
def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
|
||||||
|
"""Get recent conversation memories from SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cutoff_time = (datetime.utcnow() - timedelta(hours=hours)).isoformat()
|
||||||
|
|
||||||
|
cursor.execute('''
|
||||||
|
SELECT * FROM conversation_memories
|
||||||
|
WHERE channel_id = ? AND timestamp > ?
|
||||||
|
ORDER BY importance DESC, timestamp DESC
|
||||||
|
LIMIT 10
|
||||||
|
''', (channel_id, cutoff_time))
|
||||||
|
|
||||||
|
return [dict(row) for row in cursor.fetchall()]
|
||||||
|
|
||||||
|
def store_user_memory(self, user_id: str, memory_type: str, content: str,
|
||||||
|
importance: float, timestamp: str):
|
||||||
|
"""Store user memory in SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cursor.execute('''
|
||||||
|
INSERT INTO user_memories
|
||||||
|
(user_id, memory_type, content, importance, timestamp)
|
||||||
|
VALUES (?, ?, ?, ?, ?)
|
||||||
|
''', (user_id, memory_type, content, importance, timestamp))
|
||||||
|
|
||||||
|
# Keep only last 50 memories per user
|
||||||
|
cursor.execute('''
|
||||||
|
DELETE FROM user_memories
|
||||||
|
WHERE user_id = ? AND id NOT IN (
|
||||||
|
SELECT id FROM user_memories
|
||||||
|
WHERE user_id = ?
|
||||||
|
ORDER BY timestamp DESC LIMIT 50
|
||||||
|
)
|
||||||
|
''', (user_id, user_id))
|
||||||
|
|
||||||
|
self.connection.commit()
|
||||||
|
|
||||||
|
def get_user_context(self, user_id: str) -> List[Dict]:
|
||||||
|
"""Get user memories from SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cursor.execute('''
|
||||||
|
SELECT * FROM user_memories
|
||||||
|
WHERE user_id = ?
|
||||||
|
ORDER BY importance DESC, timestamp DESC
|
||||||
|
LIMIT 5
|
||||||
|
''', (user_id,))
|
||||||
|
|
||||||
|
return [dict(row) for row in cursor.fetchall()]
|
||||||
|
|
||||||
|
def cleanup_old_memories(self, days: int = 30):
|
||||||
|
"""Clean up old memories from SQLite"""
|
||||||
|
cursor = self.connection.cursor()
|
||||||
|
cutoff_time = (datetime.utcnow() - timedelta(days=days)).isoformat()
|
||||||
|
|
||||||
|
# Clean conversation memories
|
||||||
|
cursor.execute('''
|
||||||
|
DELETE FROM conversation_memories
|
||||||
|
WHERE timestamp < ?
|
||||||
|
''', (cutoff_time,))
|
||||||
|
|
||||||
|
# Clean user memories (keep important ones longer)
|
||||||
|
cursor.execute('''
|
||||||
|
DELETE FROM user_memories
|
||||||
|
WHERE timestamp < ? AND importance <= 0.7
|
||||||
|
''', (cutoff_time,))
|
||||||
|
|
||||||
|
deleted_conv = cursor.rowcount
|
||||||
|
self.connection.commit()
|
||||||
|
|
||||||
|
logger.info(f"Cleaned up {deleted_conv} old memories from SQLite")
|
||||||
|
|
||||||
|
|
||||||
|
class JSONBackend(DatabaseBackend):
|
||||||
|
"""JSON file-based backend (existing system)"""
|
||||||
|
|
||||||
|
def __init__(self, profiles_path: str = None, memory_path: str = None):
|
||||||
|
self.profiles_path = profiles_path or os.path.join(os.path.dirname(__file__), "user_profiles.json")
|
||||||
|
self.memory_path = memory_path or os.path.join(os.path.dirname(__file__), "memory.json")
|
||||||
|
self.connect()
|
||||||
|
|
||||||
|
def connect(self):
|
||||||
|
"""Initialize JSON backend"""
|
||||||
|
self._ensure_files()
|
||||||
|
logger.info("Connected to JSON backend")
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
"""JSON backend doesn't need explicit closing"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _ensure_files(self):
|
||||||
|
"""Ensure JSON files exist"""
|
||||||
|
# Ensure profiles file
|
||||||
|
if not os.path.exists(self.profiles_path):
|
||||||
|
with open(self.profiles_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump({}, f, indent=2)
|
||||||
|
|
||||||
|
# Ensure memory file
|
||||||
|
if not os.path.exists(self.memory_path):
|
||||||
|
initial_data = {
|
||||||
|
"conversations": {},
|
||||||
|
"user_memories": {},
|
||||||
|
"global_events": []
|
||||||
|
}
|
||||||
|
with open(self.memory_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(initial_data, f, indent=2)
|
||||||
|
|
||||||
|
def _load_profiles(self) -> Dict:
|
||||||
|
"""Load profiles from JSON"""
|
||||||
|
try:
|
||||||
|
with open(self.profiles_path, "r", encoding="utf-8") as f:
|
||||||
|
return json.load(f)
|
||||||
|
except:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
def _save_profiles(self, profiles: Dict):
|
||||||
|
"""Save profiles to JSON"""
|
||||||
|
with open(self.profiles_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(profiles, f, indent=2)
|
||||||
|
|
||||||
|
def _load_memory(self) -> Dict:
|
||||||
|
"""Load memory from JSON"""
|
||||||
|
try:
|
||||||
|
with open(self.memory_path, "r", encoding="utf-8") as f:
|
||||||
|
return json.load(f)
|
||||||
|
except:
|
||||||
|
return {"conversations": {}, "user_memories": {}, "global_events": []}
|
||||||
|
|
||||||
|
def _save_memory(self, memory: Dict):
|
||||||
|
"""Save memory to JSON"""
|
||||||
|
with open(self.memory_path, "w", encoding="utf-8") as f:
|
||||||
|
json.dump(memory, f, indent=2)
|
||||||
|
|
||||||
|
def get_user_profile(self, user_id: str) -> Optional[Dict]:
|
||||||
|
"""Get user profile from JSON"""
|
||||||
|
profiles = self._load_profiles()
|
||||||
|
return profiles.get(user_id)
|
||||||
|
|
||||||
|
def save_user_profile(self, user_id: str, profile: Dict):
|
||||||
|
"""Save user profile to JSON"""
|
||||||
|
profiles = self._load_profiles()
|
||||||
|
profiles[user_id] = profile
|
||||||
|
self._save_profiles(profiles)
|
||||||
|
|
||||||
|
def get_all_user_profiles(self) -> Dict[str, Dict]:
|
||||||
|
"""Get all user profiles from JSON"""
|
||||||
|
return self._load_profiles()
|
||||||
|
|
||||||
|
def store_conversation_memory(self, channel_id: str, user_id: str, content: str,
|
||||||
|
context: str, importance: float, timestamp: str):
|
||||||
|
"""Store conversation memory in JSON"""
|
||||||
|
memory_data = self._load_memory()
|
||||||
|
|
||||||
|
memory_entry = {
|
||||||
|
"timestamp": timestamp,
|
||||||
|
"user_id": user_id,
|
||||||
|
"content": content,
|
||||||
|
"context": context[:500],
|
||||||
|
"importance": importance,
|
||||||
|
"id": f"{channel_id}_{int(datetime.fromisoformat(timestamp).timestamp())}"
|
||||||
|
}
|
||||||
|
|
||||||
|
channel_key = str(channel_id)
|
||||||
|
if channel_key not in memory_data["conversations"]:
|
||||||
|
memory_data["conversations"][channel_key] = []
|
||||||
|
|
||||||
|
memory_data["conversations"][channel_key].append(memory_entry)
|
||||||
|
memory_data["conversations"][channel_key] = memory_data["conversations"][channel_key][-100:]
|
||||||
|
|
||||||
|
self._save_memory(memory_data)
|
||||||
|
|
||||||
|
def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
|
||||||
|
"""Get recent conversation memories from JSON"""
|
||||||
|
memory_data = self._load_memory()
|
||||||
|
channel_key = str(channel_id)
|
||||||
|
|
||||||
|
if channel_key not in memory_data["conversations"]:
|
||||||
|
return []
|
||||||
|
|
||||||
|
cutoff_time = datetime.utcnow() - timedelta(hours=hours)
|
||||||
|
recent_memories = []
|
||||||
|
|
||||||
|
for memory in memory_data["conversations"][channel_key]:
|
||||||
|
memory_time = datetime.fromisoformat(memory["timestamp"])
|
||||||
|
if memory_time > cutoff_time:
|
||||||
|
recent_memories.append(memory)
|
||||||
|
|
||||||
|
recent_memories.sort(key=lambda x: (x["importance"], x["timestamp"]), reverse=True)
|
||||||
|
return recent_memories[:10]
|
||||||
|
|
||||||
|
def store_user_memory(self, user_id: str, memory_type: str, content: str,
|
||||||
|
importance: float, timestamp: str):
|
||||||
|
"""Store user memory in JSON"""
|
||||||
|
memory_data = self._load_memory()
|
||||||
|
|
||||||
|
user_key = str(user_id)
|
||||||
|
if user_key not in memory_data["user_memories"]:
|
||||||
|
memory_data["user_memories"][user_key] = []
|
||||||
|
|
||||||
|
memory_entry = {
|
||||||
|
"timestamp": timestamp,
|
||||||
|
"type": memory_type,
|
||||||
|
"content": content,
|
||||||
|
"importance": importance,
|
||||||
|
"id": f"{user_id}_{memory_type}_{int(datetime.fromisoformat(timestamp).timestamp())}"
|
||||||
|
}
|
||||||
|
|
||||||
|
memory_data["user_memories"][user_key].append(memory_entry)
|
||||||
|
memory_data["user_memories"][user_key] = memory_data["user_memories"][user_key][-50:]
|
||||||
|
|
||||||
|
self._save_memory(memory_data)
|
||||||
|
|
||||||
|
def get_user_context(self, user_id: str) -> List[Dict]:
|
||||||
|
"""Get user memories from JSON"""
|
||||||
|
memory_data = self._load_memory()
|
||||||
|
user_key = str(user_id)
|
||||||
|
|
||||||
|
if user_key not in memory_data["user_memories"]:
|
||||||
|
return []
|
||||||
|
|
||||||
|
user_memories = memory_data["user_memories"][user_key]
|
||||||
|
user_memories.sort(key=lambda x: (x["importance"], x["timestamp"]), reverse=True)
|
||||||
|
return user_memories[:5]
|
||||||
|
|
||||||
|
def cleanup_old_memories(self, days: int = 30):
|
||||||
|
"""Clean up old memories from JSON"""
|
||||||
|
memory_data = self._load_memory()
|
||||||
|
cutoff_time = datetime.utcnow() - timedelta(days=days)
|
||||||
|
cleaned = False
|
||||||
|
|
||||||
|
# Clean conversation memories
|
||||||
|
for channel_id in memory_data["conversations"]:
|
||||||
|
original_count = len(memory_data["conversations"][channel_id])
|
||||||
|
memory_data["conversations"][channel_id] = [
|
||||||
|
memory for memory in memory_data["conversations"][channel_id]
|
||||||
|
if datetime.fromisoformat(memory["timestamp"]) > cutoff_time
|
||||||
|
]
|
||||||
|
if len(memory_data["conversations"][channel_id]) < original_count:
|
||||||
|
cleaned = True
|
||||||
|
|
||||||
|
# Clean user memories (keep important ones longer)
|
||||||
|
for user_id in memory_data["user_memories"]:
|
||||||
|
original_count = len(memory_data["user_memories"][user_id])
|
||||||
|
memory_data["user_memories"][user_id] = [
|
||||||
|
memory for memory in memory_data["user_memories"][user_id]
|
||||||
|
if (datetime.fromisoformat(memory["timestamp"]) > cutoff_time or
|
||||||
|
memory["importance"] > 0.7)
|
||||||
|
]
|
||||||
|
if len(memory_data["user_memories"][user_id]) < original_count:
|
||||||
|
cleaned = True
|
||||||
|
|
||||||
|
if cleaned:
|
||||||
|
self._save_memory(memory_data)
|
||||||
|
logger.info(f"Cleaned up old memories from JSON files")
|
||||||
|
|
||||||
|
|
||||||
|
class DatabaseManager:
|
||||||
|
"""Main database manager that handles backend selection and configuration"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.backend = None
|
||||||
|
self.memory_enabled = True
|
||||||
|
self._load_config()
|
||||||
|
self._init_backend()
|
||||||
|
|
||||||
|
def _load_config(self):
|
||||||
|
"""Load database configuration from settings"""
|
||||||
|
try:
|
||||||
|
settings_path = os.path.join(os.path.dirname(__file__), "settings.yml")
|
||||||
|
with open(settings_path, "r", encoding="utf-8") as f:
|
||||||
|
settings = yaml.safe_load(f)
|
||||||
|
|
||||||
|
db_config = settings.get("database", {})
|
||||||
|
|
||||||
|
# Allow environment variable overrides for Docker
|
||||||
|
self.backend_type = os.getenv("DATABASE_BACKEND", db_config.get("backend", "json")).lower()
|
||||||
|
self.memory_enabled = os.getenv("MEMORY_ENABLED", "true").lower() == "true" if os.getenv("MEMORY_ENABLED") else settings.get("memory", {}).get("enabled", True)
|
||||||
|
|
||||||
|
# SQLite specific config
|
||||||
|
self.sqlite_path = os.getenv("SQLITE_PATH", db_config.get("sqlite_path", "data/deltabot.db"))
|
||||||
|
|
||||||
|
# JSON specific config
|
||||||
|
self.profiles_path = db_config.get("profiles_path", "src/user_profiles.json")
|
||||||
|
self.memory_path = db_config.get("memory_path", "src/memory.json")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to load database config: {e}, using defaults")
|
||||||
|
self.backend_type = "json"
|
||||||
|
self.memory_enabled = True
|
||||||
|
self.sqlite_path = "data/deltabot.db"
|
||||||
|
self.profiles_path = "src/user_profiles.json"
|
||||||
|
self.memory_path = "src/memory.json"
|
||||||
|
|
||||||
|
def _init_backend(self):
|
||||||
|
"""Initialize the selected backend"""
|
||||||
|
if self.backend_type == "sqlite":
|
||||||
|
self.backend = SQLiteBackend(self.sqlite_path)
|
||||||
|
logger.info("Initialized SQLite database backend")
|
||||||
|
else:
|
||||||
|
self.backend = JSONBackend(self.profiles_path, self.memory_path)
|
||||||
|
logger.info("Initialized JSON database backend")
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
"""Close database connection"""
|
||||||
|
if self.backend:
|
||||||
|
self.backend.close()
|
||||||
|
|
||||||
|
# User Profile methods
|
||||||
|
def get_user_profile(self, user_id: str) -> Optional[Dict]:
|
||||||
|
return self.backend.get_user_profile(str(user_id))
|
||||||
|
|
||||||
|
def save_user_profile(self, user_id: str, profile: Dict):
|
||||||
|
self.backend.save_user_profile(str(user_id), profile)
|
||||||
|
|
||||||
|
def get_all_user_profiles(self) -> Dict[str, Dict]:
|
||||||
|
return self.backend.get_all_user_profiles()
|
||||||
|
|
||||||
|
# Memory methods (only if memory is enabled)
|
||||||
|
def store_conversation_memory(self, channel_id: str, user_id: str, content: str,
|
||||||
|
context: str, importance: float):
|
||||||
|
if not self.memory_enabled:
|
||||||
|
return
|
||||||
|
|
||||||
|
timestamp = datetime.utcnow().isoformat()
|
||||||
|
self.backend.store_conversation_memory(
|
||||||
|
str(channel_id), str(user_id), content, context, importance, timestamp
|
||||||
|
)
|
||||||
|
|
||||||
|
def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
|
||||||
|
if not self.memory_enabled:
|
||||||
|
return []
|
||||||
|
return self.backend.get_conversation_context(str(channel_id), hours)
|
||||||
|
|
||||||
|
def store_user_memory(self, user_id: str, memory_type: str, content: str, importance: float):
|
||||||
|
if not self.memory_enabled:
|
||||||
|
return
|
||||||
|
|
||||||
|
timestamp = datetime.utcnow().isoformat()
|
||||||
|
self.backend.store_user_memory(str(user_id), memory_type, content, importance, timestamp)
|
||||||
|
|
||||||
|
def get_user_context(self, user_id: str) -> List[Dict]:
|
||||||
|
if not self.memory_enabled:
|
||||||
|
return []
|
||||||
|
return self.backend.get_user_context(str(user_id))
|
||||||
|
|
||||||
|
def cleanup_old_memories(self, days: int = 30):
|
||||||
|
if not self.memory_enabled:
|
||||||
|
return
|
||||||
|
self.backend.cleanup_old_memories(days)
|
||||||
|
|
||||||
|
def is_memory_enabled(self) -> bool:
|
||||||
|
return self.memory_enabled
|
||||||
|
|
||||||
|
def get_backend_type(self) -> str:
|
||||||
|
return self.backend_type
|
||||||
|
|
||||||
|
|
||||||
|
# Global database manager instance
|
||||||
|
db_manager = DatabaseManager()
|
||||||
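A short usage sketch of the manager above, with illustrative IDs taken from the activity log; the calls match the `DatabaseManager` methods defined in this file, and the memory write becomes a no-op when memory is disabled.

```python
# Minimal sketch of using the global db_manager (IDs are illustrative).
from database import db_manager

profile = db_manager.get_user_profile("161149541171593216") or {}
profile["interactions"] = profile.get("interactions", 0) + 1
db_manager.save_user_profile("161149541171593216", profile)

if db_manager.is_memory_enabled():
    db_manager.store_conversation_memory(
        channel_id="1370420592360161393",
        user_id="161149541171593216",
        content="remember this",
        context="recent chat",
        importance=0.8,
    )
```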

62 src/enhanced_ai.py Normal file
@@ -0,0 +1,62 @@
# enhanced_ai.py
# Enhanced AI response function with memory integration
# This extends your existing ai.py without breaking it

from ai import get_ai_response as base_get_ai_response, get_model_name, load_model
from memory_manager import memory_manager
from personality import load_persona
from logger import setup_logger, generate_req_id, log_llm_request, log_llm_response
import requests
import time
import os

logger = setup_logger("enhanced_ai")

def get_ai_response_with_memory(user_prompt, context=None, user_profile=None, message=None):
    """Enhanced AI response that includes memory context"""

    # Get the memory context if a message is provided
    memory_context = ""
    if message and user_profile:
        user_id = str(message.author.id)
        channel_id = str(message.channel.id)

        # Store this interaction in memory
        context_messages = context if isinstance(context, list) else []
        memory_manager.analyze_and_store_message(message, context_messages)

        # Get formatted memory for the prompt
        memory_context = memory_manager.format_memory_for_prompt(user_id, channel_id)

    # Combine the memory context with the existing context
    enhanced_context = ""
    if memory_context:
        enhanced_context += f"{memory_context}\n\n"
    if context:
        if isinstance(context, str):
            enhanced_context += f"[Recent Messages]\n{context}"
        else:
            # Assume it's a list of messages and format them
            from context import format_context
            enhanced_context += f"[Recent Messages]\n{format_context(context)}"

    # Use the original function with the enhanced context
    return base_get_ai_response(user_prompt, enhanced_context, user_profile)

def analyze_user_message_for_memory(message, context_messages=None):
    """Analyze and store a message in memory without generating a response"""
    memory_manager.analyze_and_store_message(message, context_messages)

def get_user_memory_summary(user_id: str) -> str:
    """Get a summary of what we remember about a user"""
    user_memories = memory_manager.get_user_context(user_id)
    if not user_memories:
        return "No specific memories about this user yet."

    summary_lines = []
    for memory in user_memories:
        memory_type = memory['type'].title()
        content = memory['content'][:100]
        summary_lines.append(f"- {memory_type}: {content}")

    return "What I remember about this user:\n" + "\n".join(summary_lines)
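A hedged call-site sketch for the wrapper above; everything except `get_ai_response_with_memory` itself (the handler and its arguments) is illustrative.

```python
# Hypothetical call site for get_ai_response_with_memory.
from enhanced_ai import get_ai_response_with_memory

async def reply_with_memory(message, context_messages, profile):
    # The wrapper stores the message in memory, prepends the memory block,
    # and then defers to the original ai.get_ai_response.
    reply = get_ai_response_with_memory(
        message.content,
        context=context_messages,
        user_profile=profile,
        message=message,
    )
    await message.channel.send(reply)
```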

163 src/logger.py Normal file
@@ -0,0 +1,163 @@
import logging
import os
import sys
from logging.handlers import RotatingFileHandler


def setup_logger(name: str = "bot"):
    """Create a logger with a rotating file handler, a separate error log, and
    concise console output. Behavior is controlled by environment vars:
    - LOG_LEVEL (default INFO)
    - LOG_FILE (default bot.log)
    - LOG_MAX_BYTES (default 5_000_000)
    - LOG_BACKUP_COUNT (default 5)

    If `colorlog` is installed, console output will be colorized.
    """

    # Config from environment (all values can be set in .env)
    level_name = os.getenv("LOG_LEVEL", "INFO").upper()
    try:
        base_level = getattr(logging, level_name)
    except Exception:
        base_level = logging.INFO

    # Handler toggles + per-handler levels
    console_enabled = os.getenv("LOG_CONSOLE", "true").lower() in ("1", "true", "yes")
    console_level_name = os.getenv("LOG_CONSOLE_LEVEL", level_name).upper()
    console_to_stdout = os.getenv("LOG_CONSOLE_TO_STDOUT", "true").lower() in ("1", "true", "yes")

    file_enabled = os.getenv("LOG_TO_FILE", "true").lower() in ("1", "true", "yes")
    file_level_name = os.getenv("LOG_FILE_LEVEL", "DEBUG").upper()
    log_file = os.getenv("LOG_FILE", "bot.log")
    max_bytes = int(os.getenv("LOG_MAX_BYTES", 5_000_000))
    backup_count = int(os.getenv("LOG_BACKUP_COUNT", 5))

    error_file_enabled = os.getenv("LOG_ERROR_FILE", "true").lower() in ("1", "true", "yes")

    # File formatter: include module and line number for easier debugging
    file_formatter = logging.Formatter(
        "[%(asctime)s] [%(levelname)s] [%(name)s:%(lineno)d] %(message)s",
        "%Y-%m-%d %H:%M:%S",
    )

    # Console formatter: shorter. Try to use colorlog if available.
    try:
        import colorlog

        console_formatter = colorlog.ColoredFormatter(
            "%(log_color)s[%(asctime)s] [%(levelname)s]%(reset)s %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
            log_colors={
                "DEBUG": "cyan",
                "INFO": "green",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red",
            },
        )
    except Exception:
        console_formatter = logging.Formatter(
            "[%(asctime)s] [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S"
        )

    # Handlers
    # Console handler (stdout by default, configurable)
    console_handler = logging.StreamHandler(sys.stdout if console_to_stdout else sys.stderr)
    console_handler.setFormatter(console_formatter)
    try:
        console_level = getattr(logging, console_level_name)
    except Exception:
        console_level = base_level
    console_handler.setLevel(console_level)

    file_handler = RotatingFileHandler(
        log_file, maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8"
    )
    file_handler.setFormatter(file_formatter)
    try:
        file_level = getattr(logging, file_level_name)
    except Exception:
        file_level = logging.DEBUG
    file_handler.setLevel(file_level)

    # Separate error-only rotating file
    error_log_file = os.path.splitext(log_file)[0] + ".error.log"
    error_handler = RotatingFileHandler(
        error_log_file, maxBytes=max_bytes, backupCount=backup_count, encoding="utf-8"
    )
    error_handler.setLevel(logging.ERROR)
    error_handler.setFormatter(file_formatter)

    logger = logging.getLogger(name)

    # Set the logger's base level to the most permissive of the configured levels so handlers can filter
    levels = [base_level]
    if console_enabled:
        levels.append(console_level)
    if file_enabled:
        levels.append(file_level)
    # The minimum numeric level is the most verbose (DEBUG=10)
    logger.setLevel(min(levels))

    # Avoid adding duplicate handlers if the logger is already configured
    if not logger.handlers:
        if console_enabled:
            logger.addHandler(console_handler)
        if file_enabled:
            logger.addHandler(file_handler)
        if error_file_enabled:
            logger.addHandler(error_handler)

    return logger


def generate_req_id(prefix: str = "r") -> str:
    """Generate a short request id for correlating logs."""
    import uuid

    return f"{prefix}{uuid.uuid4().hex[:8]}"


def mask_secret(value: str | None, head: int = 6, tail: int = 4) -> str | None:
    """Mask a secret for safe logging."""
    if not value:
        return value
    if len(value) <= head + tail + 3:
        return "***"
    return f"{value[:head]}...{value[-tail:]}"


def log_llm_request(logger: logging.Logger, req_id: str, model: str, user: str | None, context_len: int | None):
    logger.info("%s LLM request start model=%s user=%s context_len=%s", req_id, model, user or "-", context_len or 0)


def log_llm_payload(logger: logging.Logger, req_id: str, payload: object):
    logger.debug("%s LLM full payload: %s", req_id, payload)


def log_llm_response(logger: logging.Logger, req_id: str, model: str, duration_s: float, short_text: str | None, raw: object | None = None):
    logger.info("%s LLM response model=%s duration=%.3fs summary=%s", req_id, model, duration_s, (short_text or "[no text]").replace("\n", " ")[:160])
    if raw is not None:
        logger.debug("%s LLM raw response: %s", req_id, raw)


class SamplingFilter(logging.Filter):
    """Filter that randomly allows a fraction of records through.

    Useful for noisy logs like 'reaction skipped'.
    """

    def __init__(self, sample_rate: float = 0.1):
        super().__init__()
        try:
            self.rate = float(sample_rate)
        except Exception:
            self.rate = 0.1

    def filter(self, record: logging.LogRecord) -> bool:
        import random

        if self.rate >= 1.0:
            return True
        return random.random() < self.rate
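Tying the helpers together: a sketch of typical wiring, with a sampled sub-logger for noisy paths and a request id correlating an LLM round trip. The call shapes match the helpers above; the model name, logger names, and timing are placeholders.

```python
# Illustrative wiring for logger.py's helpers.
import time
from logger import setup_logger, generate_req_id, log_llm_request, log_llm_response, SamplingFilter

logger = setup_logger("bot")

# A dedicated sub-logger for spammy paths, letting ~10% of records through.
noisy = setup_logger("bot.reactions")
noisy.addFilter(SamplingFilter(sample_rate=0.1))

req_id = generate_req_id()
log_llm_request(logger, req_id, model="gemma3:12b", user="Miguel", context_len=15)
start = time.monotonic()
# ... call the model here ...
log_llm_response(logger, req_id, model="gemma3:12b",
                 duration_s=time.monotonic() - start, short_text="hello!")
```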

16 src/memory.json Normal file
@@ -0,0 +1,16 @@
{
  "conversations": {
    "test_channel": [
      {
        "timestamp": "2025-10-10T16:57:27.533778",
        "user_id": "test_user",
        "content": "test message",
        "context": "test context",
        "importance": 0.8,
        "id": "test_channel_1760129847"
      }
    ]
  },
  "user_memories": {},
  "global_events": []
}

5 src/memory.json.backup.20251010_125624 Normal file
@@ -0,0 +1,5 @@
{
  "conversations": {},
  "user_memories": {},
  "global_events": []
}

5 src/memory.json.backup.20251010_125727 Normal file
@@ -0,0 +1,5 @@
{
  "conversations": {},
  "user_memories": {},
  "global_events": []
}

263 src/memory.py Normal file
@@ -0,0 +1,263 @@
# memory.py
# DEPRECATED - Use memory_manager.py instead
# This file is kept for backward compatibility
# Enhanced memory system building on the existing user_profiles.py

import os
import json
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
from user_profiles import load_profiles, save_profiles, PROFILE_PATH
from logger import setup_logger

logger = setup_logger("memory")

MEMORY_PATH = os.path.join(os.path.dirname(__file__), "memory.json")

class MemoryManager:
    """Lightweight memory system for Delta to remember conversations and user details"""

    def __init__(self):
        self.ensure_memory_file()

    def ensure_memory_file(self):
        """Ensure the memory file exists"""
        if not os.path.exists(MEMORY_PATH):
            initial_data = {
                "conversations": {},   # channel_id -> list of memories
                "user_memories": {},   # user_id -> personal memories
                "global_events": []    # server-wide memorable events
            }
            with open(MEMORY_PATH, "w", encoding="utf-8") as f:
                json.dump(initial_data, f, indent=2)

    def load_memory(self) -> Dict[str, Any]:
        """Load memory data"""
        try:
            with open(MEMORY_PATH, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            logger.error(f"Failed to load memory: {e}")
            return {"conversations": {}, "user_memories": {}, "global_events": []}

    def save_memory(self, data: Dict[str, Any]):
        """Save memory data"""
        try:
            with open(MEMORY_PATH, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save memory: {e}")

    def store_conversation_memory(self, channel_id: str, user_id: str,
                                  content: str, context: str, importance_score: float = 1.0):
        """Store important conversation moments"""
        memory_data = self.load_memory()

        memory_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "user_id": user_id,
            "content": content,
            "context": context[:500],  # Limit context size
            "importance": importance_score,
            "id": f"{channel_id}_{int(datetime.utcnow().timestamp())}"
        }

        channel_key = str(channel_id)
        if channel_key not in memory_data["conversations"]:
            memory_data["conversations"][channel_key] = []

        memory_data["conversations"][channel_key].append(memory_entry)

        # Keep only the last 100 memories per channel to prevent bloat
        memory_data["conversations"][channel_key] = memory_data["conversations"][channel_key][-100:]

        self.save_memory(memory_data)
        logger.debug(f"Stored conversation memory for channel {channel_id}")

    def store_user_memory(self, user_id: str, memory_type: str, content: str, importance: float = 1.0):
        """Store personal user memories (interests, preferences, personal details)"""
        memory_data = self.load_memory()

        user_key = str(user_id)
        if user_key not in memory_data["user_memories"]:
            memory_data["user_memories"][user_key] = []

        memory_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "type": memory_type,  # "interest", "preference", "personal", "relationship"
            "content": content,
            "importance": importance,
            "id": f"{user_id}_{memory_type}_{int(datetime.utcnow().timestamp())}"
        }

        memory_data["user_memories"][user_key].append(memory_entry)

        # Keep only the last 50 user memories to prevent bloat
        memory_data["user_memories"][user_key] = memory_data["user_memories"][user_key][-50:]

        self.save_memory(memory_data)
        logger.debug(f"Stored user memory for {user_id}: {memory_type}")

    def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
        """Get recent conversation memories for context"""
        memory_data = self.load_memory()
        channel_key = str(channel_id)

        if channel_key not in memory_data["conversations"]:
            return []

        cutoff_time = datetime.utcnow() - timedelta(hours=hours)
        recent_memories = []

        for memory in memory_data["conversations"][channel_key]:
            memory_time = datetime.fromisoformat(memory["timestamp"])
            if memory_time > cutoff_time:
                recent_memories.append(memory)

        # Sort by importance and recency
        recent_memories.sort(key=lambda x: (x["importance"], x["timestamp"]), reverse=True)
        return recent_memories[:10]  # Return the top 10 most important recent memories

    def get_user_context(self, user_id: str) -> List[Dict]:
        """Get user-specific memories for personalization"""
        memory_data = self.load_memory()
        user_key = str(user_id)

        if user_key not in memory_data["user_memories"]:
            return []

        # Sort by importance and recency
        user_memories = memory_data["user_memories"][user_key]
        user_memories.sort(key=lambda x: (x["importance"], x["timestamp"]), reverse=True)
        return user_memories[:5]  # Return the top 5 most important user memories

    def analyze_and_store_message(self, message, context_messages: List = None):
        """Analyze a message and determine whether it should be stored as memory"""
        content = message.content.lower()
        user_id = str(message.author.id)
        channel_id = str(message.channel.id)

        # Determine importance based on content analysis
        importance_score = self._calculate_importance(content)

        if importance_score > 0.3:  # Only store moderately important+ messages
            context_str = ""
            if context_messages:
                context_str = " | ".join([f"{msg.author.display_name}: {msg.content[:100]}"
                                          for msg in context_messages[-3:]])  # Last 3 messages for context

            self.store_conversation_memory(
                channel_id, user_id, message.content, context_str, importance_score
            )

        # Extract personal information for user memory
        self._extract_user_details(message)

    def _calculate_importance(self, content: str) -> float:
        """Calculate an importance score for a message (0.0 to 1.0)"""
        importance = 0.0

        # Personal information indicators
        personal_keywords = ['i am', 'my name', 'i love', 'i hate', 'my favorite',
                             'i work', 'i study', 'my job', 'birthday', 'anniversary']
        for keyword in personal_keywords:
            if keyword in content:
                importance += 0.4

        # Emotional indicators
        emotional_keywords = ['love', 'hate', 'excited', 'sad', 'angry', 'happy',
                              'frustrated', 'amazing', 'terrible', 'awesome']
        for keyword in emotional_keywords:
            if keyword in content:
                importance += 0.2

        # Question indicators (important for context)
        if '?' in content:
            importance += 0.1

        # Length bonus (longer messages are often more important)
        if len(content) > 100:
            importance += 0.1

        # Direct mentions of Delta or bot commands
        if 'delta' in content or content.startswith('!'):
            importance += 0.3

        return min(importance, 1.0)  # Cap at 1.0

    def _extract_user_details(self, message):
        """Extract and store personal details from user messages"""
        content = message.content.lower()
        user_id = str(message.author.id)

        # Simple pattern matching for common personal info
        patterns = {
            'interest': ['i love', 'i like', 'i enjoy', 'my favorite'],
            'personal': ['i am', 'my name is', 'i work at', 'my job'],
            'preference': ['i prefer', 'i usually', 'i always', 'i never']
        }

        for memory_type, keywords in patterns.items():
            for keyword in keywords:
                if keyword in content:
                    # Extract the relevant part of the message
                    start_idx = content.find(keyword)
                    relevant_part = content[start_idx:start_idx+200]  # Next 200 chars

                    self.store_user_memory(user_id, memory_type, relevant_part, 0.5)
|
||||||
|
break # Only store one per message to avoid spam
|
||||||
|
|
||||||
|
def format_memory_for_prompt(self, user_id: str, channel_id: str) -> str:
|
||||||
|
"""Format memory for inclusion in AI prompts"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Add conversation context
|
||||||
|
conv_memories = self.get_conversation_context(channel_id, hours=48)
|
||||||
|
if conv_memories:
|
||||||
|
lines.append("[Recent Conversation Context]")
|
||||||
|
for memory in conv_memories[:3]: # Top 3 most important
|
||||||
|
timestamp = datetime.fromisoformat(memory["timestamp"]).strftime("%m/%d %H:%M")
|
||||||
|
lines.append(f"- {timestamp}: {memory['content'][:150]}")
|
||||||
|
|
||||||
|
# Add user context
|
||||||
|
user_memories = self.get_user_context(user_id)
|
||||||
|
if user_memories:
|
||||||
|
lines.append("[User Context]")
|
||||||
|
for memory in user_memories[:3]: # Top 3 most important
|
||||||
|
lines.append(f"- {memory['type'].title()}: {memory['content'][:100]}")
|
||||||
|
|
||||||
|
return "\n".join(lines) if lines else ""
|
||||||
|
|
||||||
|
def cleanup_old_memories(self, days: int = 30):
|
||||||
|
"""Clean up memories older than specified days"""
|
||||||
|
memory_data = self.load_memory()
|
||||||
|
cutoff_time = datetime.utcnow() - timedelta(days=days)
|
||||||
|
cleaned = False
|
||||||
|
|
||||||
|
# Clean conversation memories
|
||||||
|
for channel_id in memory_data["conversations"]:
|
||||||
|
original_count = len(memory_data["conversations"][channel_id])
|
||||||
|
memory_data["conversations"][channel_id] = [
|
||||||
|
memory for memory in memory_data["conversations"][channel_id]
|
||||||
|
if datetime.fromisoformat(memory["timestamp"]) > cutoff_time
|
||||||
|
]
|
||||||
|
if len(memory_data["conversations"][channel_id]) < original_count:
|
||||||
|
cleaned = True
|
||||||
|
|
||||||
|
# Clean user memories (keep important ones longer)
|
||||||
|
for user_id in memory_data["user_memories"]:
|
||||||
|
original_count = len(memory_data["user_memories"][user_id])
|
||||||
|
memory_data["user_memories"][user_id] = [
|
||||||
|
memory for memory in memory_data["user_memories"][user_id]
|
||||||
|
if (datetime.fromisoformat(memory["timestamp"]) > cutoff_time or
|
||||||
|
memory["importance"] > 0.7) # Keep very important memories longer
|
||||||
|
]
|
||||||
|
if len(memory_data["user_memories"][user_id]) < original_count:
|
||||||
|
cleaned = True
|
||||||
|
|
||||||
|
if cleaned:
|
||||||
|
self.save_memory(memory_data)
|
||||||
|
logger.info(f"Cleaned up memories older than {days} days")
|
||||||
|
|
||||||
|
# Global memory manager instance
|
||||||
|
memory_manager = MemoryManager()
|
||||||
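For a quick sanity check of the scoring above (a hedged walk-through, not code from this changeset), the keyword weights combine additively and cap at 1.0:

# "i love sushi"          -> 0.4 ('i love' personal) + 0.2 ('love' emotional) = 0.6, stored
# "what time is it?"      -> 0.1 (question only) = 0.1, below the 0.3 storage threshold
# "delta, my name is Sam" -> 0.4 ('my name' personal) + 0.3 ('delta' mention) = 0.7, stored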
155  src/memory_manager.py  Normal file

@@ -0,0 +1,155 @@
"""
memory_manager.py
Unified memory management using database abstraction layer
"""

from datetime import datetime
from typing import List, Dict, Optional
from database import db_manager
from logger import setup_logger

logger = setup_logger("memory_manager")

class UnifiedMemoryManager:
    """Memory manager that works with any database backend"""

    def __init__(self):
        self.db = db_manager

    def analyze_and_store_message(self, message, context_messages: List = None):
        """Analyze a message and determine if it should be stored as memory"""
        if not self.db.is_memory_enabled():
            return

        content = message.content.lower()
        user_id = str(message.author.id)
        channel_id = str(message.channel.id)

        # Determine importance based on content analysis
        importance_score = self._calculate_importance(content)

        if importance_score > 0.3:  # Only store moderately important+ messages
            context_str = ""
            if context_messages:
                context_str = " | ".join([f"{msg.author.display_name}: {msg.content[:100]}"
                                          for msg in context_messages[-3:]])  # Last 3 messages for context

            self.db.store_conversation_memory(
                channel_id, user_id, message.content, context_str, importance_score
            )

        # Extract personal information for user memory
        self._extract_user_details(message)

    def _calculate_importance(self, content: str) -> float:
        """Calculate importance score for a message (0.0 to 1.0)"""
        importance = 0.0

        # Personal information indicators
        personal_keywords = ['i am', 'my name', 'i love', 'i hate', 'my favorite',
                             'i work', 'i study', 'my job', 'birthday', 'anniversary']
        for keyword in personal_keywords:
            if keyword in content:
                importance += 0.4

        # Emotional indicators
        emotional_keywords = ['love', 'hate', 'excited', 'sad', 'angry', 'happy',
                              'frustrated', 'amazing', 'terrible', 'awesome']
        for keyword in emotional_keywords:
            if keyword in content:
                importance += 0.2

        # Question indicators (important for context)
        if '?' in content:
            importance += 0.1

        # Length bonus (longer messages often more important)
        if len(content) > 100:
            importance += 0.1

        # Direct mentions of Delta or bot commands
        if 'delta' in content or content.startswith('!'):
            importance += 0.3

        return min(importance, 1.0)  # Cap at 1.0

    def _extract_user_details(self, message):
        """Extract and store personal details from user messages"""
        if not self.db.is_memory_enabled():
            return

        content = message.content.lower()
        user_id = str(message.author.id)

        # Simple pattern matching for common personal info
        patterns = {
            'interest': ['i love', 'i like', 'i enjoy', 'my favorite'],
            'personal': ['i am', 'my name is', 'i work at', 'my job'],
            'preference': ['i prefer', 'i usually', 'i always', 'i never']
        }

        for memory_type, keywords in patterns.items():
            for keyword in keywords:
                if keyword in content:
                    # Extract the relevant part of the message
                    start_idx = content.find(keyword)
                    relevant_part = content[start_idx:start_idx + 200]  # Next 200 chars

                    self.db.store_user_memory(user_id, memory_type, relevant_part, 0.5)
                    break  # Only store one per message to avoid spam

    def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
        """Get recent conversation memories for context"""
        return self.db.get_conversation_context(channel_id, hours)

    def get_user_context(self, user_id: str) -> List[Dict]:
        """Get user-specific memories for personalization"""
        return self.db.get_user_context(user_id)

    def format_memory_for_prompt(self, user_id: str, channel_id: str) -> str:
        """Format memory for inclusion in AI prompts"""
        if not self.db.is_memory_enabled():
            return ""

        lines = []

        # Add conversation context
        conv_memories = self.get_conversation_context(channel_id, hours=48)
        if conv_memories:
            lines.append("[Recent Conversation Context]")
            for memory in conv_memories[:3]:  # Top 3 most important
                timestamp = datetime.fromisoformat(memory["timestamp"]).strftime("%m/%d %H:%M")
                lines.append(f"- {timestamp}: {memory['content'][:150]}")

        # Add user context
        user_memories = self.get_user_context(user_id)
        if user_memories:
            lines.append("[User Context]")
            for memory in user_memories[:3]:  # Top 3 most important
                memory_type = memory.get('type', memory.get('memory_type', 'unknown'))
                lines.append(f"- {memory_type.title()}: {memory['content'][:100]}")

        return "\n".join(lines) if lines else ""

    def cleanup_old_memories(self, days: int = 30):
        """Clean up memories older than specified days"""
        if not self.db.is_memory_enabled():
            return

        self.db.cleanup_old_memories(days)
        logger.info(f"Cleaned up memories older than {days} days")

    def is_enabled(self) -> bool:
        """Check if memory system is enabled"""
        return self.db.is_memory_enabled()

    def get_backend_info(self) -> Dict[str, str]:
        """Get information about current backend"""
        return {
            "backend_type": self.db.get_backend_type(),
            "memory_enabled": str(self.db.is_memory_enabled())
        }


# Global memory manager instance
memory_manager = UnifiedMemoryManager()
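A minimal usage sketch (assumed wiring, not part of this changeset; `bot` is the discord.py client defined elsewhere in the repo):

from memory_manager import memory_manager

@bot.event
async def on_message(message):
    if message.author.bot:
        return
    # Score the message and persist it through whichever backend is configured
    memory_manager.analyze_and_store_message(message)
    # Later, pull formatted context back out when building the next LLM prompt
    context_block = memory_manager.format_memory_for_prompt(
        str(message.author.id), str(message.channel.id)
    )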
132  src/modelfile.py  Normal file

@@ -0,0 +1,132 @@
import os
import re
import json
from typing import Dict, Optional, Any


def _read_file(path: str) -> str:
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()


def _coerce_value(val: str) -> Any:
    """Try to coerce a string to int/float/bool; otherwise return the stripped string."""
    v = val.strip()
    if not v:
        return v
    # booleans
    if v.lower() in ("true", "false"):
        return v.lower() == "true"
    # numbers
    try:
        if '.' in v:
            return float(v)
        return int(v)
    except Exception:
        # strip surrounding quotes
        if (v.startswith('"') and v.endswith('"')) or (v.startswith("'") and v.endswith("'")):
            return v[1:-1]
        return v


def parse_mod_file(path: str) -> Dict[str, Optional[object]]:
    """Parse a .mod DSL or JSON modelfile and return a dict with normalized keys.

    Returns keys: name, base_model, system, template, params, includes
    """
    text = _read_file(path)
    _, ext = os.path.splitext(path)
    ext = ext.lower()

    if ext == '.json':
        data = json.loads(text)
        return {
            'name': data.get('name') or os.path.basename(path),
            'base_model': data.get('from') or data.get('base_model'),
            'system': data.get('system') or data.get('SYSTEM'),
            'template': data.get('template'),
            'params': data.get('params', {}),
            'includes': data.get('includes', []),
        }

    # DSL (.mod) parsing
    base_model = None
    params: Dict[str, Any] = {}
    system = None
    template = None
    name = os.path.basename(path)
    includes = []

    # NAME <value>
    m = re.search(r'^NAME\s+(.+)$', text, flags=re.MULTILINE)
    if m:
        name = m.group(1).strip()

    # FROM <model>
    m = re.search(r'^FROM\s+(.+)$', text, flags=re.MULTILINE)
    if m:
        base_model = m.group(1).strip()

    # INCLUDE <path>
    for im in re.finditer(r'^INCLUDE\s+(.+)$', text, flags=re.MULTILINE):
        inc = im.group(1).strip().strip('"').strip("'")
        if inc:
            includes.append(inc)

    # PARAMETER key value (value may be quoted)
    for pm in re.finditer(r'^PARAMETER\s+(\w+)\s+(.+)$', text, flags=re.MULTILINE):
        key = pm.group(1)
        val = pm.group(2).strip()
        params[key] = _coerce_value(val)

    # SYSTEM triple-quoted
    sm = re.search(r'SYSTEM\s+"""([\s\S]*?)"""', text)
    if sm:
        system = sm.group(1).strip()

    # TEMPLATE triple-quoted
    tm = re.search(r'TEMPLATE\s+"""([\s\S]*?)"""', text)
    if tm:
        template = tm.group(1).strip()

    return {
        'name': name,
        'base_model': base_model,
        'system': system,
        'template': template,
        'params': params,
        'includes': includes,
    }


def load_modfile_if_exists(path: str) -> Optional[Dict[str, object]]:
    if not path:
        return None
    path_input = os.path.expanduser(path)
    candidates = []
    src_dir = os.path.dirname(__file__)
    repo_root = os.path.normpath(os.path.join(src_dir, '..'))

    if os.path.isabs(path_input):
        candidates.append(path_input)
    else:
        candidates.append(os.path.normpath(os.path.join(src_dir, path_input)))
        candidates.append(os.path.normpath(os.path.join(repo_root, path_input)))
        candidates.append(os.path.normpath(os.path.join(repo_root, 'examples', os.path.basename(path_input))))
        candidates.append(os.path.normpath(os.path.join(repo_root, 'personas', os.path.basename(path_input))))
        candidates.append(os.path.normpath(os.path.join(src_dir, os.path.basename(path_input))))

    candidates.append(path_input)

    for candidate in candidates:
        try:
            if os.path.exists(candidate):
                parsed = parse_mod_file(candidate)
                if isinstance(parsed, dict):
                    parsed['_source_path'] = candidate
                    return parsed
        except Exception:
            continue

    return None
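A hedged usage sketch of the parser (the .mod contents below are invented to illustrate the DSL the regexes above recognize):

from modelfile import parse_mod_file

# examples/demo.mod (hypothetical):
#   NAME demo
#   FROM gemma3:12b
#   PARAMETER temperature 0.8
#   SYSTEM """You are a terse assistant."""
mod = parse_mod_file("examples/demo.mod")
print(mod['name'], mod['base_model'], mod['params'])
# -> demo gemma3:12b {'temperature': 0.8}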
@@ -1,7 +1,8 @@
 {
     "name": "Delta",
+    "birthday": "2025-05-04",
     "emoji": "😼",
-    "style_prefix": "Delta says:",
-    "prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair."
+    "nickname_triggers": ["delta", "rgb girl", "catgirl", "queen"],
+    "triggers": ["sushi", "drama", "fight", "weird", "unhinged", "party"],
+    "prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. Always speak with flair. Keep messages short and use emojis moderately. It's ok to swear a little."
 }
@@ -2,21 +2,47 @@
 import json
 import os
+from logger import setup_logger
+
+logger = setup_logger("personality")
 
 PERSONA_FILE = "persona.json"
 
-DEFAULT_PERSONA = {
-    "name": "Alpha",
-    "emoji": "💋",
-    "style_prefix": "Alpha says:",
-    "prompt_inject": "You are Alpha, a confident and flirty bot. Respond with charm and wit. Keep responses short and engaging. Use emojis to enhance your messages. Your goal is to flirt and engage with the user in a fun way."
-}
-
 def load_persona():
-    if os.path.exists(PERSONA_FILE):
-        with open(PERSONA_FILE, "r") as f:
-            return json.load(f)
-    return DEFAULT_PERSONA
+    base_dir = os.path.dirname(__file__)  # Path to /src/
+    # If a modelfile is active and contains a system/template, expose it as
+    # the persona structure used by the rest of the codebase. Import `ai`
+    # lazily to avoid circular imports at module import time.
+    try:
+        import ai
+        if getattr(ai, 'MODFILE', None):
+            MODFILE = ai.MODFILE
+            persona = {
+                'name': MODFILE.get('name') or 'ModPersona',
+                'prompt_inject': MODFILE.get('system') or '',
+                'emoji': '🤖',
+                'style_prefix': (MODFILE.get('name') or 'Delta') + ':'
+            }
+            return persona
+    except Exception:
+        pass
+
+    persona_path = os.path.join(base_dir, "persona.json")
+
+    if not os.path.exists(persona_path):
+        logger.info("⚠️ persona.json not found. Using raw LLM mode.")
+        return None
+
+    try:
+        with open(persona_path, "r", encoding="utf-8") as f:
+            data = json.load(f)
+        if not data.get("name") or not data.get("prompt_inject"):
+            logger.info("⚠️ persona.json missing fields. Using raw LLM mode.")
+            return None
+        return data
+    except Exception as e:
+        logger.info(f"⚠️ Failed to load persona.json: {e}")
+        return None
 
 def save_persona(description: str):
     persona = {
29  src/profilepic.py  Normal file

@@ -0,0 +1,29 @@
# profilepic.py

import base64
import requests
import logging
import os

logger = logging.getLogger("bot")

DISCORD_API = "https://discord.com/api/v10"

def set_avatar_from_bytes(image_bytes: bytes, token: str) -> bool:
    try:
        b64_avatar = base64.b64encode(image_bytes).decode("utf-8")
        payload = {
            "avatar": f"data:image/png;base64,{b64_avatar}"
        }

        headers = {
            "Authorization": f"Bot {token}",
            "Content-Type": "application/json"
        }

        response = requests.patch(f"{DISCORD_API}/users/@me", json=payload, headers=headers)
        logger.info(f"🖼️ Avatar update status: {response.status_code} - {response.text}")
        return response.status_code == 200
    except Exception as e:
        logger.error(f"❌ Failed to update avatar: {str(e)}")
        return False
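A minimal sketch of calling this helper (the filename is hypothetical; the token comes from the same .env used elsewhere in the repo):

import os
from profilepic import set_avatar_from_bytes

with open("avatar.png", "rb") as f:  # hypothetical local image
    ok = set_avatar_from_bytes(f.read(), os.environ["DISCORD_TOKEN"])
# Discord rate-limits avatar changes aggressively, so a non-200 here is not unusual.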
@@ -6,6 +6,8 @@ import asyncio
 import random
 from ai import get_ai_response
 from . import simple, probabilistic, inactivity
+from logger import setup_logger
+logger = setup_logger("scheduler")
 
 def load_settings():
     base_dir = os.path.dirname(os.path.dirname(__file__))  # go up from /scheduler/
@@ -23,11 +25,13 @@ async def start_scheduler(bot):
         scheduler_settings["channel_id"] = int(channel_id_env)
 
     if not scheduler_settings.get("enabled", False):
-        print("🛑 Scheduler disabled in config.")
+        #print("🛑 Scheduler disabled in config.")
+        logger.info("🛑 Scheduler disabled in config.")
         return
 
     mode = scheduler_settings.get("mode", "simple").lower()
-    print(f"🕒 Delta Scheduler started in {mode.upper()} mode.")
+    #print(f"🕒 Delta Scheduler started in {mode.upper()} mode.")
+    logger.info(f"🕒 Delta Scheduler started in {mode.upper()} mode.")
 
     if mode == "simple":
         await simple.run(bot, scheduler_settings, settings)
@@ -51,7 +55,8 @@ async def start_scheduler(bot):
             message = random.choice(scheduler_settings.get("messages", ["Hello from Delta."]))
 
             await channel.send(message)
-            print(f"📤 Scheduled message sent to #{channel.name}: {message}")
+            #print(f"📤 Scheduled message sent to #{channel.name}: {message}")
+            logger.info(f"📤 Scheduled message sent to #{channel.name}: {message}")
 
             probabilistic.on_post(scheduler_settings["probabilistic"])
@@ -61,4 +66,5 @@ async def start_scheduler(bot):
         await inactivity.run(bot, scheduler_settings, settings)
 
     else:
-        print(f"❓ Unknown scheduler mode: {mode}")
+        #print(f"❓ Unknown scheduler mode: {mode}")
+        logger.info(f"❓ Unknown scheduler mode: {mode}")
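A hedged sketch of how start_scheduler is typically launched (assumed wiring, not shown in this diff): kicked off as a background task once the client is ready.

import asyncio
from scheduler import start_scheduler

@bot.event
async def on_ready():
    # Run the scheduler loop alongside the normal event handlers
    asyncio.create_task(start_scheduler(bot))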
Binary files (not shown):
BIN  src/scheduler/__pycache__/__init__.cpython-311.pyc  Normal file
BIN  src/scheduler/__pycache__/inactivity.cpython-311.pyc  Normal file
BIN  src/scheduler/__pycache__/probabilistic.cpython-311.pyc  Normal file
BIN  src/scheduler/__pycache__/simple.cpython-311.pyc  Normal file
@@ -1,6 +1,7 @@
 import random
 import datetime
 from ai import get_ai_response
+import logger
 
 last_post_time = None
 post_chance = None
@@ -2,6 +2,7 @@ import asyncio
 import random
 import datetime
 from ai import get_ai_response
+import logger
 
 last_post_time = None
@@ -15,7 +16,8 @@ async def run(bot, scheduler_settings, full_settings):
     use_ai = scheduler_settings.get("use_ai", True)
 
     await bot.wait_until_ready()
-    print("📆 Simple scheduler active.")
+    #print("📆 Simple scheduler active.")
+    logger.info("📆 Simple scheduler active.")
 
     while not bot.is_closed():
         now = datetime.datetime.utcnow()
@@ -29,6 +31,7 @@ async def run(bot, scheduler_settings, full_settings):
             message = random.choice(scheduler_settings.get("messages", ["Hello from Delta."]))
 
             await channel.send(message)
-            print(f"📤 [Simple] Sent to #{channel.name}: {message}")
+            #print(f"📤 [Simple] Sent to #{channel.name}: {message}")
+            logger.info(f"📤 [Simple] Sent to #{channel.name}: {message}")
 
             await asyncio.sleep(interval * 60)
@@ -4,13 +4,38 @@ cooldowns:
 
 messages:
   cooldown:
-    - "🕒 Chill, mortal. You must wait {seconds}s before trying again. 😼"
+    - "🕒 Chill, wait {seconds}s before trying again."
 
+autochat:
+  enable_reactions: true
+  emoji_reaction_chance: 0.10          # 10% chance to react to a message
+  engagement_decay_per_minute: 0.15    # how fast Delta loses interest over time
+
+context:
+  enabled: true      # now working with memory system
+  max_messages: 15   # max messages to keep in context
+
+database:
+  backend: "json"                       # Options: "json", "sqlite"
+  sqlite_path: "data/deltabot.db"       # SQLite database file path
+  profiles_path: "user_profiles.json"   # JSON profiles file (for JSON backend)
+  memory_path: "memory.json"            # JSON memory file (for JSON backend)
+
+memory:
+  enabled: true
+  importance_threshold: 0.3        # minimum importance to store (0.0-1.0)
+  max_conversation_memories: 100   # per channel
+  max_user_memories: 50            # per user
+  cleanup_days: 30                 # auto-cleanup after X days
+
+user_profiles:
+  enable_custom_prompt: true   # set false to ignore user `custom_prompt` values in replies
+
 scheduler:
-  enabled: true
-  mode: probabilistic
-  interval_minutes: 1
-  use_ai: true
+  enabled: false
+  mode: simple              # <- this activates simple mode
+  interval_minutes: 0.25    # <- interval between scheduled posts, in minutes
+  use_ai: false             # <- true = use LLM, false = use static messages
   channel_id: 1370420592360161393   # <- your Discord text channel ID
 
 messages:
@@ -34,3 +59,8 @@ scheduler:
 
 inactivity:
   threshold_minutes: 120
+
+ai:
+  use_modfile: true
+  modfile_path: "../examples/gojo.mod"
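A sketch of how the new database block can be consumed (an assumed helper, not part of this changeset):

import yaml

with open("settings.yaml", "r", encoding="utf-8") as f:
    settings = yaml.safe_load(f)

db_cfg = settings.get("database", {})
backend = db_cfg.get("backend", "json")   # "json" or "sqlite"
if backend == "sqlite":
    db_path = db_cfg.get("sqlite_path", "data/deltabot.db")
else:
    profiles_path = db_cfg.get("profiles_path", "user_profiles.json")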
137  src/templates/base.html  Normal file

@@ -0,0 +1,137 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{% block title %}Delta Bot Dashboard{% endblock %}</title>
    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet">
    <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css" rel="stylesheet">
    <style>
        .sidebar {
            min-height: 100vh;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
        }
        .sidebar .nav-link {
            color: rgba(255,255,255,0.8);
            border-radius: 0.5rem;
            margin: 0.2rem 0;
        }
        .sidebar .nav-link:hover, .sidebar .nav-link.active {
            color: white;
            background-color: rgba(255,255,255,0.1);
        }
        .stat-card {
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            color: white;
            border-radius: 1rem;
            box-shadow: 0 4px 6px rgba(0,0,0,0.1);
        }
        .status-online { color: #28a745; }
        .status-offline { color: #dc3545; }
        .status-warning { color: #ffc107; }
        .navbar-brand {
            font-weight: bold;
            background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
            -webkit-background-clip: text;
            -webkit-text-fill-color: transparent;
            background-clip: text;
        }
    </style>
</head>
<body>
    <div class="container-fluid">
        <div class="row">
            <!-- Sidebar -->
            <nav class="col-md-3 col-lg-2 d-md-block sidebar collapse">
                <div class="position-sticky pt-3">
                    <div class="text-center mb-4">
                        <h4 class="text-white"><i class="fas fa-robot"></i> Delta Bot</h4>
                        <small class="text-white-50">Management Dashboard</small>
                    </div>
                    <ul class="nav flex-column">
                        <li class="nav-item">
                            <a class="nav-link {% if request.endpoint == 'dashboard' %}active{% endif %}" href="{{ url_for('dashboard') }}">
                                <i class="fas fa-tachometer-alt"></i> Dashboard
                            </a>
                        </li>
                        <li class="nav-item">
                            <a class="nav-link {% if request.endpoint == 'config' %}active{% endif %}" href="{{ url_for('config') }}">
                                <i class="fas fa-cog"></i> Configuration
                            </a>
                        </li>
                        <li class="nav-item">
                            <a class="nav-link {% if request.endpoint == 'stats' %}active{% endif %}" href="{{ url_for('stats') }}">
                                <i class="fas fa-chart-bar"></i> Statistics
                            </a>
                        </li>
                        <li class="nav-item">
                            <a class="nav-link {% if request.endpoint == 'memory' %}active{% endif %}" href="{{ url_for('memory') }}">
                                <i class="fas fa-brain"></i> Memory
                            </a>
                        </li>
                    </ul>
                    <hr class="text-white-50">
                    <div class="text-center">
                        <small class="text-white-50">
                            <i class="fas fa-server"></i>
                            {{ db_stats.backend_type|title if db_stats else 'Unknown' }} Backend
                        </small>
                    </div>
                </div>
            </nav>

            <!-- Main content -->
            <main class="col-md-9 ms-sm-auto col-lg-10 px-md-4">
                <div class="d-flex justify-content-between flex-wrap flex-md-nowrap align-items-center pt-3 pb-2 mb-3 border-bottom">
                    <h1 class="h2">{% block page_title %}{% endblock %}</h1>
                    <div class="btn-toolbar mb-2 mb-md-0">
                        <div class="btn-group me-2">
                            <span class="badge bg-success fs-6" id="status-indicator">
                                <i class="fas fa-circle"></i> Online
                            </span>
                        </div>
                    </div>
                </div>

                <!-- Flash messages -->
                {% with messages = get_flashed_messages(with_categories=true) %}
                    {% if messages %}
                        {% for category, message in messages %}
                            <div class="alert alert-{{ 'danger' if category == 'error' else 'success' }} alert-dismissible fade show" role="alert">
                                {{ message }}
                                <button type="button" class="btn-close" data-bs-dismiss="alert"></button>
                            </div>
                        {% endfor %}
                    {% endif %}
                {% endwith %}

                {% block content %}{% endblock %}
            </main>
        </div>
    </div>

    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js"></script>
    <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
    <script>
        // Auto-refresh stats every 30 seconds
        setInterval(function() {
            fetch('/api/stats')
                .then(response => response.json())
                .then(data => {
                    // Update status indicator
                    const statusIndicator = document.getElementById('status-indicator');
                    if (data.bot_stats) {
                        statusIndicator.innerHTML = '<i class="fas fa-circle"></i> Online';
                        statusIndicator.className = 'badge bg-success fs-6';
                    }
                })
                .catch(error => {
                    const statusIndicator = document.getElementById('status-indicator');
                    statusIndicator.innerHTML = '<i class="fas fa-circle"></i> Offline';
                    statusIndicator.className = 'badge bg-danger fs-6';
                });
        }, 30000);
    </script>
    {% block scripts %}{% endblock %}
</body>
</html>
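The templates in this changeset resolve url_for('dashboard'), url_for('config'), url_for('stats'), and url_for('memory'), which implies Flask endpoints with those names. A minimal sketch of that wiring (assumed, not shown in this diff):

from flask import Flask, render_template

app = Flask(__name__)

@app.route("/")
def dashboard():
    # bot_stats / db_stats here are empty placeholders; the real app fills them in
    return render_template("dashboard.html", bot_stats={}, db_stats={})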
161  src/templates/config.html  Normal file

@@ -0,0 +1,161 @@
{% extends "base.html" %}

{% block title %}Configuration - Delta Bot{% endblock %}
{% block page_title %}Configuration{% endblock %}

{% block content %}
<form method="POST" action="{{ url_for('save_config') }}">
    <div class="row">
        <!-- Database Configuration -->
        <div class="col-lg-6 mb-4">
            <div class="card">
                <div class="card-header">
                    <h5 class="mb-0"><i class="fas fa-database"></i> Database Settings</h5>
                </div>
                <div class="card-body">
                    <div class="mb-3">
                        <label for="database_backend" class="form-label">Database Backend</label>
                        <select class="form-select" id="database_backend" name="database_backend">
                            <option value="json" {% if settings.database.backend == 'json' %}selected{% endif %}>JSON Files (Backward Compatible)</option>
                            <option value="sqlite" {% if settings.database.backend == 'sqlite' %}selected{% endif %}>SQLite Database (Recommended)</option>
                        </select>
                        <div class="form-text">SQLite is recommended for better performance and data integrity.</div>
                    </div>

                    <div class="mb-3">
                        <label for="sqlite_path" class="form-label">SQLite Database Path</label>
                        <input type="text" class="form-control" id="sqlite_path" name="sqlite_path"
                               value="{{ settings.database.sqlite_path or 'data/deltabot.db' }}">
                        <div class="form-text">Path to SQLite database file (only used if SQLite backend is selected).</div>
                    </div>
                </div>
            </div>
        </div>

        <!-- Memory Configuration -->
        <div class="col-lg-6 mb-4">
            <div class="card">
                <div class="card-header">
                    <h5 class="mb-0"><i class="fas fa-brain"></i> Memory Settings</h5>
                </div>
                <div class="card-body">
                    <div class="mb-3">
                        <div class="form-check form-switch">
                            <input class="form-check-input" type="checkbox" id="memory_enabled" name="memory_enabled"
                                   value="true" {% if settings.memory.enabled %}checked{% endif %}>
                            <label class="form-check-label" for="memory_enabled">
                                Enable Memory System
                            </label>
                        </div>
                        <div class="form-text">Allows the bot to remember conversations and user preferences. Disable for privacy-focused deployments.</div>
                    </div>

                    <div class="mb-3">
                        <label for="importance_threshold" class="form-label">Importance Threshold</label>
                        <input type="range" class="form-range" id="importance_threshold" name="importance_threshold"
                               min="0" max="1" step="0.1" value="{{ settings.memory.importance_threshold or 0.3 }}"
                               oninput="this.nextElementSibling.value = this.value">
                        <output>{{ settings.memory.importance_threshold or 0.3 }}</output>
                        <div class="form-text">Minimum importance score (0.0-1.0) for storing memories. Higher values store fewer memories.</div>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <div class="row">
        <!-- AutoChat Configuration -->
        <div class="col-lg-6 mb-4">
            <div class="card">
                <div class="card-header">
                    <h5 class="mb-0"><i class="fas fa-robot"></i> AutoChat Settings</h5>
                </div>
                <div class="card-body">
                    <div class="mb-3">
                        <div class="form-check form-switch">
                            <input class="form-check-input" type="checkbox" id="autochat_enabled" name="autochat_enabled"
                                   value="true" {% if settings.autochat.enable_reactions %}checked{% endif %}>
                            <label class="form-check-label" for="autochat_enabled">
                                Enable Emoji Reactions
                            </label>
                        </div>
                        <div class="form-text">Allow the bot to react to messages with emojis.</div>
                    </div>

                    <div class="mb-3">
                        <label for="reaction_chance" class="form-label">Reaction Chance</label>
                        <input type="range" class="form-range" id="reaction_chance" name="reaction_chance"
                               min="0" max="1" step="0.05" value="{{ settings.autochat.emoji_reaction_chance or 0.1 }}"
                               oninput="this.nextElementSibling.value = (this.value * 100).toFixed(0) + '%'">
                        <output>{{ ((settings.autochat.emoji_reaction_chance or 0.1) * 100)|round|int }}%</output>
                        <div class="form-text">Probability of reacting to any given message.</div>
                    </div>
                </div>
            </div>
        </div>

        <!-- Context Configuration -->
        <div class="col-lg-6 mb-4">
            <div class="card">
                <div class="card-header">
                    <h5 class="mb-0"><i class="fas fa-comments"></i> Context Settings</h5>
                </div>
                <div class="card-body">
                    <div class="mb-3">
                        <div class="form-check form-switch">
                            <input class="form-check-input" type="checkbox" id="context_enabled" name="context_enabled"
                                   value="true" {% if settings.context.enabled %}checked{% endif %}>
                            <label class="form-check-label" for="context_enabled">
                                Enable Context Tracking
                            </label>
                        </div>
                        <div class="form-text">Include recent messages in AI responses for better context awareness.</div>
                    </div>

                    <div class="mb-3">
                        <label for="max_messages" class="form-label">Max Context Messages</label>
                        <input type="number" class="form-control" id="max_messages" name="max_messages"
                               min="1" max="50" value="{{ settings.context.max_messages or 15 }}">
                        <div class="form-text">Maximum number of recent messages to include in context (1-50).</div>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <!-- Save Button -->
    <div class="row">
        <div class="col-12">
            <div class="card">
                <div class="card-body text-center">
                    <button type="submit" class="btn btn-success btn-lg">
                        <i class="fas fa-save"></i> Save Configuration
                    </button>
                    <div class="mt-2">
                        <small class="text-muted">
                            <i class="fas fa-info-circle"></i>
                            Some changes may require restarting the bot to take effect.
                        </small>
                    </div>
                </div>
            </div>
        </div>
    </div>
</form>
{% endblock %}

{% block scripts %}
<script>
    // Update range slider displays
    document.querySelectorAll('input[type="range"]').forEach(function(range) {
        range.addEventListener('input', function() {
            const output = this.nextElementSibling;
            if (this.name === 'reaction_chance') {
                output.textContent = (this.value * 100).toFixed(0) + '%';
            } else {
                output.textContent = this.value;
            }
        });
    });
</script>
{% endblock %}
191  src/templates/dashboard.html  Normal file

@@ -0,0 +1,191 @@
{% extends "base.html" %}

{% block title %}Dashboard - Delta Bot{% endblock %}
{% block page_title %}Dashboard{% endblock %}

{% block content %}
<div class="row">
    <!-- Bot Status Cards -->
    <div class="col-xl-3 col-md-6 mb-4">
        <div class="card stat-card">
            <div class="card-body">
                <div class="row no-gutters align-items-center">
                    <div class="col mr-2">
                        <div class="h5 mb-0 font-weight-bold">{{ bot_stats.uptime }}</div>
                        <div class="small">Uptime</div>
                    </div>
                    <div class="col-auto">
                        <i class="fas fa-clock fa-2x"></i>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <div class="col-xl-3 col-md-6 mb-4">
        <div class="card stat-card">
            <div class="card-body">
                <div class="row no-gutters align-items-center">
                    <div class="col mr-2">
                        <div class="h5 mb-0 font-weight-bold">{{ bot_stats.message_count }}</div>
                        <div class="small">Messages Processed</div>
                    </div>
                    <div class="col-auto">
                        <i class="fas fa-comments fa-2x"></i>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <div class="col-xl-3 col-md-6 mb-4">
        <div class="card stat-card">
            <div class="card-body">
                <div class="row no-gutters align-items-center">
                    <div class="col mr-2">
                        <div class="h5 mb-0 font-weight-bold">{{ bot_stats.unique_users }}</div>
                        <div class="small">Active Users</div>
                    </div>
                    <div class="col-auto">
                        <i class="fas fa-users fa-2x"></i>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <div class="col-xl-3 col-md-6 mb-4">
        <div class="card stat-card">
            <div class="card-body">
                <div class="row no-gutters align-items-center">
                    <div class="col mr-2">
                        <div class="h5 mb-0 font-weight-bold">{{ db_stats.total_users if db_stats.total_users is defined else '?' }}</div>
                        <div class="small">Total Profiles</div>
                    </div>
                    <div class="col-auto">
                        <i class="fas fa-database fa-2x"></i>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>

<div class="row">
    <!-- System Status -->
    <div class="col-lg-6 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-server"></i> System Status</h5>
            </div>
            <div class="card-body">
                <div class="row mb-3">
                    <div class="col-sm-4"><strong>Database Backend:</strong></div>
                    <div class="col-sm-8">
                        <span class="badge bg-primary">{{ db_stats.backend_type|title if db_stats.backend_type else 'Unknown' }}</span>
                    </div>
                </div>
                <div class="row mb-3">
                    <div class="col-sm-4"><strong>Memory System:</strong></div>
                    <div class="col-sm-8">
                        {% if db_stats.memory_enabled %}
                            <span class="badge bg-success"><i class="fas fa-check"></i> Enabled</span>
                        {% else %}
                            <span class="badge bg-warning"><i class="fas fa-times"></i> Disabled</span>
                        {% endif %}
                    </div>
                </div>
                <div class="row mb-3">
                    <div class="col-sm-4"><strong>Last Activity:</strong></div>
                    <div class="col-sm-8">{{ bot_stats.last_activity }}</div>
                </div>
                <div class="row">
                    <div class="col-sm-4"><strong>Active Channels:</strong></div>
                    <div class="col-sm-8">{{ bot_stats.active_channels }}</div>
                </div>
            </div>
        </div>
    </div>

    <!-- Recent Activity -->
    <div class="col-lg-6 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-activity"></i> Database Info</h5>
            </div>
            <div class="card-body">
                {% if db_stats.error %}
                    <div class="alert alert-warning">
                        <i class="fas fa-exclamation-triangle"></i> {{ db_stats.error }}
                    </div>
                {% else %}
                    <div class="row mb-3">
                        <div class="col-sm-6"><strong>Total Users:</strong></div>
                        <div class="col-sm-6">{{ db_stats.total_users }}</div>
                    </div>
                    {% if db_stats.memory_enabled %}
                        <div class="row mb-3">
                            <div class="col-sm-6"><strong>Stored Memories:</strong></div>
                            <div class="col-sm-6">{{ db_stats.total_memories }}</div>
                        </div>
                        <div class="row mb-3">
                            <div class="col-sm-6"><strong>Conversations:</strong></div>
                            <div class="col-sm-6">{{ db_stats.total_conversations }}</div>
                        </div>
                    {% else %}
                        <div class="text-muted">
                            <i class="fas fa-info-circle"></i> Memory system is disabled
                        </div>
                    {% endif %}
                {% endif %}
            </div>
        </div>
    </div>
</div>

<!-- Quick Actions -->
<div class="row">
    <div class="col-12">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-bolt"></i> Quick Actions</h5>
            </div>
            <div class="card-body">
                <div class="row">
                    <div class="col-md-3 mb-2">
                        <a href="{{ url_for('config') }}" class="btn btn-primary btn-block w-100">
                            <i class="fas fa-cog"></i> Configure Bot
                        </a>
                    </div>
                    <div class="col-md-3 mb-2">
                        <a href="{{ url_for('stats') }}" class="btn btn-info btn-block w-100">
                            <i class="fas fa-chart-bar"></i> View Statistics
                        </a>
                    </div>
                    <div class="col-md-3 mb-2">
                        <a href="{{ url_for('memory') }}" class="btn btn-success btn-block w-100">
                            <i class="fas fa-brain"></i> Manage Memory
                        </a>
                    </div>
                    <div class="col-md-3 mb-2">
                        <button class="btn btn-warning btn-block w-100" onclick="restartBot()">
                            <i class="fas fa-redo"></i> Restart Bot
                        </button>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>
{% endblock %}

{% block scripts %}
<script>
    function restartBot() {
        if (confirm('Are you sure you want to restart the bot? This will temporarily disconnect from Discord.')) {
            // TODO: Implement bot restart functionality
            alert('Bot restart functionality will be implemented soon!');
        }
    }
</script>
{% endblock %}
225  src/templates/memory.html  Normal file

@@ -0,0 +1,225 @@
{% extends "base.html" %}

{% block title %}Memory Management - Delta Bot{% endblock %}
{% block page_title %}Memory Management{% endblock %}

{% block content %}
{% if memory_disabled %}
<div class="alert alert-warning">
    <i class="fas fa-exclamation-triangle"></i>
    <strong>Memory System Disabled</strong><br>
    The memory system is currently disabled. Enable it in the <a href="{{ url_for('config') }}">configuration</a> to use memory features.
</div>
{% else %}
<div class="row">
    <!-- Memory Actions -->
    <div class="col-lg-4 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-tools"></i> Memory Actions</h5>
            </div>
            <div class="card-body">
                <div class="d-grid gap-2">
                    <button class="btn btn-warning" onclick="cleanupMemories()">
                        <i class="fas fa-broom"></i> Cleanup Old Memories
                    </button>
                    <button class="btn btn-info" onclick="exportMemories()">
                        <i class="fas fa-download"></i> Export Memories
                    </button>
                    <button class="btn btn-danger" onclick="clearAllMemories()">
                        <i class="fas fa-trash"></i> Clear All Memories
                    </button>
                </div>
            </div>
        </div>
    </div>

    <!-- Users with Memories -->
    <div class="col-lg-8 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-users"></i> Users with Memories</h5>
            </div>
            <div class="card-body">
                {% if users %}
                <div class="table-responsive">
                    <table class="table">
                        <thead>
                            <tr>
                                <th>User</th>
                                <th>Memory Count</th>
                                <th>Actions</th>
                            </tr>
                        </thead>
                        <tbody>
                            {% for user in users %}
                            <tr>
                                <td>{{ user.display_name or 'Unknown User' }}</td>
                                <td><span class="badge bg-primary">{{ user.memory_count }}</span></td>
                                <td>
                                    <button class="btn btn-sm btn-info" onclick="viewUserMemories('{{ user.user_id }}')">
                                        <i class="fas fa-eye"></i> View
                                    </button>
                                    <button class="btn btn-sm btn-danger" onclick="deleteUserMemories('{{ user.user_id }}')">
                                        <i class="fas fa-trash"></i> Delete
                                    </button>
                                </td>
                            </tr>
                            {% endfor %}
                        </tbody>
                    </table>
                </div>
                {% else %}
                <p class="text-muted">No users with stored memories found.</p>
                {% endif %}
            </div>
        </div>
    </div>
</div>

<!-- Recent Memories -->
<div class="row">
    <div class="col-12">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-history"></i> Recent Conversation Memories</h5>
            </div>
            <div class="card-body">
                {% if memories %}
                <div class="table-responsive">
                    <table class="table">
                        <thead>
                            <tr>
                                <th>Timestamp</th>
                                <th>User</th>
                                <th>Content</th>
                                <th>Importance</th>
                                <th>Actions</th>
                            </tr>
                        </thead>
                        <tbody>
                            {% for memory in memories %}
                            <tr>
                                <td><small>{{ memory.timestamp[:16] if memory.timestamp else 'Unknown' }}</small></td>
                                <td>{{ memory.display_name or 'Unknown' }}</td>
                                <td>
                                    <div style="max-width: 300px; overflow: hidden; text-overflow: ellipsis;">
                                        {{ memory.content[:100] }}{% if memory.content|length > 100 %}...{% endif %}
                                    </div>
                                </td>
                                <td>
                                    {% set importance = memory.importance or 0 %}
                                    {% if importance >= 0.8 %}
                                        <span class="badge bg-danger">High</span>
                                    {% elif importance >= 0.5 %}
                                        <span class="badge bg-warning">Medium</span>
                                    {% else %}
                                        <span class="badge bg-secondary">Low</span>
                                    {% endif %}
                                </td>
                                <td>
                                    <button class="btn btn-sm btn-info" onclick="viewMemoryDetails('{{ memory.id }}')">
                                        <i class="fas fa-eye"></i>
                                    </button>
                                    <button class="btn btn-sm btn-danger" onclick="deleteMemory('{{ memory.id }}')">
                                        <i class="fas fa-trash"></i>
                                    </button>
                                </td>
                            </tr>
                            {% endfor %}
                        </tbody>
                    </table>
                </div>
                {% else %}
                <p class="text-muted">No conversation memories found.</p>
                {% endif %}
            </div>
        </div>
    </div>
</div>
{% endif %}

<!-- Memory Details Modal -->
<div class="modal fade" id="memoryModal" tabindex="-1">
    <div class="modal-dialog modal-lg">
        <div class="modal-content">
            <div class="modal-header">
                <h5 class="modal-title">Memory Details</h5>
                <button type="button" class="btn-close" data-bs-dismiss="modal"></button>
            </div>
            <div class="modal-body" id="memoryModalBody">
                <!-- Memory details will be loaded here -->
            </div>
            <div class="modal-footer">
                <button type="button" class="btn btn-secondary" data-bs-dismiss="modal">Close</button>
            </div>
        </div>
    </div>
</div>
{% endblock %}

{% block scripts %}
<script>
    function cleanupMemories() {
        const days = prompt('Delete memories older than how many days?', '30');
        if (days && !isNaN(days)) {
            fetch('/api/memory/cleanup', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json',
                },
                body: JSON.stringify({ days: parseInt(days) })
            })
            .then(response => response.json())
            .then(data => {
                if (data.success) {
                    alert(data.message);
                    location.reload();
                } else {
                    alert('Error: ' + data.error);
                }
            })
            .catch(error => {
                alert('Error: ' + error);
            });
        }
    }

    function exportMemories() {
        alert('Export functionality will be implemented soon!');
    }

    function clearAllMemories() {
        if (confirm('Are you sure you want to delete ALL memories? This cannot be undone!')) {
            alert('Clear all memories functionality will be implemented soon!');
        }
    }

    function viewUserMemories(userId) {
        alert('View user memories functionality will be implemented soon!');
    }

    function deleteUserMemories(userId) {
        if (confirm('Are you sure you want to delete all memories for this user?')) {
            alert('Delete user memories functionality will be implemented soon!');
        }
    }

    function viewMemoryDetails(memoryId) {
        document.getElementById('memoryModalBody').innerHTML = '<p>Loading memory details...</p>';
        const modal = new bootstrap.Modal(document.getElementById('memoryModal'));
        modal.show();

        // TODO: Fetch memory details from API
        setTimeout(() => {
            document.getElementById('memoryModalBody').innerHTML = '<p>Memory details functionality will be implemented soon!</p>';
        }, 1000);
    }

    function deleteMemory(memoryId) {
        if (confirm('Are you sure you want to delete this memory?')) {
            alert('Delete memory functionality will be implemented soon!');
        }
    }
</script>
{% endblock %}
src/templates/stats.html (Normal file, 193 lines)
@@ -0,0 +1,193 @@
{% extends "base.html" %}

{% block title %}Statistics - Delta Bot{% endblock %}
{% block page_title %}Statistics{% endblock %}

{% block content %}
<div class="row">
    <!-- Bot Performance Chart -->
    <div class="col-lg-8 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-chart-line"></i> Bot Activity</h5>
            </div>
            <div class="card-body">
                <canvas id="activityChart" width="400" height="200"></canvas>
            </div>
        </div>
    </div>

    <!-- Top Users -->
    <div class="col-lg-4 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-crown"></i> Top Active Users</h5>
            </div>
            <div class="card-body">
                {% if bot_stats.most_active_users %}
                    {% for user_id, count in bot_stats.most_active_users %}
                    <div class="d-flex justify-content-between align-items-center mb-2">
                        <span>User {{ user_id[:8] }}...</span>
                        <span class="badge bg-primary">{{ count }}</span>
                    </div>
                    {% endfor %}
                {% else %}
                    <p class="text-muted">No user activity recorded yet.</p>
                {% endif %}
            </div>
        </div>
    </div>
</div>

<div class="row">
    <!-- Database Statistics -->
    <div class="col-lg-6 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-database"></i> Database Statistics</h5>
            </div>
            <div class="card-body">
                <div class="row">
                    <div class="col-sm-6">
                        <div class="text-center">
                            <h3 class="text-primary">{{ db_stats.total_users if db_stats.total_users is defined else '?' }}</h3>
                            <p class="mb-0">Total User Profiles</p>
                        </div>
                    </div>
                    <div class="col-sm-6">
                        <div class="text-center">
                            <h3 class="text-success">{{ db_stats.total_memories if db_stats.total_memories is defined else '?' }}</h3>
                            <p class="mb-0">Stored Memories</p>
                        </div>
                    </div>
                </div>
                <hr>
                <div class="row">
                    <div class="col-sm-6">
                        <div class="text-center">
                            <h3 class="text-info">{{ db_stats.total_conversations if db_stats.total_conversations is defined else '?' }}</h3>
                            <p class="mb-0">Conversation Records</p>
                        </div>
                    </div>
                    <div class="col-sm-6">
                        <div class="text-center">
                            <h3 class="text-warning">{{ db_stats.backend_type|title if db_stats.backend_type else '?' }}</h3>
                            <p class="mb-0">Backend Type</p>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <!-- Recent Users -->
    <div class="col-lg-6 mb-4">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-users"></i> Recent User Activity</h5>
            </div>
            <div class="card-body">
                {% if recent_users %}
                <div class="table-responsive">
                    <table class="table table-sm">
                        <thead>
                            <tr>
                                <th>User</th>
                                <th>Interactions</th>
                                <th>Last Seen</th>
                            </tr>
                        </thead>
                        <tbody>
                            {% for user in recent_users %}
                            <tr>
                                <td>{{ user.display_name or user.name or 'Unknown' }}</td>
                                <td><span class="badge bg-secondary">{{ user.interactions or 0 }}</span></td>
                                <td><small>{{ user.last_seen[:16] if user.last_seen else 'Never' }}</small></td>
                            </tr>
                            {% endfor %}
                        </tbody>
                    </table>
                </div>
                {% else %}
                <p class="text-muted">No user data available.</p>
                {% endif %}
            </div>
        </div>
    </div>
</div>

<!-- System Information -->
<div class="row">
    <div class="col-12">
        <div class="card">
            <div class="card-header">
                <h5 class="mb-0"><i class="fas fa-info-circle"></i> System Information</h5>
            </div>
            <div class="card-body">
                <div class="row">
                    <div class="col-md-3">
                        <strong>Bot Uptime:</strong><br>
                        <span class="text-muted">{{ bot_stats.uptime }}</span>
                    </div>
                    <div class="col-md-3">
                        <strong>Messages Processed:</strong><br>
                        <span class="text-muted">{{ bot_stats.message_count }}</span>
                    </div>
                    <div class="col-md-3">
                        <strong>Active Channels:</strong><br>
                        <span class="text-muted">{{ bot_stats.active_channels }}</span>
                    </div>
                    <div class="col-md-3">
                        <strong>Last Activity:</strong><br>
                        <span class="text-muted">{{ bot_stats.last_activity }}</span>
                    </div>
                </div>
            </div>
        </div>
    </div>
</div>
{% endblock %}

{% block scripts %}
<script>
// Create activity chart
const ctx = document.getElementById('activityChart').getContext('2d');
const activityChart = new Chart(ctx, {
    type: 'line',
    data: {
        labels: ['6h ago', '5h ago', '4h ago', '3h ago', '2h ago', '1h ago', 'Now'],
        datasets: [{
            label: 'Messages',
            data: [12, 19, 8, 15, 22, 18, 25], // This would be dynamic in a real implementation
            borderColor: 'rgb(102, 126, 234)',
            backgroundColor: 'rgba(102, 126, 234, 0.1)',
            tension: 0.4
        }]
    },
    options: {
        responsive: true,
        plugins: {
            legend: {
                display: false
            }
        },
        scales: {
            y: {
                beginAtZero: true
            }
        }
    }
});

// Auto-refresh stats every 30 seconds
setInterval(function() {
    fetch('/api/stats')
        .then(response => response.json())
        .then(data => {
            // Update charts and stats here
            console.log('Stats updated:', data);
        })
        .catch(error => console.error('Error fetching stats:', error));
}, 30000);
</script>
{% endblock %}
src/time_logger.py (Normal file, 46 lines)
@@ -0,0 +1,46 @@
# time_logger.py
import csv
import os
from datetime import datetime
import logging

logger = logging.getLogger("logger")

_initialized = False
LOG_PATH = os.path.join(os.path.dirname(__file__), "data", "activity_log.csv")


def ensure_log_file():
    """Create the CSV file if it doesn't exist, including headers."""
    os.makedirs(os.path.dirname(LOG_PATH), exist_ok=True)
    if not os.path.exists(LOG_PATH):
        with open(LOG_PATH, "w", newline='', encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(["timestamp", "user_id", "username", "channel_id", "hour", "weekday"])
        logger.info("🗂️ Created new activity_log.csv file.")


def log_message_activity(message):
    """Log basic time and user info for server heatmap analysis."""
    global _initialized
    if not _initialized:
        ensure_log_file()
        _initialized = True

    ts = datetime.now()
    row = [
        ts.strftime("%Y-%m-%d %H:%M:%S"),
        str(message.author.id),
        message.author.display_name,
        str(message.channel.id),
        ts.hour,
        ts.strftime("%A")
    ]

    try:
        with open(LOG_PATH, "a", newline='', encoding="utf-8") as f:
            writer = csv.writer(f)
            writer.writerow(row)
        logger.info(f"📝 Logged activity: {message.author.display_name} at {ts.hour}:00 ({ts.strftime('%A')})")
    except Exception as e:
        logger.error(f"❌ Failed to log activity: {e}")
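
A quick usage sketch, not part of this commit: wiring log_message_activity into a discord.py on_message handler. The client and intents setup below are assumptions for illustration.

# Hypothetical hookup -- illustrative only, not from this diff.
import discord
from time_logger import log_message_activity

intents = discord.Intents.default()
intents.message_content = True  # required in discord.py 2.x to read message content
client = discord.Client(intents=intents)

@client.event
async def on_message(message):
    if message.author.bot:
        return  # count only human activity in the heatmap data
    log_message_activity(message)  # appends one row to data/activity_log.csv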
src/user_profiles.json (Normal file, 18 lines)
@@ -0,0 +1,18 @@
{
  "test_user": {
    "name": "test",
    "display_name": "Test User",
    "interactions": 5
  },
  "161149541171593216": {
    "name": "themiloverse",
    "display_name": "Miguel",
    "first_seen": "2025-10-10T18:54:58.411792",
    "last_seen": "2025-10-10T18:54:58.412549",
    "last_message": "2025-10-10T18:54:58.412549",
    "interactions": 2,
    "pronouns": null,
    "avatar_url": "https://cdn.discordapp.com/avatars/161149541171593216/fb0553a29d9f73175cb6aea24d0e19ec.png?size=1024",
    "custom_prompt": null
  }
}
src/user_profiles.json.backup.20251010_125727 (Normal file, 24 lines)
@@ -0,0 +1,24 @@
{
  "161149541171593216": {
    "name": "themiloverse",
    "display_name": "Miguel",
    "first_seen": "2025-05-15T03:16:30.011640",
    "last_seen": "2025-09-20T19:04:27.735898",
    "last_message": "2025-09-20T19:04:27.735898",
    "interactions": 364,
    "pronouns": "he/him",
    "avatar_url": "https://cdn.discordapp.com/avatars/161149541171593216/fb0553a29d9f73175cb6aea24d0e19ec.png?size=1024",
    "custom_prompt": "delta is very nice to me since I am her master, and creator"
  },
  "1370422629340811405": {
    "name": "PLEX",
    "display_name": "PLEX",
    "first_seen": "2025-09-21T04:14:15.752764",
    "last_seen": "2025-09-27T14:54:42.041092",
    "last_message": "2025-09-27T14:54:42.041092",
    "interactions": 19,
    "pronouns": null,
    "avatar_url": "https://cdn.discordapp.com/embed/avatars/0.png",
    "custom_prompt": null
  }
}
src/user_profiles.py (Normal file, 100 lines)
@@ -0,0 +1,100 @@
# user_profiles.py
# Handles loading, updating, and storing per-user profiles for Delta

import os
import json
from datetime import datetime

PROFILE_PATH = os.path.join(os.path.dirname(__file__), "user_profiles.json")

def ensure_profile_file():
    """Ensure the JSON file exists and contains a dict."""
    if not os.path.exists(PROFILE_PATH):
        with open(PROFILE_PATH, "w", encoding="utf-8") as f:
            json.dump({}, f, indent=2)
    else:
        try:
            with open(PROFILE_PATH, "r", encoding="utf-8") as f:
                if not isinstance(json.load(f), dict):
                    raise ValueError
        except Exception:
            with open(PROFILE_PATH, "w", encoding="utf-8") as f:
                json.dump({}, f, indent=2)

def load_profiles():
    ensure_profile_file()
    with open(PROFILE_PATH, "r", encoding="utf-8") as f:
        return json.load(f)

def save_profiles(profiles):
    with open(PROFILE_PATH, "w", encoding="utf-8") as f:
        json.dump(profiles, f, indent=2)

def load_user_profile(user):
    ensure_profile_file()
    profiles = load_profiles()
    uid = str(user.id)
    now = datetime.utcnow().isoformat()

    profile = profiles.get(uid, {
        "name": user.name,
        "display_name": user.display_name,
        "first_seen": now,
        "last_seen": now,
        "last_message": now,
        "interactions": 0,
        "pronouns": None,
        "avatar_url": str(user.display_avatar.url),
        "custom_prompt": None  # 🆕 field
    })

    # Always update timestamp + count
    profile["last_seen"] = now
    profile["last_message"] = now
    profile["interactions"] += 1

    profiles[uid] = profile
    save_profiles(profiles)
    return profile

def update_last_seen(user_id):
    profiles = load_profiles()
    uid = str(user_id)
    if uid in profiles:
        profiles[uid]["last_seen"] = datetime.utcnow().isoformat()
        save_profiles(profiles)

def increment_interactions(user_id):
    profiles = load_profiles()
    uid = str(user_id)
    if uid in profiles:
        profiles[uid]["interactions"] += 1
        save_profiles(profiles)

def set_pronouns(user, pronouns):
    profiles = load_profiles()
    uid = str(user.id)
    profile = load_user_profile(user)
    profile["pronouns"] = pronouns
    profiles[uid] = profile
    save_profiles(profiles)
    return True

def set_custom_prompt(user_id, prompt):
    profiles = load_profiles()
    uid = str(user_id)
    if uid in profiles:
        profiles[uid]["custom_prompt"] = prompt
        save_profiles(profiles)

def format_profile_for_block(profile):
    lines = ["[User Profile]"]
    lines.append(f"- Name: {profile['display_name']}")
    if profile.get("pronouns"):
        lines.append(f"- Pronouns: {profile['pronouns']}")
    lines.append(f"- Interactions: {profile['interactions']}")
    if profile.get("custom_prompt"):
        lines.append(f"- Custom Prompt: {profile['custom_prompt']}")
    return "\n".join(lines)
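
For reference, a minimal sketch of how these functions combine, assuming a stand-in object exposing the id / name / display_name / display_avatar.url attributes that load_user_profile reads (a real discord.py member provides them):

# Illustrative only -- SimpleNamespace stands in for a Discord user object.
from types import SimpleNamespace
import user_profiles

fake_user = SimpleNamespace(
    id=123,
    name="demo",
    display_name="Demo",
    display_avatar=SimpleNamespace(url="https://cdn.discordapp.com/embed/avatars/0.png"),
)

profile = user_profiles.load_user_profile(fake_user)  # creates the entry and counts one interaction
print(user_profiles.format_profile_for_block(profile))
# [User Profile]
# - Name: Demo
# - Interactions: 1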
src/user_profiles_new.py (Normal file, 114 lines)
@@ -0,0 +1,114 @@
# user_profiles_new.py
# Modern user profiles using database abstraction
# This will eventually replace user_profiles.py

from datetime import datetime
from database import db_manager
from logger import setup_logger

logger = setup_logger("user_profiles_new")

def load_user_profile(user):
    """Load user profile from the database, creating it if it doesn't exist."""
    user_id = str(user.id)

    # Try to get an existing profile
    profile = db_manager.get_user_profile(user_id)

    if profile:
        # Update the existing profile with current session data
        profile.update({
            "name": user.name,
            "display_name": user.display_name,
            "avatar_url": str(user.display_avatar.url),
            "last_seen": datetime.utcnow().isoformat(),
            "last_message": datetime.utcnow().isoformat(),
            "interactions": profile.get("interactions", 0) + 1
        })
    else:
        # Create a new profile
        now = datetime.utcnow().isoformat()
        profile = {
            "name": user.name,
            "display_name": user.display_name,
            "first_seen": now,
            "last_seen": now,
            "last_message": now,
            "interactions": 1,
            "pronouns": None,
            "avatar_url": str(user.display_avatar.url),
            "custom_prompt": None
        }

    # Save the updated profile
    db_manager.save_user_profile(user_id, profile)
    return profile

def update_last_seen(user_id):
    """Update the last-seen timestamp for a user."""
    profile = db_manager.get_user_profile(str(user_id))
    if profile:
        profile["last_seen"] = datetime.utcnow().isoformat()
        db_manager.save_user_profile(str(user_id), profile)

def increment_interactions(user_id):
    """Increment the interaction count for a user."""
    profile = db_manager.get_user_profile(str(user_id))
    if profile:
        profile["interactions"] = profile.get("interactions", 0) + 1
        db_manager.save_user_profile(str(user_id), profile)

def set_pronouns(user, pronouns):
    """Set pronouns for a user."""
    user_id = str(user.id)
    profile = db_manager.get_user_profile(user_id) or {}
    profile["pronouns"] = pronouns

    # Ensure basic profile data exists
    if not profile.get("name"):
        profile.update({
            "name": user.name,
            "display_name": user.display_name,
            "avatar_url": str(user.display_avatar.url),
            "first_seen": datetime.utcnow().isoformat(),
            "last_seen": datetime.utcnow().isoformat(),
            "interactions": 0
        })

    db_manager.save_user_profile(user_id, profile)
    return True

def set_custom_prompt(user_id, prompt):
    """Set a custom prompt for a user."""
    user_id = str(user_id)
    profile = db_manager.get_user_profile(user_id)
    if profile:
        profile["custom_prompt"] = prompt
        db_manager.save_user_profile(user_id, profile)

def format_profile_for_block(profile):
    """Format profile data for inclusion in AI prompts."""
    lines = ["[User Profile]"]
    lines.append(f"- Name: {profile.get('display_name', 'Unknown')}")
    if profile.get("pronouns"):
        lines.append(f"- Pronouns: {profile['pronouns']}")
    lines.append(f"- Interactions: {profile.get('interactions', 0)}")
    if profile.get("custom_prompt"):
        lines.append(f"- Custom Prompt: {profile['custom_prompt']}")
    return "\n".join(lines)

# Backward compatibility functions - these use the old JSON system if the database is disabled
def load_profiles():
    """Legacy function for backward compatibility."""
    logger.warning("load_profiles() is deprecated. Use individual profile functions instead.")
    return {}

def save_profiles(profiles):
    """Legacy function for backward compatibility."""
    logger.warning("save_profiles() is deprecated. Use individual profile functions instead.")

def ensure_profile_file():
    """Legacy function for backward compatibility."""
    logger.warning("ensure_profile_file() is deprecated. Database handles initialization.")
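
The database module is not part of this hunk, so the contract user_profiles_new.py depends on has to be inferred from the calls above. A rough in-memory stand-in, illustrative only:

# Inferred shape of database.db_manager -- not the real implementation.
class _InMemoryDBManager:
    def __init__(self):
        self._profiles = {}

    def get_user_profile(self, user_id):
        # Returns the stored profile dict, or None when the user is unknown.
        return self._profiles.get(user_id)

    def save_user_profile(self, user_id, profile):
        # Upserts the profile under the given user id.
        self._profiles[user_id] = profile

db_manager = _InMemoryDBManager()

(The real manager also exposes get_backend_type() and is_memory_enabled(), as seen in web_ui.py below.)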
src/web_ui.py (Normal file, 332 lines)
@@ -0,0 +1,332 @@
"""
web_ui.py
Web dashboard for Discord bot management
"""

from flask import Flask, render_template, request, jsonify, redirect, url_for, flash, session
import os
import yaml
import json
import threading
import time
from datetime import datetime, timedelta
from functools import wraps
import secrets

# Import bot components
import sys
sys.path.append('/app/src' if os.path.exists('/app/src') else 'src')

try:
    from database import db_manager
    from memory_manager import memory_manager
    from logger import setup_logger
except ImportError as e:
    print(f"Warning: Could not import bot components: {e}")
    db_manager = None
    memory_manager = None

app = Flask(__name__)
app.secret_key = secrets.token_hex(16)  # Generate a random secret key
logger = setup_logger("webui") if 'setup_logger' in globals() else None

class BotStats:
    """Track bot statistics."""
    def __init__(self):
        self.start_time = datetime.now()
        self.message_count = 0
        self.user_interactions = {}
        self.channel_activity = {}
        self.last_activity = None

    def record_message(self, user_id, channel_id):
        self.message_count += 1
        self.user_interactions[user_id] = self.user_interactions.get(user_id, 0) + 1
        self.channel_activity[channel_id] = self.channel_activity.get(channel_id, 0) + 1
        self.last_activity = datetime.now()

    def get_stats(self):
        uptime = datetime.now() - self.start_time
        return {
            'uptime': str(uptime).split('.')[0],  # Remove microseconds
            'message_count': self.message_count,
            'unique_users': len(self.user_interactions),
            'active_channels': len(self.channel_activity),
            'last_activity': self.last_activity.strftime('%Y-%m-%d %H:%M:%S') if self.last_activity else 'Never',
            'most_active_users': sorted(self.user_interactions.items(), key=lambda x: x[1], reverse=True)[:5],
            'most_active_channels': sorted(self.channel_activity.items(), key=lambda x: x[1], reverse=True)[:5]
        }

bot_stats = BotStats()

# Authentication decorator (placeholder for now)
def require_auth(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # TODO: Implement proper authentication
        # For now, just check if we're in development mode
        return f(*args, **kwargs)
    return decorated_function

def load_settings():
    """Load bot settings from the YAML file."""
    try:
        settings_path = os.path.join('src', 'settings.yml')
        if not os.path.exists(settings_path):
            settings_path = os.path.join('/app/src', 'settings.yml')

        with open(settings_path, 'r', encoding='utf-8') as f:
            return yaml.safe_load(f)
    except Exception as e:
        if logger:
            logger.error(f"Failed to load settings: {e}")
        return {}

def save_settings(settings):
    """Save bot settings to the YAML file."""
    try:
        settings_path = os.path.join('src', 'settings.yml')
        if not os.path.exists(settings_path):
            settings_path = os.path.join('/app/src', 'settings.yml')

        with open(settings_path, 'w', encoding='utf-8') as f:
            yaml.dump(settings, f, default_flow_style=False, indent=2)
        return True
    except Exception as e:
        if logger:
            logger.error(f"Failed to save settings: {e}")
        return False

def get_database_stats():
    """Get database statistics."""
    if not db_manager:
        return {"error": "Database manager not available"}

    try:
        stats = {
            'backend_type': db_manager.get_backend_type(),
            'memory_enabled': db_manager.is_memory_enabled(),
            'total_users': 0,
            'total_memories': 0,
            'total_conversations': 0
        }

        # Try to get counts from the database
        if hasattr(db_manager.backend, 'connection') and db_manager.backend.connection:
            cursor = db_manager.backend.connection.cursor()

            # Count user profiles
            cursor.execute("SELECT COUNT(*) FROM user_profiles")
            stats['total_users'] = cursor.fetchone()[0]

            if db_manager.is_memory_enabled():
                # Count memories
                cursor.execute("SELECT COUNT(*) FROM user_memories")
                stats['total_memories'] = cursor.fetchone()[0]

                # Count conversations
                cursor.execute("SELECT COUNT(*) FROM conversation_memories")
                stats['total_conversations'] = cursor.fetchone()[0]

        return stats
    except Exception as e:
        return {"error": str(e)}

@app.route('/')
@require_auth
def dashboard():
    """Main dashboard."""
    settings = load_settings()
    db_stats = get_database_stats()
    bot_stats_data = bot_stats.get_stats()

    return render_template('dashboard.html',
                           settings=settings,
                           db_stats=db_stats,
                           bot_stats=bot_stats_data)

@app.route('/config')
@require_auth
def config():
    """Configuration page."""
    settings = load_settings()
    return render_template('config.html', settings=settings)

@app.route('/config/save', methods=['POST'])
@require_auth
def save_config():
    """Save configuration changes."""
    try:
        settings = load_settings()

        # Update database settings
        if 'database_backend' in request.form:
            settings.setdefault('database', {})['backend'] = request.form['database_backend']

        if 'sqlite_path' in request.form:
            settings.setdefault('database', {})['sqlite_path'] = request.form['sqlite_path']

        # Update memory settings
        if 'memory_enabled' in request.form:
            settings.setdefault('memory', {})['enabled'] = request.form['memory_enabled'] == 'true'

        if 'importance_threshold' in request.form:
            settings.setdefault('memory', {})['importance_threshold'] = float(request.form['importance_threshold'])

        # Update autochat settings
        if 'autochat_enabled' in request.form:
            settings.setdefault('autochat', {})['enable_reactions'] = request.form['autochat_enabled'] == 'true'

        if 'reaction_chance' in request.form:
            settings.setdefault('autochat', {})['emoji_reaction_chance'] = float(request.form['reaction_chance'])

        # Update context settings
        if 'context_enabled' in request.form:
            settings.setdefault('context', {})['enabled'] = request.form['context_enabled'] == 'true'

        if 'max_messages' in request.form:
            settings.setdefault('context', {})['max_messages'] = int(request.form['max_messages'])

        # Save settings
        if save_settings(settings):
            flash('Configuration saved successfully! Restart the bot to apply changes.', 'success')
        else:
            flash('Failed to save configuration.', 'error')

    except Exception as e:
        flash(f'Error saving configuration: {str(e)}', 'error')

    return redirect(url_for('config'))

@app.route('/stats')
@require_auth
def stats():
    """Statistics page."""
    db_stats = get_database_stats()
    bot_stats_data = bot_stats.get_stats()

    # Get recent user activity if the database is available
    recent_users = []
    if db_manager and hasattr(db_manager.backend, 'connection'):
        try:
            cursor = db_manager.backend.connection.cursor()
            cursor.execute("""
                SELECT name, display_name, interactions, last_seen
                FROM user_profiles
                ORDER BY last_seen DESC
                LIMIT 10
            """)
            recent_users = [dict(row) for row in cursor.fetchall()]
        except Exception:
            pass

    return render_template('stats.html',
                           db_stats=db_stats,
                           bot_stats=bot_stats_data,
                           recent_users=recent_users)

@app.route('/memory')
@require_auth
def memory():
    """Memory management page."""
    if not db_manager or not db_manager.is_memory_enabled():
        return render_template('memory.html',
                               memories=[],
                               users=[],
                               memory_disabled=True)

    # Get recent memories
    recent_memories = []
    users_with_memories = []

    try:
        if hasattr(db_manager.backend, 'connection'):
            cursor = db_manager.backend.connection.cursor()

            # Get recent conversation memories
            cursor.execute("""
                SELECT cm.*, up.display_name
                FROM conversation_memories cm
                LEFT JOIN user_profiles up ON cm.user_id = up.user_id
                ORDER BY cm.timestamp DESC
                LIMIT 20
            """)
            recent_memories = [dict(row) for row in cursor.fetchall()]

            # Get users with memories
            cursor.execute("""
                SELECT up.user_id, up.display_name, COUNT(um.id) as memory_count
                FROM user_profiles up
                LEFT JOIN user_memories um ON up.user_id = um.user_id
                GROUP BY up.user_id
                HAVING memory_count > 0
                ORDER BY memory_count DESC
                LIMIT 20
            """)
            users_with_memories = [dict(row) for row in cursor.fetchall()]

    except Exception as e:
        if logger:
            logger.error(f"Error fetching memory data: {e}")

    return render_template('memory.html',
                           memories=recent_memories,
                           users=users_with_memories,
                           memory_disabled=False)

@app.route('/api/stats')
@require_auth
def api_stats():
    """API endpoint for real-time stats."""
    return jsonify({
        'bot_stats': bot_stats.get_stats(),
        'db_stats': get_database_stats()
    })

@app.route('/api/memory/cleanup', methods=['POST'])
@require_auth
def api_memory_cleanup():
    """API endpoint to clean up old memories."""
    if not memory_manager or not memory_manager.is_enabled():
        return jsonify({'error': 'Memory system disabled'}), 400

    try:
        days = int(request.json.get('days', 30))
        memory_manager.cleanup_old_memories(days)
        return jsonify({'success': True, 'message': f'Cleaned up memories older than {days} days'})
    except Exception as e:
        return jsonify({'error': str(e)}), 500

def run_web_server(host='0.0.0.0', port=8080, debug=False):
    """Run the web server."""
    import socket

    # Get the local IP for network access
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        local_ip = s.getsockname()[0]
        s.close()
    except Exception:
        local_ip = "localhost"

    if logger:
        logger.info(f"Starting web UI server on {host}:{port}")
        logger.info("=" * 50)
        logger.info("🌐 WEB UI ACCESS URLS:")
        logger.info(f"   Local:   http://localhost:{port}")
        logger.info(f"   Network: http://{local_ip}:{port}")
        logger.info("=" * 50)
    else:
        print("=" * 50)
        print("🌐 DELTA BOT WEB UI STARTED")
        print(f"   Local:   http://localhost:{port}")
        print(f"   Network: http://{local_ip}:{port}")
        print("=" * 50)

    app.run(host=host, port=port, debug=debug, threaded=True)

if __name__ == '__main__':
    port = int(os.getenv('WEB_PORT', 8080))
    debug = os.getenv('DEBUG', 'false').lower() == 'true'
    run_web_server(port=port, debug=debug)
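
web_ui.py imports threading, but the bot-side startup glue is not shown in this hunk. A plausible sketch, with names assumed, of running the dashboard beside the Discord client:

# Hypothetical startup glue -- illustrative only.
import threading
from web_ui import run_web_server, bot_stats

# Flask's dev server blocks, so run it on a daemon thread and
# leave the main thread free for the Discord client's event loop.
web_thread = threading.Thread(
    target=run_web_server,
    kwargs={"port": 8080, "debug": False},
    daemon=True,
)
web_thread.start()

# From the bot's on_message handler, the dashboard counters would be fed with:
# bot_stats.record_message(str(message.author.id), str(message.channel.id))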
tests/test_modelfile.py (Normal file, 39 lines)
@@ -0,0 +1,39 @@
import json
import os
from src.modelfile import parse_mod_file, load_modfile_if_exists


def test_parse_simple_mod(tmp_path):
    content = '''
NAME Gojo
FROM gemma3:12b
PARAMETER temperature 0.7
SYSTEM """
You are Gojo, sarcastic and helpful.
"""
TEMPLATE """
{{ .System }}
{{ .Prompt }}
"""
'''
    p = tmp_path / "gojo.mod"
    p.write_text(content)
    parsed = parse_mod_file(str(p))
    assert parsed['name'] == 'Gojo'
    assert parsed['base_model'] == 'gemma3:12b'
    assert parsed['params']['temperature'] == 0.7
    assert 'Gojo' in parsed['system'] or 'Gojo' in parsed['template']


def test_parse_json_mod(tmp_path):
    data = {
        "name": "json-persona",
        "from": "gemma3:12b",
        "system": "You are JSON persona",
        "params": {"temperature": 0.5}
    }
    p = tmp_path / "j.mod.json"
    # json.dumps is safer than str(data).replace("'", '"') for producing valid JSON
    p.write_text(json.dumps(data))
    parsed = parse_mod_file(str(p))
    assert parsed['name'] == 'json-persona'
    assert parsed['base_model'] == 'gemma3:12b'
    assert parsed['params']['temperature'] == 0.5
user_profiles.json (Normal file, 1 line)
@@ -0,0 +1 @@
{}