Implemented feature #7

parent 864201214d
commit 863d3e3c88

7 changed files with 1003 additions and 22 deletions

bot.log (932 changes)
File diff suppressed because one or more lines are too long.

src/__pycache__/context.cpython-310.pyc (BIN, new file)
Binary file not shown.

src/ai.py (24 changes)
@@ -82,23 +82,33 @@ def get_current_model():
     return get_model_name()
 
 # Main LLM interaction — injects personality and sends prompt to Ollama
-def get_ai_response(user_prompt):
+def get_ai_response(user_prompt, context=None):
     model_name = get_model_name()
-    load_model(model_name)  # Ensures the model is pulled and ready
+    load_model(model_name)
 
     persona = load_persona()
     full_prompt = ""
 
     # Inject persona first if available
     if persona:
         # Clean fancy quotes and build final prompt with character injection
         safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
-        full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:"
+        full_prompt += f"{safe_inject}\n"
+
+    # Add recent conversation context, if available
+    if context:
+        logger.info("🧠 Injected context block (pre-prompt):\n" + context)
+        full_prompt += f"[Recent Conversation]\n{context}\n\n"
+
+    # Add user prompt + character or plain ending
+    if persona:
+        full_prompt += f"User: {user_prompt}\n{persona['name']}:"
     else:
-        full_prompt = user_prompt  # fallback to raw prompt if no persona loaded
+        full_prompt += user_prompt
 
     payload = {
-        "model": model_name,  # 🔧 Suggested fix: previously hardcoded to MODEL_NAME
+        "model": model_name,
        "prompt": full_prompt,
         "stream": False
         # optional: add "keep_alive": 300 to keep model warm
     }
 
     logger.info("🛰️ SENDING TO OLLAMA /generate")
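
The new ordering assembles full_prompt in three parts: persona inject first, then an optional [Recent Conversation] block, then the User line with the persona's name as the reply cue. A minimal sketch of that assembly (build_prompt is a hypothetical helper, since ai.py keeps this logic inline, and the persona and context values are made-up example data):

# Sketch of the new prompt-assembly order; build_prompt is hypothetical,
# and the persona dict and context string below are made-up example data.
def build_prompt(user_prompt, persona=None, context=None):
    full_prompt = ""
    if persona:
        # Normalize curly quotes so the inject text embeds cleanly
        safe_inject = (persona["prompt_inject"]
                       .replace("“", '"').replace("”", '"').replace("’", "'"))
        full_prompt += f"{safe_inject}\n"
    if context:
        full_prompt += f"[Recent Conversation]\n{context}\n\n"
    if persona:
        full_prompt += f"User: {user_prompt}\n{persona['name']}:"
    else:
        full_prompt += user_prompt
    return full_prompt

print(build_prompt(
    "hello!",
    persona={"name": "Delta", "prompt_inject": "You are Delta."},
    context="2025-01-01 12:00 - Alice: hi",
))
# You are Delta.
# [Recent Conversation]
# 2025-01-01 12:00 - Alice: hi
#
# User: hello!
# Delta: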
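
The hunk shows the payload but not the request itself. A hedged sketch of how such a payload is typically sent to a local Ollama instance; the URL, timeout, and error handling are assumptions, since ai.py's request code is outside this hunk:

# Assumed transport for the payload above; Ollama's /api/generate endpoint
# is standard, but the URL, timeout, and error handling are guesses here.
import requests

payload = {
    "model": "llama3:latest",
    "prompt": "You are Delta.\nUser: hello!\nDelta:",
    "stream": False,
    # "keep_alive": 300,  # optional: keeps the model warm between calls
}
resp = requests.post("http://localhost:11434/api/generate", json=payload, timeout=120)
resp.raise_for_status()
print(resp.json()["response"])  # the model's completion text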

src/bot.py (25 changes)
@@ -10,10 +10,11 @@ import random
 import yaml
 from scheduler import start_scheduler
 from profilepic import set_avatar_from_bytes
+from context import fetch_recent_context, format_context
 from logger import setup_logger
 logger = setup_logger("bot")
 
-from ai import unload_model, load_model, get_current_model
+from ai import unload_model, load_model, get_current_model, get_ai_response
 
 dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
 load_dotenv(dotenv_path)
@@ -34,16 +35,6 @@ else:
 
 logger.info(f"✅ Final model in use: {MODEL_NAME}")
 
-from ai import get_ai_response, load_model
-MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
-
-if load_model(MODEL_NAME):
-    logger.info(f"🚀 Model `{MODEL_NAME}` preloaded on startup.")
-else:
-    logger.warning(f"⚠️ Failed to preload model `{MODEL_NAME}`.")
-
-logger.info(f"✅ Final model in use: {MODEL_NAME}")
-
 from personality import apply_personality, set_persona
 from discord.ext.commands import (
     cooldown,
@@ -103,10 +94,14 @@ async def on_message(message):
 
     if bot.user.mentioned_in(message):
         prompt = message.content.replace(f"<@{bot.user.id}>", "").strip()
         if prompt:
-            async with message.channel.typing():  # 👈 Typing indicator!
-                response = get_ai_response(prompt)
-                await message.channel.send(response)
+            context_msgs = await fetch_recent_context(message.channel)
+            formatted_context = format_context(context_msgs)
+
+            logger.info("🧠 Injected context block:\n" + formatted_context)
+
+            async with message.channel.typing():
+                reply = get_ai_response(prompt, context=formatted_context)
+                await message.channel.send(reply)
 
     await bot.process_commands(message)
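
One caveat in on_message: message.content.replace(f"<@{bot.user.id}>", "") strips only the plain mention form, while Discord can also deliver the nickname form <@!id>. A small sketch that handles both (strip_bot_mention is a hypothetical helper, not part of this diff):

# Hypothetical helper: strips both mention forms Discord can produce,
# <@id> and the nickname variant <@!id>; the diff handles only the first.
import re

def strip_bot_mention(content: str, bot_id: int) -> str:
    return re.sub(rf"<@!?{bot_id}>", "", content).strip()

print(strip_bot_mention("<@!1234> hello", 1234))  # -> "hello"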

src/context.py (40 changes, new file)
@@ -0,0 +1,40 @@
+# context.py
+
+import os
+import yaml
+import discord
+
+base_dir = os.path.dirname(__file__)
+with open(os.path.join(base_dir, "settings.yml"), "r", encoding="utf-8") as f:
+    settings = yaml.safe_load(f)
+
+CONTEXT_LIMIT = settings["context"].get("max_messages", 15)
+
+async def fetch_recent_context(channel, limit=CONTEXT_LIMIT):
+    messages = []
+    async for message in channel.history(limit=100):
+        # Skip other bots (but not Delta herself)
+        if message.author.bot and message.author.id != channel.guild.me.id:
+            continue
+
+        raw = message.clean_content
+        clean = raw.strip().replace("\n", " ").replace("\r", "")
+        clean = " ".join(clean.split())  # Collapse all extra whitespace
+
+        if not clean:
+            continue
+
+        if clean.startswith("!"):
+            continue
+
+        line = f"{message.created_at.strftime('%Y-%m-%d %H:%M')} - {message.author.display_name}: {clean}"
+        messages.append(line)
+
+        if len(messages) >= limit:
+            break
+
+    messages.reverse()
+    return messages
+
+def format_context(lines: list[str]) -> str:
+    return "\n".join(lines)
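
Note that channel.history() yields messages newest-first, which is why fetch_recent_context collects up to limit lines and then reverses them into chronological order. With made-up names and timestamps, the block format_context hands to the prompt looks like this:

# Made-up example of the context block; format_context comes from the
# new module above.
from context import format_context

lines = [
    "2025-01-01 12:00 - Alice: hey Delta, you around?",
    "2025-01-01 12:01 - Delta: yep, what's up?",
]
print(format_context(lines))
# 2025-01-01 12:00 - Alice: hey Delta, you around?
# 2025-01-01 12:01 - Delta: yep, what's up?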

src/settings.yml (4 changes)
@@ -6,6 +6,10 @@ messages:
   cooldown:
     - "🕒 Chill, wait {seconds}s before trying again."
 
+context:
+  enabled: true
+  max_messages: 30
+
 scheduler:
   enabled: false
   mode: simple  # <- this activates simple mode
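
For reference, this is the block context.py reads at import time; with these settings CONTEXT_LIMIT resolves to 30, since the 15 in the code is only the fallback for a missing max_messages key:

# How context.py consumes the new settings block (mirrors the diff above);
# the path assumes the file sits at src/settings.yml next to context.py.
import yaml

with open("src/settings.yml", encoding="utf-8") as f:
    settings = yaml.safe_load(f)

print(settings["context"].get("max_messages", 15))  # -> 30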