Compare commits

..

No commits in common. "61053f6177ce38a830b930a8046ed40b44c15ea6" and "530405a90536f2c1858bb0eca74767eab1271431" have entirely different histories.

8 changed files with 19 additions and 1204 deletions

1132
bot.log

File diff suppressed because one or more lines are too long

Binary file not shown.

View file

@@ -82,33 +82,23 @@ def get_current_model():
return get_model_name()
# Main LLM interaction — injects personality and sends prompt to Ollama
def get_ai_response(user_prompt, context=None):
def get_ai_response(user_prompt):
model_name = get_model_name()
load_model(model_name)
load_model(model_name) # Ensures the model is pulled and ready
persona = load_persona()
full_prompt = ""
# Inject persona first if available
if persona:
# Clean fancy quotes and build final prompt with character injection
safe_inject = persona["prompt_inject"].replace("", "\"").replace("", "\"").replace("", "'")
full_prompt += f"{safe_inject}\n"
# Add recent conversation context, if available
if context:
logger.info("🧠 Injected context block (pre-prompt):\n" + context)
full_prompt += f"[Recent Conversation]\n{context}\n\n"
# Add user prompt + character or plain ending
if persona:
full_prompt += f"User: {user_prompt}\n{persona['name']}:"
full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:"
else:
full_prompt += user_prompt
full_prompt = user_prompt # fallback to raw prompt if no persona loaded
payload = {
"model": model_name,
"model": model_name, # 🔧 Suggested fix: previously hardcoded to MODEL_NAME
"prompt": full_prompt,
"stream": False
# optional: add "keep_alive": 300 to keep model warm
}
logger.info("🛰️ SENDING TO OLLAMA /generate")

View file

@@ -10,11 +10,10 @@ import random
import yaml
from scheduler import start_scheduler
from profilepic import set_avatar_from_bytes
from context import fetch_recent_context, format_context
from logger import setup_logger
logger = setup_logger("bot")
from ai import unload_model, load_model, get_current_model, get_ai_response
from ai import unload_model, load_model, get_current_model
dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
load_dotenv(dotenv_path)
@@ -35,6 +34,16 @@ else:
logger.info(f"✅ Final model in use: {MODEL_NAME}")
from ai import get_ai_response, load_model
MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
if load_model(MODEL_NAME):
logger.info(f"🚀 Model `{MODEL_NAME}` preloaded on startup.")
else:
logger.warning(f"⚠️ Failed to preload model `{MODEL_NAME}`.")
logger.info(f"✅ Final model in use: {MODEL_NAME}")
from personality import apply_personality, set_persona
from discord.ext.commands import (
cooldown,
@@ -94,14 +103,6 @@ async def on_message(message):
if bot.user.mentioned_in(message):
prompt = message.content.replace(f"<@{bot.user.id}>", "").strip()
context_msgs = await fetch_recent_context(message.channel)
formatted_context = format_context(context_msgs)
logger.info("🧠 Injected context block:\n" + formatted_context)
async with message.channel.typing():
reply = get_ai_response(prompt, context=formatted_context)
await message.channel.send(reply)
if prompt:
async with message.channel.typing(): # 👈 Typing indicator!
response = get_ai_response(prompt)

View file

@ -1,40 +0,0 @@
# context.py
import os
import yaml
import discord
# Resolve paths relative to this module so lookups work regardless of the
# process's current working directory.
base_dir = os.path.dirname(__file__)
# Load the bot settings once at import time.
with open(os.path.join(base_dir, "settings.yml"), "r", encoding="utf-8") as f:
    settings = yaml.safe_load(f)
# Maximum number of history lines to inject as conversation context
# (falls back to 15 when `context.max_messages` is absent from settings).
CONTEXT_LIMIT = settings["context"].get("max_messages", 15)
async def fetch_recent_context(channel, limit=None):
    """Collect up to *limit* recent human-readable messages from a channel.

    Scans the channel history (up to 100 messages, newest first), skipping
    other bots' messages, empty messages, and command invocations (lines
    starting with ``!``), and returns the surviving lines oldest-first,
    each formatted as ``YYYY-MM-DD HH:MM - DisplayName: text``.

    Args:
        channel: A Discord messageable (guild text channel or DM channel).
        limit: Maximum number of lines to return. Defaults to the
            ``context.max_messages`` setting, resolved at call time so a
            reloaded CONTEXT_LIMIT takes effect (the old import-time
            default froze the value).

    Returns:
        list[str]: Chronologically ordered context lines.
    """
    if limit is None:
        limit = CONTEXT_LIMIT

    # DM channels have no `guild` attribute; fall back to None so the
    # bot filter below simply drops every bot message in that case
    # (previously this raised AttributeError in DMs).
    guild = getattr(channel, "guild", None)
    me_id = guild.me.id if guild is not None else None

    lines = []
    async for message in channel.history(limit=100):
        # Skip other bots (but keep this bot's own messages).
        if message.author.bot and message.author.id != me_id:
            continue
        # Collapse newlines and runs of whitespace into single spaces.
        text = " ".join(message.clean_content.split())
        if not text:
            continue
        if text.startswith("!"):
            # Ignore command invocations.
            continue
        stamp = message.created_at.strftime("%Y-%m-%d %H:%M")
        lines.append(f"{stamp} - {message.author.display_name}: {text}")
        if len(lines) >= limit:
            break

    # history() yields newest-first; present context oldest-first.
    lines.reverse()
    return lines
def format_context(lines: list[str]) -> str:
    """Render pre-formatted context lines as one newline-separated block."""
    separator = "\n"
    return separator.join(lines)

View file

@@ -1,5 +1,5 @@
{
"name": "Delta",
"emoji": "😼",
"prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. Always speak with flair. keep messages sort and use emojis moderately. Its ok to swear a little."
"prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately."
}

View file

@@ -6,10 +6,6 @@ messages:
cooldown:
- "🕒 Chill, wait {seconds}s before trying again."
context:
enabled: true
max_messages: 30
scheduler:
enabled: false
mode: simple # <- this activates simple mode