Feature #8 is almost fully implemented; it still needs pronoun support. See settings.yml for the related configuration.

This commit is contained in:
milo 2025-05-15 00:22:24 -04:00
parent 61053f6177
commit ea8983ddb0
10 changed files with 302 additions and 1693 deletions

1665
bot.log

File diff suppressed because one or more lines are too long

Binary file not shown.

Binary file not shown.

View file

@ -7,6 +7,7 @@ import requests
import re import re
from dotenv import load_dotenv from dotenv import load_dotenv
from personality import load_persona from personality import load_persona
from user_profiles import format_profile_for_block
from logger import setup_logger from logger import setup_logger
# Set up logger specifically for AI operations # Set up logger specifically for AI operations
@ -82,28 +83,33 @@ def get_current_model():
return get_model_name() return get_model_name()
# Main LLM interaction — injects personality and sends prompt to Ollama # Main LLM interaction — injects personality and sends prompt to Ollama
def get_ai_response(user_prompt, context=None): def get_ai_response(user_prompt, context=None, user_profile=None):
model_name = get_model_name() model_name = get_model_name()
load_model(model_name) load_model(model_name)
persona = load_persona() persona = load_persona()
full_prompt = "" full_prompt = ""
# Inject persona first if available # Inject Delta's base persona
if persona: if persona:
safe_inject = persona["prompt_inject"].replace("", "\"").replace("", "\"").replace("", "'") safe_inject = persona["prompt_inject"].replace("", "\"").replace("", "\"").replace("", "'")
full_prompt += f"{safe_inject}\n" full_prompt += f"{safe_inject}\n"
# Add recent conversation context, if available # Inject custom user profile prompt as override or influence
if user_profile and user_profile.get("custom_prompt"):
full_prompt += f"[User Instruction]\n{user_profile['custom_prompt']}\n"
logger.info(f"🧠 Injected user custom prompt:\n{user_profile['custom_prompt']}")
# Add recent chat context (this already includes the profile block!)
if context: if context:
logger.info("🧠 Injected context block (pre-prompt):\n" + context) logger.info("🧠 Injected context block (pre-prompt):\n" + context)
full_prompt += f"[Recent Conversation]\n{context}\n\n" full_prompt += f"[Recent Conversation]\n{context}\n"
# Add user prompt + character or plain ending # Add user's message and expected bot reply prefix
if persona: if persona:
full_prompt += f"User: {user_prompt}\n{persona['name']}:" full_prompt += f"\nUser: {user_prompt}\n{persona['name']}:"
else: else:
full_prompt += user_prompt full_prompt += f"\nUser: {user_prompt}\nResponse:"
payload = { payload = {
"model": model_name, "model": model_name,
@ -117,11 +123,9 @@ def get_ai_response(user_prompt, context=None):
try: try:
response = requests.post(GEN_ENDPOINT, json=payload) response = requests.post(GEN_ENDPOINT, json=payload)
logger.info(f"📨 Raw response: {response.text}") logger.info(f"📨 Raw response: {response.text}")
if response.status_code == 200: if response.status_code == 200:
result = response.json() result = response.json()
response_text = result.get("response", "[No message in response]") return result.get("response", "[No message in response]")
return strip_thinking_block(response_text) if not SHOW_THINKING_BLOCKS else response_text
else: else:
return f"[Error {response.status_code}] {response.text}" return f"[Error {response.status_code}] {response.text}"
except Exception as e: except Exception as e:

View file

@ -3,39 +3,10 @@
import os import os
import discord import discord
import yaml import yaml
from discord.ext import commands
from textwrap import wrap
from dotenv import load_dotenv
import random import random
import yaml from dotenv import load_dotenv
from scheduler import start_scheduler from textwrap import wrap
from profilepic import set_avatar_from_bytes from discord.ext import commands
from context import fetch_recent_context, format_context
from logger import setup_logger
logger = setup_logger("bot")
from ai import unload_model, load_model, get_current_model, get_ai_response
dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
load_dotenv(dotenv_path)
logger.info(f"🔍 Loaded MODEL_NAME from .env: {os.getenv('MODEL_NAME')}")
MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
logger.info(f"🔍 Loaded MODEL_NAME from .env: {MODEL_NAME}")
# 🧽 Try to unload any currently loaded model
logger.info(f"🧹 Attempting to clear VRAM before loading {MODEL_NAME}...")
unload_model(MODEL_NAME)
# 🚀 Load target model from .env
if load_model(MODEL_NAME):
logger.info(f"🚀 Model `{MODEL_NAME}` preloaded on startup.")
else:
logger.warning(f"⚠️ Failed to preload model `{MODEL_NAME}`.")
logger.info(f"✅ Final model in use: {MODEL_NAME}")
from personality import apply_personality, set_persona
from discord.ext.commands import ( from discord.ext.commands import (
cooldown, cooldown,
BucketType, BucketType,
@ -43,9 +14,50 @@ from discord.ext.commands import (
CommandOnCooldown CommandOnCooldown
) )
# Local imports
from scheduler import start_scheduler
from profilepic import set_avatar_from_bytes
from context import fetch_recent_context, format_context
from user_profiles import (
load_user_profile,
update_last_seen,
increment_interactions,
format_profile_for_block,
set_pronouns,
set_custom_prompt
)
from personality import apply_personality, set_persona
from logger import setup_logger
from ai import (
unload_model,
load_model,
get_current_model,
get_ai_response,
TAGS_ENDPOINT
)
from user_profiles import format_profile_for_block as format_user_profile_block
# Setup logger and environment
logger = setup_logger("bot")
dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
load_dotenv(dotenv_path)
# Load model settings
MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
logger.info(f"🔍 Loaded MODEL_NAME from .env: {MODEL_NAME}")
logger.info(f"🧹 Attempting to clear VRAM before loading {MODEL_NAME}...")
unload_model(MODEL_NAME)
if load_model(MODEL_NAME):
logger.info(f"🚀 Model `{MODEL_NAME}` preloaded on startup.")
else:
logger.warning(f"⚠️ Failed to preload model `{MODEL_NAME}`.")
logger.info(f"✅ Final model in use: {MODEL_NAME}")
# Load YAML settings
base_dir = os.path.dirname(__file__) base_dir = os.path.dirname(__file__)
settings_path = os.path.join(base_dir, "settings.yml") settings_path = os.path.join(base_dir, "settings.yml")
with open(settings_path, "r", encoding="utf-8") as f: with open(settings_path, "r", encoding="utf-8") as f:
settings = yaml.safe_load(f) settings = yaml.safe_load(f)
@ -53,6 +65,7 @@ ROAST_COOLDOWN_SECONDS = settings["cooldowns"]["roast"]
GLOBAL_COOLDOWN_SECONDS = settings["cooldowns"]["global"] GLOBAL_COOLDOWN_SECONDS = settings["cooldowns"]["global"]
COOLDOWN_MSG_TEMPLATE = settings["messages"]["cooldown"] COOLDOWN_MSG_TEMPLATE = settings["messages"]["cooldown"]
# Configure Discord bot
TOKEN = os.getenv("DISCORD_TOKEN") TOKEN = os.getenv("DISCORD_TOKEN")
if not TOKEN: if not TOKEN:
logger.error("❌ DISCORD_TOKEN not set in .env file.") logger.error("❌ DISCORD_TOKEN not set in .env file.")
@ -60,61 +73,75 @@ if not TOKEN:
intents = discord.Intents.default() intents = discord.Intents.default()
intents.message_content = True intents.message_content = True
bot = commands.Bot(command_prefix="!", intents=intents) bot = commands.Bot(command_prefix="!", intents=intents)
# Handle cooldown errors globally
@bot.event @bot.event
async def on_command_error(ctx, error): async def on_command_error(ctx, error):
if isinstance(error, CommandOnCooldown): if isinstance(error, CommandOnCooldown):
retry_secs = round(error.retry_after, 1) retry_secs = round(error.retry_after, 1)
template = random.choice(COOLDOWN_MSG_TEMPLATE) if isinstance(COOLDOWN_MSG_TEMPLATE, list) else COOLDOWN_MSG_TEMPLATE template = random.choice(COOLDOWN_MSG_TEMPLATE) if isinstance(COOLDOWN_MSG_TEMPLATE, list) else COOLDOWN_MSG_TEMPLATE
msg = template.replace("{seconds}", str(retry_secs)) msg = template.replace("{seconds}", str(retry_secs))
logger.info(f"Command {ctx.command} on cooldown. Retry after {retry_secs} seconds.") logger.info(f"Command {ctx.command} on cooldown. Retry after {retry_secs} seconds.")
await ctx.send(msg) await ctx.send(msg)
else: else:
raise error raise error
# Global cooldown bucket # Global cooldown
global_cooldown = CooldownMapping.from_cooldown(1, GLOBAL_COOLDOWN_SECONDS, BucketType.user) global_cooldown = CooldownMapping.from_cooldown(1, GLOBAL_COOLDOWN_SECONDS, BucketType.user)
@bot.check @bot.check
async def global_command_cooldown(ctx): async def global_command_cooldown(ctx):
bucket = global_cooldown.get_bucket(ctx.message) bucket = global_cooldown.get_bucket(ctx.message)
retry_after = bucket.update_rate_limit() retry_after = bucket.update_rate_limit()
if retry_after: if retry_after:
raise CommandOnCooldown(bucket, retry_after, BucketType.user) raise CommandOnCooldown(bucket, retry_after, BucketType.user)
return True return True
# Handle direct bot mentions
@bot.event @bot.event
async def on_message(message): async def on_message(message):
# Ignore messages from the bot itself
if message.author == bot.user: if message.author == bot.user:
return return
# Only respond if the bot is mentioned
if bot.user.mentioned_in(message): if bot.user.mentioned_in(message):
# Strip the mention from the message to extract the actual prompt
prompt = message.content.replace(f"<@{bot.user.id}>", "").strip() prompt = message.content.replace(f"<@{bot.user.id}>", "").strip()
if not prompt:
return # Nothing to respond to
# Update the user's interaction history
user_id = str(message.author.id)
update_last_seen(user_id)
increment_interactions(user_id)
profile = load_user_profile(message.author)
# Log summary info about the profile (but dont inject it)
logger.info(f"🧠 Profile loaded for {profile['display_name']} (interactions: {profile['interactions']})")
# Fetch recent messages for conversation context
context_msgs = await fetch_recent_context(message.channel) context_msgs = await fetch_recent_context(message.channel)
formatted_context = format_context(context_msgs) formatted_context = format_context(context_msgs)
logger.info("🧠 Injected context block:\n" + formatted_context) # Log number of context messages, not the entire block (to reduce clutter)
logger.info(f"📚 Retrieved {len(context_msgs)} messages for context")
# Let ai.py handle all prompt construction (persona + context + profile)
async with message.channel.typing(): async with message.channel.typing():
reply = get_ai_response(prompt, context=formatted_context) reply = get_ai_response(prompt, context=formatted_context, user_profile=profile)
await message.channel.send(reply) await message.channel.send(reply)
if prompt:
async with message.channel.typing(): # 👈 Typing indicator!
response = get_ai_response(prompt)
await message.channel.send(response)
# Ensure bot commands still work (!setprompt, !roast, etc.)
await bot.process_commands(message) await bot.process_commands(message)
# Bot startup event
@bot.event @bot.event
async def on_ready(): async def on_ready():
print(f"✅ Logged in as {bot.user.name}") print(f"✅ Logged in as {bot.user.name}")
logger.info(f"Logged in as {bot.user.name}") logger.info(f"Logged in as {bot.user.name}")
# Optional: rename itself in servers (if it has permission)
for guild in bot.guilds: for guild in bot.guilds:
me = guild.me me = guild.me
if me.nick != "Delta": if me.nick != "Delta":
@ -123,9 +150,22 @@ async def on_ready():
logger.info(f"🔄 Renamed self to Delta in {guild.name}") logger.info(f"🔄 Renamed self to Delta in {guild.name}")
except Exception as e: except Exception as e:
logger.warning(f"⚠️ Failed to rename in {guild.name}: {e}") logger.warning(f"⚠️ Failed to rename in {guild.name}: {e}")
bot.loop.create_task(start_scheduler(bot)) bot.loop.create_task(start_scheduler(bot))
# Commands
@bot.command(name="setprompt")
async def set_prompt_cmd(ctx, *, prompt):
set_custom_prompt(ctx.author.id, prompt)
await ctx.send("✅ Custom prompt saved.")
@bot.command(name="setpronouns")
async def set_pronouns_cmd(ctx, *, pronouns):
success = set_pronouns(ctx.author.id, pronouns)
if success:
await ctx.send(f"✅ Got it, {ctx.author.display_name}! Your pronouns have been updated.")
else:
await ctx.send("⚠️ Failed to update pronouns. Try interacting with Delta first to generate your profile.")
@bot.command() @bot.command()
async def ping(ctx): async def ping(ctx):
await ctx.send("🏓 Pong!") await ctx.send("🏓 Pong!")
@ -134,16 +174,7 @@ async def ping(ctx):
async def chat(ctx, *, prompt): async def chat(ctx, *, prompt):
await ctx.send("🤖 Thinking...") await ctx.send("🤖 Thinking...")
reply = get_ai_response(prompt) reply = get_ai_response(prompt)
MAX_DISCORD_MESSAGE_LENGTH = 2000 for chunk in wrap(reply, 2000):
# Split long replies into chunks that fit Discord limits
chunks = wrap(reply, MAX_DISCORD_MESSAGE_LENGTH)
# Log only if the response is being chunked
if len(chunks) > 1:
logger.warning(f"💬 Splitting response into {len(chunks)} chunks due to length.")
for chunk in chunks:
await ctx.send(chunk) await ctx.send(chunk)
@bot.command() @bot.command()
@ -154,21 +185,13 @@ async def setpersona(ctx, *, description):
@bot.command(name='roast') @bot.command(name='roast')
@cooldown(rate=1, per=ROAST_COOLDOWN_SECONDS, type=BucketType.user) @cooldown(rate=1, per=ROAST_COOLDOWN_SECONDS, type=BucketType.user)
async def roast(ctx): async def roast(ctx):
# Get the mentioned user (or fallback to the author)
target = ctx.message.mentions[0].mention if ctx.message.mentions else ctx.author.mention target = ctx.message.mentions[0].mention if ctx.message.mentions else ctx.author.mention
# Build the roast prompt
prompt = f"Roast {target}. Be dramatic, insulting, and sarcastic. Speak in your usual chaotic RGB catgirl personality." prompt = f"Roast {target}. Be dramatic, insulting, and sarcastic. Speak in your usual chaotic RGB catgirl personality."
# Get AI response
response = get_ai_response(prompt) response = get_ai_response(prompt)
# Send the roast back to the channel
await ctx.send(f"😼 {response}") await ctx.send(f"😼 {response}")
@bot.command(name="clearmodel") @bot.command(name="clearmodel")
async def clear_model(ctx): async def clear_model(ctx):
from ai import unload_model, get_current_model
model = get_current_model() model = get_current_model()
success = unload_model(model) success = unload_model(model)
msg = f"✅ Unloaded model: `{model}`" if success else f"❌ Failed to unload model: `{model}`" msg = f"✅ Unloaded model: `{model}`" if success else f"❌ Failed to unload model: `{model}`"
@ -176,41 +199,31 @@ async def clear_model(ctx):
@bot.command(name="model") @bot.command(name="model")
async def current_model(ctx): async def current_model(ctx):
from ai import get_current_model
model = get_current_model() model = get_current_model()
await ctx.send(f"📦 Current model: `{model}`") await ctx.send(f"📦 Current model: `{model}`")
@bot.command(name="setmodel") @bot.command(name="setmodel")
async def set_model(ctx, *, model_name): async def set_model(ctx, *, model_name):
from ai import get_current_model, load_model, unload_model
current_model = get_current_model() current_model = get_current_model()
if model_name == current_model: if model_name == current_model:
return await ctx.send(f"⚠️ `{model_name}` is already active.") return await ctx.send(f"⚠️ `{model_name}` is already active.")
await ctx.send(f"🔄 Switching from `{current_model}` to `{model_name}`…") await ctx.send(f"🔄 Switching from `{current_model}` to `{model_name}`…")
# 1) Soft-unload old model from VRAM only
if unload_model(current_model): if unload_model(current_model):
await ctx.send(f"🧽 Unloaded `{current_model}` from VRAM.") await ctx.send(f"🧽 Unloaded `{current_model}` from VRAM.")
else: else:
await ctx.send(f"⚠️ Couldnt unload `{current_model}` (it may not have been loaded).") await ctx.send(f"⚠️ Couldnt unload `{current_model}`.")
# 2) Load the new one
if not load_model(model_name): if not load_model(model_name):
return await ctx.send(f"❌ Failed to pull `{model_name}`. Make sure its in `ollama list`.") return await ctx.send(f"❌ Failed to pull `{model_name}`.")
# 3) Update runtime AND .env on disk
os.environ["MODEL_NAME"] = model_name os.environ["MODEL_NAME"] = model_name
env_path = os.path.join(os.path.dirname(__file__), '..', '.env') env_path = os.path.join(os.path.dirname(__file__), '..', '.env')
# Read and rewrite .env
lines = [] lines = []
with open(env_path, 'r', encoding='utf-8') as f: with open(env_path, 'r', encoding='utf-8') as f:
for line in f: for line in f:
if line.startswith("MODEL_NAME="): lines.append(f"MODEL_NAME={model_name}\n" if line.startswith("MODEL_NAME=") else line)
lines.append(f"MODEL_NAME={model_name}\n")
else:
lines.append(line)
with open(env_path, 'w', encoding='utf-8') as f: with open(env_path, 'w', encoding='utf-8') as f:
f.writelines(lines) f.writelines(lines)
@ -219,8 +232,6 @@ async def set_model(ctx, *, model_name):
@bot.command(name="models") @bot.command(name="models")
async def list_models(ctx): async def list_models(ctx):
import requests import requests
from ai import TAGS_ENDPOINT
try: try:
resp = requests.get(TAGS_ENDPOINT) resp = requests.get(TAGS_ENDPOINT)
models = [m["name"] for m in resp.json().get("models", [])] models = [m["name"] for m in resp.json().get("models", [])]
@ -232,28 +243,19 @@ async def list_models(ctx):
await ctx.send(f"❌ Failed to fetch models: {e}") await ctx.send(f"❌ Failed to fetch models: {e}")
@bot.command(name="setavatar") @bot.command(name="setavatar")
@commands.is_owner() # Only the bot owner can run this @commands.is_owner()
async def set_avatar(ctx): async def set_avatar(ctx):
if not ctx.message.attachments: if not ctx.message.attachments:
return await ctx.send("❌ Please attach an image (PNG) to use as the new avatar.") return await ctx.send("❌ Please attach an image (PNG) to use as the new avatar.")
image = ctx.message.attachments[0] image = ctx.message.attachments[0]
image_bytes = await image.read() image_bytes = await image.read()
token = os.getenv("DISCORD_TOKEN") token = os.getenv("DISCORD_TOKEN")
if not token: if not token:
return await ctx.send("❌ Bot token not found in environment.") return await ctx.send("❌ Bot token not found in environment.")
success = set_avatar_from_bytes(image_bytes, token) success = set_avatar_from_bytes(image_bytes, token)
if success: await ctx.send("✅ Avatar updated successfully!" if success else "❌ Failed to update avatar.")
await ctx.send("✅ Avatar updated successfully!")
else:
await ctx.send("❌ Failed to update avatar.")
@bot.event
async def on_ready():
print(f"✅ Logged in as {bot.user.name}")
logger.info(f"Logged in as {bot.user.name}")
bot.loop.create_task(start_scheduler(bot))
# Run bot
bot.run(TOKEN) bot.run(TOKEN)

View file

@ -7,8 +7,11 @@ messages:
- "🕒 Chill, wait {seconds}s before trying again." - "🕒 Chill, wait {seconds}s before trying again."
context: context:
enabled: true enabled: false # not working must implement
max_messages: 30 max_messages: 10 # max messages to keep in context
user_profiles:
enable_custom_prompt: true # ← Set false to ignore user `custom_prompt` values in replies
scheduler: scheduler:
enabled: false enabled: false

13
src/user_profiles.json Normal file
View file

@ -0,0 +1,13 @@
{
"161149541171593216": {
"name": "themiloverse",
"display_name": "Miguel",
"first_seen": "2025-05-15T03:16:30.011640",
"last_seen": "2025-05-15T04:17:43.970172",
"last_message": "2025-05-15T04:17:43.970172",
"interactions": 37,
"pronouns": null,
"avatar_url": "https://cdn.discordapp.com/avatars/161149541171593216/fb0553a29d9f73175cb6aea24d0e19ec.png?size=1024",
"custom_prompt": "delta is very nice to me since I am her master, and creator"
}
}

100
src/user_profiles.py Normal file
View file

@ -0,0 +1,100 @@
# user_profiles.py
# Handles loading, updating, and storing per-user profiles for Delta
import os
import json
from datetime import datetime
# Profile store lives next to this module; every helper below reads/writes this file.
PROFILE_PATH = os.path.join(os.path.dirname(__file__), "user_profiles.json")
def ensure_profile_file():
    """Guarantee that PROFILE_PATH exists and holds a JSON object (dict).

    A missing, unparsable, or non-dict file is replaced with an empty dict.
    """
    needs_reset = True
    if os.path.exists(PROFILE_PATH):
        try:
            with open(PROFILE_PATH, "r", encoding="utf-8") as fh:
                needs_reset = not isinstance(json.load(fh), dict)
        except Exception:
            # Corrupt / unreadable JSON — fall through and rewrite the store.
            needs_reset = True
    if needs_reset:
        with open(PROFILE_PATH, "w", encoding="utf-8") as fh:
            json.dump({}, fh, indent=2)
def load_profiles():
    """Return the full profile store as a dict keyed by user-id string."""
    ensure_profile_file()
    with open(PROFILE_PATH, "r", encoding="utf-8") as fh:
        data = json.load(fh)
    return data
def save_profiles(profiles):
    """Persist the whole profile dict to disk, pretty-printed."""
    with open(PROFILE_PATH, "w", encoding="utf-8") as fh:
        json.dump(profiles, fh, indent=2)
def load_user_profile(user):
    """Fetch (or create) the profile for a Discord *user* and persist it.

    NOTE(review): calling this is NOT a pure read — it refreshes
    ``last_seen``/``last_message`` and bumps ``interactions`` on every call,
    then writes the store back to disk. Callers that also call
    increment_interactions() will double-count; confirm intended.
    """
    ensure_profile_file()
    profiles = load_profiles()
    uid = str(user.id)
    stamp = datetime.utcnow().isoformat()
    # Template used only when this user has never been seen before.
    defaults = {
        "name": user.name,
        "display_name": user.display_name,
        "first_seen": stamp,
        "last_seen": stamp,
        "last_message": stamp,
        "interactions": 0,
        "pronouns": None,
        "avatar_url": str(user.display_avatar.url),
        "custom_prompt": None,  # 🆕 field
    }
    record = profiles.get(uid, defaults)
    # Refresh activity stats on every lookup, then persist.
    record["last_seen"] = stamp
    record["last_message"] = stamp
    record["interactions"] += 1
    profiles[uid] = record
    save_profiles(profiles)
    return record
def update_last_seen(user_id):
    """Refresh the ``last_seen`` timestamp for an already-known user.

    Unknown user ids are ignored silently (no profile is created here).
    """
    profiles = load_profiles()
    uid = str(user_id)
    if uid not in profiles:
        return
    profiles[uid]["last_seen"] = datetime.utcnow().isoformat()
    save_profiles(profiles)
def increment_interactions(user_id):
    """Bump the interaction counter for an already-known user.

    Unknown user ids are ignored silently (no profile is created here).
    """
    profiles = load_profiles()
    uid = str(user_id)
    if uid not in profiles:
        return
    profiles[uid]["interactions"] = profiles[uid]["interactions"] + 1
    save_profiles(profiles)
def set_pronouns(user, pronouns):
    """Set the pronouns on *user*'s profile, creating the profile if needed.

    Returns True on success (same contract as the original).

    Fix: the original called load_user_profile() unconditionally, which
    spuriously incremented the interaction counter and wrote the store to
    disk twice, and then mixed that fresh record into a stale ``profiles``
    dict. Existing profiles are now updated in place with a single write.
    """
    uid = str(user.id)
    profiles = load_profiles()
    if uid not in profiles:
        # First contact: create the profile (this records one interaction),
        # then re-read the store so we edit the persisted record.
        load_user_profile(user)
        profiles = load_profiles()
    profiles[uid]["pronouns"] = pronouns
    save_profiles(profiles)
    return True
def set_custom_prompt(user_id, prompt):
    """Store a per-user custom prompt that ai.py injects into replies.

    Returns True if the profile existed and was updated, False for unknown
    users. Fix: the original silently no-opped and returned None for unknown
    users, so the !setprompt command reported success even when nothing was
    saved; returning a bool (consistent with set_pronouns) lets callers
    check, while callers that ignore the return value behave as before.
    """
    profiles = load_profiles()
    uid = str(user_id)
    if uid not in profiles:
        return False
    profiles[uid]["custom_prompt"] = prompt
    save_profiles(profiles)
    return True
def format_profile_for_block(profile):
    """Render a profile dict as the ``[User Profile]`` context block.

    Pronouns and the custom prompt are included only when present and
    non-empty; name and interaction count always appear.
    """
    parts = ["[User Profile]", f"- Name: {profile['display_name']}"]
    pronouns = profile.get("pronouns")
    if pronouns:
        parts.append(f"- Pronouns: {pronouns}")
    parts.append(f"- Interactions: {profile['interactions']}")
    custom = profile.get("custom_prompt")
    if custom:
        parts.append(f"- Custom Prompt: {custom}")
    return "\n".join(parts)