# bot.py

import os
import discord
import yaml
import random
from dotenv import load_dotenv
from textwrap import wrap
from discord.ext import commands
from discord.ext.commands import (
    cooldown,
    BucketType,
    CooldownMapping,
    CommandOnCooldown
)

# Local imports
from scheduler import start_scheduler
from profilepic import set_avatar_from_bytes
from context import fetch_recent_context, format_context
from user_profiles import (
    load_user_profile,
    update_last_seen,
    increment_interactions,
    format_profile_for_block,
    set_pronouns,
    set_custom_prompt
)
from personality import apply_personality, set_persona
from logger import setup_logger
from ai import (
    unload_model,
    load_model,
    get_current_model,
    get_ai_response,
    TAGS_ENDPOINT
)

# Setup logger and environment
logger = setup_logger("bot")
dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
load_dotenv(dotenv_path)
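# The .env file one directory above this script is expected to provide the
# values read via os.getenv() in this module, at minimum DISCORD_TOKEN and
# MODEL_NAME. Illustrative sketch only (placeholder values):
#
#   DISCORD_TOKEN=your-bot-token-here
#   MODEL_NAME=llama3:latest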

# Load model settings
MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest")
logger.info(f"🔍 Loaded MODEL_NAME from .env: {MODEL_NAME}")
logger.info(f"🧹 Attempting to clear VRAM before loading {MODEL_NAME}...")
unload_model(MODEL_NAME)

if load_model(MODEL_NAME):
    logger.info(f"🚀 Model `{MODEL_NAME}` preloaded on startup.")
else:
    logger.warning(f"⚠️ Failed to preload model `{MODEL_NAME}`.")
logger.info(f"✅ Final model in use: {MODEL_NAME}")

# Load YAML settings
base_dir = os.path.dirname(__file__)
settings_path = os.path.join(base_dir, "settings.yml")
with open(settings_path, "r", encoding="utf-8") as f:
    settings = yaml.safe_load(f)

ROAST_COOLDOWN_SECONDS = settings["cooldowns"]["roast"]
GLOBAL_COOLDOWN_SECONDS = settings["cooldowns"]["global"]
COOLDOWN_MSG_TEMPLATE = settings["messages"]["cooldown"]
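# Expected shape of settings.yml, inferred from the lookups above; the values
# below are placeholders, not the project's real settings. The cooldown message
# may be a single string or a list of variants, and should contain a "{seconds}"
# placeholder that on_command_error() fills in:
#
#   cooldowns:
#     roast: 60
#     global: 5
#   messages:
#     cooldown:
#       - "Slow down! Try again in {seconds}s."
#       - "Still cooling off, wait {seconds}s."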

# Configure Discord bot
TOKEN = os.getenv("DISCORD_TOKEN")
if not TOKEN:
    logger.error("❌ DISCORD_TOKEN not set in .env file.")
    raise SystemExit("DISCORD_TOKEN not set.")

intents = discord.Intents.default()
intents.message_content = True
bot = commands.Bot(command_prefix="!", intents=intents)
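# Note: message_content is a privileged intent. It must also be enabled for this
# bot in the Discord Developer Portal, otherwise most message text (and with it
# the "!" prefix commands) will arrive empty.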

# Handle cooldown errors globally
@bot.event
async def on_command_error(ctx, error):
    if isinstance(error, CommandOnCooldown):
        retry_secs = round(error.retry_after, 1)
        template = random.choice(COOLDOWN_MSG_TEMPLATE) if isinstance(COOLDOWN_MSG_TEMPLATE, list) else COOLDOWN_MSG_TEMPLATE
        msg = template.replace("{seconds}", str(retry_secs))
        logger.info(f"Command {ctx.command} on cooldown. Retry after {retry_secs} seconds.")
        await ctx.send(msg)
    else:
        raise error

# Global cooldown
global_cooldown = CooldownMapping.from_cooldown(1, GLOBAL_COOLDOWN_SECONDS, BucketType.user)

@bot.check
async def global_command_cooldown(ctx):
    bucket = global_cooldown.get_bucket(ctx.message)
    retry_after = bucket.update_rate_limit()
    if retry_after:
        raise CommandOnCooldown(bucket, retry_after, BucketType.user)
    return True
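# How the global check works: CooldownMapping.from_cooldown(1, GLOBAL_COOLDOWN_SECONDS,
# BucketType.user) keeps one rate-limit bucket per user, allowing one command per
# window. update_rate_limit() registers the current attempt and returns the seconds
# remaining when the bucket is exhausted (otherwise None), which is surfaced as
# CommandOnCooldown so on_command_error() can reply with the cooldown message.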

# Handle direct bot mentions
@bot.event
async def on_message(message):
    # Ignore messages from the bot itself
    if message.author == bot.user:
        return

    # Only respond if the bot is mentioned
    if bot.user.mentioned_in(message):
        # Strip the mention from the message to extract the actual prompt
        # (handle both the plain <@id> and nickname <@!id> mention forms)
        prompt = message.content.replace(f"<@!{bot.user.id}>", "").replace(f"<@{bot.user.id}>", "").strip()
        if not prompt:
            return  # Nothing to respond to

        # Update the user's interaction history
        user_id = str(message.author.id)
        update_last_seen(user_id)
        # increment_interactions(user_id)  # temporarily disabled for testing
        profile = load_user_profile(message.author)

        # Log summary info about the profile (but don’t inject it)
        logger.info(f"🧠 Profile loaded for {profile['display_name']} (interactions: {profile['interactions']})")

        # Fetch recent messages for conversation context
        context_msgs = await fetch_recent_context(message.channel)
        formatted_context = format_context(context_msgs)

        # Log number of context messages, not the entire block (to reduce clutter)
        logger.info(f"📚 Retrieved {len(context_msgs)} messages for context")

        # Let ai.py handle all prompt construction (persona + context + profile)
        async with message.channel.typing():
            reply = get_ai_response(prompt, context=formatted_context, user_profile=profile)
        await message.channel.send(reply)

    # Ensure bot commands still work (!setprompt, !roast, etc.)
    await bot.process_commands(message)
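# Note: get_ai_response() is called without await, so a slow model reply blocks
# the event loop (including Discord heartbeats) for its duration. If that becomes
# an issue, one option, assuming get_ai_response stays a blocking function, is to
# run it in a worker thread, e.g.:
#
#     reply = await asyncio.to_thread(
#         get_ai_response, prompt, context=formatted_context, user_profile=profile
#     )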

# Bot startup event
@bot.event
async def on_ready():
    print(f"✅ Logged in as {bot.user.name}")
    logger.info(f"Logged in as {bot.user.name}")
    for guild in bot.guilds:
        me = guild.me
        if me.nick != "Delta":
            try:
                await me.edit(nick="Delta")
                logger.info(f"🔄 Renamed self to Delta in {guild.name}")
            except Exception as e:
                logger.warning(f"⚠️ Failed to rename in {guild.name}: {e}")
    bot.loop.create_task(start_scheduler(bot))
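# The nickname change above requires the "Change Nickname" permission in each
# guild; when it is missing, the exception is logged and startup continues.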

# Commands
@bot.command(name="setprompt")
async def set_prompt_cmd(ctx, *, prompt):
    set_custom_prompt(ctx.author.id, prompt)
    await ctx.send("✅ Custom prompt saved.")

@bot.command(name="setpronouns")
async def set_pronouns_cmd(ctx, *, pronouns):
    success = set_pronouns(ctx.author, pronouns)
    if success:
        await ctx.send(f"✅ Got it, {ctx.author.display_name}! Your pronouns have been updated.")
    else:
        await ctx.send("⚠️ Failed to update pronouns. Try interacting with Delta first to generate your profile.")

@bot.command()
async def ping(ctx):
    await ctx.send("🏓 Pong!")

@bot.command()
async def chat(ctx, *, prompt):
    await ctx.send("🤖 Thinking...")
    reply = get_ai_response(prompt)
    for chunk in wrap(reply, 2000):
        await ctx.send(chunk)
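# Chunking note: textwrap.wrap() caps each chunk at 2000 characters (Discord's
# message length limit), but it also replaces newlines with spaces, so
# multi-paragraph replies are flattened into a single block of text.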

@bot.command()
async def setpersona(ctx, *, description):
    set_persona(description)
    await ctx.send("✅ Persona updated! New style will be used in replies.")

@bot.command(name='roast')
@cooldown(rate=1, per=ROAST_COOLDOWN_SECONDS, type=BucketType.user)
async def roast(ctx):
    target = ctx.message.mentions[0].mention if ctx.message.mentions else ctx.author.mention
    prompt = f"Roast {target}. Be dramatic, insulting, and sarcastic. Speak in your usual chaotic RGB catgirl personality."
    response = get_ai_response(prompt)
    await ctx.send(f"😼 {response}")

@bot.command(name="clearmodel")
async def clear_model(ctx):
    model = get_current_model()
    success = unload_model(model)
    msg = f"✅ Unloaded model: `{model}`" if success else f"❌ Failed to unload model: `{model}`"
    await ctx.send(msg)

@bot.command(name="model")
async def current_model(ctx):
    model = get_current_model()
    await ctx.send(f"📦 Current model: `{model}`")

@bot.command(name="setmodel")
async def set_model(ctx, *, model_name):
    current_model = get_current_model()
    if model_name == current_model:
        return await ctx.send(f"⚠️ `{model_name}` is already active.")

    await ctx.send(f"🔄 Switching from `{current_model}` to `{model_name}`…")

    if unload_model(current_model):
        await ctx.send(f"🧽 Unloaded `{current_model}` from VRAM.")
    else:
        await ctx.send(f"⚠️ Couldn’t unload `{current_model}`.")

    if not load_model(model_name):
        return await ctx.send(f"❌ Failed to pull `{model_name}`.")

    # Persist the choice for future runs. Note: this only rewrites an existing
    # MODEL_NAME= line; if .env has no such line, the file is left unchanged.
    os.environ["MODEL_NAME"] = model_name
    env_path = os.path.join(os.path.dirname(__file__), '..', '.env')
    lines = []
    with open(env_path, 'r', encoding='utf-8') as f:
        for line in f:
            lines.append(f"MODEL_NAME={model_name}\n" if line.startswith("MODEL_NAME=") else line)
    with open(env_path, 'w', encoding='utf-8') as f:
        f.writelines(lines)

    await ctx.send(f"✅ Model switched to `{model_name}` and `.env` updated.")

@bot.command(name="models")
async def list_models(ctx):
    import requests
    try:
        # Timeout so an unreachable backend doesn't hang the command forever
        resp = requests.get(TAGS_ENDPOINT, timeout=10)
        models = [m["name"] for m in resp.json().get("models", [])]
        if models:
            await ctx.send("🧠 Available models:\n" + "\n".join(f"- `{m}`" for m in models))
        else:
            await ctx.send("❌ No models found.")
    except Exception as e:
        await ctx.send(f"❌ Failed to fetch models: {e}")
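# The parsing in list_models() above assumes TAGS_ENDPOINT returns JSON shaped
# like {"models": [{"name": "llama3:latest"}, ...]}, i.e. a model-listing
# endpoint exposed by whatever backend ai.py talks to.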

@bot.command(name="setavatar")
@commands.is_owner()
async def set_avatar(ctx):
    if not ctx.message.attachments:
        return await ctx.send("❌ Please attach an image (PNG) to use as the new avatar.")

    image = ctx.message.attachments[0]
    image_bytes = await image.read()
    token = os.getenv("DISCORD_TOKEN")
    if not token:
        return await ctx.send("❌ Bot token not found in environment.")

    success = set_avatar_from_bytes(image_bytes, token)
    await ctx.send("✅ Avatar updated successfully!" if success else "❌ Failed to update avatar.")

# Run bot
bot.run(TOKEN)