Added logging functionality to fix issue (#5). The bot now does basic logging through a shared setup_logger helper that writes to bot.log.

milo 2025-05-11 20:36:31 -04:00
parent 37f5e53ece
commit f137198f67
14 changed files with 65 additions and 13 deletions

5
bot.log Normal file

@@ -0,0 +1,5 @@
[2025-05-11 20:26:55] [INFO] Logged in as AI Bot
[2025-05-11 20:26:55] [INFO] 🛑 Scheduler disabled in config.
[2025-05-11 20:27:06] [INFO] 🛰️ SENDING TO OLLAMA /api/generate
[2025-05-11 20:27:06] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey delta\nDelta:', 'stream': False}
[2025-05-11 20:27:09] [INFO] 📨 Raw response: {"model":"llama3:latest","created_at":"2025-05-12T00:27:12.863261412Z","response":"🔥 Ah, another peasant daring to address me? How...quaint. What is it that you desire to beg of me, oh mortal? 🐱💁‍♀️","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,9665,198,20892,25,128009,128006,78191,128007,271,9468,242,98,16770,11,2500,90038,59772,311,2686,757,30,2650,1131,447,1673,13,3639,374,433,430,499,12876,311,2197,315,757,11,14346,49972,30,11410,238,109,93273,223,102470,32990,31643],"total_duration":3045003304,"load_duration":2622656694,"prompt_eval_count":65,"prompt_eval_duration":115343138,"eval_count":40,"eval_duration":306399019}



@@ -4,12 +4,15 @@ import requests
import os
from dotenv import load_dotenv
from personality import load_persona
from logger import setup_logger

logger = setup_logger("ai")

load_dotenv()

AI_URL = os.getenv("OLLAMA_API")  # match .env and Docker ENV (e.g., http://localhost:11434/api/generate)
if not AI_URL:
    # Log the problem and still fail fast, so a missing URL is caught at startup
    # instead of surfacing later as requests.post(None, ...).
    logger.error("❌ OLLAMA_API environment variable is not set.")
    raise ValueError("❌ OLLAMA_API environment variable is not set.")


def get_ai_response(user_prompt):
@@ -28,12 +31,15 @@ def get_ai_response(user_prompt):
        "stream": False
    }

    print("\n🛰️ SENDING TO OLLAMA /api/generate")
    print("Payload:", payload)
    #print("\n🛰 SENDING TO OLLAMA /api/generate")
    logger.info("🛰️ SENDING TO OLLAMA /api/generate")
    #print("Payload:", payload)
    logger.info(f"Payload: {payload}")

    try:
        response = requests.post(AI_URL, json=payload)
        print("📨 Raw response:", response.text)
        #print("📨 Raw response:", response.text)
        logger.info(f"📨 Raw response: {response.text}")

        if response.status_code == 200:
            result = response.json()
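For context, the hunk stops right after result = response.json(). The raw body logged to bot.log above shows that the non-streaming /api/generate response carries the generated text in its "response" field; a minimal sketch of how that body might be unpacked, reusing this module's logger (the extract_reply helper and the empty-reply fallback are assumptions, not part of this commit):

def extract_reply(result: dict) -> str:
    # "response" is the field name visible in the raw body captured in bot.log.
    reply = result.get("response", "")
    if not reply:
        logger.error(f"Ollama reply had no 'response' text: {result}")
    return reply.strip()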


@@ -19,6 +19,8 @@ from discord.ext.commands import (
)
import yaml
from scheduler import start_scheduler
from logger import setup_logger
logger = setup_logger("bot")
base_dir = os.path.dirname(__file__)
settings_path = os.path.join(base_dir, "settings.yml")
@@ -44,6 +46,7 @@ async def on_command_error(ctx, error):
        retry_secs = round(error.retry_after, 1)
        msg = COOLDOWN_MSG_TEMPLATE.replace("{seconds}", str(retry_secs))
        print("🕒 Chill, mortal. You must wait 11.6s before trying again. 😼")
        logger.info(f"Command {ctx.command} on cooldown. Retry after {retry_secs} seconds.")
        await ctx.send(msg)
    else:
        raise error
@@ -94,6 +97,7 @@ async def roast(ctx):
@bot.event
async def on_ready():
    print(f"✅ Logged in as {bot.user.name}")
    logger.info(f"Logged in as {bot.user.name}")
    bot.loop.create_task(start_scheduler(bot))

bot.run(TOKEN)

22
src/logger.py Normal file

@@ -0,0 +1,22 @@
import logging
import os


def setup_logger(name: str = "bot", level=logging.INFO, log_file: str = "bot.log"):
    # Shared factory: returns a named logger that writes timestamped lines
    # to both the console and the log file (bot.log by default).
    formatter = logging.Formatter(
        "[%(asctime)s] [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S"
    )

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)

    file_handler = logging.FileHandler(log_file, encoding='utf-8')
    file_handler.setFormatter(formatter)

    logger = logging.getLogger(name)
    logger.setLevel(level)

    # Guard against attaching duplicate handlers on repeated calls.
    if not logger.handlers:
        logger.addHandler(stream_handler)
        logger.addHandler(file_handler)

    return logger
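A minimal usage sketch for the new helper, mirroring how the other modules in this commit wire it up (the module name "example" and the messages are placeholders, not part of the diff):

from logger import setup_logger

# Each module requests its own named logger; all of them append to the same
# bot.log because setup_logger defaults to log_file="bot.log".
logger = setup_logger("example")

logger.info("Scheduler tick completed.")
logger.error("Could not reach Ollama.")

Because of the handlers guard above, calling setup_logger twice with the same name will not attach duplicate handlers, but each distinct name gets its own console/file handler pair.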


@@ -2,6 +2,8 @@
import json
import os
from logger import setup_logger
logger = setup_logger("personality")
PERSONA_FILE = "persona.json"
@@ -10,18 +12,21 @@ def load_persona():
    persona_path = os.path.join(base_dir, "persona.json")

    if not os.path.exists(persona_path):
        print("⚠️ persona.json not found. Using raw LLM mode.")
        #print("⚠️ persona.json not found. Using raw LLM mode.")
        logger.info("⚠️ persona.json not found. Using raw LLM mode.")
        return None

    try:
        with open(persona_path, "r", encoding="utf-8") as f:
            data = json.load(f)

        if not data.get("name") or not data.get("prompt_inject"):
            print("⚠️ persona.json missing fields. Using raw LLM mode.")
            #print("⚠️ persona.json missing fields. Using raw LLM mode.")
            logger.info("⚠️ persona.json missing fields. Using raw LLM mode.")
            return None

        return data
    except Exception as e:
        print(f"⚠️ Failed to load persona.json: {e}")
        #print(f"⚠️ Failed to load persona.json: {e}")
        logger.info(f"⚠️ Failed to load persona.json: {e}")
        return None


@@ -6,6 +6,8 @@ import asyncio
import random
from ai import get_ai_response
from . import simple, probabilistic, inactivity
from logger import setup_logger

logger = setup_logger("scheduler")


def load_settings():
    base_dir = os.path.dirname(os.path.dirname(__file__))  # go up from /scheduler/
@@ -23,11 +25,13 @@ async def start_scheduler(bot):
        scheduler_settings["channel_id"] = int(channel_id_env)

    if not scheduler_settings.get("enabled", False):
        print("🛑 Scheduler disabled in config.")
        #print("🛑 Scheduler disabled in config.")
        logger.info("🛑 Scheduler disabled in config.")
        return

    mode = scheduler_settings.get("mode", "simple").lower()
    print(f"🕒 Delta Scheduler started in {mode.upper()} mode.")
    #print(f"🕒 Delta Scheduler started in {mode.upper()} mode.")
    logger.info(f"🕒 Delta Scheduler started in {mode.upper()} mode.")

    if mode == "simple":
        await simple.run(bot, scheduler_settings, settings)
@@ -51,7 +55,8 @@ async def start_scheduler(bot):
            message = random.choice(scheduler_settings.get("messages", ["Hello from Delta."]))
            await channel.send(message)
            print(f"📤 Scheduled message sent to #{channel.name}: {message}")
            #print(f"📤 Scheduled message sent to #{channel.name}: {message}")
            logger.info(f"📤 Scheduled message sent to #{channel.name}: {message}")

            probabilistic.on_post(scheduler_settings["probabilistic"])
@@ -61,4 +66,5 @@ async def start_scheduler(bot):
        await inactivity.run(bot, scheduler_settings, settings)
    else:
        print(f"❓ Unknown scheduler mode: {mode}")
        #print(f"❓ Unknown scheduler mode: {mode}")
        logger.info(f"❓ Unknown scheduler mode: {mode}")


@@ -1,6 +1,7 @@
import random
import datetime
from ai import get_ai_response
from logger import setup_logger

logger = setup_logger("probabilistic")

last_post_time = None
post_chance = None


@@ -2,6 +2,7 @@ import asyncio
import random
import datetime
from ai import get_ai_response
from logger import setup_logger

logger = setup_logger("simple")

last_post_time = None
@@ -15,7 +16,8 @@ async def run(bot, scheduler_settings, full_settings):
    use_ai = scheduler_settings.get("use_ai", True)

    await bot.wait_until_ready()
    print("📆 Simple scheduler active.")
    #print("📆 Simple scheduler active.")
    logger.info("📆 Simple scheduler active.")

    while not bot.is_closed():
        now = datetime.datetime.utcnow()
@@ -29,6 +31,7 @@ async def run(bot, scheduler_settings, full_settings):
            message = random.choice(scheduler_settings.get("messages", ["Hello from Delta."]))
            await channel.send(message)
            print(f"📤 [Simple] Sent to #{channel.name}: {message}")
            #print(f"📤 [Simple] Sent to #{channel.name}: {message}")
            logger.info(f"📤 [Simple] Sent to #{channel.name}: {message}")

        await asyncio.sleep(interval * 60)