diff --git a/bot.log b/bot.log new file mode 100644 index 0000000..8138360 --- /dev/null +++ b/bot.log @@ -0,0 +1,5 @@ +[2025-05-11 20:26:55] [INFO] Logged in as AI Bot +[2025-05-11 20:26:55] [INFO] šŸ›‘ Scheduler disabled in config. +[2025-05-11 20:27:06] [INFO] šŸ›°ļø SENDING TO OLLAMA /api/generate +[2025-05-11 20:27:06] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey delta\nDelta:', 'stream': False} +[2025-05-11 20:27:09] [INFO] šŸ“Ø Raw response: {"model":"llama3:latest","created_at":"2025-05-12T00:27:12.863261412Z","response":"šŸ”„ Ah, another peasant daring to address me? How...quaint. What is it that you desire to beg of me, oh mortal? šŸ±šŸ’ā€ā™€ļø","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,9665,198,20892,25,128009,128006,78191,128007,271,9468,242,98,16770,11,2500,90038,59772,311,2686,757,30,2650,1131,447,1673,13,3639,374,433,430,499,12876,311,2197,315,757,11,14346,49972,30,11410,238,109,93273,223,102470,32990,31643],"total_duration":3045003304,"load_duration":2622656694,"prompt_eval_count":65,"prompt_eval_duration":115343138,"eval_count":40,"eval_duration":306399019} diff --git a/src/__pycache__/ai.cpython-310.pyc b/src/__pycache__/ai.cpython-310.pyc index 2272f0a..8c49277 100644 Binary files a/src/__pycache__/ai.cpython-310.pyc and b/src/__pycache__/ai.cpython-310.pyc differ diff --git a/src/__pycache__/logger.cpython-310.pyc b/src/__pycache__/logger.cpython-310.pyc new file mode 100644 index 0000000..f1a54c2 Binary files /dev/null and 
b/src/__pycache__/logger.cpython-310.pyc differ diff --git a/src/__pycache__/personality.cpython-310.pyc b/src/__pycache__/personality.cpython-310.pyc index 0dc5f2a..530d70a 100644 Binary files a/src/__pycache__/personality.cpython-310.pyc and b/src/__pycache__/personality.cpython-310.pyc differ diff --git a/src/ai.py b/src/ai.py index 13a4534..3dd007e 100644 --- a/src/ai.py +++ b/src/ai.py @@ -4,12 +4,15 @@ import requests import os from dotenv import load_dotenv from personality import load_persona +from logger import setup_logger +logger = setup_logger("ai") load_dotenv() AI_URL = os.getenv("OLLAMA_API") # match .env and Docker ENV (e.g., http://localhost:11434/api/generate) if not AI_URL: -    raise ValueError("āŒ OLLAMA_API environment variable is not set.") +    logger.error("āŒ OLLAMA_API environment variable is not set.") +    raise ValueError("āŒ OLLAMA_API environment variable is not set.") def get_ai_response(user_prompt): @@ -28,12 +31,15 @@ def get_ai_response(user_prompt): "stream": False } -    print("\nšŸ›°ļø SENDING TO OLLAMA /api/generate") -    print("Payload:", payload) +    #print("\nšŸ›°ļø SENDING TO OLLAMA /api/generate") +    logger.info("šŸ›°ļø SENDING TO OLLAMA /api/generate") +    #print("Payload:", payload) +    logger.info(f"Payload: {payload}") try: response = requests.post(AI_URL, json=payload) -    print("šŸ“Ø Raw response:", response.text) +    #print("šŸ“Ø Raw response:", response.text) +    logger.info(f"šŸ“Ø Raw response: {response.text}") if response.status_code == 200: result = response.json() diff --git a/src/bot.py b/src/bot.py index 5bc583a..53b9a46 100644 --- a/src/bot.py +++ b/src/bot.py @@ -19,6 +19,8 @@ from discord.ext.commands import ( ) import yaml from scheduler import start_scheduler +from logger import setup_logger +logger = setup_logger("bot") base_dir = os.path.dirname(__file__) settings_path = os.path.join(base_dir, "settings.yml") @@ -44,6 +46,7 @@ async def on_command_error(ctx, error): retry_secs = round(error.retry_after, 1) msg =
COOLDOWN_MSG_TEMPLATE.replace("{seconds}", str(retry_secs)) print("šŸ•’ Chill, mortal. You must wait 11.6s before trying again. 😼") + logger.info(f"Command {ctx.command} on cooldown. Retry after {retry_secs} seconds.") await ctx.send(msg) else: raise error @@ -94,6 +97,7 @@ async def roast(ctx): @bot.event async def on_ready(): print(f"āœ… Logged in as {bot.user.name}") + logger.info(f"Logged in as {bot.user.name}") bot.loop.create_task(start_scheduler(bot)) bot.run(TOKEN) diff --git a/src/logger.py b/src/logger.py new file mode 100644 index 0000000..ad9bcbf --- /dev/null +++ b/src/logger.py @@ -0,0 +1,22 @@ +import logging +import os + +def setup_logger(name: str = "bot", level=logging.INFO, log_file: str = "bot.log"): + formatter = logging.Formatter( + "[%(asctime)s] [%(levelname)s] %(message)s", "%Y-%m-%d %H:%M:%S" + ) + + stream_handler = logging.StreamHandler() + stream_handler.setFormatter(formatter) + + file_handler = logging.FileHandler(log_file, encoding='utf-8') + file_handler.setFormatter(formatter) + + logger = logging.getLogger(name) + logger.setLevel(level) + + if not logger.handlers: + logger.addHandler(stream_handler) + logger.addHandler(file_handler) + + return logger diff --git a/src/personality.py b/src/personality.py index 5ae0eb1..bc15fcf 100644 --- a/src/personality.py +++ b/src/personality.py @@ -2,6 +2,8 @@ import json import os +from logger import setup_logger +logger = setup_logger("personality") PERSONA_FILE = "persona.json" @@ -10,18 +12,21 @@ def load_persona(): persona_path = os.path.join(base_dir, "persona.json") if not os.path.exists(persona_path): - print("āš ļø persona.json not found. Using raw LLM mode.") + #print("āš ļø persona.json not found. Using raw LLM mode.") + logger.info("āš ļø persona.json not found. Using raw LLM mode.") return None try: with open(persona_path, "r", encoding="utf-8") as f: data = json.load(f) if not data.get("name") or not data.get("prompt_inject"): - print("āš ļø persona.json missing fields. 
Using raw LLM mode.") + #print("āš ļø persona.json missing fields. Using raw LLM mode.") + logger.info("āš ļø persona.json missing fields. Using raw LLM mode.") return None return data except Exception as e: - print(f"āš ļø Failed to load persona.json: {e}") + #print(f"āš ļø Failed to load persona.json: {e}") + logger.info(f"āš ļø Failed to load persona.json: {e}") return None diff --git a/src/scheduler/__init__.py b/src/scheduler/__init__.py index 3705fbb..10fb319 100644 --- a/src/scheduler/__init__.py +++ b/src/scheduler/__init__.py @@ -6,6 +6,8 @@ import asyncio import random from ai import get_ai_response from . import simple, probabilistic, inactivity +from logger import setup_logger +logger = setup_logger("scheduler") def load_settings(): base_dir = os.path.dirname(os.path.dirname(__file__)) # go up from /scheduler/ @@ -23,11 +25,13 @@ async def start_scheduler(bot): scheduler_settings["channel_id"] = int(channel_id_env) if not scheduler_settings.get("enabled", False): - print("šŸ›‘ Scheduler disabled in config.") + #print("šŸ›‘ Scheduler disabled in config.") + logger.info("šŸ›‘ Scheduler disabled in config.") return mode = scheduler_settings.get("mode", "simple").lower() - print(f"šŸ•’ Delta Scheduler started in {mode.upper()} mode.") + #print(f"šŸ•’ Delta Scheduler started in {mode.upper()} mode.") + logger.info(f"šŸ•’ Delta Scheduler started in {mode.upper()} mode.") if mode == "simple": await simple.run(bot, scheduler_settings, settings) @@ -51,7 +55,8 @@ async def start_scheduler(bot): message = random.choice(scheduler_settings.get("messages", ["Hello from Delta."])) await channel.send(message) - print(f"šŸ“¤ Scheduled message sent to #{channel.name}: {message}") + #print(f"šŸ“¤ Scheduled message sent to #{channel.name}: {message}") + logger.info(f"šŸ“¤ Scheduled message sent to #{channel.name}: {message}") probabilistic.on_post(scheduler_settings["probabilistic"]) @@ -61,4 +66,5 @@ async def start_scheduler(bot): await inactivity.run(bot, 
scheduler_settings, settings) else: -        print(f"ā“ Unknown scheduler mode: {mode}") +        #print(f"ā“ Unknown scheduler mode: {mode}") +        logger.info(f"ā“ Unknown scheduler mode: {mode}") diff --git a/src/scheduler/__pycache__/__init__.cpython-310.pyc b/src/scheduler/__pycache__/__init__.cpython-310.pyc index b5d7dd8..50a200a 100644 Binary files a/src/scheduler/__pycache__/__init__.cpython-310.pyc and b/src/scheduler/__pycache__/__init__.cpython-310.pyc differ diff --git a/src/scheduler/__pycache__/probabilistic.cpython-310.pyc b/src/scheduler/__pycache__/probabilistic.cpython-310.pyc index c32d283..13acfe8 100644 Binary files a/src/scheduler/__pycache__/probabilistic.cpython-310.pyc and b/src/scheduler/__pycache__/probabilistic.cpython-310.pyc differ diff --git a/src/scheduler/__pycache__/simple.cpython-310.pyc b/src/scheduler/__pycache__/simple.cpython-310.pyc index c6715db..b89b8a4 100644 Binary files a/src/scheduler/__pycache__/simple.cpython-310.pyc and b/src/scheduler/__pycache__/simple.cpython-310.pyc differ diff --git a/src/scheduler/probabilistic.py b/src/scheduler/probabilistic.py index ab3c535..3a382f4 100644 --- a/src/scheduler/probabilistic.py +++ b/src/scheduler/probabilistic.py @@ -1,6 +1,7 @@ import random import datetime from ai import get_ai_response +from logger import setup_logger; logger = setup_logger("probabilistic") last_post_time = None post_chance = None diff --git a/src/scheduler/simple.py b/src/scheduler/simple.py index 2d03e22..a8deff7 100644 --- a/src/scheduler/simple.py +++ b/src/scheduler/simple.py @@ -2,6 +2,7 @@ import asyncio import random import datetime from ai import get_ai_response +from logger import setup_logger; logger = setup_logger("simple") last_post_time = None @@ -15,7 +16,8 @@ async def run(bot, scheduler_settings, full_settings): use_ai = scheduler_settings.get("use_ai", True) await bot.wait_until_ready() -    print("šŸ“† Simple scheduler active.") +    #print("šŸ“† Simple scheduler active.") +    logger.info("šŸ“† Simple scheduler active.") while not bot.is_closed(): now = datetime.datetime.utcnow() @@ -29,6 +31,7 @@ async def run(bot,
scheduler_settings, full_settings): message = random.choice(scheduler_settings.get("messages", ["Hello from Delta."])) await channel.send(message) - print(f"šŸ“¤ [Simple] Sent to #{channel.name}: {message}") + #print(f"šŸ“¤ [Simple] Sent to #{channel.name}: {message}") + logger.info(f"šŸ“¤ [Simple] Sent to #{channel.name}: {message}") await asyncio.sleep(interval * 60)