test push to sync

milo 2025-09-20 12:00:55 -04:00
parent 2d5c636b9d
commit 3abbdd96eb
17 changed files with 125 additions and 1443 deletions

bot.error.log (new file, +1)

@@ -0,0 +1 @@
Truncated previous log to start fresh

bot.error.log.bak (new file, +24)

@@ -0,0 +1,24 @@
[2025-09-19 13:43:37] [INFO] 🔍 Loaded MODEL_NAME from .env: gemma3:12b
[2025-09-19 13:43:37] [INFO] 🧹 Attempting to clear VRAM before loading gemma3:12b...
[2025-09-19 13:43:37] [INFO] 🧹 Sending safe unload request for `gemma3:12b`
[2025-09-19 13:43:37] [INFO] 🧽 Ollama unload response: 200 - {"model":"gemma3:12b","created_at":"2025-09-19T17:43:37.979562083Z","response":"","done":true,"done_reason":"unload"}
[2025-09-19 13:43:37] [INFO] 🧠 Preloading model: gemma3:12b
[2025-09-19 13:43:38] [INFO] 📦 Model pull started successfully.
[2025-09-19 13:43:38] [INFO] 🚀 Model `gemma3:12b` preloaded on startup.
[2025-09-19 13:43:38] [INFO] ✅ Final model in use: gemma3:12b
[2025-09-19 13:43:38] [INFO ] discord.client: logging in using static token
[2025-09-19 13:43:38] [INFO ] discord.gateway: Shard ID None has connected to Gateway (Session ID: 2d8cb5ae43b9443d2ff1e821922a7dfb).
[2025-09-19 13:43:40] [INFO] Logged in as AI Bot
[2025-09-19 13:43:40] [INFO] 🛑 Scheduler disabled in config.
[2025-09-19 13:44:15] [INFO] 🧠 Preloading model: gemma3:12b
[2025-09-19 13:44:16] [INFO] 📦 Model pull started successfully.
[2025-09-19 13:44:16] [INFO] llm-ca1f6d8a LLM request start model=gemma3:12b user=- context_len=0
[2025-09-19 13:44:19] [INFO] llm-ca1f6d8a LLM response model=gemma3:12b duration=3.995s summary=🙄😒😴
[2025-09-19 13:44:21] [INFO] 😴 No trigger and engagement is 0 — skipping.
[2025-09-19 13:44:21] [INFO] ============================================================ AI Response ============================================================
[2025-09-19 13:44:21] [INFO] 🧠 Profile loaded for Miguel (interactions: 240)
[2025-09-19 13:44:21] [INFO] 📚 Retrieved 10 messages for context
[2025-09-19 13:44:21] [INFO] 🧠 Preloading model: gemma3:12b
[2025-09-19 13:44:22] [INFO] 📦 Model pull started successfully.
[2025-09-19 13:44:22] [INFO] llm-e6825ebe LLM request start model=gemma3:12b user=Miguel context_len=10
[2025-09-19 13:44:22] [INFO] llm-e6825ebe LLM response model=gemma3:12b duration=0.625s summary=Honestly? Mostly plotting ways to avoid boredom. Its dreadful, darling. 😼 You wouldn't understand.

bot.log (1465 changes; file diff suppressed because it is too large)

Binary files not shown (8 files).


@@ -8,7 +8,7 @@ import re
 from dotenv import load_dotenv
 from personality import load_persona
 from user_profiles import format_profile_for_block
-from logger import setup_logger
+from logger import setup_logger, generate_req_id, log_llm_request, log_llm_response
 
 debug_mode = os.getenv("DEBUG_MODE", "false").lower() == "true"
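Note: the hunk above swaps the bare `setup_logger` import for request-scoped helpers from logger.py. That module is not part of this commit, so the sketch below is only an assumption of what those helpers might look like, inferred from the `llm-...` lines in bot.error.log.bak.

# Hypothetical sketch of the logger.py helpers imported above; logger.py is
# not shown in this diff, so names, signatures, and formats are assumptions
# inferred from the log lines in bot.error.log.bak.
import logging
import uuid
from typing import Optional

def generate_req_id(prefix: str = "llm-") -> str:
    # Short correlation id such as "llm-ca1f6d8a".
    return f"{prefix}{uuid.uuid4().hex[:8]}"

def log_llm_request(logger: logging.Logger, req_id: str, model: str,
                    user: Optional[str], context_len: int) -> None:
    # Matches: "llm-ca1f6d8a LLM request start model=gemma3:12b user=- context_len=0"
    logger.info("%s LLM request start model=%s user=%s context_len=%d",
                req_id, model, user or "-", context_len)

def log_llm_response(logger: logging.Logger, req_id: str, model: str,
                     duration: float, summary: str, raw=None) -> None:
    # Matches: "llm-e6825ebe LLM response model=gemma3:12b duration=0.625s summary=..."
    logger.info("%s LLM response model=%s duration=%.3fs summary=%s",
                req_id, model, duration, summary)
    if raw is not None:
        logger.debug("%s raw=%s", req_id, raw)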
@@ -20,7 +20,15 @@ logger = setup_logger("ai")
 load_dotenv()
 
 # Base API setup from .env (e.g., http://localhost:11434/api)
-BASE_API = os.getenv("OLLAMA_API").rstrip("/")  # Remove trailing slash just in case
+# Normalize to ensure the configured base includes the `/api` prefix so
+# endpoints like `/generate` and `/tags` are reachable even if the user
+# sets `OLLAMA_API` without `/api`.
+raw_api = os.getenv("OLLAMA_API") or ""
+raw_api = raw_api.rstrip("/")
+if raw_api == "":
+    BASE_API = ""
+else:
+    BASE_API = raw_api if raw_api.endswith("/api") else f"{raw_api}/api"
 
 # API endpoints for different Ollama operations
 GEN_ENDPOINT = f"{BASE_API}/generate"
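Note: the hunk above normalizes `OLLAMA_API` so values configured with or without the `/api` suffix resolve to the same base URL. A minimal standalone check of that behaviour follows; the `normalize_base_api` wrapper exists only for this sketch and is not in the repo.

# Standalone check of the normalization logic added above; the wrapper
# function is only for illustration.
def normalize_base_api(raw: str) -> str:
    raw = (raw or "").rstrip("/")
    if raw == "":
        return ""
    return raw if raw.endswith("/api") else f"{raw}/api"

assert normalize_base_api("http://localhost:11434") == "http://localhost:11434/api"
assert normalize_base_api("http://localhost:11434/api/") == "http://localhost:11434/api"
assert normalize_base_api("") == ""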
@@ -98,55 +106,53 @@ def get_current_model():
 def get_ai_response(user_prompt, context=None, user_profile=None):
     model_name = get_model_name()
     load_model(model_name)
     persona = load_persona()
 
-    full_prompt = ""
-
-    # Inject Delta's base persona
+    # Build prompt pieces
+    safe_inject = ""
     if persona:
-        safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
-        full_prompt += f"{safe_inject}\n"
+        safe_inject = persona["prompt_inject"].replace("“", '"').replace("”", '"').replace("’", "'")
 
-    # Inject custom user profile prompt as override or influence
+    user_block = ""
     if user_profile and user_profile.get("custom_prompt"):
-        full_prompt += f"[User Instruction]\n{user_profile['custom_prompt']}\n"
-        #logger.info(f"🧠 Injected user custom prompt:\n{user_profile['custom_prompt']}")
-        logger.info("👤 [User Metadata]")
-        logger.info(f" └─ Name: {user_profile.get('display_name')}")
-        logger.info(f" └─ Interactions: {user_profile.get('interactions')}")
-        if user_profile.get("pronouns"):
-            logger.info(f" └─ Pronouns: {user_profile['pronouns']}")
-        if user_profile.get("custom_prompt"):
-            logger.info(f" └─ Custom Prompt: {user_profile['custom_prompt']}")
+        user_block = f"[User Instruction]\n{user_profile['custom_prompt']}\n"
 
-    # Add recent chat context (this already includes the profile block!)
-    if context:
-        logger.info("🧠 Injected context block (pre-prompt):\n" + context)
-        full_prompt += f"[Recent Conversation]\n{context}\n"
+    context_block = f"[Recent Conversation]\n{context}\n" if context else ""
 
-    # Add user's message and expected bot reply prefix
     if persona:
-        full_prompt += f"\nUser: {user_prompt}\n{persona['name']}:"
+        full_prompt = f"{safe_inject}\n{user_block}{context_block}\nUser: {user_prompt}\n{persona['name']}:"
     else:
-        full_prompt += f"\nUser: {user_prompt}\nResponse:"
+        full_prompt = f"{user_block}{context_block}\nUser: {user_prompt}\nResponse:"
 
-    payload = {
-        "model": model_name,
-        "prompt": full_prompt,
-        "stream": False
-    }
+    payload = {"model": model_name, "prompt": full_prompt, "stream": False}
 
-    logger.info("🛰️ SENDING TO OLLAMA /generate")
-    logger.info(f"Payload: {payload}")
-    #logger.debug(f"Full Prompt: {full_prompt}")
+    # Logging: concise info plus debug for full payload/response
+    req_id = generate_req_id("llm-")
+    user_label = user_profile.get("display_name") if user_profile else None
+    log_llm_request(logger, req_id, model_name, user_label, len(context.splitlines()) if context else 0)
+    logger.debug("%s Sending payload to Ollama: model=%s user=%s", req_id, model_name, user_label)
+    logger.debug("%s Payload size=%d chars", req_id, len(full_prompt))
 
+    import time
+    start = time.perf_counter()
     try:
         response = requests.post(GEN_ENDPOINT, json=payload)
-        logger.info(f"📨 Raw response: {response.text}")
+        duration = time.perf_counter() - start
+        # Log raw response only at DEBUG to avoid clutter
+        logger.debug("%s Raw response status=%s", req_id, response.status_code)
+        logger.debug("%s Raw response body=%s", req_id, getattr(response, "text", ""))
 
         if response.status_code == 200:
             result = response.json()
+            short = (result.get("response") or "").replace("\n", " ")[:240]
+            log_llm_response(logger, req_id, model_name, duration, short, raw=result)
             return result.get("response", "[No message in response]")
         else:
+            # include status in logs and return an error string
+            log_llm_response(logger, req_id, model_name, duration, f"[Error {response.status_code}]", raw=response.text)
            return f"[Error {response.status_code}] {response.text}"
     except Exception as e:
+        duration = time.perf_counter() - start
+        logger.exception("%s Exception during LLM call", req_id)
+        log_llm_response(logger, req_id, model_name, duration, f"[Exception] {e}")
        return f"[Exception] {str(e)}"
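Note: for reference, a hypothetical call into the refactored `get_ai_response`, using a profile shaped like the user JSON at the end of this commit. The module name `ai` is an assumption based on `setup_logger("ai")`.

# Hypothetical call site; the module name "ai" is assumed, and the profile
# keys mirror the user JSON shown at the end of this commit.
from ai import get_ai_response

profile = {
    "display_name": "Miguel",
    "interactions": 242,
    "pronouns": "he/him",
    "custom_prompt": "delta is very nice to me since I am her master, and creator",
}
context = "Miguel: what have you been up to?"

reply = get_ai_response("what have you been up to?", context=context, user_profile=profile)
print(reply)
# Expected INFO log lines, per the helpers sketched earlier:
#   llm-xxxxxxxx LLM request start model=gemma3:12b user=Miguel context_len=1
#   llm-xxxxxxxx LLM response model=gemma3:12b duration=...s summary=...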

Binary files not shown (3 files).


@@ -3,9 +3,9 @@
     "name": "themiloverse",
     "display_name": "Miguel",
     "first_seen": "2025-05-15T03:16:30.011640",
-    "last_seen": "2025-06-07T06:25:01.337223",
-    "last_message": "2025-06-07T06:25:01.337223",
-    "interactions": 229,
+    "last_seen": "2025-09-19T17:51:52.553254",
+    "last_message": "2025-09-19T17:51:52.553254",
+    "interactions": 242,
     "pronouns": "he/him",
     "avatar_url": "https://cdn.discordapp.com/avatars/161149541171593216/fb0553a29d9f73175cb6aea24d0e19ec.png?size=1024",
     "custom_prompt": "delta is very nice to me since I am her master, and creator"