# AI-Discord-Bot/src/ai.py

# ai.py
import requests
import os
from dotenv import load_dotenv
from personality import load_persona
from logger import setup_logger
logger = setup_logger("ai")
load_dotenv()

# Must match the variable name used in .env and the Docker ENV
# (e.g. http://localhost:11434/api/generate).
AI_URL = os.getenv("OLLAMA_API")
if not AI_URL:
    # Log before raising — in the original the logger.error() call sat
    # after the raise statement and was unreachable dead code.
    logger.error("❌ OLLAMA_API environment variable is not set.")
    raise ValueError("❌ OLLAMA_API environment variable is not set.")
def get_ai_response(user_prompt):
    """Send *user_prompt* to the Ollama /api/generate endpoint and return the reply.

    If a persona is configured (via ``load_persona()``), its prompt inject is
    prepended so the model answers in character; otherwise the raw user prompt
    is sent as-is.

    Args:
        user_prompt: The text the Discord user typed.

    Returns:
        The model's reply string on success, or a bracketed error/exception
        description string on failure (this function never raises).
    """
    persona = load_persona()
    if persona:
        # Normalize smart quotes in the persona inject so they can't break
        # prompt formatting.
        # NOTE(review): the original called .replace("", ...) — replacing the
        # EMPTY string, which interleaves the replacement between every single
        # character of the inject. Restored the intended curly-quote targets.
        safe_inject = (
            persona["prompt_inject"]
            .replace("\u201c", '"')   # left double quotation mark
            .replace("\u201d", '"')   # right double quotation mark
            .replace("\u2019", "'")   # right single quotation mark
        )
        full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:"
    else:
        full_prompt = user_prompt  # fallback mode: just send the user's prompt

    payload = {
        "model": "llama3:latest",
        "prompt": full_prompt,
        "stream": False,
    }
    logger.info("🛰️ SENDING TO OLLAMA /api/generate")
    logger.info("Payload: %s", payload)  # lazy %-args: skipped if level filtered

    try:
        # timeout= prevents the bot from hanging forever when Ollama is
        # unreachable; generation can be slow, so allow a generous window.
        response = requests.post(AI_URL, json=payload, timeout=120)
        logger.info("📨 Raw response: %s", response.text)
        if response.status_code == 200:
            result = response.json()
            return result.get("response", "[No message in response]")
        return f"[Error {response.status_code}] {response.text}"
    except Exception as e:
        # Deliberately broad: any failure (network, JSON decode, timeout) is
        # reported back to the caller as a string rather than crashing the bot.
        logger.exception("Ollama request failed")
        return f"[Exception] {str(e)}"