# ai.py

import requests
import os
from dotenv import load_dotenv

from personality import load_persona

load_dotenv()
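# load_dotenv() reads key=value pairs from a local .env file. A matching entry
# might look like the line below (an example only, using Ollama's default
# local /api/generate endpoint; your URL may differ):
#   OLLAMA_API_URL=http://localhost:11434/api/generate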
AI_URL = os.getenv("OLLAMA_API_URL") # e.g., http://localhost:11434/api/generate
def get_ai_response(user_prompt):
    persona = load_persona()

    # Sanitize the persona's prompt injection: replace curly quotes and curly
    # apostrophes with plain ASCII equivalents.
    safe_inject = (
        persona["prompt_inject"]
        .replace("\u201c", '"')   # left double quotation mark
        .replace("\u201d", '"')   # right double quotation mark
        .replace("\u2019", "'")   # right single quotation mark (apostrophe)
    )

    # Build the final prompt for /api/generate: persona injection, the user's
    # message, then the persona's name as a cue for the model to reply.
    full_prompt = (
        f"{safe_inject}\n"
        f"User: {user_prompt}\n"
        f"{persona['name']}:"
    )
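    # With a hypothetical persona (name "Echo", prompt_inject "You are Echo, a
    # terse assistant.") the assembled prompt would read:
    #
    #   You are Echo, a terse assistant.
    #   User: <user_prompt>
    #   Echo: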
    payload = {
        "model": "llama3:latest",
        "prompt": full_prompt,
        "stream": False,
    }

    print("\n🛰️ SENDING TO OLLAMA /api/generate")
    print("Payload:", payload)

    try:
        response = requests.post(AI_URL, json=payload)
        print("📨 Raw response:", response.text)

        if response.status_code == 200:
            # /api/generate returns a JSON object whose "response" field holds
            # the generated text; with "stream": False it arrives in one piece.
            result = response.json()
            return result.get("response", "[No message in response]")
        else:
            return f"[Error {response.status_code}] {response.text}"
    except Exception as e:
        return f"[Exception] {str(e)}"
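# A minimal manual test sketch (an assumption-laden example: it presumes a
# running Ollama server reachable at OLLAMA_API_URL and a personality module
# whose load_persona() returns a dict with "name" and "prompt_inject" keys,
# as used above):
if __name__ == "__main__":
    print(get_ai_response("Hello! Introduce yourself in one sentence."))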