# ai.py
# Glue between the bot and a local Ollama server: builds a persona-flavored
# prompt and posts it to the /api/generate endpoint.

import os

import requests
from dotenv import load_dotenv

from personality import load_persona

# Read variables from a local .env file BEFORE any os.getenv() call below.
load_dotenv()

# Ollama generate endpoint, e.g. http://localhost:11434/api/generate.
# May be None when OLLAMA_API_URL is not set; get_ai_response() guards this.
AI_URL = os.getenv("OLLAMA_API_URL")
def get_ai_response(user_prompt):
    """Send ``user_prompt`` to the Ollama /api/generate endpoint.

    The persona's prompt injection (from ``load_persona()``) is prepended so
    the model answers in character. Always returns a ``str``: the model's
    reply on success, or a bracketed diagnostic message on any failure —
    this function never raises.
    """
    persona = load_persona()

    # Sanitize prompt: normalize curly quotes/apostrophes to plain ASCII so
    # the injected text embeds cleanly in the prompt.
    safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")

    # Build final prompt for /generate: persona instructions, the user's
    # turn, then the persona name as the model's reply cue.
    full_prompt = (
        f"{safe_inject}\n"
        f"User: {user_prompt}\n"
        f"{persona['name']}:"
    )

    payload = {
        "model": "llama3:latest",
        "prompt": full_prompt,
        "stream": False,  # request one complete JSON body, not a token stream
    }

    print("\n🛰️ SENDING TO OLLAMA /api/generate")
    print("Payload:", payload)

    # Fail fast with a clear message when the endpoint is unconfigured;
    # otherwise requests.post(None, ...) raises a confusing exception.
    if not AI_URL:
        return "[Exception] OLLAMA_API_URL is not set"

    try:
        # timeout keeps the caller from hanging forever when Ollama is down
        # or unreachable; a Timeout is caught below like any other failure.
        response = requests.post(AI_URL, json=payload, timeout=60)
        print("📨 Raw response:", response.text)

        if response.status_code == 200:
            result = response.json()
            return result.get("response", "[No message in response]")
        else:
            return f"[Error {response.status_code}] {response.text}"
    except Exception as e:
        # Deliberate catch-all: callers expect a string, never an exception.
        return f"[Exception] {str(e)}"