Fixed personality injection logic and fallback to raw LLM if missing (#4)
parent 9e27851b9d
commit 37f5e53ece
6 changed files with 31 additions and 28 deletions
Binary file not shown.
Binary file not shown.
src/ai.py (16 changed lines)
@@ -15,15 +15,12 @@ if not AI_URL:
 def get_ai_response(user_prompt):
     persona = load_persona()

-    # Sanitize prompt
-    safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
-
-    # Build final prompt for /generate
-    full_prompt = (
-        f"{safe_inject}\n"
-        f"User: {user_prompt}\n"
-        f"{persona['name']}:"
-    )
+    if persona:
+        # Sanitize prompt injection
+        safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
+        full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:"
+    else:
+        full_prompt = user_prompt  # fallback mode: just send the user's prompt

     payload = {
         "model": "llama3:latest",
@@ -45,3 +42,4 @@ def get_ai_response(user_prompt):
             return f"[Error {response.status_code}] {response.text}"
     except Exception as e:
         return f"[Exception] {str(e)}"
+
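Note: the hunks above only show how the final prompt is assembled and how errors are reported; the request itself sits outside the visible diff context. A minimal sketch of how the whole function plausibly fits together, assuming an Ollama-style /api/generate endpoint, a requests.post call, and the load_persona helper changed later in this commit (the endpoint path, the payload fields beyond "model", and the timeout are assumptions, not shown in the diff):

import requests

from persona_manager import load_persona  # assumption: module layout under src/

AI_URL = "http://localhost:11434"  # assumption: normally read from the bot's environment/config

def get_ai_response(user_prompt):
    persona = load_persona()

    if persona:
        # Replace curly quotes so the injected text serialises cleanly
        safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
        full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:"
    else:
        full_prompt = user_prompt  # raw LLM mode: persona.json missing or invalid

    payload = {
        "model": "llama3:latest",
        "prompt": full_prompt,   # assumption: /generate takes the prompt verbatim
        "stream": False,
    }
    try:
        response = requests.post(f"{AI_URL}/api/generate", json=payload, timeout=60)
        if response.status_code == 200:
            return response.json().get("response", "").strip()
        return f"[Error {response.status_code}] {response.text}"
    except Exception as e:
        return f"[Exception] {str(e)}"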
@@ -1,7 +1,5 @@
 {
     "name": "Delta",
     "emoji": "😼",
-    "style_prefix": "Delta says:",
-    "prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair."
-}
-
+    "prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately."
+}
@@ -5,18 +5,25 @@ import os
 PERSONA_FILE = "persona.json"

-DEFAULT_PERSONA = {
-    "name": "Alpha",
-    "emoji": "💋",
-    "style_prefix": "Alpha says:",
-    "prompt_inject": "You are Alpha, a confident and flirty bot. Respond with charm and wit. keep responses short and engaging. Use emojis to enhance your messages. Your goal is to flirt and engage with the user in a fun way."
-}
-
 def load_persona():
-    if os.path.exists(PERSONA_FILE):
-        with open(PERSONA_FILE, "r") as f:
-            return json.load(f)
-    return DEFAULT_PERSONA
+    base_dir = os.path.dirname(__file__)  # Path to /src/
+    persona_path = os.path.join(base_dir, "persona.json")
+
+    if not os.path.exists(persona_path):
+        print("⚠️ persona.json not found. Using raw LLM mode.")
+        return None
+
+    try:
+        with open(persona_path, "r", encoding="utf-8") as f:
+            data = json.load(f)
+        if not data.get("name") or not data.get("prompt_inject"):
+            print("⚠️ persona.json missing fields. Using raw LLM mode.")
+            return None
+        return data
+    except Exception as e:
+        print(f"⚠️ Failed to load persona.json: {e}")
+        return None
+


 def save_persona(description: str):
     persona = {
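The practical effect of the new loader is its None contract: any failure (missing file, bad JSON, missing "name" or "prompt_inject") no longer crashes the bot but switches it into raw LLM mode. A small usage sketch, assuming the module is importable from the same src/ package:

from persona_manager import load_persona  # assumption: imported alongside ai.py

persona = load_persona()

if persona is None:
    # persona.json was absent, unreadable, or missing "name"/"prompt_inject",
    # so the bot keeps running and sends the user's prompt unmodified.
    print("Running in raw LLM mode")
else:
    print(f"Persona loaded: {persona['name']} {persona.get('emoji', '')}")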
@@ -7,10 +7,10 @@ messages:
   - "🕒 Chill, mortal. You must wait {seconds}s before trying again. 😼"

 scheduler:
-  enabled: true
-  mode: probabilistic       # <- this activates simple mode
-  interval_minutes: 1       # <- post every 60 minutes
-  use_ai: true              # <- true = use LLM, false = use static messages
+  enabled: false
+  mode: simple              # <- this activates simple mode
+  interval_minutes: 0.25    # <- post every 60 minutes
+  use_ai: false             # <- true = use LLM, false = use static messages
   channel_id: 1370420592360161393   # <- your Discord text channel ID

 messages:
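The scheduler loop itself is not part of this diff, but the keys above imply how it is consumed. A rough sketch of reading scheduler.* and messages, assuming PyYAML, discord.py, and the get_ai_response function changed above; the loader name, file path, and AI prompt text are hypothetical, and the "mode" selection (simple vs probabilistic) is omitted:

import asyncio
import random

import yaml  # PyYAML; assumption about how the YAML config is parsed

from ai import get_ai_response  # assumption: src/ai.py as changed in this commit

def load_config(path="config.yaml"):  # hypothetical helper; the real loader may differ
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)

async def scheduled_poster(bot, config):
    sched = config["scheduler"]
    if not sched.get("enabled", False):
        return  # disabled, as in this commit's config

    channel = bot.get_channel(sched["channel_id"])
    interval = sched["interval_minutes"] * 60  # minutes -> seconds (0.25 -> 15s)

    while True:
        await asyncio.sleep(interval)
        if sched.get("use_ai", False):
            text = get_ai_response("Post something in character.")  # hypothetical prompt
        else:
            text = random.choice(config["messages"])  # static fallback lines
        await channel.send(text)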