From 37f5e53ece93184b38d7c8964aaca925bb9138d6 Mon Sep 17 00:00:00 2001
From: milo
Date: Sun, 11 May 2025 19:46:13 -0400
Subject: [PATCH] Fixed personality injection logic and fallback to raw LLM if
 missing (#4)

---
 src/__pycache__/ai.cpython-310.pyc          | Bin 1199 -> 1203 bytes
 src/__pycache__/personality.cpython-310.pyc | Bin 1418 -> 1579 bytes
 src/ai.py                                   | 16 +++++------
 src/persona.json                            |  6 ++--
 src/personality.py                          | 29 ++++++++++++--------
 src/settings.yml                            |  8 +++---
 6 files changed, 31 insertions(+), 28 deletions(-)

diff --git a/src/__pycache__/ai.cpython-310.pyc b/src/__pycache__/ai.cpython-310.pyc
index f84abf2f650041680372ed30ac568565d98dcc0b..2272f0a4bb960507180da1290f9aef35e8e7b587 100644
GIT binary patch
[binary delta omitted]

diff --git a/src/__pycache__/personality.cpython-310.pyc b/src/__pycache__/personality.cpython-310.pyc
GIT binary patch
[binary delta omitted]

diff --git a/src/ai.py b/src/ai.py
index c8de49f..13a4534 100644
--- a/src/ai.py
+++ b/src/ai.py
@@ -15,15 +15,12 @@ if not AI_URL:
 
 def get_ai_response(user_prompt):
     persona = load_persona()
-    # Sanitize prompt
-    safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
-
-    # Build final prompt for /generate
-    full_prompt = (
-        f"{safe_inject}\n"
-        f"User: {user_prompt}\n"
-        f"{persona['name']}:"
-    )
+    if persona:
+        # Sanitize prompt injection
+        safe_inject = persona["prompt_inject"].replace("“", "\"").replace("”", "\"").replace("’", "'")
+        full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:"
+    else:
+        full_prompt = user_prompt  # fallback mode: just send the user's prompt
 
     payload = {
         "model": "llama3:latest",
@@ -45,3 +42,4 @@
             return f"[Error {response.status_code}] {response.text}"
     except Exception as e:
         return f"[Exception] {str(e)}"
+
diff --git a/src/persona.json b/src/persona.json
index c733019..81948bb 100644
--- a/src/persona.json
+++ b/src/persona.json
@@ -1,7 +1,5 @@
 {
   "name": "Delta",
   "emoji": "😼",
-  "style_prefix": "Delta says:",
-  "prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair."
-  }
-  
\ No newline at end of file
+  "prompt_inject": "You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. Keep messages short and use emojis moderately."
+  }
\ No newline at end of file
diff --git a/src/personality.py b/src/personality.py
index d606653..5ae0eb1 100644
--- a/src/personality.py
+++ b/src/personality.py
@@ -5,18 +5,25 @@ import os
 
 PERSONA_FILE = "persona.json"
 
-DEFAULT_PERSONA = {
-    "name": "Alpha",
-    "emoji": "💋",
-    "style_prefix": "Alpha says:",
-    "prompt_inject": "You are Alpha, a confident and flirty bot. Respond with charm and wit. keep responses short and engaging. Use emojis to enhance your messages. Your goal is to flirt and engage with the user in a fun way."
-}
-
 def load_persona():
-    if os.path.exists(PERSONA_FILE):
-        with open(PERSONA_FILE, "r") as f:
-            return json.load(f)
-    return DEFAULT_PERSONA
+    base_dir = os.path.dirname(__file__)  # Path to /src/
+    persona_path = os.path.join(base_dir, "persona.json")
+
+    if not os.path.exists(persona_path):
+        print("⚠️ persona.json not found. Using raw LLM mode.")
+        return None
+
+    try:
+        with open(persona_path, "r", encoding="utf-8") as f:
+            data = json.load(f)
+            if not data.get("name") or not data.get("prompt_inject"):
+                print("⚠️ persona.json missing fields. Using raw LLM mode.")
+                return None
+            return data
+    except Exception as e:
+        print(f"⚠️ Failed to load persona.json: {e}")
+        return None
+
 
 def save_persona(description: str):
     persona = {
diff --git a/src/settings.yml b/src/settings.yml
index b0e0929..7903e65 100644
--- a/src/settings.yml
+++ b/src/settings.yml
@@ -7,10 +7,10 @@ messages:
   - "🕒 Chill, mortal. You must wait {seconds}s before trying again. 😼"
 
 scheduler:
-  enabled: true
-  mode: probabilistic    # <- this activates simple mode
-  interval_minutes: 1    # <- post every 60 minutes
-  use_ai: true           # <- true = use LLM, false = use static messages
+  enabled: false
+  mode: simple           # <- this activates simple mode
+  interval_minutes: 0.25 # <- post every 0.25 minutes (15 seconds)
+  use_ai: false          # <- true = use LLM, false = use static messages
   channel_id: 1370420592360161393   # <- your Discord text channel ID
 
 messages:
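
A minimal sketch of the fallback path this patch introduces, not part of the diff above. It assumes you run it from inside src/ with persona.json removed or renamed, and that the AI_URL value checked in ai.py points at a reachable llama3 /generate endpoint; everything else comes straight from the modules changed here:

    # sketch: load_persona() now returns None instead of DEFAULT_PERSONA,
    # and get_ai_response() then forwards the raw user prompt unmodified
    from personality import load_persona
    from ai import get_ai_response

    persona = load_persona()             # prints "⚠️ persona.json not found..." and returns None
    assert persona is None               # raw LLM mode
    print(get_ai_response("hello"))      # no prompt injection is prepended in this mode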