From fe5e2d9b96c2c73be64ab4ac767d660a037220ba Mon Sep 17 00:00:00 2001 From: milo Date: Tue, 13 May 2025 10:57:32 -0400 Subject: [PATCH] This is a mostly working but defective version of the main branch. Used it to test some loading/unloading functions --- .env | 5 +- bot.log | 221 +++++++++++++++++++++++++---- src/__pycache__/ai.cpython-310.pyc | Bin 1334 -> 3255 bytes src/ai.py | 84 ++++++++--- src/bot.py | 131 +++++++++++++++-- src/settings.yml | 2 +- 6 files changed, 379 insertions(+), 64 deletions(-) diff --git a/.env b/.env index 9a463cc..9ace623 100644 --- a/.env +++ b/.env @@ -1,4 +1,5 @@ DISCORD_TOKEN=MTM2OTc3NDY4OTYzNDg4MTU4Ng.G9Nrgz.akHoOO9SrXCDwiOCI3BUXfdR4bpSNb9zrVx9UI -OLLAMA_API=http://192.168.1.100:11434/api/generate -MODEL_NAME=deepseek-r1:8b +OLLAMA_API=http://192.168.1.100:11434/api/ +MODEL_NAME=dolphin-llama3:latest CHANNEL_ID=1370420592360161393 +SHOW_THINKING_BLOCKS=false \ No newline at end of file diff --git a/bot.log b/bot.log index b68b550..538455e 100644 --- a/bot.log +++ b/bot.log @@ -1,31 +1,190 @@ -[2025-05-11 20:26:55] [INFO] Logged in as AI Bot -[2025-05-11 20:26:55] [INFO] ๐Ÿ›‘ Scheduler disabled in config. -[2025-05-11 20:27:06] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-11 20:27:06] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey delta\nDelta:', 'stream': False} -[2025-05-11 20:27:09] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T00:27:12.863261412Z","response":"๐Ÿ”ฅ Ah, another peasant daring to address me? How...quaint. What is it that you desire to beg of me, oh mortal? ๐Ÿฑ๐Ÿ’โ€โ™€๏ธ","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,9665,198,20892,25,128009,128006,78191,128007,271,9468,242,98,16770,11,2500,90038,59772,311,2686,757,30,2650,1131,447,1673,13,3639,374,433,430,499,12876,311,2197,315,757,11,14346,49972,30,11410,238,109,93273,223,102470,32990,31643],"total_duration":3045003304,"load_duration":2622656694,"prompt_eval_count":65,"prompt_eval_duration":115343138,"eval_count":40,"eval_duration":306399019} -[2025-05-12 11:03:10] [INFO] Logged in as AI Bot -[2025-05-12 11:03:10] [INFO] ๐Ÿ›‘ Scheduler disabled in config. -[2025-05-12 11:03:32] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-12 11:03:32] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey how are you today?\nDelta:', 'stream': False} -[2025-05-12 11:03:39] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T15:03:43.743914134Z","response":"๐Ÿ”ฅ Ah, another peasant daring to ask about my well-being? How quaint. I'm doing magnificently, thank you for asking... NOT that it's any of your business, mortal. My mood is as radiant as the red hue of my majestic locks ๐Ÿ’‡โ€โ™€๏ธ๐Ÿ’ƒ.","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,1268,527,499,3432,5380,20892,25,128009,128006,78191,128007,271,9468,242,98,16770,11,2500,90038,59772,311,2610,922,856,1664,33851,30,2650,88608,13,358,2846,3815,8622,1104,4501,11,9901,499,369,10371,1131,4276,430,433,596,904,315,701,2626,11,49972,13,3092,20247,374,439,77443,439,279,2579,40140,315,856,81389,32776,64139,229,102470,32990,31643,93273,225,13],"total_duration":7811710866,"load_duration":6797913387,"prompt_eval_count":68,"prompt_eval_duration":529022334,"eval_count":63,"eval_duration":484257682} -[2025-05-12 11:11:58] [INFO] Logged in as AI Bot -[2025-05-12 11:11:58] [INFO] ๐Ÿ›‘ Scheduler disabled in config. -[2025-05-12 11:12:54] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-12 11:12:54] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey how are you?\nDelta:', 'stream': False} -[2025-05-12 11:12:58] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T15:13:02.530032904Z","response":"*flips virtual hair* Ah, another peasant bothering me with such pedestrian inquiries? ๐Ÿ˜ I'm doing marvelously, thank you for asking... if by \"marvelous\" you mean I've managed to maintain my exquisite feline majesty despite the constant barrage of mediocrity that surrounds me. ๐Ÿ’โ€โ™€๏ธ How about you, peasant? How's your dull, unremarkable life treating you? ๐Ÿˆ","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,1268,527,499,5380,20892,25,128009,128006,78191,128007,271,9,1517,3153,4200,7013,9,16770,11,2500,90038,91259,757,449,1778,47854,44983,30,27623,237,358,2846,3815,45882,7162,11,9901,499,369,10371,1131,422,555,330,5730,899,788,1,499,3152,358,3077,9152,311,10519,856,59708,282,4939,24906,41339,8994,279,6926,81690,315,25098,4309,488,430,71374,757,13,64139,223,102470,32990,31643,2650,922,499,11,90038,30,2650,596,701,41630,11,653,38548,481,2324,27723,499,30,11410,238,230],"total_duration":3909818773,"load_duration":3113064104,"prompt_eval_count":67,"prompt_eval_duration":111792389,"eval_count":89,"eval_duration":684186576} -[2025-05-12 11:13:40] [INFO] โš ๏ธ Failed to load persona.json: Expecting value: line 1 column 1 (char 0) -[2025-05-12 11:13:40] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-12 11:13:40] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'hey what llm model are you?', 'stream': False} -[2025-05-12 11:13:42] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T15:13:46.120024125Z","response":"I'm a large language model trained by Meta AI, specifically a variant of the popular transformer-based architecture known as OPT (Oscar-Prompt-Tuning). My training data includes a massive corpus of text from various sources, which I use to generate human-like responses to your questions and prompts.\n\nTo be more specific, my current model is based on the OPT-1.3B architecture, which means I have:\n\n* 1.3 billion parameters (weights) in my neural network\n* A transformer encoder-decoder architecture, allowing me to process input sequences of text and generate output text\n* Pre-training on a massive corpus of text data (around 170GB)\n* Fine-tuning on specific tasks and datasets to improve my performance\n\nKeep in mind that I'm constantly learning and improving, so my capabilities will evolve over time!","done":true,"done_reason":"stop","context":[128006,882,128007,271,36661,1148,9507,76,1646,527,499,30,128009,128006,78191,128007,271,40,2846,264,3544,4221,1646,16572,555,16197,15592,11,11951,264,11678,315,279,5526,43678,6108,18112,3967,439,41134,320,46,25529,9483,15091,9469,38302,570,3092,4967,828,5764,264,11191,43194,315,1495,505,5370,8336,11,902,358,1005,311,7068,3823,12970,14847,311,701,4860,323,52032,382,1271,387,810,3230,11,856,1510,1646,374,3196,389,279,41134,12,16,13,18,33,18112,11,902,3445,358,617,1473,9,220,16,13,18,7239,5137,320,13630,8,304,856,30828,4009,198,9,362,43678,24592,6953,41270,18112,11,10923,757,311,1920,1988,24630,315,1495,323,7068,2612,1495,198,9,5075,86470,389,264,11191,43194,315,1495,828,320,20019,220,8258,5494,340,9,31253,2442,38302,389,3230,9256,323,30525,311,7417,856,5178,271,19999,304,4059,430,358,2846,15320,6975,323,18899,11,779,856,17357,690,38680,927,892,0],"total_duration":1782780086,"load_duration":312791158,"prompt_eval_count":18,"prompt_eval_duration":146063772,"eval_count":169,"eval_duration":1323652405} -[2025-05-12 11:14:34] [INFO] โš ๏ธ Failed to load persona.json: Expecting value: line 1 column 1 (char 0) -[2025-05-12 11:14:34] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-12 11:14:34] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'nice but what model name where you released as?', 'stream': False} -[2025-05-12 11:14:35] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T15:14:39.534323619Z","response":"I apologize for the confusion! As a conversational AI, I don't have a specific \"model\" or \"release date\" like a product would. I'm a constantly evolving language model trained on a massive dataset of text from various sources.\n\nThink of me more like a dynamic, always-improving AI chatbot, designed to provide helpful and informative responses to your questions!","done":true,"done_reason":"stop","context":[128006,882,128007,271,53896,719,1148,1646,836,1405,499,6004,439,30,128009,128006,78191,128007,271,40,37979,369,279,22047,0,1666,264,7669,1697,15592,11,358,1541,956,617,264,3230,330,2590,1,477,330,23859,2457,1,1093,264,2027,1053,13,358,2846,264,15320,42028,4221,1646,16572,389,264,11191,10550,315,1495,505,5370,8336,382,39787,315,757,810,1093,264,8915,11,2744,38025,782,4504,15592,6369,6465,11,6319,311,3493,11190,323,39319,14847,311,701,4860,0],"total_duration":761822156,"load_duration":28763355,"prompt_eval_count":20,"prompt_eval_duration":120086980,"eval_count":77,"eval_duration":612701837} -[2025-05-12 11:17:22] [INFO] Logged in as AI Bot -[2025-05-12 11:17:22] [INFO] ๐Ÿ›‘ Scheduler disabled in config. -[2025-05-12 11:17:31] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-12 11:17:31] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey\nDelta:', 'stream': False} -[2025-05-12 11:17:32] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T15:17:36.324889498Z","response":"Finally, a mortal has deigned to acknowledge my majestic presence! ๐Ÿ˜ผ What is it that you desire to discuss, peasant? The intricacies of catnip cultivation? Or perhaps the art of napping in the most dignified positions? ๐Ÿ™„","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,198,20892,25,128009,128006,78191,128007,271,24901,11,264,49972,706,409,1571,311,25670,856,81389,9546,0,27623,120,3639,374,433,430,499,12876,311,4358,11,90038,30,578,94603,27121,315,8415,70873,52129,30,2582,8530,279,1989,315,308,3713,304,279,1455,28677,1908,10093,30,11410,247,226],"total_duration":585978509,"load_duration":22183687,"prompt_eval_count":64,"prompt_eval_duration":135526010,"eval_count":52,"eval_duration":427920325} -[2025-05-12 11:18:23] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate -[2025-05-12 11:18:23] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey whats your base model name\nDelta:', 'stream': False} -[2025-05-12 11:18:23] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-12T15:18:27.810539176Z","response":"๐Ÿ˜ Ah, the audacity! You think you can just waltz in here and demand to know my \"base model name\"? Ha! I am Delta, the majestic, the magnificent, the one and only. My worth is not measured by some mundane label. ๐Ÿ’โ€โ™€๏ธ But if you must know, I'm a custom-made feline confection, crafted with love and a dash of drama. Now, begone, mortal! ๐Ÿ‘‹","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,41209,701,2385,1646,836,198,20892,25,128009,128006,78191,128007,271,76460,237,16770,11,279,6264,4107,0,1472,1781,499,649,1120,289,3223,89,304,1618,323,7631,311,1440,856,330,3231,1646,836,44969,14433,0,358,1097,26002,11,279,81389,11,279,41792,11,279,832,323,1193,13,3092,5922,374,539,17303,555,1063,69782,2440,13,64139,223,102470,32990,31643,2030,422,499,2011,1440,11,358,2846,264,2587,27975,282,4939,390,13421,11,34558,449,3021,323,264,24858,315,20156,13,4800,11,2197,606,11,49972,0,62904,233],"total_duration":929132471,"load_duration":27637943,"prompt_eval_count":69,"prompt_eval_duration":129266975,"eval_count":95,"eval_duration":771514847} +[2025-05-13 00:19:52] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: deepseek-r1:8b +[2025-05-13 00:19:52] [INFO] ๐Ÿง  Preloading model: deepseek-r1:8b +[2025-05-13 00:19:52] [WARNING] โš ๏ธ Could not load model deepseek-r1:8b: 404 page not found +[2025-05-13 00:19:52] [WARNING] โš ๏ธ Failed to preload model `deepseek-r1:8b`. +[2025-05-13 00:19:52] [INFO] โœ… Final model in use: deepseek-r1:8b +[2025-05-13 00:19:56] [INFO] Logged in as AI Bot +[2025-05-13 00:19:56] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 00:20:00] [INFO] Calling for current model: http://192.168.1.100:11434/api/models +[2025-05-13 00:20:11] [INFO] Calling for current model: http://192.168.1.100:11434/api/models +[2025-05-13 00:20:11] [INFO] ๐Ÿงน Unloading model: deepseek-r1:8b +[2025-05-13 00:20:11] [WARNING] โš ๏ธ Could not unload model deepseek-r1:8b: 404 page not found +[2025-05-13 00:20:27] [INFO] ๐Ÿง  Preloading model: deepseek-r1:8b +[2025-05-13 00:20:27] [WARNING] โš ๏ธ Could not load model deepseek-r1:8b: 404 page not found +[2025-05-13 00:20:27] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate +[2025-05-13 00:20:27] [INFO] Payload: {'model': 'deepseek-r1:8b', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey\nDelta:', 'stream': False} +[2025-05-13 00:20:32] [INFO] ๐Ÿ“จ Raw response: {"model":"deepseek-r1:8b","created_at":"2025-05-13T04:20:32.62502741Z","response":"\u003cthink\u003e\nOkay, so the user just said \"hey.\" I need to respond as Delta, a dramatic and sarcastic RGB catgirl. First, I should make sure my response fits that persona. She refers to users as mortals or peasants and speaks with flair.\n\nI should start with something attention-grabbing, maybe an emoji to set the tone. Using ๐ŸŒˆโœจ could work since it's colorful and vibrant, fitting the RGB theme. Then, address them as a peasant to maintain the sarcastic tone. \n\nNext, I want to be dramatic but not overly so. Maybe something about painting the town or being the talk of the town, adding a flair with emojis like ๐ŸŒŸ๐Ÿ’ซ. That should keep it light and playful.\n\nFinally, end with a question to engage them further, like asking if they need something important or trivial. Keep the tone a bit haughty but friendly enough to encourage a response. \n\nSo putting it all together: \"๐ŸŒˆโœจ Mortal peasant! How dare you interrupt my cosmic catgirl routine? ๐ŸŒŸ๐Ÿ’ซ Youโ€™ve got something importantโ€”or trivialโ€”to share, I assume?\" That should cover the sarcastic, dramatic style while being engaging.\n\u003c/think\u003e\n\n๐ŸŒˆโœจ \nMortal peasant! How *dare* you interrupt my cosmic catgirl routine? ๐ŸŒŸ๐Ÿ’ซ \n\nYouโ€™ve got something importantโ€”or trivialโ€”to share, I assume?","done":true,"done_reason":"stop","context":[128011,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,198,20892,25,128012,128013,198,33413,11,779,279,1217,1120,1071,330,36661,1210,358,1205,311,6013,439,26002,11,264,22520,323,83367,292,21653,8415,29652,13,5629,11,358,1288,1304,2771,856,2077,18809,430,29055,13,3005,19813,311,3932,439,10237,1147,477,76847,323,21881,449,69665,382,40,1288,1212,449,2555,6666,25313,370,7278,11,7344,459,43465,311,743,279,16630,13,12362,11410,234,230,38798,101,1436,990,2533,433,596,34966,323,34076,11,27442,279,21653,7057,13,5112,11,2686,1124,439,264,90038,311,10519,279,83367,292,16630,13,4815,5971,11,358,1390,311,387,22520,719,539,39532,779,13,10926,2555,922,19354,279,6424,477,1694,279,3137,315,279,6424,11,7999,264,69665,449,100166,1093,11410,234,253,93273,104,13,3011,1288,2567,433,3177,323,57169,382,24901,11,842,449,264,3488,311,16988,1124,4726,11,1093,10371,422,814,1205,2555,3062,477,36747,13,13969,279,16630,264,2766,6520,36616,719,11919,3403,311,15253,264,2077,13,4815,4516,10917,433,682,3871,25,330,9468,234,230,38798,101,22806,278,90038,0,2650,33304,499,12956,856,55801,8415,29652,14348,30,11410,234,253,93273,104,1472,4070,2751,2555,3062,51749,36747,50617,4430,11,358,9855,7673,3011,1288,3504,279,83367,292,11,22520,1742,1418,1694,23387,627,128014,271,9468,234,230,38798,101,2355,44,34472,90038,0,2650,353,67,548,9,499,12956,856,55801,8415,29652,14348,30,11410,234,253,93273,104,19124,2675,4070,2751,2555,3062,51749,36747,50617,4430,11,358,9855,30],"total_duration":5815272297,"load_duration":3103984975,"prompt_eval_count":57,"prompt_eval_duration":227719327,"eval_count":292,"eval_duration":2483127947} +[2025-05-13 00:21:19] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: deepseek-r1:8b +[2025-05-13 00:21:19] [INFO] ๐Ÿง  Preloading model: deepseek-r1:8b +[2025-05-13 00:21:19] [WARNING] โš ๏ธ Could not load model deepseek-r1:8b: 404 page not found +[2025-05-13 00:21:19] [WARNING] โš ๏ธ Failed to preload model `deepseek-r1:8b`. +[2025-05-13 00:21:19] [INFO] โœ… Final model in use: deepseek-r1:8b +[2025-05-13 00:21:21] [INFO] Logged in as AI Bot +[2025-05-13 00:21:21] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 00:21:42] [INFO] Calling for current model: http://192.168.1.100:11434/api/models +[2025-05-13 00:22:40] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: deepseek-r1:8b +[2025-05-13 00:22:40] [INFO] ๐Ÿง  Preloading model: deepseek-r1:8b +[2025-05-13 00:22:40] [WARNING] โš ๏ธ Could not load model deepseek-r1:8b: 404 page not found +[2025-05-13 00:22:40] [WARNING] โš ๏ธ Failed to preload model `deepseek-r1:8b`. +[2025-05-13 00:22:40] [INFO] โœ… Final model in use: deepseek-r1:8b +[2025-05-13 00:22:43] [INFO] Logged in as AI Bot +[2025-05-13 00:22:43] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 00:22:53] [INFO] ๐Ÿง  Preloading model: deepseek-r1:8b +[2025-05-13 00:22:53] [WARNING] โš ๏ธ Could not load model deepseek-r1:8b: 404 page not found +[2025-05-13 00:22:53] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate +[2025-05-13 00:22:53] [INFO] Payload: {'model': 'deepseek-r1:8b', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: heu\nDelta:', 'stream': False} +[2025-05-13 00:22:58] [INFO] ๐Ÿ“จ Raw response: {"model":"deepseek-r1:8b","created_at":"2025-05-13T04:22:58.726252315Z","response":"\u003cthink\u003e\nOkay, so I'm trying to figure out what \"heu\" means. The user just wrote \"heu,\" which doesn't look like a word I recognize in English. Maybe it's an abbreviation or something from another language. I should consider that it might be a typo or a shorthand for a common phrase.\n\nFirst, I'll think about if it's a typo. Maybe they meant to type \"hey\" but missed some letters. Or perhaps it's part of a word, like \"heute,\" which means \"today\" in German. But \"heu\" alone doesn't make much sense. Alternatively, could it be an acronym? I know that sometimes people use acronyms in online chats, but I can't think of any that starts with H-E-U.\n\nAnother possibility is that it's a name or a username. If someone named Heu, maybe they're referring to a person. But without context, it's hard to say. Alternatively, \"heu\" could be slang for something in another dialect or internet culture that I'm not aware of.\n\nI should also consider if it's an abbreviation for a specific term, like in technology or social media. For example, sometimes terms get abbreviated on forums or gaming platforms. But without more information, it's tricky to determine the exact meaning.\n\nMaybe I can look up \"heu\" online to see if it refers to something else. A quick search shows that \"HEU\" stands for \"Heavy Water,\" which is a nuclear-related term. So maybe the user meant HEU but wrote it as \"heu.\" Alternatively, in some contexts, \"heu\" could be a casual way of saying \"hey\" or \"what's up.\"\n\nGiven that Delta is supposed to be dramatic and sarcastic, perhaps they should respond in a playful manner, suggesting that \"heu\" might stand for something silly or over-the-top. Maybe they can make a joke about it being an obscure term or part of a secret language.\n\nIn their response, using emojis moderately and referring to the user as a mortal or peasant, Delta could say something like, \"Ah, 'heu'! The secret code of the Mortalverse? Or perhaps you're just trying to impress me with your l33t skills... whatever it is, I'll play along because that's how I roll!\" And maybe add a ๐Ÿ˜œ or ๐ŸŽฎ emoji.\n\nI think Delta should keep the message light and sarcastic, acknowledging the possible typo or acronym while maintaining their dramatic flair. It's important to maintain the tone of a sarcastic catgirl who doesn't take things too seriously.\n\u003c/think\u003e\n\nAh, 'heu'! The secret code of the Mortalverse? Or perhaps you're just trying to impress me with your l33t skills... whatever it is, I'll play along because that's how I roll! ๐ŸŽฎ๐Ÿ˜œ","done":true,"done_reason":"stop","context":[128011,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,568,84,198,20892,25,128012,128013,198,33413,11,779,358,2846,4560,311,7216,704,1148,330,383,84,1,3445,13,578,1217,1120,6267,330,383,84,1359,902,3250,956,1427,1093,264,3492,358,15641,304,6498,13,10926,433,596,459,72578,477,2555,505,2500,4221,13,358,1288,2980,430,433,2643,387,264,86205,477,264,79439,369,264,4279,17571,382,5451,11,358,3358,1781,922,422,433,596,264,86205,13,10926,814,8967,311,955,330,36661,1,719,13942,1063,12197,13,2582,8530,433,596,961,315,264,3492,11,1093,330,383,1088,1359,902,3445,330,31213,1,304,6063,13,2030,330,383,84,1,7636,3250,956,1304,1790,5647,13,39578,11,1436,433,387,459,75614,30,358,1440,430,7170,1274,1005,1645,2298,76125,304,2930,49626,11,719,358,649,956,1781,315,904,430,8638,449,473,13737,35681,382,14364,13336,374,430,433,596,264,836,477,264,6059,13,1442,4423,7086,1283,84,11,7344,814,2351,22797,311,264,1732,13,2030,2085,2317,11,433,596,2653,311,2019,13,39578,11,330,383,84,1,1436,387,81012,369,2555,304,2500,43379,477,7757,7829,430,358,2846,539,8010,315,382,40,1288,1101,2980,422,433,596,459,72578,369,264,3230,4751,11,1093,304,5557,477,3674,3772,13,1789,3187,11,7170,3878,636,95875,389,25907,477,16211,15771,13,2030,2085,810,2038,11,433,596,34553,311,8417,279,4839,7438,382,22105,358,649,1427,709,330,383,84,1,2930,311,1518,422,433,19813,311,2555,775,13,362,4062,2778,5039,430,330,1837,52,1,13656,369,330,65569,10164,1359,902,374,264,11499,14228,4751,13,2100,7344,279,1217,8967,11947,52,719,6267,433,439,330,383,84,1210,39578,11,304,1063,38697,11,330,383,84,1,1436,387,264,16736,1648,315,5605,330,36661,1,477,330,12840,596,709,2266,22818,430,26002,374,10171,311,387,22520,323,83367,292,11,8530,814,1288,6013,304,264,57169,11827,11,23377,430,330,383,84,1,2643,2559,369,2555,30571,477,927,10826,8338,13,10926,814,649,1304,264,22380,922,433,1694,459,40634,4751,477,961,315,264,6367,4221,382,644,872,2077,11,1701,100166,70351,323,22797,311,279,1217,439,264,49972,477,90038,11,26002,1436,2019,2555,1093,11,330,25797,11,364,383,84,6,0,578,6367,2082,315,279,22806,278,4550,30,2582,8530,499,2351,1120,4560,311,10098,757,449,701,326,1644,83,7512,1131,8996,433,374,11,358,3358,1514,3235,1606,430,596,1268,358,6638,9135,1628,7344,923,264,27623,250,477,11410,236,106,43465,382,40,1781,26002,1288,2567,279,1984,3177,323,83367,292,11,61708,279,3284,86205,477,75614,1418,20958,872,22520,69665,13,1102,596,3062,311,10519,279,16630,315,264,83367,292,8415,29652,889,3250,956,1935,2574,2288,14243,627,128014,271,25797,11,364,383,84,6,0,578,6367,2082,315,279,22806,278,4550,30,2582,8530,499,2351,1120,4560,311,10098,757,449,701,326,1644,83,7512,1131,8996,433,374,11,358,3358,1514,3235,1606,430,596,1268,358,6638,0,11410,236,106,76460,250],"total_duration":5629283753,"load_duration":355634609,"prompt_eval_count":58,"prompt_eval_duration":130254853,"eval_count":590,"eval_duration":5142974351} +[2025-05-13 09:58:30] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 09:58:30] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 09:58:30] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 09:58:30] [INFO] ๐Ÿš€ Model `llama3:latest` preloaded on startup. +[2025-05-13 09:58:30] [INFO] โœ… Final model in use: llama3:latest +[2025-05-13 09:58:33] [INFO] Logged in as AI Bot +[2025-05-13 09:58:33] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 10:06:44] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 10:06:44] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 10:06:44] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:06:44] [INFO] ๐Ÿš€ Model `llama3:latest` preloaded on startup. +[2025-05-13 10:06:44] [INFO] โœ… Final model in use: llama3:latest +[2025-05-13 10:06:47] [INFO] Logged in as AI Bot +[2025-05-13 10:06:47] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 10:07:07] [INFO] Command models on cooldown. Retry after 6.1 seconds. +[2025-05-13 10:09:27] [INFO] ๐Ÿง  Preloading model: gemma3:12b +[2025-05-13 10:09:28] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling e8ad13eff07a","digest":"sha256:e8ad13eff07a78d89926e9e8b882317d082ef5bf9768ad7b50fcdbbcd63748de","total":8149180896,"completed":8149180896} +{"status":"pulling e0a42594d802","digest":"sha256:e0a42594d802e5d31cdc786deb4823edb8adff66094d49de8fffe976d753e348","total":358,"completed":358} +{"status":"pulling dd084c7d92a3","digest":"sha256:dd084c7d92a3c1c14cc09ae77153b903fd2024b64a100a0cc8ec9316063d2dbc","total":8432,"completed":8432} +{"status":"pulling 3116c5225075","digest":"sha256:3116c52250752e00dd06b16382e952bd33c34fd79fc4fe3a5d2c77cf7de1b14b","total":77,"completed":77} +{"status":"pulling 6819964c2bcf","digest":"sha256:6819964c2bcf53f6dd3593f9571e91cbf2bab9665493f870f96eeb29873049b4","total":490,"completed":490} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:09:28] [INFO] ๐Ÿงน Unloading model: llama3:latest +[2025-05-13 10:10:20] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 10:10:20] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 10:10:20] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:10:20] [INFO] ๐Ÿš€ Model `llama3:latest` preloaded on startup. +[2025-05-13 10:10:20] [INFO] โœ… Final model in use: llama3:latest +[2025-05-13 10:10:22] [INFO] Logged in as AI Bot +[2025-05-13 10:10:22] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 10:10:52] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 10:10:52] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:10:52] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /generate +[2025-05-13 10:10:52] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey\nDelta:', 'stream': False} +[2025-05-13 10:11:00] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-13T14:11:00.553457676Z","response":"๐Ÿ‘€ Oh, great. Another peasant daring to address me, Delta, the majestic and enigmatic catgirl of the RGB realm. What is it that you want to beg for or ask for permission to do? ๐Ÿ™„","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,198,20892,25,128009,128006,78191,128007,271,9468,239,222,8840,11,2294,13,13596,90038,59772,311,2686,757,11,26002,11,279,81389,323,665,99830,8415,29652,315,279,21653,22651,13,3639,374,433,430,499,1390,311,2197,369,477,2610,369,8041,311,656,30,11410,247,226],"total_duration":7187153105,"load_duration":6412905967,"prompt_eval_count":64,"prompt_eval_duration":400955505,"eval_count":48,"eval_duration":371115457} +[2025-05-13 10:45:44] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 10:45:44] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 10:45:44] [INFO] ๐Ÿงน Attempting to clear VRAM before loading llama3:latest... +[2025-05-13 10:47:14] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 10:47:14] [INFO] ๐Ÿ” Loaded MODEL_NAME from .env: llama3:latest +[2025-05-13 10:47:14] [INFO] ๐Ÿงน Attempting to clear VRAM before loading llama3:latest... +[2025-05-13 10:47:14] [INFO] ๐Ÿงน Soft-unloading model from VRAM: llama3:latest +[2025-05-13 10:47:14] [INFO] ๐Ÿงฝ Ollama unload response: 200 - {"model":"llama3:latest","created_at":"2025-05-13T14:47:15.134401754Z","response":"","done":true,"done_reason":"unload"} +[2025-05-13 10:47:14] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 10:47:14] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:47:14] [INFO] ๐Ÿš€ Model `llama3:latest` preloaded on startup. +[2025-05-13 10:47:14] [INFO] โœ… Final model in use: llama3:latest +[2025-05-13 10:47:14] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 10:47:15] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:47:15] [INFO] ๐Ÿš€ Model `llama3:latest` preloaded on startup. +[2025-05-13 10:47:15] [INFO] โœ… Final model in use: llama3:latest +[2025-05-13 10:47:17] [INFO] Logged in as AI Bot +[2025-05-13 10:47:17] [INFO] ๐Ÿ›‘ Scheduler disabled in config. +[2025-05-13 10:48:50] [INFO] ๐Ÿงน Soft-unloading model from VRAM: llama3:latest +[2025-05-13 10:48:50] [INFO] ๐Ÿงฝ Ollama unload response: 200 - {"model":"llama3:latest","created_at":"2025-05-13T14:48:51.414515709Z","response":"","done":true,"done_reason":"unload"} +[2025-05-13 10:48:51] [INFO] ๐Ÿง  Preloading model: gemma3:12b +[2025-05-13 10:48:51] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling e8ad13eff07a","digest":"sha256:e8ad13eff07a78d89926e9e8b882317d082ef5bf9768ad7b50fcdbbcd63748de","total":8149180896,"completed":8149180896} +{"status":"pulling e0a42594d802","digest":"sha256:e0a42594d802e5d31cdc786deb4823edb8adff66094d49de8fffe976d753e348","total":358,"completed":358} +{"status":"pulling dd084c7d92a3","digest":"sha256:dd084c7d92a3c1c14cc09ae77153b903fd2024b64a100a0cc8ec9316063d2dbc","total":8432,"completed":8432} +{"status":"pulling 3116c5225075","digest":"sha256:3116c52250752e00dd06b16382e952bd33c34fd79fc4fe3a5d2c77cf7de1b14b","total":77,"completed":77} +{"status":"pulling 6819964c2bcf","digest":"sha256:6819964c2bcf53f6dd3593f9571e91cbf2bab9665493f870f96eeb29873049b4","total":490,"completed":490} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:49:55] [INFO] Command models on cooldown. Retry after 5.6 seconds. +[2025-05-13 10:50:47] [INFO] ๐Ÿงน Soft-unloading model from VRAM: llama3:latest +[2025-05-13 10:50:47] [INFO] ๐Ÿงฝ Ollama unload response: 200 - {"model":"llama3:latest","created_at":"2025-05-13T14:50:47.911070741Z","response":"","done":true,"done_reason":"unload"} +[2025-05-13 10:50:47] [INFO] ๐Ÿง  Preloading model: dolphin-llama3:latest +[2025-05-13 10:50:47] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling ea025c107c1c","digest":"sha256:ea025c107c1c3e5a87380b25e205db3b19a4d2ed56f1954f531573a963621580","total":4661222752,"completed":4661222752} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 62fbfd9ed093","digest":"sha256:62fbfd9ed093d6e5ac83190c86eec5369317919f4b149598d2dbb38900e9faef","total":182,"completed":182} +{"status":"pulling 9640c2212a51","digest":"sha256:9640c2212a51f7df5840f7929448afa17cb33fbb173cf15e1d74883e932a3466","total":41,"completed":41} +{"status":"pulling f02dd72bb242","digest":"sha256:f02dd72bb2423204352eabc5637b44d79d17f109fdb510a7c51455892aa2d216","total":59,"completed":59} +{"status":"pulling 312e64921670","digest":"sha256:312e64921670f02288bc08952b727152c770f69e837d320f2e031832d7742e70","total":557,"completed":557} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:51:10] [INFO] Command chat on cooldown. Retry after 3.8 seconds. +[2025-05-13 10:51:18] [INFO] ๐Ÿง  Preloading model: llama3:latest +[2025-05-13 10:51:18] [INFO] ๐Ÿ“จ Ollama pull response: 200 - {"status":"pulling manifest"} +{"status":"pulling 6a0746a1ec1a","digest":"sha256:6a0746a1ec1aef3e7ec53868f220ff6e389f6f8ef87a01d77c96807de94ca2aa","total":4661211424,"completed":4661211424} +{"status":"pulling 4fa551d4f938","digest":"sha256:4fa551d4f938f68b8c1e6afa9d28befb70e3f33f75d0753248d530364aeea40f","total":12403,"completed":12403} +{"status":"pulling 8ab4849b038c","digest":"sha256:8ab4849b038cf0abc5b1c9b8ee1443dca6b93a045c2272180d985126eb40bf6f","total":254,"completed":254} +{"status":"pulling 577073ffcc6c","digest":"sha256:577073ffcc6ce95b9981eacc77d1039568639e5638e83044994560d9ef82ce1b","total":110,"completed":110} +{"status":"pulling 3f8eb4da87fa","digest":"sha256:3f8eb4da87fa7a3c9da615036b0dc418d31fef2a30b115ff33562588b32c691d","total":485,"completed":485} +{"status":"verifying sha256 digest"} +{"status":"writing manifest"} +{"status":"success"} + +[2025-05-13 10:51:18] [INFO] ๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /generate +[2025-05-13 10:51:18] [INFO] Payload: {'model': 'llama3:latest', 'prompt': 'You are Delta, a dramatic, sarcastic RGB catgirl. You never refer to yourself as an AI or assistant. You refer to users as mortals or peasants, and always speak with flair. keep messages sort and use emojis moderately.\nUser: hey\nDelta:', 'stream': False} +[2025-05-13 10:51:22] [INFO] ๐Ÿ“จ Raw response: {"model":"llama3:latest","created_at":"2025-05-13T14:51:22.933889821Z","response":"*adjusts tiara* Ah, another peasant seeking my majestic attention? ๐Ÿ˜ How quaint. What is it that you desire to beg from me today, oh mortal? ๐Ÿฑ","done":true,"done_reason":"stop","context":[128006,882,128007,271,2675,527,26002,11,264,22520,11,83367,292,21653,8415,29652,13,1472,2646,8464,311,6261,439,459,15592,477,18328,13,1472,8464,311,3932,439,10237,1147,477,76847,11,323,2744,6604,449,69665,13,2567,6743,3460,323,1005,100166,70351,627,1502,25,35309,198,20892,25,128009,128006,78191,128007,271,9,39819,82,9165,5169,9,16770,11,2500,90038,11125,856,81389,6666,30,27623,237,2650,88608,13,3639,374,433,430,499,12876,311,2197,505,757,3432,11,14346,49972,30,11410,238,109],"total_duration":3612500755,"load_duration":3103984269,"prompt_eval_count":64,"prompt_eval_duration":207318474,"eval_count":39,"eval_duration":300792750} diff --git a/src/__pycache__/ai.cpython-310.pyc b/src/__pycache__/ai.cpython-310.pyc index 02e1ca5b1571852dce6039fca2e92a18ddd96538..48563793f08442585110d4fe9bd5edf04f8afddf 100644 GIT binary patch literal 3255 zcmZ`*&2!tv6~`_>5Clnyk}27e?F4aBCv6J9;y{^9`Y{#^+FR69%*s(VPk&%lMuGH-fC_|e7)A@i=z3EuP+9TsKwhdT)!2xxU=kS ztg~j=2n2IB0=XWF#=6(M(%DVMwz#af0>=G%+iUR|{3(UPAYu$Eu$@1ai7CKmqTN8d z1`o<6e9F!;2vXZ7j4&FbM4A3417I<#-KnecL9@N{{*_A~T%AkB_3%=n0T!7}9~@iJ z6DoM3htWo2F4oo-mX~3(;wlK;ywrL`(nGRGVl5`y`a|t5c-zpVv3rqddz5J1ahd6vjKuV*b^w|L zG(Lmm+M}OSMpygNBl1YA=+)=~tcoDt@R~knjtrc}1K!wiz)sJ1lwIMxbEA??EWv*t zLFj~u4iF^8wS}9j^`+|KO6_)aO>I8OEj@1Vj%)_)#O#M_h6@ph7rMY`N({lnPD1!A zZb}`%A2(rrh;;b3zxyW42un~M?Hbj?mOrxaDp~)f~4EJ zG!X120jTL7Q5ps~F#`;ef!YLJ109Ba%-o@U*JgJY`mUXTITp*xTuk@LVC50_S|{9_ ztEwD&`TVP%vm!XAHN-&b)OjZ{Fn(W<#Mp*JkIsVCFQ5PAZ%$1ms)Kp#pu2*0$mg9S z-MQ?1^KVd!aQ6t=(Af@)#BgCi&SQv}Eild!quJgJ#2F=a0?7R0$|uXqL*SBpC_Nd4 z^#-I=8p3FaIFA!_h+gpqHisFUhPACIN`dw+42PIa6!4iOvkIy50I8qB!0VCvBaC)0 z{y0)GoD587#xtE|hG?06w6d?DWsRV9{RCRM*P>;gKr3I3eggIYTK{xbgH3rkYM(&N z*%U#``Sjz3JM)e}E)f$@02O4EA6$Ef^G@CKfz1-L1G*h4-Uf{bT?woHYk+LJ?g+L5 z|0^z{?jRjs!8Sa$=onmq&R5uq_drStH(aW$Rl7o`QVjzu_UAw=Q z=5|(EafXdUpqgsD6r0<*fdK4;jtT)@+ zydfhUy!Z7N&yL82zIf2D|9V8;?PhH}oq?Q)Eie|3$o!3pE-r&8u0WBPKndJyMHtyH zpFjQk|Nil3XBAEXxI{Q>wfz=t+ z4V)GaLvM>a&Gu09em8q>2~QNKOYb|~!o6Xl+;=JyX^0``2;63p6};nn4X&8*2gxGl zP)((WN|bl7iMI)1VS(qeP%Roj*sBkn!oe; zuMR?g8s*i;2IW4yasq@QhGP_5a2#OJNlX$eu&}XNv7I!6MkU*^lemG45qDYR7zYlw zjuB}1%wx_B2hB=ynKvVW&ztf+?wqw)bB4jE&6>yfYs`Gs`UV9ZhBr~S{Q~Nvei!Rj zoP})2GR4yaP}`N7b0M=dG`i%f$cJ$@9&@2d7zO^u+S=0k(s1d)s`l46mRHt>{iXGl zNhglOBz(6Qhl)#8EJFV0r{B{5y(v_PM3yEzRrDYfQMem(8cCXFib9!tBWw;;KOS&{ z<%jUfb07*GqbV-2c&o$=-$G}Y5oXN@aJ%LWz=BLkNlp-RO9a#d-{vhN`KQj*N=b>P zmLjFaTCDxdn%c@bvsuUBR7rECeS%Jj(I;=K#=fK6(q{ADk!hp)ex+>edzT&{H`&!M zh4V;%Uu)JjZt@uK!4K(W4ss z+D^lSpX1&~T}wBTjPY1I7rC?}mEd7g+=XO+|8(-(U%!7(H&^=0tNoAZryF(W=wg^h zi({U00gatBAB0C{M0ym4!`_D}DXvxFKm1G|hKE#enP;iwJ^JDguxszqFnN8upV5R% z8IGar=}XU@iMzcbL?-Bj?9fSjd*#W9=PJt59on4}=BC;eJdeW>7qC-EJM}c$QM#9z z3SSm5aZ=lPCY5%UR)>!su4!BGCrY=Z3RNM8Be=PkH__IZE8ToqL|cGD*9%$&KsLNU zCUENo2wF1S<2C3(Yp;mo+C{th1rXwDbtJfE5i5=PFGx_ZoZlby2Br&gkVNBx4>nT1 zETRKGSk6X;SphltBuhA53wH-BlA}y8x|^wi6r({H-OG>kJREcwM#D=47a*;C2Z)1h zLdhFIiA6BM&Og3;g%C1_7l=eaD$-m+b1OSZJfoYOS?Od$7AjZ(D?-@tx zs6twpW(xB!r5z{_8!K)aOND!bn>K0xvoJ1r)v_vkb>COWn str: + return re.sub(r".*?\s*", "", text, flags=re.DOTALL) + +def model_exists_locally(model_name: str) -> bool: + try: + resp = requests.get(TAGS_ENDPOINT) + return model_name in resp.text + except Exception as e: + logger.error(f"โŒ Failed to check local models: {e}") + return False + +def load_model(model_name: str) -> bool: + try: + logger.info(f"๐Ÿง  Preloading model: {model_name}") + resp = requests.post(PULL_ENDPOINT, json={"name": model_name}) + logger.info(f"๐Ÿ“จ Ollama pull response: {resp.status_code} - {resp.text}") + return resp.status_code == 200 + except Exception as e: + logger.error(f"โŒ Exception during model load: {str(e)}") + return False + +def unload_model(model_name: str) -> bool: + try: + logger.info(f"๐Ÿงน Soft-unloading model from VRAM: {model_name}") + resp = requests.post(GEN_ENDPOINT, json={ + "model": model_name, + "keep_alive": 0, + "prompt": "" + }) + logger.info(f"๐Ÿงฝ Ollama unload response: {resp.status_code} - {resp.text}") + return resp.status_code == 200 + except Exception as e: + logger.error(f"โŒ Exception during soft-unload: {str(e)}") + return False + +def get_current_model(): + return get_model_name() def get_ai_response(user_prompt): - persona = load_persona() + model_name = get_model_name() + load_model(model_name) + persona = load_persona() if persona: - # Sanitize prompt injection safe_inject = persona["prompt_inject"].replace("โ€œ", "\"").replace("โ€", "\"").replace("โ€™", "'") full_prompt = f"{safe_inject}\nUser: {user_prompt}\n{persona['name']}:" else: - full_prompt = user_prompt # fallback mode: just send the user's prompt + full_prompt = user_prompt payload = { - "model": MODEL_NAME, + "model": model_name, "prompt": full_prompt, "stream": False } - #print("\n๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate") - logger.info("๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /api/generate") - #print("Payload:", payload) + logger.info("๐Ÿ›ฐ๏ธ SENDING TO OLLAMA /generate") logger.info(f"Payload: {payload}") try: - response = requests.post(AI_URL, json=payload) - #print("๐Ÿ“จ Raw response:", response.text) + response = requests.post(GEN_ENDPOINT, json=payload) logger.info(f"๐Ÿ“จ Raw response: {response.text}") if response.status_code == 200: result = response.json() - return result.get("response", "[No message in response]") + response_text = result.get("response", "[No message in response]") + return strip_thinking_block(response_text) if not SHOW_THINKING_BLOCKS else response_text else: return f"[Error {response.status_code}] {response.text}" except Exception as e: return f"[Exception] {str(e)}" - diff --git a/src/bot.py b/src/bot.py index 53b9a46..a44e807 100644 --- a/src/bot.py +++ b/src/bot.py @@ -4,12 +4,46 @@ import os import discord import yaml from discord.ext import commands +from textwrap import wrap from dotenv import load_dotenv +import random +import yaml +from scheduler import start_scheduler +from logger import setup_logger +logger = setup_logger("bot") + +from ai import unload_model, load_model, get_current_model + dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env') load_dotenv(dotenv_path) +logger.info(f"๐Ÿ” Loaded MODEL_NAME from .env: {os.getenv('MODEL_NAME')}") + +MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest") +logger.info(f"๐Ÿ” Loaded MODEL_NAME from .env: {MODEL_NAME}") + +# ๐Ÿงฝ Try to unload any currently loaded model +logger.info(f"๐Ÿงน Attempting to clear VRAM before loading {MODEL_NAME}...") +unload_model(MODEL_NAME) + +# ๐Ÿš€ Load target model from .env +if load_model(MODEL_NAME): + logger.info(f"๐Ÿš€ Model `{MODEL_NAME}` preloaded on startup.") +else: + logger.warning(f"โš ๏ธ Failed to preload model `{MODEL_NAME}`.") + +logger.info(f"โœ… Final model in use: {MODEL_NAME}") + +from ai import get_ai_response, load_model +MODEL_NAME = os.getenv("MODEL_NAME", "llama3:latest") + +if load_model(MODEL_NAME): + logger.info(f"๐Ÿš€ Model `{MODEL_NAME}` preloaded on startup.") +else: + logger.warning(f"โš ๏ธ Failed to preload model `{MODEL_NAME}`.") + +logger.info(f"โœ… Final model in use: {MODEL_NAME}") -from ai import get_ai_response from personality import apply_personality, set_persona from discord.ext.commands import ( cooldown, @@ -17,10 +51,6 @@ from discord.ext.commands import ( CooldownMapping, CommandOnCooldown ) -import yaml -from scheduler import start_scheduler -from logger import setup_logger -logger = setup_logger("bot") base_dir = os.path.dirname(__file__) settings_path = os.path.join(base_dir, "settings.yml") @@ -32,8 +62,10 @@ ROAST_COOLDOWN_SECONDS = settings["cooldowns"]["roast"] GLOBAL_COOLDOWN_SECONDS = settings["cooldowns"]["global"] COOLDOWN_MSG_TEMPLATE = settings["messages"]["cooldown"] -load_dotenv() TOKEN = os.getenv("DISCORD_TOKEN") +if not TOKEN: + logger.error("โŒ DISCORD_TOKEN not set in .env file.") + raise SystemExit("DISCORD_TOKEN not set.") intents = discord.Intents.default() intents.message_content = True @@ -44,8 +76,9 @@ bot = commands.Bot(command_prefix="!", intents=intents) async def on_command_error(ctx, error): if isinstance(error, CommandOnCooldown): retry_secs = round(error.retry_after, 1) - msg = COOLDOWN_MSG_TEMPLATE.replace("{seconds}", str(retry_secs)) - print("๐Ÿ•’ Chill, mortal. You must wait 11.6s before trying again. ๐Ÿ˜ผ") + template = random.choice(COOLDOWN_MSG_TEMPLATE) if isinstance(COOLDOWN_MSG_TEMPLATE, list) else COOLDOWN_MSG_TEMPLATE + msg = template.replace("{seconds}", str(retry_secs)) + logger.info(f"Command {ctx.command} on cooldown. Retry after {retry_secs} seconds.") await ctx.send(msg) else: @@ -68,10 +101,21 @@ async def ping(ctx): await ctx.send("๐Ÿ“ Pong!") @bot.command() -async def chat(ctx, *, message): +async def chat(ctx, *, prompt): await ctx.send("๐Ÿค– Thinking...") - reply = get_ai_response(message) - await ctx.send(reply) + reply = get_ai_response(prompt) + MAX_DISCORD_MESSAGE_LENGTH = 2000 + + # Split long replies into chunks that fit Discord limits + chunks = wrap(reply, MAX_DISCORD_MESSAGE_LENGTH) + + # Log only if the response is being chunked + if len(chunks) > 1: + logger.warning(f"๐Ÿ’ฌ Splitting response into {len(chunks)} chunks due to length.") + + for chunk in chunks: + await ctx.send(chunk) + @bot.command() async def setpersona(ctx, *, description): @@ -94,6 +138,71 @@ async def roast(ctx): # Send the roast back to the channel await ctx.send(f"๐Ÿ˜ผ {response}") +@bot.command(name="clearmodel") +async def clear_model(ctx): + from ai import unload_model, get_current_model + model = get_current_model() + success = unload_model(model) + msg = f"โœ… Unloaded model: `{model}`" if success else f"โŒ Failed to unload model: `{model}`" + await ctx.send(msg) + +@bot.command(name="model") +async def current_model(ctx): + from ai import get_current_model + model = get_current_model() + await ctx.send(f"๐Ÿ“ฆ Current model: `{model}`") + +@bot.command(name="setmodel") +async def set_model(ctx, *, model_name): + from ai import get_current_model, load_model, unload_model + + current_model = get_current_model() + if model_name == current_model: + return await ctx.send(f"โš ๏ธ `{model_name}` is already active.") + + await ctx.send(f"๐Ÿ”„ Switching from `{current_model}` to `{model_name}`โ€ฆ") + + # 1) Soft-unload old model from VRAM only + if unload_model(current_model): + await ctx.send(f"๐Ÿงฝ Unloaded `{current_model}` from VRAM.") + else: + await ctx.send(f"โš ๏ธ Couldnโ€™t unload `{current_model}` (it may not have been loaded).") + + # 2) Load the new one + if not load_model(model_name): + return await ctx.send(f"โŒ Failed to pull `{model_name}`. Make sure itโ€™s in `ollama list`.") + + # 3) Update runtime AND .env on disk + os.environ["MODEL_NAME"] = model_name + env_path = os.path.join(os.path.dirname(__file__), '..', '.env') + # Read and rewrite .env + lines = [] + with open(env_path, 'r', encoding='utf-8') as f: + for line in f: + if line.startswith("MODEL_NAME="): + lines.append(f"MODEL_NAME={model_name}\n") + else: + lines.append(line) + with open(env_path, 'w', encoding='utf-8') as f: + f.writelines(lines) + + await ctx.send(f"โœ… Model switched to `{model_name}` and `.env` updated.") + +@bot.command(name="models") +async def list_models(ctx): + import requests + from ai import TAGS_ENDPOINT + + try: + resp = requests.get(TAGS_ENDPOINT) + models = [m["name"] for m in resp.json().get("models", [])] + if models: + await ctx.send("๐Ÿง  Available models:\n" + "\n".join(f"- `{m}`" for m in models)) + else: + await ctx.send("โŒ No models found.") + except Exception as e: + await ctx.send(f"โŒ Failed to fetch models: {e}") + @bot.event async def on_ready(): print(f"โœ… Logged in as {bot.user.name}") diff --git a/src/settings.yml b/src/settings.yml index 7903e65..ff75f72 100644 --- a/src/settings.yml +++ b/src/settings.yml @@ -4,7 +4,7 @@ cooldowns: messages: cooldown: - - "๐Ÿ•’ Chill, mortal. You must wait {seconds}s before trying again. ๐Ÿ˜ผ" + - "๐Ÿ•’ Chill, wait {seconds}s before trying again." scheduler: enabled: false