🧠 Add enhanced memory system with conversation tracking and user context
parent 4e2dfdfba6
commit 6d4de79ac2

8 changed files with 1013 additions and 6 deletions
205 bot.log

@@ -1160,3 +1160,208 @@ nohup: ignoring input
[2025-09-20 15:04:27] [DEBUG] [bot:243] on_message: observed own message id=1419036507569786931 channel=1380999713272238151
[2025-09-20 15:23:13] [INFO] [ai:69] 🔁 Modelfile loaded: ../examples/gojo.mod
[2025-09-20 15:30:27] [INFO] [ai:69] 🔁 Modelfile loaded: ../examples/gojo.mod
[2025-09-20 17:25:57] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-20 18:56:21] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-20 21:10:39] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-20 23:45:51] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 00:14:15] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 00:14:15] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 00:14:15] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 03:20:46] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 06:16:09] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 06:54:20] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 09:49:43] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 09:49:43] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 10:36:38] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 10:51:44] [INFO] 🎯 Trigger word detected — Delta fully engaged.
|
||||||
|
[2025-09-21 10:51:44] [INFO] [autochat:151] 🎯 Trigger word detected — Delta fully engaged.
|
||||||
|
[2025-09-21 10:51:44] [INFO] 🤖 Considering passive reply (author: PLEX)
|
||||||
|
[2025-09-21 10:51:44] [INFO] [autochat:167] 🤖 Considering passive reply (author: PLEX)
|
||||||
|
[2025-09-21 10:51:44] [INFO] 📚 Retrieved 0 messages for context
|
||||||
|
[2025-09-21 10:51:44] [INFO] [autochat:168] 📚 Retrieved 0 messages for context
|
||||||
|
[2025-09-21 10:51:44] [INFO] 🧠 Preloading model: gemma3:12b
|
||||||
|
[2025-09-21 10:51:44] [INFO] [ai:93] 🧠 Preloading model: gemma3:12b
|
||||||
|
[2025-09-21 10:51:45] [INFO] 📦 Model pull started successfully.
|
||||||
|
[2025-09-21 10:51:45] [INFO] [ai:100] 📦 Model pull started successfully.
|
||||||
|
[2025-09-21 10:51:45] [INFO] llm-4b1d8b1d LLM request start model=gemma3:12b user=PLEX context_len=0
|
||||||
|
[2025-09-21 10:51:45] [INFO] [ai:132] llm-4b1d8b1d LLM request start model=gemma3:12b user=PLEX context_len=0
|
||||||
|
[2025-09-21 10:51:45] [DEBUG] [ai:184] llm-4b1d8b1d Sending payload to Ollama: model=gemma3:12b user=PLEX
|
||||||
|
[2025-09-21 10:51:45] [DEBUG] [ai:185] llm-4b1d8b1d Payload size=292 chars
|
||||||
|
[2025-09-21 10:51:55] [WARNING ] discord.gateway: Shard ID None heartbeat blocked for more than 10 seconds.
Loop thread traceback (most recent call last):
  File "/home/milo/Documents/AI-Discord-Bot/src/bot.py", line 546, in <module>
    bot.run(TOKEN)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/discord/client.py", line 929, in run
    asyncio.run(runner())
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/asyncio/runners.py", line 190, in run
    return runner.run(main)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/asyncio/runners.py", line 118, in run
    return self._loop.run_until_complete(task)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/asyncio/base_events.py", line 641, in run_until_complete
    self.run_forever()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/asyncio/base_events.py", line 608, in run_forever
    self._run_once()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/asyncio/base_events.py", line 1936, in _run_once
    handle._run()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/asyncio/events.py", line 84, in _run
    self._context.run(self._callback, *self._args)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/discord/client.py", line 504, in _run_event
    await coro(*args, **kwargs)
  File "/home/milo/Documents/AI-Discord-Bot/src/bot.py", line 258, in on_message
    reply = await generate_auto_reply(message, bot)
  File "/home/milo/Documents/AI-Discord-Bot/src/autochat.py", line 171, in generate_auto_reply
    reply = get_ai_response(
  File "/home/milo/Documents/AI-Discord-Bot/src/ai.py", line 190, in get_ai_response
    return resp.status_code == 200
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/requests/api.py", line 115, in post
    return request("post", url, data=data, json=json, **kwargs)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/requests/api.py", line 59, in request
    return session.request(method=method, url=url, **kwargs)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/requests/sessions.py", line 589, in request
    resp = self.send(prep, **send_kwargs)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/requests/sessions.py", line 703, in send
    r = adapter.send(request, **kwargs)
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/requests/adapters.py", line 644, in send
    resp = conn.urlopen(
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/urllib3/connectionpool.py", line 787, in urlopen
    response = self._make_request(
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/urllib3/connectionpool.py", line 534, in _make_request
    response = conn.getresponse()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/site-packages/urllib3/connection.py", line 565, in getresponse
    httplib_response = super().getresponse()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/http/client.py", line 1395, in getresponse
    response.begin()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/http/client.py", line 325, in begin
    version, status, reason = self._read_status()
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/http/client.py", line 286, in _read_status
    line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
  File "/home/milo/miniconda3/envs/discord-bot/lib/python3.11/socket.py", line 718, in readinto
    return self._sock.recv_into(b)

[2025-09-21 10:51:57] [DEBUG] [ai:193] llm-4b1d8b1d Raw response status=200
|
||||||
|
[2025-09-21 10:51:57] [DEBUG] [ai:194] llm-4b1d8b1d Raw response body={"model":"gemma3:12b","created_at":"2025-09-21T14:51:57.90355062Z","response":"Seriously? You're getting excited about *that*? A Plex add-on? Look, I appreciate you sharing, but my tastes tend to run a little more… refined. Like, maybe a flawlessly executed Domain Expansion, you know? \n\nBut hey, if it makes you happy, that's cool. Just don't expect me to start obsessing over it. Though... \"Epicfighter,\" huh? Does it, like, *fight* things? Maybe it’s got some hidden potential. I'm always interested in things with potential. 😉\n\nDon’t go getting all flustered now. Just trying to be friendly. Now, tell me something *truly* interesting.","done":true,"done_reason":"stop","context":[105,2364,107,236820,236909,9731,111038,3048,659,555,1277,236756,3764,5400,699,219769,208651,751,119814,236761,1599,236858,500,16690,236762,236764,8632,236764,532,1378,14620,236764,840,19297,20111,529,822,3272,236761,95419,528,496,4532,15737,236761,15428,19921,611,236789,500,496,9894,21603,236909,643,111038,2887,236787,558,1474,115274,568,43175,236768,251,107,236836,19221,75591,236764,48349,97639,753,175625,75591,236764,48349,97639,691,5569,3742,531,127610,236761,107,236820,236909,111457,111038,106,107,105,4368,107,155420,236881,1599,236789,500,3978,9775,1003,808,7705,236829,236881,562,127610,1138,236772,498,236881,11696,236764,564,14756,611,11211,236764,840,1041,38527,6316,531,1845,496,2268,919,237064,33093,236761,9929,236764,7463,496,174109,19284,49822,81909,236764,611,1281,236881,236743,108,4573,31251,236764,768,625,3590,611,5293,236764,600,236789,236751,5427,236761,5393,1537,236789,236745,2414,786,531,1502,88751,522,1024,625,236761,20043,1390,623,130254,97639,2098,78345,236881,14300,625,236764,1133,236764,808,74364,236829,2432,236881,15704,625,236858,236751,2506,1070,11497,3435,236761,564,236789,236757,2462,7467,528,2432,607,3435,236761,85345,108,8993,236858,236745,817,3978,784,1378,707,3747,1492,236761,5393,4875,531,577,10841,236761,4224,236764,3442,786,2613,808,1168,3623,236829,6121,236761],"total_duration":12671648586,"load_duration":10067437581,"prompt_eval_count":95,"prompt_eval_duration":602342069,"eval_count":146,"eval_duration":2001253770}
|
||||||
|
[2025-09-21 10:51:57] [INFO] llm-4b1d8b1d LLM response model=gemma3:12b duration=12.684s summary=Seriously? You're getting excited about *that*? A Plex add-on? Look, I appreciate you sharing, but my tastes tend to run a little more… refined. Like, maybe a f
|
||||||
|
[2025-09-21 10:51:57] [INFO] [ai:140] llm-4b1d8b1d LLM response model=gemma3:12b duration=12.684s summary=Seriously? You're getting excited about *that*? A Plex add-on? Look, I appreciate you sharing, but my tastes tend to run a little more… refined. Like, maybe a f
|
||||||
|
[2025-09-21 10:51:57] [DEBUG] [ai:142] llm-4b1d8b1d LLM raw response: {'model': 'gemma3:12b', 'created_at': '2025-09-21T14:51:57.90355062Z', 'response': 'Seriously? You\'re getting excited about *that*? A Plex add-on? Look, I appreciate you sharing, but my tastes tend to run a little more… refined. Like, maybe a flawlessly executed Domain Expansion, you know? \n\nBut hey, if it makes you happy, that\'s cool. Just don\'t expect me to start obsessing over it. Though... "Epicfighter," huh? Does it, like, *fight* things? Maybe it’s got some hidden potential. I\'m always interested in things with potential. 😉\n\nDon’t go getting all flustered now. Just trying to be friendly. Now, tell me something *truly* interesting.', 'done': True, 'done_reason': 'stop', 'context': [105, 2364, 107, 236820, 236909, 9731, 111038, 3048, 659, 555, 1277, 236756, 3764, 5400, 699, 219769, 208651, 751, 119814, 236761, 1599, 236858, 500, 16690, 236762, 236764, 8632, 236764, 532, 1378, 14620, 236764, 840, 19297, 20111, 529, 822, 3272, 236761, 95419, 528, 496, 4532, 15737, 236761, 15428, 19921, 611, 236789, 500, 496, 9894, 21603, 236909, 643, 111038, 2887, 236787, 558, 1474, 115274, 568, 43175, 236768, 251, 107, 236836, 19221, 75591, 236764, 48349, 97639, 753, 175625, 75591, 236764, 48349, 97639, 691, 5569, 3742, 531, 127610, 236761, 107, 236820, 236909, 111457, 111038, 106, 107, 105, 4368, 107, 155420, 236881, 1599, 236789, 500, 3978, 9775, 1003, 808, 7705, 236829, 236881, 562, 127610, 1138, 236772, 498, 236881, 11696, 236764, 564, 14756, 611, 11211, 236764, 840, 1041, 38527, 6316, 531, 1845, 496, 2268, 919, 237064, 33093, 236761, 9929, 236764, 7463, 496, 174109, 19284, 49822, 81909, 236764, 611, 1281, 236881, 236743, 108, 4573, 31251, 236764, 768, 625, 3590, 611, 5293, 236764, 600, 236789, 236751, 5427, 236761, 5393, 1537, 236789, 236745, 2414, 786, 531, 1502, 88751, 522, 1024, 625, 236761, 20043, 1390, 623, 130254, 97639, 2098, 78345, 236881, 14300, 625, 236764, 1133, 236764, 808, 74364, 236829, 2432, 236881, 15704, 625, 236858, 236751, 2506, 1070, 11497, 3435, 236761, 564, 236789, 236757, 2462, 7467, 528, 2432, 607, 3435, 236761, 85345, 108, 8993, 236858, 236745, 817, 3978, 784, 1378, 707, 3747, 1492, 236761, 5393, 4875, 531, 577, 10841, 236761, 4224, 236764, 3442, 786, 2613, 808, 1168, 3623, 236829, 6121, 236761], 'total_duration': 12671648586, 'load_duration': 10067437581, 'prompt_eval_count': 95, 'prompt_eval_duration': 602342069, 'eval_count': 146, 'eval_duration': 2001253770}
|
||||||
|
[2025-09-21 10:51:58] [DEBUG] [bot:243] on_message: observed own message id=1419335352275046534 channel=1370422222900035834
|
||||||
|
[2025-09-21 11:02:34] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 11:24:52] [INFO] 📉 Engagement decayed by 4.97, new score: 0.00
|
||||||
|
[2025-09-21 11:24:52] [INFO] [autochat:72] 📉 Engagement decayed by 4.97, new score: 0.00
|
||||||
|
[2025-09-21 11:24:52] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 11:24:52] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 13:53:00] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 14:14:42] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 14:14:42] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-21 17:49:42] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 20:06:03] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-21 22:12:35] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 00:13:17] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 00:37:30] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 01:36:43] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 02:36:46] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 03:34:17] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 04:37:30] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 07:02:18] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 07:47:37] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 07:47:37] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 07:49:26] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 10:03:32] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 11:04:18] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 11:10:42] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 11:26:29] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 11:26:29] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 12:23:43] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 12:23:43] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 14:05:06] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 17:10:17] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 19:41:15] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 22:11:56] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-22 22:52:48] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-22 22:52:48] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-23 02:09:31] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 04:38:52] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 04:40:02] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 06:16:50] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 07:01:26] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 08:06:48] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 08:55:06] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 10:34:38] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 12:26:12] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 13:02:50] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 13:59:03] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 14:28:51] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 14:57:40] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 15:38:37] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 17:41:38] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-23 20:39:12] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 00:27:45] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 02:17:48] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 02:32:45] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 03:35:31] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 04:35:28] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-24 04:35:28] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-24 05:47:42] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 06:29:59] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 07:57:09] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 10:42:52] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 11:18:29] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 12:04:05] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 12:23:24] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 12:49:12] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 15:09:15] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 17:04:23] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 18:18:21] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-24 18:18:21] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-24 20:33:34] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 21:36:46] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 22:41:17] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-24 23:59:24] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 02:25:55] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-25 02:25:55] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-25 02:33:19] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 04:17:56] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 07:55:57] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 08:54:52] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-25 08:54:52] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-25 11:41:09] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 15:29:36] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 16:18:12] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 19:53:29] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 22:05:52] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 23:27:46] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-25 23:58:21] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 02:42:06] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 04:34:48] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 05:31:11] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 06:03:12] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 07:18:08] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 07:53:59] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 07:53:59] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 09:09:22] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 10:27:28] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 11:26:58] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 11:45:11] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 11:45:11] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 12:43:23] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 13:03:55] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 13:08:57] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 13:29:44] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 14:30:16] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 15:20:03] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 15:41:41] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 17:18:05] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 17:55:25] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 19:33:49] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 19:33:49] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 20:22:28] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-26 20:22:55] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 20:22:55] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-26 23:49:29] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-27 03:04:51] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-27 03:57:04] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-27 04:48:46] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-27 07:06:34] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-27 10:40:30] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
[2025-09-27 10:54:42] [INFO] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-27 10:54:42] [INFO] [autochat:161] 😴 No trigger and engagement is 0 — skipping.
|
||||||
|
[2025-09-27 12:38:20] [INFO ] discord.gateway: Shard ID None has successfully RESUMED session eed4b6aaccccd97b5456c73e342e25a1.
|
||||||
|
|
|
||||||
416 plan.md (new file)

@@ -0,0 +1,416 @@
# 📋 DeltaBot Implementation Plan

**Generated:** October 8, 2025
**Based on:** ROADMAP.md analysis and codebase review

---

## 🎯 **Executive Summary**

This implementation plan addresses the 9 open Alpha issues and provides a structured approach to completing DeltaBot's core functionality. The plan prioritizes immediate blockers first, then foundational improvements, and finally advanced features.

---

## 🔥 **Phase 1: Critical Fixes & Foundations**
*Estimated Time: 2-3 weeks*

### **Issue #10 — Post "Reply" (HIGH PRIORITY)**
**Problem:** The bot posts new messages instead of replies, breaking conversation flow.
**Solution:** Implement Discord reply functionality.

**Implementation Steps:**
1. **Modify `scheduler/simple.py`:**
```python
# Instead of: await channel.send(message)
# Get a recent message and reply to it
recent_msgs = [msg async for msg in channel.history(limit=3) if not msg.author.bot]
if recent_msgs:
    await recent_msgs[0].reply(message)
else:
    await channel.send(message)
```

2. **Update `autochat.py`:**
```python
# In generate_auto_reply, return a reply object instead of a string
return {"content": reply, "reference": message}
```

3. **Modify `bot.py` message handling:**
```python
# Handle reply objects properly
if isinstance(reply, dict) and reply.get("reference"):
    await reply["reference"].reply(reply["content"])
else:
    await message.channel.send(reply)
```

**Acceptance Criteria:**
- [ ] Scheduled messages reply to recent user messages
- [ ] Auto-replies properly thread conversations
- [ ] Fallback to a regular message when no recent messages exist

---

### **Issue #36 — Memory Persistence (HIGH PRIORITY)**
**Problem:** No persistent context beyond the immediate messages.
**Solution:** Implement SQLite-based conversation memory.

**Implementation Steps:**
1. **Create `memory.py` module:**
```python
import sqlite3
from datetime import datetime, timedelta

class ConversationMemory:
    def __init__(self, db_path="data/memory.db"):
        self.db_path = db_path
        self.init_db()

    def store_message(self, channel_id, user_id, content, timestamp):
        # Store message with sentiment analysis
        ...

    def get_context(self, channel_id, hours=24, max_messages=50):
        # Retrieve relevant context
        ...

    def get_user_context(self, user_id, days=7):
        # Get user-specific conversation history
        ...
```

2. **Integrate with existing context system:**
   - Replace the `context.py` JSON approach with database queries
   - Add memory cleanup for old conversations (>30 days)
   - Include user interaction patterns in memory

3. **Database Schema:**
```sql
CREATE TABLE conversations (
    id INTEGER PRIMARY KEY,
    channel_id TEXT,
    user_id TEXT,
    username TEXT,
    content TEXT,
    timestamp DATETIME,
    sentiment REAL,
    importance_score REAL
);
```
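
A minimal sketch of how the skeleton above could sit on top of this schema. Column names follow the schema; the `data/memory.db` path, ISO-string timestamps, and the exact function signatures are illustrative assumptions, not the final API:

```python
import sqlite3
from datetime import datetime, timedelta

DB_PATH = "data/memory.db"  # assumed location

def store_message(channel_id, user_id, username, content, sentiment=0.0, importance=1.0):
    # Timestamps are stored as ISO strings so they sort lexically
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute(
            "INSERT INTO conversations (channel_id, user_id, username, content, "
            "timestamp, sentiment, importance_score) VALUES (?, ?, ?, ?, ?, ?, ?)",
            (str(channel_id), str(user_id), username, content,
             datetime.utcnow().isoformat(), sentiment, importance),
        )

def get_context(channel_id, hours=24, max_messages=50):
    cutoff = (datetime.utcnow() - timedelta(hours=hours)).isoformat()
    with sqlite3.connect(DB_PATH) as conn:
        rows = conn.execute(
            "SELECT username, content FROM conversations "
            "WHERE channel_id = ? AND timestamp > ? ORDER BY timestamp DESC LIMIT ?",
            (str(channel_id), cutoff, max_messages),
        ).fetchall()
    return list(reversed(rows))  # oldest first, ready for prompt building

def cleanup_old(days=30):
    # The >30-day cleanup from step 2 reduces to a single DELETE, run on a daily task
    cutoff = (datetime.utcnow() - timedelta(days=days)).isoformat()
    with sqlite3.connect(DB_PATH) as conn:
        conn.execute("DELETE FROM conversations WHERE timestamp < ?", (cutoff,))
```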

**Acceptance Criteria:**
- [ ] Messages stored in SQLite database
- [ ] Context retrieval includes conversation history
- [ ] Memory cleanup prevents database bloat
- [ ] User-specific context tracking

---

### **Issue #25 — Enable Modelfile Support (MEDIUM PRIORITY)**
**Problem:** The modelfile system is partially implemented but not fully functional.
**Solution:** Complete modelfile integration and testing.

**Implementation Steps:**
1. **Fix modelfile loading issues:**
   - Debug why personality switching doesn't work
   - Ensure the `MODFILE` global variable updates properly
   - Add validation for modelfile syntax

2. **Enhance `modelfile.py`:**
```python
def validate_modfile(modfile_dict):
    """Validate modfile has required fields"""
    required = ['name', 'base_model']
    return all(key in modfile_dict for key in required)

def apply_modfile_to_persona(modfile):
    """Convert modfile to persona format for compatibility"""
    return {
        'name': modfile.get('name'),
        'prompt_inject': modfile.get('system', ''),
        'emoji': '🤖',  # Default or extract from system prompt
        'style_prefix': f"{modfile.get('name', 'Bot')}:"
    }
```

3. **Add runtime switching:**
   - Complete the `!modfile switch` command implementation (see the sketch after this list)
   - Add validation and error handling
   - Test with existing examples (gojo.mod, delta.mod)
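
A rough shape for that command, assuming `load_modelfile(path)` from `ai.py` raises on failure and that modelfiles live under `examples/` the way `gojo.mod` and `delta.mod` do; adjust to the real signatures:

```python
# assumes os, load_modelfile, and get_modelfile_info are already imported in bot.py
@bot.command(name="modfile")
@commands.is_owner()
async def modfile_cmd(ctx, action: str = "info", *, name: str = None):
    """!modfile info | !modfile switch <name>"""
    if action == "info":
        await ctx.send(get_modelfile_info())
    elif action == "switch" and name:
        path = os.path.join("..", "examples", f"{name}.mod")  # assumed layout
        if not os.path.exists(path):
            await ctx.send(f"❌ No modelfile found at `{path}`")
            return
        try:
            load_modelfile(path)
            await ctx.send(f"🔁 Switched to modelfile `{name}`")
        except Exception as e:
            await ctx.send(f"❌ Failed to load `{name}`: {e}")
    else:
        await ctx.send("Usage: `!modfile info` or `!modfile switch <name>`")
```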

**Acceptance Criteria:**
- [ ] Modelfile personality switching works in real time
- [ ] `!modfile info` shows the current active modelfile
- [ ] Error handling for invalid modelfiles
- [ ] Backward compatibility with persona.json

---

## 🚀 **Phase 2: Core Features Enhancement**
*Estimated Time: 3-4 weeks*

### **Issue #17 — Image Generation (HIGH PRIORITY)**
**Problem:** No image generation capability.
**Solution:** Integrate with local Stable Diffusion or an external API.

**Implementation Steps:**
1. **Create `image_gen.py` module:**
```python
import os
import requests
from io import BytesIO

class ImageGenerator:
    def __init__(self):
        self.api_url = os.getenv("SD_API_URL", "http://localhost:7860")

    async def generate_image(self, prompt, style="anime"):
        """Generate image using Stable Diffusion API"""
        # Implementation for local SD or external service
        ...

    def enhance_prompt(self, user_prompt, persona):
        """Add persona-specific style to prompts"""
        return f"{user_prompt}, {persona.get('image_style', 'digital art')}"
```

2. **Add Discord command:**
```python
@bot.command(name="generate", aliases=["img", "draw"])
async def generate_image(ctx, *, prompt):
    async with ctx.typing():
        image_data = await image_generator.generate_image(prompt)
        if image_data:
            file = discord.File(BytesIO(image_data), "generated.png")
            await ctx.send(file=file)
```

3. **Integration options:**
   - **Option A:** Local Stable Diffusion WebUI API (see the sketch after this list)
   - **Option B:** External service (Replicate, HuggingFace)
   - **Option C:** Simple DALL-E API integration
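
For Option A, the call could look roughly like this. The `/sdapi/v1/txt2img` route and payload fields follow the AUTOMATIC1111 WebUI API; treat the field set and defaults as assumptions to verify against the running instance:

```python
import asyncio
import base64
import os
import requests

SD_API_URL = os.getenv("SD_API_URL", "http://localhost:7860")

async def sd_txt2img(prompt: str, style: str = "anime") -> bytes | None:
    """Option A sketch: render one image via the WebUI's txt2img endpoint."""
    payload = {"prompt": f"{prompt}, {style} style", "steps": 25, "width": 512, "height": 512}
    # requests is blocking, so run it off the event loop to keep the bot responsive
    resp = await asyncio.to_thread(
        requests.post, f"{SD_API_URL}/sdapi/v1/txt2img", json=payload, timeout=120
    )
    resp.raise_for_status()
    images = resp.json().get("images", [])          # list of base64-encoded PNGs
    return base64.b64decode(images[0]) if images else None
```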

**Acceptance Criteria:**
- [ ] `!generate <prompt>` command works
- [ ] Images posted directly to Discord
- [ ] Persona-aware prompt enhancement
- [ ] Error handling for generation failures

---

### **Issue #16 — Image Interpretation (MEDIUM PRIORITY)**
**Problem:** The bot cannot analyze or respond to images.
**Solution:** Integrate a vision model for image understanding.

**Implementation Steps:**
1. **Add vision capability to `ai.py`:**
```python
async def analyze_image(image_url, prompt="Describe this image"):
    """Use vision model to analyze images"""
    # Options: LLaVA, BLIP, or multimodal API
    ...

async def generate_image_response(image_url, context=""):
    """Generate contextual response to images"""
    analysis = await analyze_image(image_url)
    return get_ai_response(f"Image shows: {analysis}. {context}")
```

2. **Extend message handling in `bot.py`:**
```python
@bot.event
async def on_message(message):
    # Existing logic...

    # Handle image attachments
    if message.attachments:
        for attachment in message.attachments:
            if attachment.content_type and attachment.content_type.startswith('image/'):
                response = await generate_image_response(
                    attachment.url,
                    f"User {message.author.display_name} shared this image"
                )
                await message.reply(response)
```
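
Since the bot already talks to a local Ollama server, one way to fill in `analyze_image` is Ollama's multimodal generate endpoint with a vision model such as LLaVA. The model name and the `OLLAMA_URL` default below are assumptions; the blocking `requests` calls are shown for brevity and would be wrapped in `asyncio.to_thread` in practice:

```python
import base64
import os
import requests

OLLAMA_URL = os.getenv("OLLAMA_URL", "http://localhost:11434")  # assumed env var name

async def analyze_image(image_url, prompt="Describe this image"):
    """Fetch the attachment and ask a local vision model to describe it."""
    img_bytes = requests.get(image_url, timeout=30).content
    payload = {
        "model": "llava",                                   # any vision model pulled into Ollama
        "prompt": prompt,
        "images": [base64.b64encode(img_bytes).decode()],   # Ollama expects base64 strings
        "stream": False,
    }
    resp = requests.post(f"{OLLAMA_URL}/api/generate", json=payload, timeout=120)
    resp.raise_for_status()
    return resp.json().get("response", "").strip()
```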

**Acceptance Criteria:**
- [ ] Bot responds to image uploads
- [ ] Accurate image description capability
- [ ] Integration with existing personality system
- [ ] Support for memes and screenshots

---

### **Issue #22 — Remote Admin Panel (MEDIUM-LOW PRIORITY)**
**Problem:** No web interface for bot management.
**Solution:** Create a simple web dashboard.

**Implementation Steps:**
1. **Create `admin_panel.py`:**
```python
from flask import Flask, render_template, request, jsonify
import json

app = Flask(__name__)

@app.route("/")
def dashboard():
    return render_template("dashboard.html")

@app.route("/api/settings", methods=["GET", "POST"])
def settings_api():
    # Handle settings updates
    ...

@app.route("/api/users")
def users_api():
    # Return user profiles data
    ...
```

2. **Basic dashboard features:**
   - View active users and interaction stats
   - Modify bot settings (cooldowns, scheduling; see the settings sketch after this list)
   - Switch personalities/modelfiles
   - View recent conversations
   - Basic moderation controls
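
A minimal sketch of the settings route plus one way to run the panel next to the bot without blocking it; the `data/settings.json` path and the port are placeholders:

```python
import json
import threading
from flask import Flask, request, jsonify

SETTINGS_PATH = "data/settings.json"   # assumed location
app = Flask(__name__)

@app.route("/api/settings", methods=["GET", "POST"])
def settings_api():
    if request.method == "POST":
        with open(SETTINGS_PATH, "w", encoding="utf-8") as f:
            json.dump(request.get_json(force=True), f, indent=2)
    try:
        with open(SETTINGS_PATH, encoding="utf-8") as f:
            return jsonify(json.load(f))
    except FileNotFoundError:
        return jsonify({})

def start_panel():
    # Daemon thread so Flask's dev server doesn't block the Discord event loop
    threading.Thread(target=lambda: app.run(host="0.0.0.0", port=8080), daemon=True).start()
```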

**Acceptance Criteria:**
- [ ] Web interface accessible on the local network
- [ ] Real-time bot statistics
- [ ] Settings modification capability
- [ ] Authentication/security for admin access

---

## 🧪 **Phase 3: Advanced Features**
*Estimated Time: 4-5 weeks*

### **Issue #37 — LoRA Support (LOW PRIORITY)**
**Problem:** No fine-tuning capability for model behavior.
**Solution:** Research and implement LoRA model fine-tuning.

**Implementation Notes:**
- This is highly technical and may require external tools
- Consider whether it's necessary for core functionality
- Could be postponed to future releases

### **Issue #26 — Web Usage (MEDIUM PRIORITY)**
**Problem:** The bot cannot access web content.
**Solution:** Add web scraping and API integration.

**Implementation Steps:**
1. **Create `web_tools.py`:**
```python
import requests
from bs4 import BeautifulSoup

class WebTools:
    async def search_reddit(self, query, subreddit="memes"):
        """Search Reddit for content"""
        ...

    async def get_news_headlines(self):
        """Fetch trending news"""
        ...

    async def search_web(self, query):
        """DuckDuckGo search integration"""
        ...
```

2. **Add web-aware commands:**
   - `!news` - Get current headlines
   - `!meme` - Fetch a random meme from Reddit (example sketch after this list)
   - `!search <query>` - Web search with summarized results
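
As a concrete starting point for `!meme`, Reddit's public listing JSON can be fetched without OAuth as long as a custom User-Agent is sent; the endpoint shape and rate limits are worth re-checking before relying on it:

```python
import random
import requests

@bot.command(name="meme")
async def meme(ctx, subreddit: str = "memes"):
    """Post a random image from the subreddit's hot listing."""
    url = f"https://www.reddit.com/r/{subreddit}/hot.json?limit=50"
    resp = requests.get(url, headers={"User-Agent": "DeltaBot/0.1"}, timeout=15)
    resp.raise_for_status()
    posts = [
        p["data"] for p in resp.json()["data"]["children"]
        if not p["data"].get("over_18")
        and p["data"].get("url", "").endswith((".jpg", ".png", ".gif"))
    ]
    if not posts:
        await ctx.send("Couldn't find a meme right now, try again later.")
        return
    pick = random.choice(posts)
    await ctx.send(f"**{pick['title']}**\n{pick['url']}")
```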

### **Issue #24 — Monetization Setup (LOW PRIORITY)**
**Problem:** No monetization framework.
**Solution:** Add subscription/donation infrastructure.

**Implementation Steps:**
- Integration with payment processors
- Feature gating for premium users
- Usage analytics and billing
- **Note:** This should be implemented last, after the core features are stable

---

## 📊 **Implementation Priority Matrix**

| Issue | Priority | Complexity | User Impact | Timeline |
|-------|----------|------------|-------------|----------|
| #10 Reply Posts | 🔴 High | Low | High | Week 1 |
| #36 Memory | 🔴 High | Medium | High | Week 2-3 |
| #25 Modelfile | 🟡 Medium | Medium | Medium | Week 4 |
| #17 Image Gen | 🟡 Medium | High | High | Week 5-6 |
| #16 Image Vision | 🟡 Medium | High | Medium | Week 7-8 |
| #22 Admin Panel | 🟢 Low | Medium | Low | Week 9-10 |
| #26 Web Usage | 🟢 Low | Medium | Medium | Week 11-12 |
| #37 LoRA | 🟢 Low | Very High | Low | Future |
| #24 Monetization | 🟢 Low | High | Low | Future |

---

## 🛠 **Technical Recommendations**

### **Code Quality Improvements:**
1. **Add type hints throughout the codebase**
2. **Implement proper error handling and logging**
3. **Create unit tests for core functions** (starter tests sketched below)
4. **Add configuration validation**
5. **Implement proper database migrations**
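
A couple of starter tests for the pieces this plan introduces. Import paths assume the modules land under `src/` and are on `PYTHONPATH`, and the expected behaviour mirrors the sketches above:

```python
# tests/test_core.py
from memory import MemoryManager       # note: instantiating creates memory.json as a side effect
from modelfile import validate_modfile

def test_personal_details_score_higher_than_small_talk():
    mm = MemoryManager()
    assert mm._calculate_importance("my favorite anime is jjk") > mm._calculate_importance("lol ok")

def test_validate_modfile_requires_name_and_base_model():
    assert validate_modfile({"name": "delta", "base_model": "gemma3:12b"})
    assert not validate_modfile({"name": "delta"})
```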

### **Infrastructure:**
1. **Set up proper logging and monitoring**
2. **Add health check endpoints**
3. **Implement graceful shutdown handling**
4. **Add backup/restore functionality**

### **Security:**
1. **Sanitize user inputs**
2. **Add rate limiting**
3. **Implement proper secret management**
4. **Add CORS and authentication for the admin panel**

---

## 📈 **Success Metrics**

### **Phase 1 Success Criteria:**
- [ ] Bot reliably replies to messages (not new posts)
- [ ] Persistent conversation memory working
- [ ] Modelfile switching functional
- [ ] Zero critical bugs in core functionality

### **Phase 2 Success Criteria:**
- [ ] Image generation and analysis working
- [ ] Admin panel accessible and functional
- [ ] User engagement increased by 20%
- [ ] System stable with multiple concurrent users

### **Phase 3 Success Criteria:**
- [ ] Web integration providing value
- [ ] Advanced features enhance the user experience
- [ ] Bot ready for production deployment
- [ ] Documentation complete for self-hosting

---

## 🚦 **Next Actions**

### **Week 1 - Immediate Steps:**
1. **Fix Issue #10** - Implement reply functionality
2. **Start Issue #36** - Set up the memory database schema
3. **Test the current modelfile system** - Identify the specific issues behind #25
4. **Set up the development environment** with proper logging and debugging

### **Week 2 - Foundation Building:**
1. **Complete the memory system implementation**
2. **Fix modelfile personality switching**
3. **Add comprehensive error handling**
4. **Create a basic test suite**

### **Beyond Week 2:**
- Follow the priority matrix above
- Regular testing and user feedback integration
- Incremental feature rollouts
- Performance optimization as needed

---

**📝 Note:** This plan assumes 10-15 hours of development time per week. Adjust timelines based on actual availability and the complexity discovered during implementation.

---

*Last updated: October 8, 2025*
src/autochat.py

@@ -168,10 +168,13 @@ async def generate_auto_reply(message, bot):
     logger.info(f"📚 Retrieved {len(raw_msgs)} messages for context")

     async with message.channel.typing():
-        reply = get_ai_response(
+        # Use memory-enhanced response for auto-chat
+        from enhanced_ai import get_ai_response_with_memory
+        reply = get_ai_response_with_memory(
             user_prompt=message.content,
-            context=formatted_context,
-            user_profile=profile
+            context=raw_msgs[-CONTEXT_LIMIT:],  # Pass raw messages
+            user_profile=profile,
+            message=message  # Important: pass message for memory storage
         )
     update_reply_timer()
     return reply.strip() if reply else None
44 src/bot.py
@@ -37,6 +37,7 @@ from ai import (
     get_ai_response,
     TAGS_ENDPOINT
 )
+from enhanced_ai import get_ai_response_with_memory, analyze_user_message_for_memory
 from ai import load_modelfile, unload_modelfile, get_modelfile_info
 from time_logger import log_message_activity
 from autochat import should_auto_reply, generate_auto_reply, update_reply_timer, maybe_react_to_message
@@ -277,7 +278,13 @@ async def on_message(message):
             logger.info(f"📚 Retrieved {len(context_msgs)} messages for context")

             async with message.channel.typing():
-                reply = get_ai_response(prompt, context=formatted_context, user_profile=profile)
+                # Use memory-enhanced AI response
+                reply = get_ai_response_with_memory(
+                    prompt,
+                    context=context_msgs,  # Pass raw messages for better context
+                    user_profile=profile,
+                    message=message
+                )
                 await message.channel.send(reply)

     await bot.process_commands(message)
@@ -504,6 +511,41 @@ async def list_models(ctx):
         await ctx.send(f"❌ Failed to fetch models: {e}")


+@bot.command(name="memory")
+@commands.is_owner()
+async def memory_cmd(ctx, action: str = "info", *, target: str = None):
+    """Memory management: !memory info [@user], !memory cleanup, !memory summary"""
+    from enhanced_ai import get_user_memory_summary
+    from memory import memory_manager
+
+    if action == "info":
+        user_id = str(ctx.author.id)
+        if ctx.message.mentions:
+            user_id = str(ctx.message.mentions[0].id)
+
+        summary = get_user_memory_summary(user_id)
+        await ctx.send(f"```\n{summary}\n```")
+
+    elif action == "cleanup":
+        memory_manager.cleanup_old_memories(days=30)
+        await ctx.send("🧹 Cleaned up old memories (30+ days)")
+
+    elif action == "summary":
+        channel_id = str(ctx.channel.id)
+        memories = memory_manager.get_conversation_context(channel_id, hours=48)
+        if memories:
+            summary_lines = [f"Recent channel memories ({len(memories)} total):"]
+            for i, memory in enumerate(memories[:5]):
+                timestamp = memory['timestamp'][:16].replace('T', ' ')
+                content = memory['content'][:100]
+                summary_lines.append(f"{i+1}. {timestamp}: {content}")
+            await ctx.send(f"```\n" + "\n".join(summary_lines) + "\n```")
+        else:
+            await ctx.send("No recent memories for this channel.")
+
+    else:
+        await ctx.send("Usage: `!memory info [@user]`, `!memory cleanup`, `!memory summary`")
+
 @bot.command(name="dryrun")
 @commands.is_owner()
 async def dryrun(ctx, *, prompt: str):
62 src/enhanced_ai.py (new file)

@@ -0,0 +1,62 @@
# enhanced_ai.py
# Enhanced AI response function with memory integration
# This extends your existing ai.py without breaking it

from ai import get_ai_response as base_get_ai_response, get_model_name, load_model
from memory import memory_manager
from personality import load_persona
from logger import setup_logger, generate_req_id, log_llm_request, log_llm_response
import requests
import time
import os

logger = setup_logger("enhanced_ai")

def get_ai_response_with_memory(user_prompt, context=None, user_profile=None, message=None):
    """Enhanced AI response that includes memory context"""

    # Get memory context if message is provided
    memory_context = ""
    if message and user_profile:
        user_id = str(message.author.id)
        channel_id = str(message.channel.id)

        # Store this interaction in memory
        context_messages = context if isinstance(context, list) else []
        memory_manager.analyze_and_store_message(message, context_messages)

        # Get formatted memory for prompt
        memory_context = memory_manager.format_memory_for_prompt(user_id, channel_id)

    # Combine memory context with existing context
    enhanced_context = ""
    if memory_context:
        enhanced_context += f"{memory_context}\n\n"
    if context:
        if isinstance(context, str):
            enhanced_context += f"[Recent Messages]\n{context}"
        else:
            # Assume it's a list of messages, format them
            from context import format_context
            enhanced_context += f"[Recent Messages]\n{format_context(context)}"

    # Use the original function with enhanced context
    return base_get_ai_response(user_prompt, enhanced_context, user_profile)

def analyze_user_message_for_memory(message, context_messages=None):
    """Analyze and store a message in memory without generating response"""
    memory_manager.analyze_and_store_message(message, context_messages)

def get_user_memory_summary(user_id: str) -> str:
    """Get a summary of what we remember about a user"""
    user_memories = memory_manager.get_user_context(user_id)
    if not user_memories:
        return "No specific memories about this user yet."

    summary_lines = []
    for memory in user_memories:
        memory_type = memory['type'].title()
        content = memory['content'][:100]
        summary_lines.append(f"- {memory_type}: {content}")

    return "What I remember about this user:\n" + "\n".join(summary_lines)
261 src/memory.py (new file)

@@ -0,0 +1,261 @@
# memory.py
# Enhanced memory system building on existing user_profiles.py

import os
import json
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional
from user_profiles import load_profiles, save_profiles, PROFILE_PATH
from logger import setup_logger

logger = setup_logger("memory")

MEMORY_PATH = os.path.join(os.path.dirname(__file__), "memory.json")

class MemoryManager:
    """Lightweight memory system for Delta to remember conversations and user details"""

    def __init__(self):
        self.ensure_memory_file()

    def ensure_memory_file(self):
        """Ensure memory file exists"""
        if not os.path.exists(MEMORY_PATH):
            initial_data = {
                "conversations": {},  # channel_id -> list of memories
                "user_memories": {},  # user_id -> personal memories
                "global_events": []   # server-wide memorable events
            }
            with open(MEMORY_PATH, "w", encoding="utf-8") as f:
                json.dump(initial_data, f, indent=2)

    def load_memory(self) -> Dict[str, Any]:
        """Load memory data"""
        try:
            with open(MEMORY_PATH, "r", encoding="utf-8") as f:
                return json.load(f)
        except Exception as e:
            logger.error(f"Failed to load memory: {e}")
            return {"conversations": {}, "user_memories": {}, "global_events": []}

    def save_memory(self, data: Dict[str, Any]):
        """Save memory data"""
        try:
            with open(MEMORY_PATH, "w", encoding="utf-8") as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save memory: {e}")

    def store_conversation_memory(self, channel_id: str, user_id: str,
                                  content: str, context: str, importance_score: float = 1.0):
        """Store important conversation moments"""
        memory_data = self.load_memory()

        memory_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "user_id": user_id,
            "content": content,
            "context": context[:500],  # Limit context size
            "importance": importance_score,
            "id": f"{channel_id}_{int(datetime.utcnow().timestamp())}"
        }

        channel_key = str(channel_id)
        if channel_key not in memory_data["conversations"]:
            memory_data["conversations"][channel_key] = []

        memory_data["conversations"][channel_key].append(memory_entry)

        # Keep only last 100 memories per channel to prevent bloat
        memory_data["conversations"][channel_key] = memory_data["conversations"][channel_key][-100:]

        self.save_memory(memory_data)
        logger.debug(f"Stored conversation memory for channel {channel_id}")

    def store_user_memory(self, user_id: str, memory_type: str, content: str, importance: float = 1.0):
        """Store personal user memories (interests, preferences, personal details)"""
        memory_data = self.load_memory()

        user_key = str(user_id)
        if user_key not in memory_data["user_memories"]:
            memory_data["user_memories"][user_key] = []

        memory_entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "type": memory_type,  # "interest", "preference", "personal", "relationship"
            "content": content,
            "importance": importance,
            "id": f"{user_id}_{memory_type}_{int(datetime.utcnow().timestamp())}"
        }

        memory_data["user_memories"][user_key].append(memory_entry)

        # Keep only last 50 user memories to prevent bloat
        memory_data["user_memories"][user_key] = memory_data["user_memories"][user_key][-50:]

        self.save_memory(memory_data)
        logger.debug(f"Stored user memory for {user_id}: {memory_type}")

    def get_conversation_context(self, channel_id: str, hours: int = 24) -> List[Dict]:
        """Get recent conversation memories for context"""
        memory_data = self.load_memory()
        channel_key = str(channel_id)

        if channel_key not in memory_data["conversations"]:
            return []

        cutoff_time = datetime.utcnow() - timedelta(hours=hours)
        recent_memories = []

        for memory in memory_data["conversations"][channel_key]:
            memory_time = datetime.fromisoformat(memory["timestamp"])
            if memory_time > cutoff_time:
                recent_memories.append(memory)

        # Sort by importance and recency
        recent_memories.sort(key=lambda x: (x["importance"], x["timestamp"]), reverse=True)
        return recent_memories[:10]  # Return top 10 most important recent memories

    def get_user_context(self, user_id: str) -> List[Dict]:
        """Get user-specific memories for personalization"""
        memory_data = self.load_memory()
        user_key = str(user_id)

        if user_key not in memory_data["user_memories"]:
            return []

        # Sort by importance and recency
        user_memories = memory_data["user_memories"][user_key]
        user_memories.sort(key=lambda x: (x["importance"], x["timestamp"]), reverse=True)
        return user_memories[:5]  # Return top 5 most important user memories

    def analyze_and_store_message(self, message, context_messages: List = None):
        """Analyze a message and determine if it should be stored as memory"""
        content = message.content.lower()
        user_id = str(message.author.id)
        channel_id = str(message.channel.id)

        # Determine importance based on content analysis
        importance_score = self._calculate_importance(content)

        if importance_score > 0.3:  # Only store moderately important+ messages
            context_str = ""
            if context_messages:
                context_str = " | ".join([f"{msg.author.display_name}: {msg.content[:100]}"
                                          for msg in context_messages[-3:]])  # Last 3 messages for context

            self.store_conversation_memory(
                channel_id, user_id, message.content, context_str, importance_score
            )

        # Extract personal information for user memory
        self._extract_user_details(message)

    def _calculate_importance(self, content: str) -> float:
        """Calculate importance score for a message (0.0 to 1.0)"""
        importance = 0.0

        # Personal information indicators
        personal_keywords = ['i am', 'my name', 'i love', 'i hate', 'my favorite',
                             'i work', 'i study', 'my job', 'birthday', 'anniversary']
        for keyword in personal_keywords:
            if keyword in content:
                importance += 0.4

        # Emotional indicators
        emotional_keywords = ['love', 'hate', 'excited', 'sad', 'angry', 'happy',
                              'frustrated', 'amazing', 'terrible', 'awesome']
|
for keyword in emotional_keywords:
|
||||||
|
if keyword in content:
|
||||||
|
importance += 0.2
|
||||||
|
|
||||||
|
# Question indicators (important for context)
|
||||||
|
if '?' in content:
|
||||||
|
importance += 0.1
|
||||||
|
|
||||||
|
# Length bonus (longer messages often more important)
|
||||||
|
if len(content) > 100:
|
||||||
|
importance += 0.1
|
||||||
|
|
||||||
|
# Direct mentions of Delta or bot commands
|
||||||
|
if 'delta' in content or content.startswith('!'):
|
||||||
|
importance += 0.3
|
||||||
|
|
||||||
|
return min(importance, 1.0) # Cap at 1.0
|
||||||
|
|
||||||
|
def _extract_user_details(self, message):
|
||||||
|
"""Extract and store personal details from user messages"""
|
||||||
|
content = message.content.lower()
|
||||||
|
user_id = str(message.author.id)
|
||||||
|
|
||||||
|
# Simple pattern matching for common personal info
|
||||||
|
patterns = {
|
||||||
|
'interest': ['i love', 'i like', 'i enjoy', 'my favorite'],
|
||||||
|
'personal': ['i am', 'my name is', 'i work at', 'my job'],
|
||||||
|
'preference': ['i prefer', 'i usually', 'i always', 'i never']
|
||||||
|
}
|
||||||
|
|
||||||
|
for memory_type, keywords in patterns.items():
|
||||||
|
for keyword in keywords:
|
||||||
|
if keyword in content:
|
||||||
|
# Extract the relevant part of the message
|
||||||
|
start_idx = content.find(keyword)
|
||||||
|
relevant_part = content[start_idx:start_idx+200] # Next 200 chars
|
||||||
|
|
||||||
|
self.store_user_memory(user_id, memory_type, relevant_part, 0.5)
|
||||||
|
break # Only store one per message to avoid spam
|
||||||
|
|
||||||
|
def format_memory_for_prompt(self, user_id: str, channel_id: str) -> str:
|
||||||
|
"""Format memory for inclusion in AI prompts"""
|
||||||
|
lines = []
|
||||||
|
|
||||||
|
# Add conversation context
|
||||||
|
conv_memories = self.get_conversation_context(channel_id, hours=48)
|
||||||
|
if conv_memories:
|
||||||
|
lines.append("[Recent Conversation Context]")
|
||||||
|
for memory in conv_memories[:3]: # Top 3 most important
|
||||||
|
timestamp = datetime.fromisoformat(memory["timestamp"]).strftime("%m/%d %H:%M")
|
||||||
|
lines.append(f"- {timestamp}: {memory['content'][:150]}")
|
||||||
|
|
||||||
|
# Add user context
|
||||||
|
user_memories = self.get_user_context(user_id)
|
||||||
|
if user_memories:
|
||||||
|
lines.append("[User Context]")
|
||||||
|
for memory in user_memories[:3]: # Top 3 most important
|
||||||
|
lines.append(f"- {memory['type'].title()}: {memory['content'][:100]}")
|
||||||
|
|
||||||
|
return "\n".join(lines) if lines else ""
|
||||||
|
|
||||||
|
def cleanup_old_memories(self, days: int = 30):
|
||||||
|
"""Clean up memories older than specified days"""
|
||||||
|
memory_data = self.load_memory()
|
||||||
|
cutoff_time = datetime.utcnow() - timedelta(days=days)
|
||||||
|
cleaned = False
|
||||||
|
|
||||||
|
# Clean conversation memories
|
||||||
|
for channel_id in memory_data["conversations"]:
|
||||||
|
original_count = len(memory_data["conversations"][channel_id])
|
||||||
|
memory_data["conversations"][channel_id] = [
|
||||||
|
memory for memory in memory_data["conversations"][channel_id]
|
||||||
|
if datetime.fromisoformat(memory["timestamp"]) > cutoff_time
|
||||||
|
]
|
||||||
|
if len(memory_data["conversations"][channel_id]) < original_count:
|
||||||
|
cleaned = True
|
||||||
|
|
||||||
|
# Clean user memories (keep important ones longer)
|
||||||
|
for user_id in memory_data["user_memories"]:
|
||||||
|
original_count = len(memory_data["user_memories"][user_id])
|
||||||
|
memory_data["user_memories"][user_id] = [
|
||||||
|
memory for memory in memory_data["user_memories"][user_id]
|
||||||
|
if (datetime.fromisoformat(memory["timestamp"]) > cutoff_time or
|
||||||
|
memory["importance"] > 0.7) # Keep very important memories longer
|
||||||
|
]
|
||||||
|
if len(memory_data["user_memories"][user_id]) < original_count:
|
||||||
|
cleaned = True
|
||||||
|
|
||||||
|
if cleaned:
|
||||||
|
self.save_memory(memory_data)
|
||||||
|
logger.info(f"Cleaned up memories older than {days} days")
|
||||||
|
|
||||||
|
# Global memory manager instance
|
||||||
|
memory_manager = MemoryManager()
|
||||||
|
|
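Not part of the commit: a minimal sketch of how a message handler could consume MemoryManager, assuming the new module is importable as `memory` and that earlier lines of the file provide the os/json/datetime/typing imports it relies on. The function name and prompt layout below are placeholders, not the bot's actual API.

# Hypothetical glue code (not in this diff): feed the memory system and prepend
# recalled context to the LLM prompt.
from memory import memory_manager  # assumes the module above is saved as memory.py

def build_prompt_with_memory(message, context_messages=None):
    # Score the message and persist it if it clears the 0.3 importance threshold
    memory_manager.analyze_and_store_message(message, context_messages)

    # Pull formatted channel + user memories for the prompt preamble
    memory_block = memory_manager.format_memory_for_prompt(
        user_id=str(message.author.id),
        channel_id=str(message.channel.id),
    )
    return (memory_block + "\n\n" + message.content) if memory_block else message.content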
@@ -12,8 +12,15 @@ autochat:
   engagement_decay_per_minute: 0.15 # how fast Delta loses interest over time
 
 context:
-  enabled: false # not working must implement
-  max_messages: 0 # max messages to keep in context
+  enabled: true # now working with memory system
+  max_messages: 15 # max messages to keep in context
 
+memory:
+  enabled: true
+  importance_threshold: 0.3 # minimum importance to store (0.0-1.0)
+  max_conversation_memories: 100 # per channel
+  max_user_memories: 50 # per user
+  cleanup_days: 30 # auto-cleanup after X days
+
 user_profiles:
   enable_custom_prompt: true # ← Set false to ignore user `custom_prompt` values in replies
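Also not part of the commit: a hedged sketch of how the new memory: settings might be read and handed to the manager. The config filename (config.yaml), the loader, and the fallbacks below are assumptions; the diff does not show the bot's actual config loader or whether MemoryManager reads these keys rather than its hard-coded defaults.

import yaml  # PyYAML

def load_memory_settings(path="config.yaml"):
    # Read the memory: block added above, falling back to the values
    # MemoryManager currently hard-codes (0.3 / 100 / 50 / 30 days).
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f) or {}
    mem = cfg.get("memory", {})
    return {
        "enabled": mem.get("enabled", True),
        "importance_threshold": mem.get("importance_threshold", 0.3),
        "max_conversation_memories": mem.get("max_conversation_memories", 100),
        "max_user_memories": mem.get("max_user_memories", 50),
        "cleanup_days": mem.get("cleanup_days", 30),
    }

# Example: run retention with the configured window instead of the default 30 days
# memory_manager.cleanup_old_memories(days=load_memory_settings()["cleanup_days"])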
@@ -9,5 +9,16 @@
     "pronouns": "he/him",
     "avatar_url": "https://cdn.discordapp.com/avatars/161149541171593216/fb0553a29d9f73175cb6aea24d0e19ec.png?size=1024",
     "custom_prompt": "delta is very nice to me since I am her master, and creator"
+  },
+  "1370422629340811405": {
+    "name": "PLEX",
+    "display_name": "PLEX",
+    "first_seen": "2025-09-21T04:14:15.752764",
+    "last_seen": "2025-09-27T14:54:42.041092",
+    "last_message": "2025-09-27T14:54:42.041092",
+    "interactions": 19,
+    "pronouns": null,
+    "avatar_url": "https://cdn.discordapp.com/embed/avatars/0.png",
+    "custom_prompt": null
   }
 }