Bot can now reply when the "!chat" command is used with a prompt. The .env file points to an Ollama instance, and ai.py specifies the model being used.
parent 63c8e70b1c
commit 166b286d42

4 changed files with 35 additions and 1 deletion
.env (2 changed lines)
@@ -1,2 +1,2 @@
 DISCORD_TOKEN=MTM2OTc3NDY4OTYzNDg4MTU4Ng.G9Nrgz.akHoOO9SrXCDwiOCI3BUXfdR4bpSNb9zrVx9UI
-OLLAMA_API_URL=https://your-ollama-api-url.com/v1/models/your-model-name/generate
+OLLAMA_API_URL=http://192.168.1.100:11434/api/generate
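For reference, the new URL targets Ollama's /api/generate endpoint on a machine in the local network. Below is a minimal sketch of the exchange that ai.py (further down) relies on, assuming Ollama is running at that address with the mistral:7b model pulled; the prompt text and timeout are illustrative only.

# Minimal sketch: call the Ollama generate endpoint directly.
# Assumes the URL from .env is reachable and mistral:7b is available.
import requests

payload = {"model": "mistral:7b", "prompt": "Say hi in one word.", "stream": False}
r = requests.post("http://192.168.1.100:11434/api/generate", json=payload, timeout=60)
r.raise_for_status()
# With "stream": False the reply is a single JSON object; the generated
# text sits in its "response" field, which is exactly what ai.py reads.
print(r.json().get("response"))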
BIN __pycache__/ai.cpython-310.pyc (new file)
Binary file not shown.
ai.py (25 additions, new file)
@@ -0,0 +1,25 @@
+# ai.py
+
+import requests
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+AI_URL = os.getenv("OLLAMA_API_URL")
+
+def get_ai_response(prompt):
+    payload = {
+        "model": "mistral:7b",  # Adjust to match your model
+        "prompt": prompt,
+        "stream": False
+    }
+
+    try:
+        response = requests.post(AI_URL, json=payload)
+        if response.status_code == 200:
+            result = response.json()
+            return result.get("response", "[No response]")
+        else:
+            return f"[Error {response.status_code}] {response.text}"
+    except Exception as e:
+        return f"[Request failed] {str(e)}"
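To check ai.py on its own before wiring it into the bot, a hypothetical smoke test (not part of this commit) can be run from the project root with the same .env in place:

# Hypothetical smoke test for ai.py (not in this commit): confirms that
# OLLAMA_API_URL and the hard-coded model name actually produce a reply.
from ai import get_ai_response

if __name__ == "__main__":
    print(get_ai_response("Reply with the single word: pong"))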
bot.py (9 additions)
@@ -2,6 +2,8 @@ import os
 import discord
 from discord.ext import commands
 from dotenv import load_dotenv
+from ai import get_ai_response
+
 
 load_dotenv()
 TOKEN = os.getenv("DISCORD_TOKEN")
@@ -19,4 +21,11 @@ async def on_ready():
 async def ping(ctx):
     await ctx.send("🏓 Pong!")
 
+@bot.command()
+async def chat(ctx, *, message):
+    await ctx.send("🤖 Thinking...")
+    reply = get_ai_response(message)
+    await ctx.send(reply)
+
+
 bot.run(TOKEN)
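With the bot running (python bot.py), typing !chat followed by a prompt in a channel the bot can see should yield "🤖 Thinking..." and then the model's reply. One caveat: get_ai_response uses a blocking requests.post inside an async command, so the bot cannot handle other events while Ollama is generating. A sketch of one workaround (a hypothetical variant of the command, not part of this commit) is to push the blocking call onto a worker thread:

# Hypothetical variant of the !chat command (not in this commit): run the
# blocking HTTP call in a thread so the Discord event loop stays responsive.
import asyncio

@bot.command()
async def chat(ctx, *, message):
    await ctx.send("🤖 Thinking...")
    reply = await asyncio.to_thread(get_ai_response, message)
    await ctx.send(reply)

Replies longer than Discord's 2,000-character message limit would also need to be split before sending.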