The bot can now reply when prompted via the "!chat" command. The .env file points to an Ollama instance, and ai.py sets the model being used.

milo 2025-05-07 17:20:34 -04:00
parent 63c8e70b1c
commit 166b286d42
4 changed files with 35 additions and 1 deletion
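In practice, the resulting interaction looks something like this (an illustrative exchange, not a captured session; the "!" prefix is whatever command_prefix bot.py configures):

    user: !chat What is the capital of France?
    bot:  🤖 Thinking...
    bot:  The capital of France is Paris.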

.env (1 addition, 1 deletion)

@@ -1,2 +1,2 @@
 DISCORD_TOKEN=MTM2OTc3NDY4OTYzNDg4MTU4Ng.G9Nrgz.akHoOO9SrXCDwiOCI3BUXfdR4bpSNb9zrVx9UI
-OLLAMA_API_URL=https://your-ollama-api-url.com/v1/models/your-model-name/generate
+OLLAMA_API_URL=http://192.168.1.100:11434/api/generate
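A quick way to confirm the new endpoint actually answers before running the bot is a standalone request against it. A minimal sketch, assuming the URL above and a mistral:7b model already pulled on that instance (the script name and prompt are illustrative):

    # check_ollama.py — sketch: verify the endpoint in .env responds
    import os
    import requests
    from dotenv import load_dotenv

    load_dotenv()
    resp = requests.post(
        os.getenv("OLLAMA_API_URL"),
        json={"model": "mistral:7b", "prompt": "Say hello.", "stream": False},
        timeout=30,
    )
    print(resp.status_code, resp.json().get("response"))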

Binary file not shown.

ai.py (new file, 25 additions)

@@ -0,0 +1,25 @@
+# ai.py
+import requests
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+AI_URL = os.getenv("OLLAMA_API_URL")
+
+def get_ai_response(prompt):
+    payload = {
+        "model": "mistral:7b",  # Adjust to match your model
+        "prompt": prompt,
+        "stream": False
+    }
+
+    try:
+        response = requests.post(AI_URL, json=payload)
+        if response.status_code == 200:
+            result = response.json()
+            return result.get("response", "[No response]")
+        else:
+            return f"[Error {response.status_code}] {response.text}"
+    except Exception as e:
+        return f"[Request failed] {str(e)}"

bot.py (9 additions)

@@ -2,6 +2,8 @@ import os
 import discord
 from discord.ext import commands
 from dotenv import load_dotenv
+from ai import get_ai_response
+
 
 load_dotenv()
 TOKEN = os.getenv("DISCORD_TOKEN")

@@ -19,4 +21,11 @@ async def on_ready():
 async def ping(ctx):
     await ctx.send("🏓 Pong!")
 
+
+@bot.command()
+async def chat(ctx, *, message):
+    await ctx.send("🤖 Thinking...")
+    reply = get_ai_response(message)
+    await ctx.send(reply)
+
 bot.run(TOKEN)
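Worth noting for the chat command: Discord rejects messages longer than 2000 characters, and model output can easily exceed that, which would make ctx.send(reply) raise. A small sketch of a splitting helper (the helper name and chunk size are illustrative, not part of this commit):

    # sketch: split long model output before sending, since Discord caps
    # a single message at 2000 characters
    async def send_chunked(ctx, text, limit=2000):
        for i in range(0, len(text), limit):
            await ctx.send(text[i:i + limit])

    # in the chat command, "await ctx.send(reply)" would become:
    #     await send_chunked(ctx, reply)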