diff --git a/.env b/.env
index 93137f0..ceff269 100644
--- a/.env
+++ b/.env
@@ -1,2 +1,2 @@
 DISCORD_TOKEN=MTM2OTc3NDY4OTYzNDg4MTU4Ng.G9Nrgz.akHoOO9SrXCDwiOCI3BUXfdR4bpSNb9zrVx9UI
-OLLAMA_API_URL=https://your-ollama-api-url.com/v1/models/your-model-name/generate
+OLLAMA_API_URL=http://192.168.1.100:11434/api/generate
diff --git a/ai.py b/ai.py
new file mode 100644
index 0000000..dc9c632
--- /dev/null
+++ b/ai.py
@@ -0,0 +1,28 @@
+# ai.py — thin client for a local Ollama /api/generate endpoint.
+
+import requests
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+AI_URL = os.getenv("OLLAMA_API_URL")
+
+
+def get_ai_response(prompt):
+    """Send *prompt* to the Ollama generate API and return the reply text.
+
+    Always returns a string: the model's response on success, or a
+    bracketed error description on any failure (never raises).
+    """
+    payload = {
+        "model": "mistral:7b",  # Adjust to match your model
+        "prompt": prompt,
+        "stream": False,
+    }
+
+    try:
+        # timeout keeps the bot from hanging forever if Ollama is unreachable
+        response = requests.post(AI_URL, json=payload, timeout=60)
+        if response.status_code == 200:
+            result = response.json()
+            return result.get("response", "[No response]")
+        else:
+            return f"[Error {response.status_code}] {response.text}"
+    except Exception as e:
+        # best-effort boundary: convert any failure into a user-visible string
+        return f"[Request failed] {str(e)}"
diff --git a/bot.py b/bot.py
index f120523..dd12767 100644
--- a/bot.py
+++ b/bot.py
@@ -2,6 +2,8 @@ import os
 import discord
 from discord.ext import commands
 from dotenv import load_dotenv
+from ai import get_ai_response
+
 
 load_dotenv()
 TOKEN = os.getenv("DISCORD_TOKEN")
@@ -19,4 +21,12 @@ async def on_ready():
 async def ping(ctx):
     await ctx.send("🏓 Pong!")
 
+@bot.command()
+async def chat(ctx, *, message):
+    await ctx.send("🤖 Thinking...")
+    reply = get_ai_response(message)
+    # Discord rejects messages longer than 2000 characters
+    await ctx.send(reply[:2000])
+
+
 bot.run(TOKEN)