diff --git a/commands/misc/chat.js b/commands/misc/chat.js
deleted file mode 100644
index 13484bb..0000000
--- a/commands/misc/chat.js
+++ /dev/null
@@ -1,57 +0,0 @@
-require("dotenv").config();
-
-module.exports = {
-    name: 'chat',
-    description: 'A chat command that uses an LLM to answer your prompts (server must be whitelisted)',
-    needsWhitelist: true,
-    async execute({ message, args }) {
-
-        if(args.length === 0){
-            message.channel.send("You have to set your prompt in the arguments");
-            return;
-        }
-        const prompt = args.join(" ");
-        let answer = "";
-        const initialMessage = await message.channel.send("Generating response... This may take a moment.")
-        message.channel.sendTyping();
-        // TODO: More configuration. Have a basic setup but allow setting system prompt, max tokens and model.
-        await fetch(`https://openrouter.ai/api/v1/chat/completions`, {
-            method: `POST`,
-            headers: {
-                "Authorization": `Bearer ${process.env.OPENROUTER_API_KEY}`,
-                "Content-Type": `application/json`
-            },
-            body: JSON.stringify({
-                "model": `deepseek/deepseek-chat-v3-0324:free`,
-                "messages": [
-                    {
-                        "role": 'system',
-                        "content": "You are a person in a small discord server. Always keep responses to the point like you're a person in the conversation. "
-                    },
-                    {
-                        "role": `user`,
-                        "content": prompt
-                    }
-                ],
-                "max_tokens": 250
-            })
-        }).then(response => response.json()).then(data => {
-            if(!data.error)
-                answer = data.choices[0].message.content;
-            else{
-                console.error(data.error.code);
-                answer = `Something went wrong. Got error code ${data.error.code}.`
-            }
-        }).catch(error => {
-            console.error(error);
-        });
-        if(answer.length > 0){
-            if(answer.length > 1999){
-                initialMessage.edit("Unfortunately the length of the message was longer than what discord allows.")
-            }
-            initialMessage.edit(answer)
-        }else {
-            initialMessage.edit("Something went wrong. The owner should check the console.")
-        }
-    }
-};
\ No newline at end of file
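
Note: the deleted command carried a latent bug in its final length check. When the answer exceeded 1,999 characters it edited the placeholder with an error notice, then fell through and called initialMessage.edit(answer) anyway, which Discord rejects because messages are capped at 2,000 characters. A minimal sketch of a corrected ending for execute(), reusing the initialMessage and answer variables from the deleted file; truncating with an ellipsis is an assumed recovery strategy, not something the original code did:

    // Hypothetical rework of the trailing if/else from the deleted execute() body.
    // Discord caps messages at 2,000 characters, so truncate before editing
    // instead of attempting to send the over-long answer.
    if (answer.length === 0) {
        await initialMessage.edit("Something went wrong. The owner should check the console.");
    } else if (answer.length > 1999) {
        // Leave room for the ellipsis so the edited message stays under the cap.
        await initialMessage.edit(answer.slice(0, 1996) + "...");
    } else {
        await initialMessage.edit(answer);
    }

Splitting the answer across several follow-up messages would also work; truncation is simply the smallest change that avoids the failed edit.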