Merge pull request #1 from alexdeploy/events
Adding simple events and OpenAI create completion functions.
alexdeploy authored May 7, 2023
2 parents a1c4a29 + 67df863 commit 1a88e50
Showing 5 changed files with 214 additions and 0 deletions.
24 changes: 24 additions & 0 deletions client/src/events/interactionCreate.js
@@ -0,0 +1,24 @@
module.exports = {
name: 'interactionCreate',
once: false,

async execute(bot, interaction) {

// if interaction is a command.
if(interaction.isCommand()){

// Look up the command's handler in the client's command collection (see the setup sketch after this file).
const command = interaction.client.commands.get(interaction.commandName);

// If the command doesn't exist, return nothing.
if(!command) return;

try {
await command.execute(interaction, bot);
} catch (err) {
console.error(err);
await interaction.reply({ content: 'There was an error while executing this command!', ephemeral: true });
}
}
}
}
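
The handler above assumes a commands collection has already been attached to the client. A minimal setup sketch for how that might look (the loadCommands name, the commands directory, and the command.data.name property follow the usual discord.js pattern and are assumptions, not something shown in this commit):

const fs = require('fs');
const path = require('path');
const { Collection } = require('discord.js');

// Hypothetical setup: attach every command module to bot.commands so
// interaction.client.commands.get(interaction.commandName) can find it.
function loadCommands(bot) {
    bot.commands = new Collection();
    const commandsPath = path.join(__dirname, 'commands');

    for (const file of fs.readdirSync(commandsPath).filter((f) => f.endsWith('.js'))) {
        const command = require(path.join(commandsPath, file));
        bot.commands.set(command.data.name, command);
    }
}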
51 changes: 51 additions & 0 deletions client/src/events/messageCreate.js
@@ -0,0 +1,51 @@
const { chat } = require('../utils/openai');

module.exports = {
name: 'messageCreate',
once: false,

async execute(bot, interaction) {

/**
* Get only the first mention in the message.
* TODO: Handle messages that contain more than one mention.
*/
const mention = interaction.mentions.users.first();

// If the message is not a mention, return nothing.
if(!mention) return;

// If the mention is the bot, generate a response.
if(mention.id == bot.user.id){

// Get the message content
const content = interaction.content;

// PROVISIONAL
// Get the user who sent the message (not used yet).
const user = interaction.author;

// Get the prompt without the mention.
const prompt = content.replace(`<@${bot.user.id}>`, "");

// Reply with a placeholder while the completion is generated, and fetch the reply so it can be edited later.
const interactionReply = await interaction.reply({ content: 'Thinking...', fetchReply: true });

/** HERE IS WHERE THE MAGIC HAPPENS
* TODO: Security check to prevent the bot from responding to itself.
* TODO: Security check to prevent the bot from responding to other bots.
* TODO: Security check to prevent the bot from responding to users with a certain role, name, etc. (blacklist). See the sketch after this file.
* ? Maybe these checks should be done on the client or the server side?
* ? Or maybe in the request to the API?
* @see OpenAI Safety Best Practices: https://platform.openai.com/docs/guides/safety-best-practices
*/

// Get the response from the gpt-3.5-turbo chat completion.
const response = await chat(prompt);

// chat() resolves to the assistant's message object, so edit the placeholder with its text content.
await interactionReply.edit(response.content);
}
}
}
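
One way the guard checks flagged in the TODOs above could be shaped, as a minimal sketch (the shouldIgnore helper, the early bot check, and the empty blacklist are assumptions, not something this commit implements):

// Hypothetical helper for messageCreate.js: decide whether an incoming message should be ignored.
function shouldIgnore(message) {
    // Never respond to the bot itself or to any other bot.
    if (message.author.bot) return true;

    // Example blacklist by user id (assumed placeholder, empty here).
    const blacklistedUserIds = [];
    if (blacklistedUserIds.includes(message.author.id)) return true;

    return false;
}

Inside execute(), calling shouldIgnore(interaction) at the top and returning early would cover the first two TODOs.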
8 changes: 8 additions & 0 deletions client/src/events/ready.js
@@ -0,0 +1,8 @@
module.exports = {
name: 'ready',
once: true,

async execute(bot, interaction) {
console.log(`Ready! Logged in as ${bot.user.tag}`);
},
};
8 changes: 8 additions & 0 deletions client/src/models/Event.js
@@ -0,0 +1,8 @@
class Event {
constructor(event, execute) {
this.event = event;
this.execute = execute;
}
}

module.exports = Event;
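
The Event model above is not wired up anywhere in this diff. A minimal loader sketch for how event modules like the ones in this commit might be registered on the discord.js client (the loadEvents name, the events directory path, and the bot variable are assumptions):

const fs = require('fs');
const path = require('path');

// Hypothetical loader: bind every module in client/src/events to the client.
function loadEvents(bot) {
    const eventsPath = path.join(__dirname, 'events');
    const files = fs.readdirSync(eventsPath).filter((file) => file.endsWith('.js'));

    for (const file of files) {
        const event = require(path.join(eventsPath, file));
        // The (bot, ...args) order matches the execute(bot, interaction) signature used above.
        if (event.once) {
            bot.once(event.name, (...args) => event.execute(bot, ...args));
        } else {
            bot.on(event.name, (...args) => event.execute(bot, ...args));
        }
    }
}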
123 changes: 123 additions & 0 deletions client/src/utils/openai.js
@@ -0,0 +1,123 @@
/**
* OpenAI API
* @module openai
* @description OpenAI API Configuration and functions.
* @see Documentation https://beta.openai.com/docs/api-reference
*/

/**
* INFO ABOUT THE OPENAI API
* * Models: https://beta.openai.com/docs/api-reference/models
* * Endpoints: https://platform.openai.com/docs/models/model-endpoint-compatibility
*/

/* CONFIGURATION */
/********************************************************/

const { Configuration, OpenAIApi } = require("openai");

const configuration = new Configuration({
apiKey: process.env.OPENAI_API_KEY,
});

const openai = new OpenAIApi(configuration);

const model = {
ADA: "ada",
BABBAGE: "babbage",
CURIE: "curie",
DAVINCI: "text-davinci-003",
GPT3_TURBO: "gpt-3.5-turbo"
};

const role = {
USER: "user",
SYSTEM: "system",
ASSISTANT: "assistant"
}

const instruction = {
ASSISTANT: "You are a helpful assistant.",
BOBO: "Respond with a low cultural level and in informal language.",
RAYKIRE: "Respond rudely and with an attitude of superiority, and beg not to be asked again."
}

/********************************************************/

/**
* CREATE COMPLETION
*
* @param {*} prompt is a string sent by the user.
* @see Documentation https://beta.openai.com/docs/api-reference/completions/create
* @returns Response object with the completion.
*/
const createCompletion = async (prompt) => {

try {

const completion = await openai.createCompletion({
// gpt-3.5-turbo is a chat-only model, so use text-davinci-003 with the completions endpoint.
model: model.DAVINCI,
prompt: prompt,
});

const response = completion.data.choices[0].text;

return response;

} catch(error) {

// Handle errors (error.response may be missing for network-level failures).
const errorCode = error.response?.status;

// If error is 429 Too Many Requests, retry after the time given in the Retry-After header (see the retry sketch after this file).
if (errorCode === 429) {
// Code
}

// If error is 401 Unauthorized, then the API key is invalid.
if (errorCode === 401) {
// Code
}
console.log("Error: " + errorCode + " | " + error.response.statusText);
}
}

/**
* CREATE CHAT COMPLETION
*
* @param {*} prompt is a string sent by the user; it is wrapped in a messages array together with the system instruction.
* @see Documentation https://platform.openai.com/docs/api-reference/chat/create
* @returns The assistant's message object from the chat completion.
*
* TODO: Take a look at the documentation to learn about the different parameters that Create Chat Completion accepts.
* TODO: Investigate whether the chat can learn from the user's messages (remember conversations).
* TODO: Improve the security of the prompt. ->
* TODO: Handle errors.
*/
const createChatCompletion = async (prompt) => {

const messages = [
{ role: role.SYSTEM, content: instruction.ASSISTANT },
{ role: role.USER, content: prompt },
]

try{

const completion = await openai.createChatCompletion({
model: model.GPT3_TURBO,
messages: messages
});

const response = completion.data.choices[0].message;

return response;

} catch(error) {
console.log(error);
}
}

module.exports = {
send: createCompletion,
chat: createChatCompletion
}
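
For the 429 branch that createCompletion leaves as a placeholder, a hedged sketch of a retry wrapper (the withRetry name, the maxRetries default, and the reliance on the Retry-After header are assumptions, not part of this commit):

// Hypothetical wrapper: retry an OpenAI call when the API answers 429 Too Many Requests.
const withRetry = async (request, maxRetries = 3) => {
    for (let attempt = 0; attempt <= maxRetries; attempt++) {
        try {
            return await request();
        } catch (error) {
            const status = error.response?.status;
            if (status !== 429 || attempt === maxRetries) throw error;

            // Honour the Retry-After header when present, otherwise back off exponentially.
            const seconds = Number(error.response.headers?.['retry-after']) || 2 ** attempt;
            await new Promise((resolve) => setTimeout(resolve, seconds * 1000));
        }
    }
};

It could wrap the existing calls, for example withRetry(() => openai.createCompletion({ model: model.DAVINCI, prompt })).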
