Skip to content

Commit

Permalink
Merge pull request #58 from alan-turing-institute/emoji-before-proces…
Browse files Browse the repository at this point in the history
…sing

Emoji react before processing the response
  • Loading branch information
jemrobinson authored Jun 16, 2023
2 parents 21fc710 + 1e58c52 commit dfc393f
Show file tree
Hide file tree
Showing 5 changed files with 63 additions and 44 deletions.
59 changes: 36 additions & 23 deletions slack_bot/slack_bot/bot/bot.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
# Standard library imports
import logging
from typing import Optional

# Third-party imports
from slack_sdk.socket_mode import SocketModeClient
Expand All @@ -21,57 +22,57 @@ def __call__(self, client: SocketModeClient, req: SocketModeRequest) -> None:
return None

# Acknowledge the request
logging.info(f"Received an events_api request")
response = SocketModeResponse(envelope_id=req.envelope_id)
client.send_socket_mode_response(response)

try:
# Extract user and message information
# Extract event from payload
event = req.payload["event"]
sender_is_bot = "bot_id" in event

# Ignore messages from bots
if sender_is_bot:
logging.info(f"Ignoring an event triggered by a bot.")
return None

# Extract user and message information
message = event["text"]
user_id = event["user"]
event_type = event["type"]
event_subtype = event.get("subtype", None)
sender_is_bot = "bot_id" in event

# Ignore changes to messages.
if event_type == "message" and event_subtype == "message_changed":
logging.info(f"Ignoring changes to messages.")
logging.info(f"Ignoring a change to a message.")
return None

logging.info(f"Received message '{message}' from user '{user_id}'")

# Ignore messages from bots
if sender_is_bot:
logging.info(f"Ignoring messages from bots.")
return None
# Start processing the message
logging.info(f"Processing message '{message}' from user '{user_id}'.")

# If this is a direct message to REGinald...
if event_type == "message" and event_subtype is None:
self.react(client, event["channel"], event["ts"])
model_response = self.model.direct_message(message, user_id)

# If @REGinald is mentioned in a channel
elif event_type == "app_mention":
self.react(client, event["channel"], event["ts"])
model_response = self.model.channel_mention(message, user_id)

# Otherwise
else:
logging.info(f"Received unexpected event of type '{event['type']}'")
logging.info(f"Received unexpected event of type '{event['type']}'.")
return None

# Add an emoji and a reply as required
if model_response:
if model_response.emoji:
logging.info(f"Applying emoji {model_response.emoji}")
client.web_client.reactions_add(
name=model_response.emoji,
channel=event["channel"],
timestamp=event["ts"],
)
if model_response.message:
logging.info(f"Posting reply {model_response.message}")
client.web_client.chat_postMessage(
channel=event["channel"], text=model_response.message
)
if model_response and model_response.message:
logging.info(f"Posting reply {model_response.message}.")
client.web_client.chat_postMessage(
channel=event["channel"], text=model_response.message
)
else:
logging.info(f"No reply was generated.")

except KeyError as exc:
logging.warning(f"Attempted to access key that does not exist.\n{str(exc)}")
Expand All @@ -81,3 +82,15 @@ def __call__(self, client: SocketModeClient, req: SocketModeRequest) -> None:
f"Something went wrong in processing a Slack request.\nPayload: {req.payload}.\n{str(exc)}"
)
raise

def react(self, client: SocketModeClient, channel: str, timestamp: str) -> None:
    """Add this model's emoji reaction to the message at (channel, timestamp).

    When the model defines no emoji, only a log line is emitted and no
    Slack API call is made.
    """
    emoji = self.model.emoji
    if not emoji:
        logging.info("No emoji defined for this model.")
        return
    logging.info(f"Reacting with emoji {emoji}.")
    client.web_client.reactions_add(
        name=emoji,
        channel=channel,
        timestamp=timestamp,
    )
7 changes: 3 additions & 4 deletions slack_bot/slack_bot/models/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,13 @@


class MessageResponse:
    """Container for a model's reply text.

    After this commit the emoji no longer travels with each response
    (it lives on the model itself), so only the optional message is kept.
    """

    def __init__(self, message: Optional[str]) -> None:
        # `None` means the model produced no reply to post.
        self.message = message


class ResponseModel(ABC):
def __init__(self, *args: Any, **kwargs: Any):
pass
def __init__(self, emoji: Optional[str], *args: Any, **kwargs: Any):
self.emoji = emoji

def direct_message(self, message: str, user_id: str) -> MessageResponse:
"""When the strategy receives a message it should return a MessageResponse where both are optional"""
Expand Down
27 changes: 15 additions & 12 deletions slack_bot/slack_bot/models/chat_completion.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,14 @@
from .base import MessageResponse, ResponseModel


class ChatCompletionBase(ResponseModel):
    """Shared base for the chat-completion backends.

    Registers the "books" emoji with the ResponseModel base so the bot can
    react to an incoming message before the (slow) completion call runs.
    """

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(emoji="books")


class ChatCompletionAzure(ChatCompletionBase):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.api_base = os.getenv("OPENAI_AZURE_API_BASE")
self.api_key = os.getenv("OPENAI_AZURE_API_KEY")
self.api_type = "azure"
Expand Down Expand Up @@ -39,8 +45,7 @@ def direct_message(self, message: str, user_id: str) -> MessageResponse:
temperature=self.temperature,
top_p=self.top_p,
)
text = response["choices"][0]["text"]
return MessageResponse(text, "open_hands")
return MessageResponse(response["choices"][0]["text"])

def channel_mention(self, message: str, user_id: str) -> MessageResponse:
openai.api_base = self.api_base
Expand All @@ -58,26 +63,24 @@ def channel_mention(self, message: str, user_id: str) -> MessageResponse:
temperature=self.temperature,
top_p=self.top_p,
)
text = response["choices"][0]["text"]
return MessageResponse(text, "open_hands")
return MessageResponse(response["choices"][0]["text"])


class ChatCompletionOpenAI(ResponseModel):
def __init__(self, **kwargs) -> None:
class ChatCompletionOpenAI(ChatCompletionBase):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.api_key = os.getenv("OPENAI_API_KEY")

def direct_message(self, message: str, user_id: str) -> MessageResponse:
openai.api_key = self.api_key
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": message}]
)
text = response["choices"][0]["message"]["content"]
return MessageResponse(text, "open_hands")
return MessageResponse(response["choices"][0]["message"]["content"])

def channel_mention(self, message: str, user_id: str) -> MessageResponse:
openai.api_key = self.api_key
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": message}]
)
text = response["choices"][0]["message"]["content"]
return MessageResponse(text, "open_hands")
return MessageResponse(response["choices"][0]["message"]["content"])
7 changes: 5 additions & 2 deletions slack_bot/slack_bot/models/hello.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,11 @@


class Hello(ResponseModel):
    """Trivial response model for smoke-testing the bot wiring.

    Reacts with the "wave" emoji (registered on the base class) and replies
    with a fixed greeting.
    """

    def __init__(self):
        super().__init__(emoji="wave")

    def direct_message(self, message: str, user_id: str) -> MessageResponse:
        # DMs are redirected to a channel; the message content is ignored.
        return MessageResponse("Let's discuss this in a channel!")

    def channel_mention(self, message: str, user_id: str) -> MessageResponse:
        # Greet the mentioning user by Slack mention syntax.
        return MessageResponse(f"Hello <@{user_id}>")
7 changes: 4 additions & 3 deletions slack_bot/slack_bot/models/llama.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,11 +60,12 @@ def __init__(
max_input_size: int,
data_dir: pathlib.Path,
which_index: str,
num_output: int = 256,
chunk_size_limit: Optional[int] = None,
chunk_overlap_ratio: float = 0.1,
force_new_index: bool = False,
num_output: int = 256,
) -> None:
super().__init__(emoji="llama")
logging.info("Setting up Huggingface backend.")
self.max_input_size = max_input_size
self.model_name = model_name
Expand Down Expand Up @@ -214,11 +215,11 @@ def _prep_llm_predictor(self) -> LLMPredictor:

def direct_message(self, message: str, user_id: str) -> MessageResponse:
backend_response = self._get_response(message, user_id)
return MessageResponse(backend_response, "llama")
return MessageResponse(backend_response)

def channel_mention(self, message: str, user_id: str) -> MessageResponse:
backend_response = self._get_response(message, user_id)
return MessageResponse(backend_response, "llama")
return MessageResponse(backend_response)


class LlamaDistilGPT2(Llama):
Expand Down

0 comments on commit dfc393f

Please sign in to comment.