Skip to content
This repository has been archived by the owner on Jan 11, 2024. It is now read-only.

Commit

Permalink
Add final touches for cloud deployment
Browse files Browse the repository at this point in the history
  • Loading branch information
strikoder committed Jun 5, 2023
1 parent 49cf7b5 commit 54bb39a
Show file tree
Hide file tree
Showing 3 changed files with 30 additions and 22 deletions.
Binary file modified __pycache__/utils.cpython-310.pyc
Binary file not shown.
18 changes: 10 additions & 8 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,14 @@
import streamlit as st
from utils import get_ideal_prompt, print_outputs

"""
hide_st_style =
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
st.markdown(hide_st_style, unsafe_allow_html=True)
"""
st.markdown(hide_st_style, unsafe_allow_html=True)

st.title('Act as GPT')

Expand All @@ -34,11 +32,15 @@
if user_input != "":
openai.api_key = OPENAI_API_KEY

# predicting the input
modified_input = get_ideal_prompt(user_input, 150)
input_area = st.text_area("modified_input", value=modified_input, key="text_area", height=200)
# Generate the ideal prompt based on user input
predict_prompt = get_ideal_prompt(user_input, max_tokens)
# Text area for modified input prompt
prompt = st.text_area("modified_input", value=predict_prompt, key="text_area", height=200)
# Columns for buttons and output display
but_col1, _, _, _ = st.columns([1, 1, 1, 1])
write_col1, write_col2, write_col3 = st.columns([1, 1, 1])

if but_col1.button("Run"):
print_outputs(input_area, user_input, model, write_col1, write_col2, write_col3)
print_outputs(prompt, user_input, model, write_col1, write_col2, write_col3)


34 changes: 20 additions & 14 deletions utils.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,6 @@
import streamlit as st
import openai
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig
import nltk

nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


@st.cache_resource()
Expand All @@ -22,13 +17,6 @@ def get_completion(
prompt,
system_prompt="You will act as an advanced language model. You will be given an input prompt from a user, your task is just to paraphrase the prompt, don't do anything else.",
model="gpt-3.5-turbo"):
"""
This function serves as a portal into the GPT API
:param system_prompt:
:param prompt:
:param model:
:return:
"""
if not prompt:
return prompt # Return an empty string if the prompt is invalid
# you will be provided with a prompt to act as someone or something, try to give the most ideal output for an inexperienced user and explain everything from scratch
Expand All @@ -46,13 +34,31 @@ def get_completion(


def get_ideal_prompt(user_prompt, max_new_tokens=150):
    """
    Generate the ideal prompt based on the user input and maximum number of new tokens.

    Args:
        user_prompt (str): The user input.
        max_new_tokens (int): Maximum number of new tokens to generate for the
            output prompt (does not count the tokens of the input prompt).

    Returns:
        str: The generated ideal prompt.
    """
    # Tokenize the user prompt into model-ready PyTorch tensors.
    batch = tokenizer(user_prompt, return_tensors="pt")
    # Pass the cap as the explicit max_new_tokens keyword: a positional second
    # argument would bind to a different generate() parameter, and max_length=
    # would count the input tokens too, contradicting this function's contract.
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
    # Decode without special tokens; batch size is 1, so return the first item.
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


def print_outputs(input_text, system_prompt="", model='gpt-3.5-turbo', *args):
    """
    Write one GPT completion into each of the given output targets.

    Args:
        input_text (str): The input text sent for completion.
        system_prompt (str): The system prompt.
        model (str): The GPT model to use.
        *args: Writable output targets (objects exposing ``.write``, e.g.
            Streamlit columns); each one receives its own freshly
            generated completion.
    """
    for target in args:
        target.write(get_completion(input_text, system_prompt, model))

0 comments on commit 54bb39a

Please sign in to comment.