From 54bb39a36d302c7af641cad68b792148dbd0a0a0 Mon Sep 17 00:00:00 2001
From: Strikoder
Date: Mon, 5 Jun 2023 17:48:48 +0300
Subject: [PATCH] Add final touches for cloud deployment

---
 __pycache__/utils.cpython-310.pyc | Bin 1879 -> 2255 bytes
 app.py                            | 18 ++++++++++--------
 utils.py                          | 34 ++++++++++++++++++++--------------
 3 files changed, 30 insertions(+), 22 deletions(-)

diff --git a/__pycache__/utils.cpython-310.pyc b/__pycache__/utils.cpython-310.pyc
index a8aeb1beddc49dc59f6eaf97c8b0c6a97e0873d1..c8d4a6f762104378450475e9fc6cdfd006fb5e7d 100644
GIT binary patch
delta 1286
[base85-encoded binary delta omitted]
delta 881
[base85-encoded binary delta omitted]

diff --git a/app.py b/app.py
index 1988e74..60312a4 100644
--- a/app.py
+++ b/app.py
@@ -2,16 +2,14 @@ import streamlit as st
 from utils import get_ideal_prompt, print_outputs
 
-"""
-hide_st_style =
+hide_st_style = """
 <style>
 #MainMenu {visibility: hidden;}
 footer {visibility: hidden;}
 header {visibility: hidden;}
 </style>
-
-st.markdown(hide_st_style, unsafe_allow_html=True)
 """
+st.markdown(hide_st_style, unsafe_allow_html=True)
 
 st.title('Act as GPT')
 
@@ -34,11 +32,15 @@ if user_input != "":
     openai.api_key = OPENAI_API_KEY
 
-    # predicting the input
-    modified_input = get_ideal_prompt(user_input, 150)
-    input_area = st.text_area("modified_input", value=modified_input, key="text_area", height=200)
+    # Generate the ideal prompt based on the user input
+    predict_prompt = get_ideal_prompt(user_input, max_tokens)
+    # Text area for the modified input prompt
+    prompt = st.text_area("modified_input", value=predict_prompt, key="text_area", height=200)
+    # Columns for the buttons and output display
     but_col1, _, _, _ = st.columns([1, 1, 1, 1])
     write_col1, write_col2, write_col3 = st.columns([1, 1, 1])
     if but_col1.button("Run"):
-        print_outputs(input_area, user_input, model, write_col1, write_col2, write_col3)
+        print_outputs(prompt, user_input, model, write_col1, write_col2, write_col3)
+
+
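Note: the app.py hunk above references user_input, model, max_tokens, and
OPENAI_API_KEY, which are defined in parts of app.py outside this diff's context.
A minimal sketch of how such values are typically wired up in Streamlit — the
widget labels, secret name, and defaults here are assumptions for illustration,
not code from this repository:

    import streamlit as st

    # Assumption: the key is read from Streamlit secrets for cloud deployment;
    # the actual app may load it differently (e.g. from an environment variable).
    OPENAI_API_KEY = st.secrets["OPENAI_API_KEY"]

    # Assumption: hypothetical widget labels and default values.
    model = st.selectbox("Model", ["gpt-3.5-turbo", "gpt-4"])
    max_tokens = st.slider("Max tokens", min_value=50, max_value=500, value=150)
    user_input = st.text_input("Who or what should GPT act as?")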
diff --git a/utils.py b/utils.py
index 8853d39..0f8393b 100644
--- a/utils.py
+++ b/utils.py
@@ -1,11 +1,6 @@
 import streamlit as st
 import openai
-from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoConfig
-import nltk
-
-nltk.download('punkt')
-nltk.download('wordnet')
-nltk.download('stopwords')
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 
 @st.cache_resource()
@@ -22,13 +17,6 @@ def get_completion(
     prompt,
     system_prompt="You will act as an advanced language model. You will be given an input prompt from a user, your task is just to paraphrase the prompt, don't do anything else.",
     model="gpt-3.5-turbo"):
-    """
-    This function servers as a portal into Gpt API
-    :param system_prompt:
-    :param prompt:
-    :param model:
-    :return:
-    """
     if not prompt:
         return prompt  # Return an empty string if the prompt is invalid
     # you will be provided with a prompt to act as someone or something; try to give the most ideal output for an inexperienced user and explain everything from scratch
@@ -46,13 +34,31 @@
 def get_ideal_prompt(user_prompt, max_new_tokens=150):
+    """
+    Generate the ideal prompt from the user input.
+
+    Args:
+        user_prompt (str): The user input.
+        max_new_tokens (int): Maximum number of new tokens to generate for the output prompt.
+
+    Returns:
+        str: The generated ideal prompt.
+    """
     batch = tokenizer(user_prompt, return_tensors="pt")
-    generated_ids = model.generate(batch["input_ids"], max_new_tokens)
+    generated_ids = model.generate(batch["input_ids"], max_length=max_new_tokens)
     output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
     return output[0]
 
 
 def print_outputs(input_text, system_prompt="", model='gpt-3.5-turbo', *args):
+    """
+    Print the outputs from the GPT API using the specified model, writing one completion to each output column.
+
+    Args:
+        input_text (str): The input text.
+        system_prompt (str): The system prompt.
+        model (str): The GPT model to use.
+        *args: Streamlit containers to write the outputs to.
+    """
     for arg in args:
         output = get_completion(input_text, system_prompt, model)
         arg.write(output)
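Note: the utils.py diff removes get_completion's old docstring but never shows the
function body itself. For context, a minimal sketch of what such a wrapper usually
looks like under the pre-1.0 openai SDK (the module-level openai.api_key assignment
in app.py implies that SDK generation); the exact body in this repository may differ:

    import openai

    def get_completion(prompt,
                       system_prompt="You will act as an advanced language model.",
                       model="gpt-3.5-turbo"):
        if not prompt:
            return prompt  # Pass the empty value straight back
        # One chat completion per call: system prompt plus the user's prompt.
        response = openai.ChatCompletion.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": prompt},
            ],
        )
        return response.choices[0].message["content"]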