diff --git a/Examples/chat_completion.py b/Examples/chat_completion.py
index 002e247..1f567da 100644
--- a/Examples/chat_completion.py
+++ b/Examples/chat_completion.py
@@ -14,15 +14,17 @@
 
 n = 1
-debug, caching = False, True
+debug = False
 max_tokens = None
 
-user_content = """
-用c实现目前已知最快平方根算法
-"""
+# user_content = """
+# 用c实现目前已知最快平方根算法
+# """
 # user_content = '最初有1000千克的蘑菇,其中99%的成分是水。经过几天的晴天晾晒后,蘑菇中的水分含量现在是98%,蘑菇中减少了多少水分?'
-user_content = "讲个简短的笑话"
+# user_content = "Write down the most romantic sentence you can think of."
+user_content = "光散射中的Mie理论的理论公式是怎样的?请用latex语法表示它公式使用$$符号包裹。"
+
 
 model = "gpt-3.5-turbo"
 # model="gpt-4"
 
@@ -37,7 +39,7 @@
     n=n,
     max_tokens=max_tokens,
     timeout=30,
-    extra_body={"caching": caching},
+    # extra_body={"caching": True},
 )
 
 if stream:
diff --git a/openai_forward/webui/interface.py b/openai_forward/webui/interface.py
index 94388e6..09bb8a4 100644
--- a/openai_forward/webui/interface.py
+++ b/openai_forward/webui/interface.py
@@ -30,10 +30,6 @@ class Forward(Base):
         ),
     ]
 
-    # CHAT_COMPLETION_ROUTE: str = '/v1/chat/completions'
-    # COMPLETION_ROUTE: str = '/v1/completions'
-    # EMBEDDING_ROUTE: str = '/v1/embeddings'
-
     def convert_to_env(self, set_env=False):
         env_dict = {'FORWARD_CONFIG': json.dumps([i.to_dict() for i in self.forward])}
 
@@ -47,7 +43,7 @@ def convert_to_env(self, set_env=False):
 class CacheConfig(Base):
     backend: str = 'LevelDB'
     root_path_or_url: str = './FLAXKV_DB'
-    default_request_caching_value: bool = False
+    default_request_caching_value: bool = True
     cache_openai: bool = False
     cache_general: bool = False
     cache_routes: List = ['/v1/chat/completions']
diff --git a/openai_forward/webui/run.py b/openai_forward/webui/run.py
index 3f29bfd..4e7ca75 100644
--- a/openai_forward/webui/run.py
+++ b/openai_forward/webui/run.py
@@ -195,12 +195,14 @@ def display_cache_configuration():
     st.subheader("Cache Configuration")
 
     cache_openai = st.checkbox("Cache OpenAI route", cache.cache_openai)
-    cache_general = st.checkbox("Cache General route", cache.cache_general)
-
     cache_default_request_caching_value = st.checkbox(
-        "Default Request Caching Value", cache.default_request_caching_value
+        "For OpenAI API, return using cache by default",
+        cache.default_request_caching_value,
+        disabled=not cache_openai,
     )
+    cache_general = st.checkbox("Cache General route", cache.cache_general)
+
     cache_backend = st.selectbox(
         "Cache Backend",
         ["MEMORY", "LMDB", "LevelDB"],