diff --git a/berkeley-function-call-leaderboard/model_handler/gpt_handler.py b/berkeley-function-call-leaderboard/model_handler/gpt_handler.py
index 2c3417259..f8e4de938 100644
--- a/berkeley-function-call-leaderboard/model_handler/gpt_handler.py
+++ b/berkeley-function-call-leaderboard/model_handler/gpt_handler.py
@@ -34,8 +34,7 @@ def inference(self, prompt,functions,test_category):
                 },
                 {
                     "role": "user",
-                    "content": "Questions:"
-                    + USER_PROMPT_FOR_CHAT_MODEL.format(
+                    "content": USER_PROMPT_FOR_CHAT_MODEL.format(
                         user_prompt=prompt, functions=str(functions)
                     ),
                 },
@@ -55,7 +54,7 @@ def inference(self, prompt,functions,test_category):
             functions = language_specific_pre_processing(functions, test_category, True)
             if type(functions) is not list:
                 functions = [functions]
-            message = [{"role": "user", "content": "Questions:" + prompt}]
+            message = [{"role": "user", "content": prompt}]
             oai_tool = convert_to_tool(
                 functions, GORILLA_TO_OPENAPI, self.model_style, test_category, True
             )