From 975c531400d1a4b89e41833491e04100363ef552 Mon Sep 17 00:00:00 2001 From: Bikatr7 Date: Wed, 12 Jun 2024 12:27:28 -0600 Subject: [PATCH] added is_cote option --- demo/translation_settings.json | 3 +- handlers/json_handler.py | 5 +- .../translation_settings_description.txt | 4 +- modules/common/file_ensurer.py | 1 + webgui.py | 59 ++++++++++--------- 5 files changed, 40 insertions(+), 32 deletions(-) diff --git a/demo/translation_settings.json b/demo/translation_settings.json index 271b9c7..0378637 100644 --- a/demo/translation_settings.json +++ b/demo/translation_settings.json @@ -7,7 +7,8 @@ "number_of_malformed_batch_retries": 1, "batch_retry_timeout": 700, "number_of_concurrent_batches": 2, - "gender_context_insertion": true + "gender_context_insertion": true, + "is_cote": true }, "openai settings": { diff --git a/handlers/json_handler.py b/handlers/json_handler.py index 2312bbd..5ff0514 100644 --- a/handlers/json_handler.py +++ b/handlers/json_handler.py @@ -44,7 +44,8 @@ def validate_json() -> None: "number_of_malformed_batch_retries", "batch_retry_timeout", "number_of_concurrent_batches", - "gender_context_insertion" + "gender_context_insertion", + "is_cote" ] openai_keys = [ @@ -88,6 +89,7 @@ def validate_json() -> None: "number_of_malformed_batch_retries": lambda x: isinstance(x, int) and x >= 0, "batch_retry_timeout": lambda x: isinstance(x, int) and x >= 0, "gender_context_insertion": lambda x: isinstance(x, bool), + "is_cote": lambda x: isinstance(x, bool), "number_of_concurrent_batches": lambda x: isinstance(x, int) and x >= 0, "openai_model": lambda x: isinstance(x, str) and x in ALLOWED_OPENAI_MODELS, "openai_system_message": lambda x: x not in ["", "None", None], @@ -329,6 +331,7 @@ def convert_to_correct_type(setting_name:str, initial_value:str) -> typing.Any: "batch_retry_timeout": {"type": int, "constraints": lambda x: x >= 0}, "number_of_concurrent_batches": {"type": int, "constraints": lambda x: x >= 0}, "gender_context_insertion": {"type": 
bool, "constraints": lambda x: isinstance(x, bool)}, + "is_cote": {"type": bool, "constraints": lambda x: isinstance(x, bool)}, "openai_model": {"type": str, "constraints": lambda x: x in ALLOWED_OPENAI_MODELS}, "openai_system_message": {"type": str, "constraints": lambda x: x not in ["", "None", None]}, "openai_temperature": {"type": float, "constraints": lambda x: 0 <= x <= 2}, diff --git a/lib/common/translation_settings_description.txt b/lib/common/translation_settings_description.txt index 6785fd5..b5f21ba 100644 --- a/lib/common/translation_settings_description.txt +++ b/lib/common/translation_settings_description.txt @@ -15,7 +15,9 @@ batch_retry_timeout : How long Kudasai will try to translate a batch in seconds, number_of_concurrent_batches : How many translations batches Kudasai will send to the translation API at a time. For OpenAI, be conservative as rate-limiting is aggressive, I'd suggest 3-5. For Gemini, do not exceed 15 for 1.0 or 2 for 1.5. This setting more or less doesn't matter for DeepL. -gender_context_insertion : true of false - Whether to insert gender info into system prompts when translating text. Kudasai will look for a "gender.json" (exactly) in the root directory and scan each text to translate for the names in the file. If a name is found, Kudasai will insert the gender info into the system prompt. For more info look at the README.md +gender_context_insertion : true or false - Whether to insert gender info into system prompts when translating text. Kudasai will look for a "gender.json" (exactly) in the root directory and scan each text to translate for the names in the file. If a name is found, Kudasai will insert the gender info into the system prompt. For more info look at the README.md. + +is_cote : true or false - Whether you are translating COTE (Classroom of the Elite); Kudasai has specialized functions for COTE that will be enabled if this is set to true. 
---------------------------------------------------------------------------------- Open AI Settings: See https://platform.openai.com/docs/api-reference/chat/create for further details diff --git a/modules/common/file_ensurer.py b/modules/common/file_ensurer.py index e67ff22..2a5f4c6 100644 --- a/modules/common/file_ensurer.py +++ b/modules/common/file_ensurer.py @@ -89,6 +89,7 @@ class FileEnsurer(): "batch_retry_timeout": 300, "number_of_concurrent_batches": 5, "gender_context_insertion": False, + "is_cote": False, }, "openai settings": { diff --git a/webgui.py b/webgui.py index 9d5b5d3..a93bac9 100644 --- a/webgui.py +++ b/webgui.py @@ -56,35 +56,36 @@ class KudasaiGUI: "batch_retry_timeout": lines[14-1].strip(), "number_of_concurrent_batches": lines[16-1].strip(), "gender_context_insertion": lines[18-1].strip(), - "openai_help_link": lines[21-1].strip(), - "openai_model": lines[23-1].strip(), - "openai_system_message": lines[25-1].strip(), - "openai_temperature": lines[27-1].strip(), - "openai_top_p": lines[29-1].strip(), - "openai_n": lines[31-1].strip(), - "openai_stream": lines[33-1].strip(), - "openai_stop": lines[35-1].strip(), - "openai_logit_bias": lines[37-1].strip(), - "openai_max_tokens": lines[39-1].strip(), - "openai_presence_penalty": lines[41-1].strip(), - "openai_frequency_penalty": lines[43-1].strip(), - "openai_disclaimer": lines[45-1].strip(), - "gemini_help_link": lines[48-1].strip(), - "gemini_model": lines[50-1].strip(), - "gemini_prompt": lines[52-1].strip(), - "gemini_temperature": lines[54-1].strip(), - "gemini_top_p": lines[56-1].strip(), - "gemini_top_k": lines[58-1].strip(), - "gemini_candidate_count": lines[60-1].strip(), - "gemini_stream": lines[62-1].strip(), - "gemini_stop_sequences": lines[64-1].strip(), - "gemini_max_output_tokens": lines[66-1].strip(), - "gemini_disclaimer": lines[68-1].strip(), - "deepl_help_link": lines[71-1].strip(), - "deepl_context": lines[73-1].strip(), - "deepl_split_sentences": lines[75-1].strip(), - 
"deepl_preserve_formatting": lines[77-1].strip(), - "deepl_formality": lines[79-1].strip(), + "is_cote": lines[20-1].strip(), + "openai_help_link": lines[23-1].strip(), + "openai_model": lines[25-1].strip(), + "openai_system_message": lines[27-1].strip(), + "openai_temperature": lines[29-1].strip(), + "openai_top_p": lines[31-1].strip(), + "openai_n": lines[33-1].strip(), + "openai_stream": lines[35-1].strip(), + "openai_stop": lines[37-1].strip(), + "openai_logit_bias": lines[39-1].strip(), + "openai_max_tokens": lines[41-1].strip(), + "openai_presence_penalty": lines[43-1].strip(), + "openai_frequency_penalty": lines[45-1].strip(), + "openai_disclaimer": lines[47-1].strip(), + "gemini_help_link": lines[50-1].strip(), + "gemini_model": lines[52-1].strip(), + "gemini_prompt": lines[54-1].strip(), + "gemini_temperature": lines[56-1].strip(), + "gemini_top_p": lines[58-1].strip(), + "gemini_top_k": lines[60-1].strip(), + "gemini_candidate_count": lines[62-1].strip(), + "gemini_stream": lines[64-1].strip(), + "gemini_stop_sequences": lines[66-1].strip(), + "gemini_max_output_tokens": lines[68-1].strip(), + "gemini_disclaimer": lines[70-1].strip(), + "deepl_help_link": lines[73-1].strip(), + "deepl_context": lines[75-1].strip(), + "deepl_split_sentences": lines[77-1].strip(), + "deepl_preserve_formatting": lines[79-1].strip(), + "deepl_formality": lines[81-1].strip(), } ##-------------------start-of-build_gui()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------