Commit

added is_cote option
Bikatr7 committed Jun 12, 2024
1 parent 85a9d70 commit 975c531
Showing 5 changed files with 40 additions and 32 deletions.
3 changes: 2 additions & 1 deletion demo/translation_settings.json
@@ -7,7 +7,8 @@
 "number_of_malformed_batch_retries": 1,
 "batch_retry_timeout": 700,
 "number_of_concurrent_batches": 2,
-"gender_context_insertion": true
+"gender_context_insertion": true,
+"is_cote": true
 },
 
 "openai settings": {
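For orientation, here is a minimal sketch of how the new flag could be read back out of demo/translation_settings.json. The file path and key names come from the hunk above; the top-level section name and the loading code itself are assumptions, not taken from this commit.

```python
import json

# Path taken from this commit; the demo file ships with the repository.
with open("demo/translation_settings.json", "r", encoding="utf-8") as file:
    settings = json.load(file)

# "base translation settings" is an assumed section name -- the hunk only shows
# the keys inside the section, not the header the section sits under.
base_settings = settings.get("base translation settings", {})

# Fall back to False when the key is missing, matching the default added
# to modules/common/file_ensurer.py in this same commit.
is_cote = bool(base_settings.get("is_cote", False))
print(f"is_cote = {is_cote}")
```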
5 changes: 4 additions & 1 deletion handlers/json_handler.py
@@ -44,7 +44,8 @@ def validate_json() -> None:
 "number_of_malformed_batch_retries",
 "batch_retry_timeout",
 "number_of_concurrent_batches",
-"gender_context_insertion"
+"gender_context_insertion",
+"is_cote"
 ]
 
 openai_keys = [
@@ -88,6 +89,7 @@ def validate_json() -> None:
 "number_of_malformed_batch_retries": lambda x: isinstance(x, int) and x >= 0,
 "batch_retry_timeout": lambda x: isinstance(x, int) and x >= 0,
 "gender_context_insertion": lambda x: isinstance(x, bool),
+"is_cote": lambda x: isinstance(x, bool),
 "number_of_concurrent_batches": lambda x: isinstance(x, int) and x >= 0,
 "openai_model": lambda x: isinstance(x, str) and x in ALLOWED_OPENAI_MODELS,
 "openai_system_message": lambda x: x not in ["", "None", None],
@@ -329,6 +331,7 @@ def convert_to_correct_type(setting_name:str, initial_value:str) -> typing.Any:
 "batch_retry_timeout": {"type": int, "constraints": lambda x: x >= 0},
 "number_of_concurrent_batches": {"type": int, "constraints": lambda x: x >= 0},
 "gender_context_insertion": {"type": bool, "constraints": lambda x: isinstance(x, bool)},
+"is_cote": {"type": bool, "constraints": lambda x: isinstance(x, bool)},
 "openai_model": {"type": str, "constraints": lambda x: x in ALLOWED_OPENAI_MODELS},
 "openai_system_message": {"type": str, "constraints": lambda x: x not in ["", "None", None]},
 "openai_temperature": {"type": float, "constraints": lambda x: 0 <= x <= 2},
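All three hunks above follow the same pattern: the setting is registered in a key list, given a validation lambda, and given a type/constraint entry for conversion. A trimmed-down, self-contained sketch of that pattern is below; the dictionary and function names are hypothetical stand-ins, not the real validate_json() internals.

```python
# Hypothetical miniature of the key-list + per-key lambda pattern in json_handler.py.
validation_rules = {
    "gender_context_insertion": lambda x: isinstance(x, bool),
    "is_cote": lambda x: isinstance(x, bool),
    "batch_retry_timeout": lambda x: isinstance(x, int) and x >= 0,
}

def validate_settings(settings: dict) -> None:
    # Every registered key must be present and satisfy its rule.
    for key, rule in validation_rules.items():
        if key not in settings:
            raise KeyError(f"Missing required setting: {key}")
        if not rule(settings[key]):
            raise ValueError(f"Invalid value for {key}: {settings[key]!r}")

# Passes silently; flipping is_cote to a string would raise ValueError.
validate_settings({"gender_context_insertion": True, "is_cote": True, "batch_retry_timeout": 700})
```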
4 changes: 3 additions & 1 deletion lib/common/translation_settings_description.txt
@@ -15,7 +15,9 @@ batch_retry_timeout : How long Kudasai will try to translate a batch in seconds,
 
 number_of_concurrent_batches : How many translations batches Kudasai will send to the translation API at a time. For OpenAI, be conservative as rate-limiting is aggressive, I'd suggest 3-5. For Gemini, do not exceed 15 for 1.0 or 2 for 1.5. This setting more or less doesn't matter for DeepL.
 
-gender_context_insertion : true of false - Whether to insert gender info into system prompts when translating text. Kudasai will look for a "gender.json" (exactly) in the root directory and scan each text to translate for the names in the file. If a name is found, Kudasai will insert the gender info into the system prompt. For more info look at the README.md
+gender_context_insertion : true or false - Whether to insert gender info into system prompts when translating text. Kudasai will look for a "gender.json" (exactly) in the root directory and scan each text to translate for the names in the file. If a name is found, Kudasai will insert the gender info into the system prompt. For more info look at the README.md.
+
+is_cote: true or false - Whether you are translating COTE (Classroom of the Elite), Kudasai has specialized functions for COTE that will be enabled if this is set to true.
 ----------------------------------------------------------------------------------
 Open AI Settings:
 See https://platform.openai.com/docs/api-reference/chat/create for further details
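As described, is_cote is a plain boolean gate. A hedged sketch of how such a gate might wrap COTE-specific handling follows; the helper is a hypothetical placeholder, since the actual specialized functions are not part of this commit.

```python
def normalize_cote_terms(text: str) -> str:
    # Hypothetical placeholder for the COTE-specific handling mentioned in the
    # description; the real specialized functions are not shown in this diff.
    return text

def maybe_apply_cote_handling(text: str, translation_settings: dict) -> str:
    # The gate itself: COTE handling only runs when is_cote is set to true.
    if translation_settings.get("is_cote", False):
        return normalize_cote_terms(text)
    return text

print(maybe_apply_cote_handling("some text to translate", {"is_cote": True}))
```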
1 change: 1 addition & 0 deletions modules/common/file_ensurer.py
@@ -89,6 +89,7 @@ class FileEnsurer():
 "batch_retry_timeout": 300,
 "number_of_concurrent_batches": 5,
 "gender_context_insertion": False,
+"is_cote": False,
 },
 
 "openai settings": {
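FileEnsurer holds the default value (False) for the new setting. A small sketch of how a default like this is typically backfilled into a settings file written before the key existed, assuming a simple dict merge; the repository's actual repair logic may differ.

```python
# Defaults as they appear in file_ensurer.py (trimmed to two keys for brevity).
DEFAULT_BASE_SETTINGS = {
    "gender_context_insertion": False,
    "is_cote": False,  # new default introduced by this commit
}

def backfill_defaults(user_settings: dict, defaults: dict) -> dict:
    # Keep the user's existing values; add newly introduced keys with defaults.
    merged = dict(defaults)
    merged.update(user_settings)
    return merged

old_file = {"gender_context_insertion": True}  # saved before is_cote existed
print(backfill_defaults(old_file, DEFAULT_BASE_SETTINGS))
# -> {'gender_context_insertion': True, 'is_cote': False}
```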
59 changes: 30 additions & 29 deletions webgui.py
@@ -56,35 +56,36 @@ class KudasaiGUI:
 "batch_retry_timeout": lines[14-1].strip(),
 "number_of_concurrent_batches": lines[16-1].strip(),
 "gender_context_insertion": lines[18-1].strip(),
-"openai_help_link": lines[21-1].strip(),
-"openai_model": lines[23-1].strip(),
-"openai_system_message": lines[25-1].strip(),
-"openai_temperature": lines[27-1].strip(),
-"openai_top_p": lines[29-1].strip(),
-"openai_n": lines[31-1].strip(),
-"openai_stream": lines[33-1].strip(),
-"openai_stop": lines[35-1].strip(),
-"openai_logit_bias": lines[37-1].strip(),
-"openai_max_tokens": lines[39-1].strip(),
-"openai_presence_penalty": lines[41-1].strip(),
-"openai_frequency_penalty": lines[43-1].strip(),
-"openai_disclaimer": lines[45-1].strip(),
-"gemini_help_link": lines[48-1].strip(),
-"gemini_model": lines[50-1].strip(),
-"gemini_prompt": lines[52-1].strip(),
-"gemini_temperature": lines[54-1].strip(),
-"gemini_top_p": lines[56-1].strip(),
-"gemini_top_k": lines[58-1].strip(),
-"gemini_candidate_count": lines[60-1].strip(),
-"gemini_stream": lines[62-1].strip(),
-"gemini_stop_sequences": lines[64-1].strip(),
-"gemini_max_output_tokens": lines[66-1].strip(),
-"gemini_disclaimer": lines[68-1].strip(),
-"deepl_help_link": lines[71-1].strip(),
-"deepl_context": lines[73-1].strip(),
-"deepl_split_sentences": lines[75-1].strip(),
-"deepl_preserve_formatting": lines[77-1].strip(),
-"deepl_formality": lines[79-1].strip(),
+"is_cote": lines[20-1].strip(),
+"openai_help_link": lines[23-1].strip(),
+"openai_model": lines[25-1].strip(),
+"openai_system_message": lines[27-1].strip(),
+"openai_temperature": lines[29-1].strip(),
+"openai_top_p": lines[31-1].strip(),
+"openai_n": lines[33-1].strip(),
+"openai_stream": lines[35-1].strip(),
+"openai_stop": lines[37-1].strip(),
+"openai_logit_bias": lines[39-1].strip(),
+"openai_max_tokens": lines[41-1].strip(),
+"openai_presence_penalty": lines[43-1].strip(),
+"openai_frequency_penalty": lines[45-1].strip(),
+"openai_disclaimer": lines[47-1].strip(),
+"gemini_help_link": lines[50-1].strip(),
+"gemini_model": lines[52-1].strip(),
+"gemini_prompt": lines[54-1].strip(),
+"gemini_temperature": lines[56-1].strip(),
+"gemini_top_p": lines[58-1].strip(),
+"gemini_top_k": lines[60-1].strip(),
+"gemini_candidate_count": lines[62-1].strip(),
+"gemini_stream": lines[64-1].strip(),
+"gemini_stop_sequences": lines[66-1].strip(),
+"gemini_max_output_tokens": lines[68-1].strip(),
+"gemini_disclaimer": lines[70-1].strip(),
+"deepl_help_link": lines[73-1].strip(),
+"deepl_context": lines[75-1].strip(),
+"deepl_split_sentences": lines[77-1].strip(),
+"deepl_preserve_formatting": lines[79-1].strip(),
+"deepl_formality": lines[81-1].strip(),
 }
 
 ##-------------------start-of-build_gui()---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
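The webgui.py change is purely an index shift: the GUI pulls its tooltip text out of lib/common/translation_settings_description.txt by fixed line numbers, so inserting the is_cote entry (plus its blank line) pushes every later setting down by two lines. A minimal, self-contained sketch of that lines[N-1] lookup convention, using hypothetical file contents rather than the real description file:

```python
# Hypothetical miniature of the description-file lookup used by webgui.py:
# entries are read by fixed, 1-based line numbers via the lines[N-1] idiom.
description_lines = [
    "",                                                 # line 1 (stand-in content)
    "gender_context_insertion : true or false - ...",   # line 2
    "",                                                 # line 3
    "is_cote: true or false - ...",                     # line 4
]

def description_for(line_number: int) -> str:
    # Same idiom as webgui.py: keep the human-readable number, subtract one.
    return description_lines[line_number - 1].strip()

# Inserting the two is_cote lines is why every later index in the diff grows by
# two (openai_help_link: 21 -> 23, openai_model: 23 -> 25, and so on).
print(description_for(4))  # -> "is_cote: true or false - ..."
```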
