From 86d9612882a7f4f3db7b1905df91ac7cb2285971 Mon Sep 17 00:00:00 2001
From: mrT23
Date: Sun, 28 Jul 2024 08:55:01 +0300
Subject: [PATCH 01/11] docs: update usage guide for changing models; add custom model support and reorganize sections

---
 .../usage-guide/additional_configurations.md | 159 ---------------
 docs/docs/usage-guide/changing_a_model.md    | 187 ++++++++++++++++++
 docs/docs/usage-guide/index.md               |   3 +-
 docs/mkdocs.yml                              |   2 +
 pr_agent/algo/utils.py                       |  16 +-
 pr_agent/settings/configuration.toml         |   2 +
 6 files changed, 206 insertions(+), 163 deletions(-)
 create mode 100644 docs/docs/usage-guide/changing_a_model.md

diff --git a/docs/docs/usage-guide/additional_configurations.md b/docs/docs/usage-guide/additional_configurations.md
index c8cb788e2..4ae014148 100644
--- a/docs/docs/usage-guide/additional_configurations.md
+++ b/docs/docs/usage-guide/additional_configurations.md
@@ -47,165 +47,6 @@ However, for very large PRs, or in case you want to emphasize quality over speed
 which divides the PR to chunks, and processes each chunk separately. With this mode, regardless of the model, no compression will be done (but for large PRs, multiple model calls may occur)
 
-## Changing a model
-
-See [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py) for the list of available models.
-To use a different model than the default (GPT-4), you need to edit [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L2).
-For models and environments not from OPENAI, you might need to provide additional keys and other parameters. See below for instructions.
-
-### Azure
-
-To use Azure, set in your `.secrets.toml` (working from CLI), or in the GitHub `Settings > Secrets and variables` (working from GitHub App or GitHub Action):
-```
-[openai]
-key = "" # your azure api key
-api_type = "azure"
-api_version = '2023-05-15' # Check Azure documentation for the current API version
-api_base = "" # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
-deployment_id = "" # The deployment name you chose when you deployed the engine
-```
-
-and set in your configuration file:
-```
-[config]
-model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
-```
-
-### Hugging Face
-
-**Local**
-You can run Hugging Face models locally through either [VLLM](https://docs.litellm.ai/docs/providers/vllm) or [Ollama](https://docs.litellm.ai/docs/providers/ollama)
-
-E.g. to use a new Hugging Face model locally via Ollama, set:
-```
-[__init__.py]
-MAX_TOKENS = {
-    "model-name-on-ollama": <max_tokens>
-}
-e.g.
-MAX_TOKENS={
-    ...,
-    "ollama/llama2": 4096
-}
-
-
-[config] # in configuration.toml
-model = "ollama/llama2"
-model_turbo = "ollama/llama2"
-
-[ollama] # in .secrets.toml
-api_base = ... # the base url for your Hugging Face inference endpoint
-# e.g. if running Ollama locally, you may use:
-api_base = "http://localhost:11434/"
-```
-
-### Inference Endpoints
-
-To use a new model with Hugging Face Inference Endpoints, for example, set:
-```
-[__init__.py]
-MAX_TOKENS = {
-    "model-name-on-huggingface": <max_tokens>
-}
-e.g.
-MAX_TOKENS={
-    ...,
-    "meta-llama/Llama-2-7b-chat-hf": 4096
-}
-[config] # in configuration.toml
-model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
-model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
-
-[huggingface] # in .secrets.toml
-key = ... # your Hugging Face api key
-api_base = ... # the base url for your Hugging Face inference endpoint
-```
-(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
-
-### Replicate
-
-To use Llama2 model with Replicate, for example, set:
-```
-[config] # in configuration.toml
-model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
-model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
-[replicate] # in .secrets.toml
-key = ...
-```
-(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
-
-
-Also, review the [AiHandler](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/ai_handler.py) file for instructions on how to set keys for other models.
-
-### Groq
-
-To use Llama3 model with Groq, for example, set:
-```
-[config] # in configuration.toml
-model = "llama3-70b-8192"
-model_turbo = "llama3-70b-8192"
-fallback_models = ["groq/llama3-70b-8192"]
-[groq] # in .secrets.toml
-key = ... # your Groq api key
-```
-(you can obtain a Groq key from [here](https://console.groq.com/keys))
-
-### Vertex AI
-
-To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:
-
-```
-[config] # in configuration.toml
-model = "vertex_ai/codechat-bison"
-model_turbo = "vertex_ai/codechat-bison"
-fallback_models="vertex_ai/codechat-bison"
-
-[vertexai] # in .secrets.toml
-vertex_project = "my-google-cloud-project"
-vertex_location = ""
-```
-
-Your [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) will be used for authentication so there is no need to set explicit credentials in most environments.
-
-If you do want to set explicit credentials then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
-
-### Anthropic
-
-To use Anthropic models, set the relevant models in the configuration section of the configuration file:
-```
-[config]
-model="anthropic/claude-3-opus-20240229"
-model_turbo="anthropic/claude-3-opus-20240229"
-fallback_models=["anthropic/claude-3-opus-20240229"]
-```
-
-And also set the api key in the .secrets.toml file:
-```
-[anthropic]
-KEY = "..."
-```
-
-### Amazon Bedrock
-
-To use Amazon Bedrock and its foundational models, add the below configuration:
-
-```
-[config] # in configuration.toml
-model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
-model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
-fallback_models=["bedrock/anthropic.claude-v2:1"]
-```
-
-Note that you have to add access to foundational models before using them. Please refer to [this document](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) for more details.
-
-If you are using the claude-3 model, please configure the following settings as there are parameters incompatible with claude-3.
-```
-[litellm]
-drop_params = true
-```
-
-AWS session is automatically authenticated from your environment, but you can also explicitly set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION_NAME` environment variables. Please refer to [this document](https://litellm.vercel.app/docs/providers/bedrock) for more details.
-
 ## Patch Extra Lines
 
diff --git a/docs/docs/usage-guide/changing_a_model.md b/docs/docs/usage-guide/changing_a_model.md
new file mode 100644
index 000000000..9ba605f87
--- /dev/null
+++ b/docs/docs/usage-guide/changing_a_model.md
@@ -0,0 +1,187 @@
+## Changing a model
+
+See [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py) for the list of available models.
+To use a different model than the default (GPT-4), you need to edit [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L2) the fields:
+```
+[config]
+model = "..."
+model_turbo = "..."
+fallback_models = ["..."]
+```
+
+For models and environments not from OpenAI, you might need to provide additional keys and other parameters.
+You can give parameters via a configuration file (see below for instructions), or from environment variables. see [litellm documentation](https://litellm.vercel.app/docs/proxy/quick_start#supported-llms) for the environment variables you can set per model.
+
+### Azure
+
+To use Azure, set in your `.secrets.toml` (working from CLI), or in the GitHub `Settings > Secrets and variables` (working from GitHub App or GitHub Action):
+```
+[openai]
+key = "" # your azure api key
+api_type = "azure"
+api_version = '2023-05-15' # Check Azure documentation for the current API version
+api_base = "" # The base URL for your Azure OpenAI resource. e.g. "https://<your resource name>.openai.azure.com"
+deployment_id = "" # The deployment name you chose when you deployed the engine
+```
+
+and set in your configuration file:
+```
+[config]
+model="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+model_turbo="" # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+fallback_models=["..."] # the OpenAI model you've deployed on Azure (e.g. gpt-3.5-turbo)
+```
+
+### Hugging Face
+
+**Local**
+You can run Hugging Face models locally through either [VLLM](https://docs.litellm.ai/docs/providers/vllm) or [Ollama](https://docs.litellm.ai/docs/providers/ollama)
+
+E.g. to use a new Hugging Face model locally via Ollama, set:
+```
+[__init__.py]
+MAX_TOKENS = {
+    "model-name-on-ollama": <max_tokens>
+}
+e.g.
+MAX_TOKENS={
+    ...,
+    "ollama/llama2": 4096
+}
+
+
+[config] # in configuration.toml
+model = "ollama/llama2"
+model_turbo = "ollama/llama2"
+fallback_models=["ollama/llama2"]
+
+[ollama] # in .secrets.toml
+api_base = ... # the base url for your Hugging Face inference endpoint
+# e.g. if running Ollama locally, you may use:
+api_base = "http://localhost:11434/"
+```
+
+### Inference Endpoints
+
+To use a new model with Hugging Face Inference Endpoints, for example, set:
+```
+[__init__.py]
+MAX_TOKENS = {
+    "model-name-on-huggingface": <max_tokens>
+}
+e.g.
+MAX_TOKENS={
+    ...,
+    "meta-llama/Llama-2-7b-chat-hf": 4096
+}
+[config] # in configuration.toml
+model = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+model_turbo = "huggingface/meta-llama/Llama-2-7b-chat-hf"
+fallback_models=["huggingface/meta-llama/Llama-2-7b-chat-hf"]
+
+[huggingface] # in .secrets.toml
+key = ... # your Hugging Face api key
+api_base = ... # the base url for your Hugging Face inference endpoint
+```
+(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
+
+### Replicate
+
+To use Llama2 model with Replicate, for example, set:
+```
+[config] # in configuration.toml
+model = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+model_turbo = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"
+fallback_models=["replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"]
+[replicate] # in .secrets.toml
+key = ...
+```
+(you can obtain a Llama2 key from [here](https://replicate.com/replicate/llama-2-70b-chat/api))
+
+
+Also, review the [AiHandler](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/ai_handler.py) file for instructions on how to set keys for other models.
+
+### Groq
+
+To use Llama3 model with Groq, for example, set:
+```
+[config] # in configuration.toml
+model = "llama3-70b-8192"
+model_turbo = "llama3-70b-8192"
+fallback_models = ["groq/llama3-70b-8192"]
+[groq] # in .secrets.toml
+key = ... # your Groq api key
+```
+(you can obtain a Groq key from [here](https://console.groq.com/keys))
+
+### Vertex AI
+
+To use Google's Vertex AI platform and its associated models (chat-bison/codechat-bison) set:
+
+```
+[config] # in configuration.toml
+model = "vertex_ai/codechat-bison"
+model_turbo = "vertex_ai/codechat-bison"
+fallback_models="vertex_ai/codechat-bison"
+
+[vertexai] # in .secrets.toml
+vertex_project = "my-google-cloud-project"
+vertex_location = ""
+```
+
+Your [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) will be used for authentication so there is no need to set explicit credentials in most environments.
+
+If you do want to set explicit credentials then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
+
+### Anthropic
+
+To use Anthropic models, set the relevant models in the configuration section of the configuration file:
+```
+[config]
+model="anthropic/claude-3-opus-20240229"
+model_turbo="anthropic/claude-3-opus-20240229"
+fallback_models=["anthropic/claude-3-opus-20240229"]
+```
+
+And also set the api key in the .secrets.toml file:
+```
+[anthropic]
+KEY = "..."
+```
+
+### Amazon Bedrock
+
+To use Amazon Bedrock and its foundational models, add the below configuration:
+
+```
+[config] # in configuration.toml
+model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
+model_turbo="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
+fallback_models=["bedrock/anthropic.claude-v2:1"]
+```
+
+Note that you have to add access to foundational models before using them. Please refer to [this document](https://docs.aws.amazon.com/bedrock/latest/userguide/setting-up.html) for more details.
+
+If you are using the claude-3 model, please configure the following settings as there are parameters incompatible with claude-3.
+```
+[litellm]
+drop_params = true
+```
+
+AWS session is automatically authenticated from your environment, but you can also explicitly set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION_NAME` environment variables. Please refer to [this document](https://litellm.vercel.app/docs/providers/bedrock) for more details.
+
+### custom models
+If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py), you can still use it as a custom model:
+(1) Set the model name in the configuration file:
+```
+[config]
+model="custom_model_name"
+model_turbo="custom_model_name"
+fallback_models=["custom_model_name"]
+```
+(2) Set the maximal tokens for the model:
+```
+[config]
+custom_model_max_tokens= ...
+```
+(3) Go to [litellm documentation](https://litellm.vercel.app/docs/proxy/quick_start#supported-llms), find the model you want to use, and set the relevant environment variables.
diff --git a/docs/docs/usage-guide/index.md b/docs/docs/usage-guide/index.md
index 8304aded4..328489cec 100644
--- a/docs/docs/usage-guide/index.md
+++ b/docs/docs/usage-guide/index.md
@@ -15,6 +15,7 @@ It includes information on how to adjust PR-Agent configurations, define which t
 - [BitBucket App](./automations_and_usage.md#bitbucket-app)
 - [Azure DevOps Provider](./automations_and_usage.md#azure-devops-provider)
 - [Managing Mail Notifications](./mail_notifications.md)
+- [Changing a Model](./changing_a_model.md)
 - [Additional Configurations Walkthrough](./additional_configurations.md)
 - [Ignoring files from analysis](./additional_configurations.md#ignoring-files-from-analysis)
 - [Extra instructions](./additional_configurations.md#extra-instructions)
@@ -22,4 +23,4 @@ It includes information on how to adjust PR-Agent configurations, define which t
 - [Changing a model](./additional_configurations.md#changing-a-model)
 - [Patch Extra Lines](./additional_configurations.md#patch-extra-lines)
 - [Editing the prompts](./additional_configurations.md#editing-the-prompts)
-- [PR-Agent Pro Models](./PR_agent_pro_models.md)
\ No newline at end of file
+- [PR-Agent Pro Models 💎](./PR_agent_pro_models.md)
\ No newline at end of file
diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml
index cf34d800c..8d8dc3f8f 100644
--- a/docs/mkdocs.yml
+++ b/docs/mkdocs.yml
@@ -21,7 +21,9 @@ nav:
       - Configuration File: 'usage-guide/configuration_options.md'
       - Usage and Automation: 'usage-guide/automations_and_usage.md'
       - Managing Mail Notifications: 'usage-guide/mail_notifications.md'
+      - Changing a Model: 'usage-guide/changing_a_model.md'
       - Additional Configurations: 'usage-guide/additional_configurations.md'
+      - 💎 PR-Agent Pro Models: 'usage-guide/PR_agent_pro_models'
   - Tools:
        - 'tools/index.md'
        - Describe: 'tools/describe.md'
diff --git a/pr_agent/algo/utils.py b/pr_agent/algo/utils.py
index fae84eea9..e759d78ca 100644
--- a/pr_agent/algo/utils.py
+++ b/pr_agent/algo/utils.py
@@ -693,15 +693,25 @@ def get_user_labels(current_labels: List[str] = None):
 
 
 def get_max_tokens(model):
+    """
+    Get the maximum number of tokens allowed for a model.
+    logic:
+    (1) If the model is in './pr_agent/algo/__init__.py', use the value from there.
+    (2) else, the user needs to define explicitly 'config.custom_model_max_tokens'
+
+    For both cases, we further limit the number of tokens to 'config.max_model_tokens' if it is set.
+    This aims to improve the algorithmic quality, as the AI model degrades in performance when the input is too long.
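+
+    Example (illustrative values, not part of this patch): if "my-custom-model" is absent
+    from MAX_TOKENS, custom_model_max_tokens=3000 and max_model_tokens=32000, then
+    get_max_tokens("my-custom-model") returns min(32000, 3000) = 3000.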
+
+    """
     settings = get_settings()
     if model in MAX_TOKENS:
         max_tokens_model = MAX_TOKENS[model]
+    elif model in settings.config.custom_model_max_tokens > 0:
+        max_tokens_model = settings.config.custom_model_max_tokens
     else:
-        raise Exception(f"MAX_TOKENS must be set for model {model} in ./pr_agent/algo/__init__.py")
+        raise Exception(f"MAX_TOKENS must be set for model {model} in ./pr_agent/algo/__init__.py, or set config.custom_model_max_tokens")
 
-    if settings.config.max_model_tokens:
+    if settings.config.max_model_tokens and settings.config.max_model_tokens > 0:
         max_tokens_model = min(settings.config.max_model_tokens, max_tokens_model)
-        # get_logger().debug(f"limiting max tokens to {max_tokens_model}")
     return max_tokens_model
 
diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index e0a10a179..194781f93 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -18,6 +18,8 @@ ai_timeout=120 # 2minutes
 max_description_tokens = 500
 max_commits_tokens = 500
 max_model_tokens = 32000 # Limits the maximum number of tokens that can be used by any model, regardless of the model's default capabilities.
+custom_model_max_tokens=-1 # for models not in the default list
+# patch_extra_lines = 1
 secret_provider=""
 cli_mode=false

From 6ba7b3eea24be52a470336e09df5def80752ab4f Mon Sep 17 00:00:00 2001
From: mrT23
Date: Sun, 28 Jul 2024 08:57:39 +0300
Subject: [PATCH 02/11] fix condition

---
 pr_agent/algo/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pr_agent/algo/utils.py b/pr_agent/algo/utils.py
index e759d78ca..dd0109217 100644
--- a/pr_agent/algo/utils.py
+++ b/pr_agent/algo/utils.py
@@ -705,7 +705,7 @@ def get_max_tokens(model):
     settings = get_settings()
     if model in MAX_TOKENS:
         max_tokens_model = MAX_TOKENS[model]
-    elif model in settings.config.custom_model_max_tokens > 0:
+    elif settings.config.custom_model_max_tokens > 0:
         max_tokens_model = settings.config.custom_model_max_tokens
     else:
         raise Exception(f"MAX_TOKENS must be set for model {model} in ./pr_agent/algo/__init__.py, or set config.custom_model_max_tokens")

From 27d6560de80d8723d4e16ba1fb14230b13ff8fc0 Mon Sep 17 00:00:00 2001
From: Tal
Date: Sun, 28 Jul 2024 08:58:03 +0300
Subject: [PATCH 03/11] Update pr_agent/algo/utils.py

Co-authored-by: codiumai-pr-agent-pro[bot] <151058649+codiumai-pr-agent-pro[bot]@users.noreply.github.com>
---
 pr_agent/algo/utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pr_agent/algo/utils.py b/pr_agent/algo/utils.py
index dd0109217..4bd6c631b 100644
--- a/pr_agent/algo/utils.py
+++ b/pr_agent/algo/utils.py
@@ -708,7 +708,7 @@ def get_max_tokens(model):
     elif settings.config.custom_model_max_tokens > 0:
         max_tokens_model = settings.config.custom_model_max_tokens
     else:
-        raise Exception(f"MAX_TOKENS must be set for model {model} in ./pr_agent/algo/__init__.py, or set config.custom_model_max_tokens")
+        raise Exception(f"Ensure {model} is defined in MAX_TOKENS in ./pr_agent/algo/__init__.py or set a positive value for it in config.custom_model_max_tokens")
 
     if settings.config.max_model_tokens and settings.config.max_model_tokens > 0:
         max_tokens_model = min(settings.config.max_model_tokens, max_tokens_model)

From e946a0ea9faba1b59d452214fff5b99d807eebf6 Mon Sep 17 00:00:00 2001
From: mrT23
Date: Sun, 28 Jul 2024 09:30:21 +0300
Subject: [PATCH 04/11] docs: update usage guide and README; fix minor formatting issues in utils.py

---
 README.md                                 | 5 +++++
 docs/docs/usage-guide/changing_a_model.md | 2 +-
 docs/docs/usage-guide/index.md            | 2 +-
 pr_agent/algo/utils.py                    | 2 +-
 4 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 86903c254..c9bbb4aba 100644
--- a/README.md
+++ b/README.md
@@ -42,6 +42,11 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
 
 ## News and Updates
 
+### July 28, 2024
+
+(1) improved support for bitbucket server - [auto commands](https://github.com/Codium-ai/pr-agent/pull/1059) and [direct links](https://github.com/Codium-ai/pr-agent/pull/1061)
+(2) custom models are now [supported](https://pr-agent-docs.codium.ai/usage-guide/changing_a_model/#custom-models)
+
 ### July 6, 2024
 v0.23 has been released. See full log changes [here](https://github.com/Codium-ai/pr-agent/releases/tag/v0.23).
 
diff --git a/docs/docs/usage-guide/changing_a_model.md b/docs/docs/usage-guide/changing_a_model.md
index 9ba605f87..e8e501b39 100644
--- a/docs/docs/usage-guide/changing_a_model.md
+++ b/docs/docs/usage-guide/changing_a_model.md
@@ -170,7 +170,7 @@ drop_params = true
 
 AWS session is automatically authenticated from your environment, but you can also explicitly set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION_NAME` environment variables. Please refer to [this document](https://litellm.vercel.app/docs/providers/bedrock) for more details.
 
-### custom models
+### Custom models
 If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py), you can still use it as a custom model:
 (1) Set the model name in the configuration file:
 ```
diff --git a/docs/docs/usage-guide/index.md b/docs/docs/usage-guide/index.md
index 328489cec..637048c19 100644
--- a/docs/docs/usage-guide/index.md
+++ b/docs/docs/usage-guide/index.md
@@ -23,4 +23,4 @@ It includes information on how to adjust PR-Agent configurations, define which t
 - [Changing a model](./additional_configurations.md#changing-a-model)
 - [Patch Extra Lines](./additional_configurations.md#patch-extra-lines)
 - [Editing the prompts](./additional_configurations.md#editing-the-prompts)
-- [PR-Agent Pro Models 💎](./PR_agent_pro_models.md)
\ No newline at end of file
+- [PR-Agent Pro Models](./PR_agent_pro_models.md)
\ No newline at end of file
diff --git a/pr_agent/algo/utils.py b/pr_agent/algo/utils.py
index dd0109217..afeae685a 100644
--- a/pr_agent/algo/utils.py
+++ b/pr_agent/algo/utils.py
@@ -557,7 +557,7 @@ def _fix_key_value(key: str, value: str):
 
 
 def load_yaml(response_text: str, keys_fix_yaml: List[str] = [], first_key="", last_key="") -> dict:
-    response_text = response_text.removeprefix('```yaml').rstrip('`')
+    response_text = response_text.strip('\n').removeprefix('```yaml').rstrip('`')
     try:
         data = yaml.safe_load(response_text)
     except Exception as e:

From c2c69f29505d2f55d86dc890c3e3e09fd9f4ecbe Mon Sep 17 00:00:00 2001
From: mrT23
Date: Sun, 28 Jul 2024 09:32:54 +0300
Subject: [PATCH 05/11] No code suggestions found for PR.

---
 pr_agent/tools/pr_code_suggestions.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pr_agent/tools/pr_code_suggestions.py b/pr_agent/tools/pr_code_suggestions.py
index 84e3cc8f1..2cb479114 100644
--- a/pr_agent/tools/pr_code_suggestions.py
+++ b/pr_agent/tools/pr_code_suggestions.py
@@ -100,8 +100,8 @@ async def run(self):
                 data = {"code_suggestions": []}
 
             if data is None or 'code_suggestions' not in data or not data['code_suggestions']:
-                get_logger().error('No code suggestions found for PR.')
-                pr_body = "## PR Code Suggestions ✨\n\nNo code suggestions found for PR."
+                get_logger().error('No code suggestions found for the PR.')
+                pr_body = "## PR Code Suggestions ✨\n\nNo code suggestions found for the PR."
                 get_logger().debug(f"PR output", artifact=pr_body)
                 if self.progress_response:
                     self.git_provider.edit_comment(self.progress_response, body=pr_body)

From 110e593d03455bef9d4be479224cdd3e64b8f32b Mon Sep 17 00:00:00 2001
From: Tal
Date: Sun, 28 Jul 2024 09:36:41 +0300
Subject: [PATCH 06/11] Update README.md

---
 README.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.md b/README.md
index c9bbb4aba..e1cbf3fed 100644
--- a/README.md
+++ b/README.md
@@ -45,6 +45,7 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
 ### July 28, 2024
 
 (1) improved support for bitbucket server - [auto commands](https://github.com/Codium-ai/pr-agent/pull/1059) and [direct links](https://github.com/Codium-ai/pr-agent/pull/1061)
+
 (2) custom models are now [supported](https://pr-agent-docs.codium.ai/usage-guide/changing_a_model/#custom-models)
 
 ### July 6, 2024

From 452eda25cd860cb0f2b18cc9fb8863d14861932c Mon Sep 17 00:00:00 2001
From: Tal
Date: Sun, 28 Jul 2024 09:57:23 +0300
Subject: [PATCH 07/11] Update changing_a_model.md

---
 docs/docs/usage-guide/changing_a_model.md | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/docs/docs/usage-guide/changing_a_model.md b/docs/docs/usage-guide/changing_a_model.md
index e8e501b39..9d994d173 100644
--- a/docs/docs/usage-guide/changing_a_model.md
+++ b/docs/docs/usage-guide/changing_a_model.md
@@ -1,7 +1,7 @@
 ## Changing a model
 
-See [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py) for the list of available models.
-To use a different model than the default (GPT-4), you need to edit [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L2) the fields:
+See [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py) for a list of available models.
+To use a different model than the default (GPT-4), you need to edit in the [configuration file](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/settings/configuration.toml#L2) the fields:
 ```
 [config]
 model = "..."
 model_turbo = "..."
 fallback_models = ["..."]
 ```
 
 For models and environments not from OpenAI, you might need to provide additional keys and other parameters.
-You can give parameters via a configuration file (see below for instructions), or from environment variables. see [litellm documentation](https://litellm.vercel.app/docs/proxy/quick_start#supported-llms) for the environment variables you can set per model.
+You can give parameters via a configuration file (see below for instructions), or from environment variables. See [litellm documentation](https://litellm.vercel.app/docs/proxy/quick_start#supported-llms) for the environment variables relevant per model.
 
 ### Azure
@@ -131,7 +131,7 @@ vertex_location = ""
 
 Your [application default credentials](https://cloud.google.com/docs/authentication/application-default-credentials) will be used for authentication so there is no need to set explicit credentials in most environments.
 
-If you do want to set explicit credentials then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
+If you do want to set explicit credentials, then you can use the `GOOGLE_APPLICATION_CREDENTIALS` environment variable set to a path to a json credentials file.
 
 ### Anthropic
 
@@ -171,7 +171,9 @@ drop_params = true
 
 AWS session is automatically authenticated from your environment, but you can also explicitly set `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and `AWS_REGION_NAME` environment variables. Please refer to [this document](https://litellm.vercel.app/docs/providers/bedrock) for more details.
 
 ### Custom models
+
 If the relevant model doesn't appear [here](https://github.com/Codium-ai/pr-agent/blob/main/pr_agent/algo/__init__.py), you can still use it as a custom model:
+
 (1) Set the model name in the configuration file:
 ```

From 232b540f6012f67fbbdec266ca5e8b24ebfaac6f Mon Sep 17 00:00:00 2001
From: Tal
Date: Sun, 28 Jul 2024 17:34:08 +0300
Subject: [PATCH 08/11] Update README.md

---
 README.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index e1cbf3fed..3c43ed49a 100644
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@
 
-CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedbacks and suggestions
+CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by providing AI feedback and suggestions
 
 [![GitHub license](https://img.shields.io/badge/License-Apache_2.0-blue.svg)](https://github.com/Codium-ai/pr-agent/blob/main/LICENSE)
@@ -18,6 +18,7 @@ CodiumAI PR-Agent aims to help efficiently review and handle pull requests, by p
 [![Static Badge](https://img.shields.io/badge/Code-Benchmark-blue)](https://pr-agent-docs.codium.ai/finetuning_benchmark/)
 [![Discord](https://badgen.net/badge/icon/discord?icon=discord&label&color=purple)](https://discord.com/channels/1057273017547378788/1126104260430528613)
 [![Twitter](https://img.shields.io/twitter/follow/codiumai)](https://twitter.com/codiumai)
+[![Cheat Sheet](https://img.shields.io/badge/Cheat-Sheet-red)](https://www.codium.ai/images/pr_agent/cheat_sheet.pdf)
 GitHub

From 927f124dcabc0c61808e855df2e72682b286906c Mon Sep 17 00:00:00 2001
From: Tal
Date: Sun, 28 Jul 2024 18:12:22 +0300
Subject: [PATCH 09/11] Update configuration_options.md

---
 docs/docs/usage-guide/configuration_options.md | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/docs/usage-guide/configuration_options.md b/docs/docs/usage-guide/configuration_options.md
index d0cf0bae2..70e9f9f8f 100644
--- a/docs/docs/usage-guide/configuration_options.md
+++ b/docs/docs/usage-guide/configuration_options.md
@@ -26,15 +26,15 @@ The advantage of this method is that it allows to set configurations without nee
 
 ![wiki_configuration](https://codium.ai/images/pr_agent/wiki_configuration.png){width=512}
 
-Click [here](https://codium.ai/images/pr_agent/wiki_configuration_pr_agent.mp4) to see a short instructional video. We recommend surrounding the configuration content with triple-quotes, to allow better presentation when displayed in the wiki as markdown.
+Click [here](https://codium.ai/images/pr_agent/wiki_configuration_pr_agent.mp4) to see a short instructional video. We recommend surrounding the configuration content with triple-quotes (or \`\`\`toml), to allow better presentation when displayed in the wiki as markdown.
 An example content:
 
-```
+```toml
 [pr_description]
 generate_ai_title=true
 ```
 
-PR-Agent will know to remove the triple-quotes when reading the configuration content.
+PR-Agent will know to remove the surrounding quotes when reading the configuration content.
 
 ## Local configuration file

From f50832e19b72218342adcea4b15a1e219fd7fcb7 Mon Sep 17 00:00:00 2001
From: Tal
Date: Mon, 29 Jul 2024 08:32:34 +0300
Subject: [PATCH 10/11] Update __init__.py

---
 pr_agent/algo/__init__.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/pr_agent/algo/__init__.py b/pr_agent/algo/__init__.py
index 27b24c56e..baea8a3ab 100644
--- a/pr_agent/algo/__init__.py
+++ b/pr_agent/algo/__init__.py
@@ -52,4 +52,10 @@
     'groq/llama-3.1-70b-versatile': 131072,
     'groq/llama-3.1-405b-reasoning': 131072,
     'ollama/llama3': 4096,
+    'watsonx/meta-llama/llama-3-8b-instruct': 4096,
+    "watsonx/meta-llama/llama-3-70b-instruct": 4096,
+    "watsonx/meta-llama/llama-3-405b-instruct": 16384,
+    "watsonx/ibm/granite-13b-chat-v2": 8191,
+    "watsonx/ibm/granite-34b-code-instruct": 8191,
+    "watsonx/mistralai/mistral-large": 32768,
 }

From 0390a85f5a2221a9c6dd27fa539e9af8dd267376 Mon Sep 17 00:00:00 2001
From: MarkRx
Date: Tue, 30 Jul 2024 09:45:45 -0400
Subject: [PATCH 11/11] Add missing bitbucket_server.pr_commands default

---
 pr_agent/settings/configuration.toml | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pr_agent/settings/configuration.toml b/pr_agent/settings/configuration.toml
index 194781f93..b589461e7 100644
--- a/pr_agent/settings/configuration.toml
+++ b/pr_agent/settings/configuration.toml
@@ -253,6 +253,11 @@ pr_commands = [
 # URL to the BitBucket Server instance
 # url = "https://git.bitbucket.com"
 url = ""
+pr_commands = [
+    "/describe",
+    "/review --pr_reviewer.num_code_suggestions=0",
+    "/improve --pr_code_suggestions.commitable_code_suggestions=true --pr_code_suggestions.suggestions_score_threshold=7",
+]
 
 [litellm]
 # use_client = false
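
Note: the following is a minimal end-to-end sketch (not part of the patch series) of the custom-model configuration that patches 01-03 introduce. The model name "my-custom-model" and the numeric values are illustrative placeholders; the keys themselves (`model`, `model_turbo`, `fallback_models`, `custom_model_max_tokens`, `max_model_tokens`) all come from the patches above.

```toml
[config]
# a model that does not appear in MAX_TOKENS in pr_agent/algo/__init__.py
model = "my-custom-model"
model_turbo = "my-custom-model"
fallback_models = ["my-custom-model"]

# required for unlisted models; the default of -1 makes get_max_tokens() raise
custom_model_max_tokens = 4096

# optional global cap; the effective limit becomes min(32000, 4096) = 4096
max_model_tokens = 32000
```

The remaining step, per the new documentation, is to set the environment variables that litellm expects for the chosen provider.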