From 314bc1d0988c4a8b3a7278a27b72e75a212c4460 Mon Sep 17 00:00:00 2001
From: Celina Hanouti
Date: Mon, 30 Dec 2024 11:28:08 +0100
Subject: [PATCH 1/2] update logit bias doc

---
 src/huggingface_hub/inference/_client.py                  | 7 +------
 src/huggingface_hub/inference/_generated/_async_client.py | 7 +------
 2 files changed, 2 insertions(+), 12 deletions(-)

diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py
index a448f6ce62..c3edc9c6ff 100644
--- a/src/huggingface_hub/inference/_client.py
+++ b/src/huggingface_hub/inference/_client.py
@@ -576,18 +576,13 @@ def chat_completion(
                 The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
                 Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used.
                 See https://huggingface.co/tasks/text-generation for more details.
-
                 If `model` is a model ID, it is passed to the server as the `model` parameter. If you want to define a
                 custom URL while setting `model` in the request payload, you must set `base_url` when initializing [`InferenceClient`].
             frequency_penalty (`float`, *optional*):
                 Penalizes new tokens based on their existing frequency in the text so far. Range: [-2.0, 2.0].
                 Defaults to 0.0.
             logit_bias (`List[float]`, *optional*):
-                Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
-                (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
-                the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
-                but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
-                result in a ban or exclusive selection of the relevant token. Defaults to None.
+                UNUSED.
             logprobs (`bool`, *optional*):
                 Whether to return log probabilities of the output tokens or not. If true, returns the log
                 probabilities of each output token returned in the content of message.
diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py
index db9adf20f2..b57809c1e2 100644
--- a/src/huggingface_hub/inference/_generated/_async_client.py
+++ b/src/huggingface_hub/inference/_generated/_async_client.py
@@ -612,18 +612,13 @@ async def chat_completion(
                 The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
                 Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used.
                 See https://huggingface.co/tasks/text-generation for more details.
-
                 If `model` is a model ID, it is passed to the server as the `model` parameter. If you want to define a
                 custom URL while setting `model` in the request payload, you must set `base_url` when initializing [`InferenceClient`].
             frequency_penalty (`float`, *optional*):
                 Penalizes new tokens based on their existing frequency in the text so far. Range: [-2.0, 2.0].
                 Defaults to 0.0.
             logit_bias (`List[float]`, *optional*):
-                Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens
-                (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,
-                the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
-                but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should
-                result in a ban or exclusive selection of the relevant token. Defaults to None.
+                UNUSED.
             logprobs (`bool`, *optional*):
                 Whether to return log probabilities of the output tokens or not. If true, returns the log
                 probabilities of each output token returned in the content of message.
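For context on what the removed description documented: an OpenAI-style `logit_bias` maps token IDs to values in [-100, 100] that are added to the raw logits before sampling. The sketch below illustrates that mechanism only; the token IDs, logit values, and helper names are hypothetical and are not part of this patch or of TGI.

```python
import math
import random

def apply_logit_bias(logits: dict[int, float], logit_bias: dict[int, float]) -> dict[int, float]:
    # Add each token's bias to its raw logit; tokens absent from
    # `logit_bias` keep their original logit.
    return {tok: logit + logit_bias.get(tok, 0.0) for tok, logit in logits.items()}

def sample(logits: dict[int, float]) -> int:
    # Softmax over the (biased) logits, then draw one token ID.
    m = max(logits.values())
    weights = {tok: math.exp(x - m) for tok, x in logits.items()}
    r = random.uniform(0.0, sum(weights.values()))
    for tok, w in weights.items():
        r -= w
        if r <= 0.0:
            return tok
    return tok  # guard against floating-point rounding on the last token

# Hypothetical logits for three token IDs: a bias of -100 effectively bans
# token 42, while +1.0 mildly boosts token 7, per the removed docstring text.
biased = apply_logit_bias({7: 1.2, 13: 0.9, 42: 2.5}, {42: -100.0, 7: 1.0})
print(sample(biased))
```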
From 6b4dbe379e5439f6283c1077e457257f69fc857f Mon Sep 17 00:00:00 2001
From: Celina Hanouti
Date: Thu, 2 Jan 2025 15:03:25 +0100
Subject: [PATCH 2/2] improve unused parameters documentation

---
 src/huggingface_hub/inference/_client.py                  | 4 ++--
 src/huggingface_hub/inference/_generated/_async_client.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/huggingface_hub/inference/_client.py b/src/huggingface_hub/inference/_client.py
index c3edc9c6ff..09d5af0d3a 100644
--- a/src/huggingface_hub/inference/_client.py
+++ b/src/huggingface_hub/inference/_client.py
@@ -582,14 +582,14 @@ def chat_completion(
                 Penalizes new tokens based on their existing frequency in the text so far. Range: [-2.0, 2.0].
                 Defaults to 0.0.
             logit_bias (`List[float]`, *optional*):
-                UNUSED.
+                UNUSED. Currently not implemented in text-generation-inference (TGI). Kept as a parameter for OpenAI compatibility.
             logprobs (`bool`, *optional*):
                 Whether to return log probabilities of the output tokens or not. If true, returns the log
                 probabilities of each output token returned in the content of message.
             max_tokens (`int`, *optional*):
                 Maximum number of tokens allowed in the response. Defaults to 100.
             n (`int`, *optional*):
-                UNUSED.
+                UNUSED. Currently not implemented in text-generation-inference (TGI). Kept as a parameter for OpenAI compatibility.
             presence_penalty (`float`, *optional*):
                 Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
                 appear in the text so far, increasing the model's likelihood to talk about new topics.
diff --git a/src/huggingface_hub/inference/_generated/_async_client.py b/src/huggingface_hub/inference/_generated/_async_client.py
index b57809c1e2..5fa2fcb5d8 100644
--- a/src/huggingface_hub/inference/_generated/_async_client.py
+++ b/src/huggingface_hub/inference/_generated/_async_client.py
@@ -618,14 +618,14 @@ async def chat_completion(
                 Penalizes new tokens based on their existing frequency in the text so far. Range: [-2.0, 2.0].
                 Defaults to 0.0.
             logit_bias (`List[float]`, *optional*):
-                UNUSED.
+                UNUSED. Currently not implemented in text-generation-inference (TGI). Kept as a parameter for OpenAI compatibility.
             logprobs (`bool`, *optional*):
                 Whether to return log probabilities of the output tokens or not. If true, returns the log
                 probabilities of each output token returned in the content of message.
             max_tokens (`int`, *optional*):
                 Maximum number of tokens allowed in the response. Defaults to 100.
             n (`int`, *optional*):
-                UNUSED.
+                UNUSED. Currently not implemented in text-generation-inference (TGI). Kept as a parameter for OpenAI compatibility.
             presence_penalty (`float`, *optional*):
                 Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
                 appear in the text so far, increasing the model's likelihood to talk about new topics.
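A hedged usage sketch of what the new wording means in practice: both parameters are still accepted by `chat_completion` for OpenAI compatibility but have no effect on TGI-backed generation. The model ID is illustrative, and this assumes a reachable Inference endpoint.

```python
from huggingface_hub import InferenceClient

# Model ID is illustrative; any TGI-served chat model behaves the same here.
client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct")

response = client.chat_completion(
    messages=[{"role": "user", "content": "Say hello in one word."}],
    max_tokens=16,      # documented default is 100
    logit_bias=[10.0],  # accepted for OpenAI compatibility, ignored by TGI
    n=1,                # likewise accepted but currently unused
)
print(response.choices[0].message.content)
```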