Skip to content

Commit

Permalink
[Bot] Update inference types (#2688)
Browse files Browse the repository at this point in the history
* Update inference types (automated commit)

* fix quality after merging main

* another fix

* fix tests

* Update inference types (automated commit)

* Update inference types (automated commit)

* fix quality

* Update inference types (automated commit)

* Update inference types (automated commit)

* Update inference types (automated commit)

* fix client

* activate automatic update for table-question-answering

* fix

---------

Co-authored-by: Wauplin <11801849+Wauplin@users.noreply.github.com>
Co-authored-by: Celina Hanouti <hanouticelina@gmail.com>
  • Loading branch information
3 people authored Dec 3, 2024
1 parent 503d353 commit f50fe7c
Show file tree
Hide file tree
Showing 8 changed files with 64 additions and 9 deletions.
2 changes: 2 additions & 0 deletions docs/source/en/package_reference/inference_types.md
Original file line number Diff line number Diff line change
Expand Up @@ -239,6 +239,8 @@ This part of the lib is still under development and will be improved in future r

[[autodoc]] huggingface_hub.TableQuestionAnsweringOutputElement

[[autodoc]] huggingface_hub.TableQuestionAnsweringParameters



## text2text_generation
Expand Down
2 changes: 2 additions & 0 deletions docs/source/ko/package_reference/inference_types.md
Original file line number Diff line number Diff line change
Expand Up @@ -238,6 +238,8 @@ rendered properly in your Markdown viewer.

[[autodoc]] huggingface_hub.TableQuestionAnsweringOutputElement

[[autodoc]] huggingface_hub.TableQuestionAnsweringParameters



## text2text_generation[[huggingface_hub.Text2TextGenerationInput]]
Expand Down
4 changes: 4 additions & 0 deletions src/huggingface_hub/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -351,6 +351,7 @@
"ObjectDetectionInput",
"ObjectDetectionOutputElement",
"ObjectDetectionParameters",
"Padding",
"QuestionAnsweringInput",
"QuestionAnsweringInputData",
"QuestionAnsweringOutputElement",
Expand All @@ -364,6 +365,7 @@
"TableQuestionAnsweringInput",
"TableQuestionAnsweringInputData",
"TableQuestionAnsweringOutputElement",
"TableQuestionAnsweringParameters",
"Text2TextGenerationInput",
"Text2TextGenerationOutput",
"Text2TextGenerationParameters",
Expand Down Expand Up @@ -880,6 +882,7 @@ def __dir__():
ObjectDetectionInput, # noqa: F401
ObjectDetectionOutputElement, # noqa: F401
ObjectDetectionParameters, # noqa: F401
Padding, # noqa: F401
QuestionAnsweringInput, # noqa: F401
QuestionAnsweringInputData, # noqa: F401
QuestionAnsweringOutputElement, # noqa: F401
Expand All @@ -893,6 +896,7 @@ def __dir__():
TableQuestionAnsweringInput, # noqa: F401
TableQuestionAnsweringInputData, # noqa: F401
TableQuestionAnsweringOutputElement, # noqa: F401
TableQuestionAnsweringParameters, # noqa: F401
Text2TextGenerationInput, # noqa: F401
Text2TextGenerationOutput, # noqa: F401
Text2TextGenerationParameters, # noqa: F401
Expand Down
20 changes: 17 additions & 3 deletions src/huggingface_hub/inference/_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,7 @@
ImageToImageTargetSize,
ImageToTextOutput,
ObjectDetectionOutputElement,
Padding,
QuestionAnsweringOutputElement,
SummarizationOutput,
SummarizationTruncationStrategy,
Expand Down Expand Up @@ -1654,7 +1655,9 @@ def table_question_answering(
query: str,
*,
model: Optional[str] = None,
parameters: Optional[Dict[str, Any]] = None,
padding: Optional["Padding"] = None,
sequential: Optional[bool] = None,
truncation: Optional[bool] = None,
) -> TableQuestionAnsweringOutputElement:
"""
Retrieve the answer to a question from information given in a table.
Expand All @@ -1668,8 +1671,14 @@ def table_question_answering(
model (`str`):
The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
Hub or a URL to a deployed Inference Endpoint.
parameters (`Dict[str, Any]`, *optional*):
Additional inference parameters. Defaults to None.
padding (`"Padding"`, *optional*):
Activates and controls padding.
sequential (`bool`, *optional*):
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
inference to be done sequentially to extract relations within sequences, given their conversational
nature.
truncation (`bool`, *optional*):
Activates and controls truncation.
Returns:
[`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
Expand All @@ -1690,6 +1699,11 @@ def table_question_answering(
TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE')
```
"""
parameters = {
"padding": padding,
"sequential": sequential,
"truncation": truncation,
}
inputs = {
"query": query,
"table": table,
Expand Down
20 changes: 17 additions & 3 deletions src/huggingface_hub/inference/_generated/_async_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@
ImageToImageTargetSize,
ImageToTextOutput,
ObjectDetectionOutputElement,
Padding,
QuestionAnsweringOutputElement,
SummarizationOutput,
SummarizationTruncationStrategy,
Expand Down Expand Up @@ -1713,7 +1714,9 @@ async def table_question_answering(
query: str,
*,
model: Optional[str] = None,
parameters: Optional[Dict[str, Any]] = None,
padding: Optional["Padding"] = None,
sequential: Optional[bool] = None,
truncation: Optional[bool] = None,
) -> TableQuestionAnsweringOutputElement:
"""
Retrieve the answer to a question from information given in a table.
Expand All @@ -1727,8 +1730,14 @@ async def table_question_answering(
model (`str`):
The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
Hub or a URL to a deployed Inference Endpoint.
parameters (`Dict[str, Any]`, *optional*):
Additional inference parameters. Defaults to None.
padding (`"Padding"`, *optional*):
Activates and controls padding.
sequential (`bool`, *optional*):
Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
inference to be done sequentially to extract relations within sequences, given their conversational
nature.
truncation (`bool`, *optional*):
Activates and controls truncation.
Returns:
[`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
Expand All @@ -1750,6 +1759,11 @@ async def table_question_answering(
TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE')
```
"""
parameters = {
"padding": padding,
"sequential": sequential,
"truncation": truncation,
}
inputs = {
"query": query,
"table": table,
Expand Down
2 changes: 2 additions & 0 deletions src/huggingface_hub/inference/_generated/types/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,9 +101,11 @@
SummarizationTruncationStrategy,
)
from .table_question_answering import (
Padding,
TableQuestionAnsweringInput,
TableQuestionAnsweringInputData,
TableQuestionAnsweringOutputElement,
TableQuestionAnsweringParameters,
)
from .text2text_generation import (
Text2TextGenerationInput,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from typing import Dict, List, Literal, Optional

from .base import BaseInferenceType

Expand All @@ -19,13 +19,31 @@ class TableQuestionAnsweringInputData(BaseInferenceType):
"""The table to serve as context for the questions"""


# Allowed padding strategies, mirroring the `transformers` tokenizer padding options:
# "do_not_pad" (no padding), "longest" (pad to the longest sequence in the batch),
# "max_length" (pad to the model's maximum length).
Padding = Literal["do_not_pad", "longest", "max_length"]


@dataclass
class TableQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Table Question Answering"""

    # Padding strategy applied to the tokenized (table, query) pair; None lets the
    # server/model use its default.
    padding: Optional["Padding"] = None
    """Activates and controls padding."""
    # NOTE(review): sequential inference is required by conversational models such
    # as SQA; batch mode is faster but loses cross-sequence relations.
    sequential: Optional[bool] = None
    """Whether to do inference sequentially or as a batch. Batching is faster, but models like
    SQA require the inference to be done sequentially to extract relations within sequences,
    given their conversational nature.
    """
    # When None, truncation behavior is left to the model's default configuration.
    truncation: Optional[bool] = None
    """Activates and controls truncation."""


@dataclass
class TableQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Table Question Answering inference.

    Wraps one (table, question) pair plus optional typed inference parameters,
    as expected by the table-question-answering task payload.
    """

    inputs: TableQuestionAnsweringInputData
    """One (table, question) pair to answer"""
    # Fix: the span contained both the old untyped field (`Optional[Dict[str, Any]]`)
    # and the new typed one — a duplicated `parameters` field left over from the diff.
    # Keep only the typed version so the dataclass is valid and parameters are validated.
    parameters: Optional[TableQuestionAnsweringParameters] = None
    """Additional inference parameters for Table Question Answering"""


Expand Down
1 change: 0 additions & 1 deletion utils/check_task_parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,6 @@
"audio_to_audio",
"feature_extraction",
"sentence_similarity",
"table_question_answering",
"automatic_speech_recognition",
"image_to_text",
]
Expand Down

0 comments on commit f50fe7c

Please sign in to comment.