From e1bbb7cafd611986e0df8155b70c0adda7792d7b Mon Sep 17 00:00:00 2001
From: github-actions
Date: Fri, 17 May 2024 15:08:00 +0000
Subject: [PATCH] v0.3.54

---
 README.md                                     |  61 +++-
 premai/models/__init__.py                     |   8 +-
 premai/models/chat_completion_input.py        | 279 ------------------
 ...chat_completion_input_logit_bias_type_0.py |  49 ---
 ...completion_input_response_format_type_0.py |  49 ---
 premai/models/message.py                      |  46 ++-
 ..._input_tools_item.py => message_params.py} |  14 +-
 pyproject.toml                                |   2 +-
 8 files changed, 107 insertions(+), 401 deletions(-)
 delete mode 100644 premai/models/chat_completion_input_logit_bias_type_0.py
 delete mode 100644 premai/models/chat_completion_input_response_format_type_0.py
 rename premai/models/{chat_completion_input_tools_item.py => message_params.py} (77%)

diff --git a/README.md b/README.md
index bdaf79c..f1aec7c 100644
--- a/README.md
+++ b/README.md
@@ -65,6 +65,64 @@ for chunk in response:
     print(chunk.choices[0].delta["content"], end="")
 ```
 
+### Prompt Templates
+
+If you frequently reuse the same prompt structure, the **Prompt Template** feature streamlines this: define a prompt once and reuse it across requests, saving repetition and keeping your prompts consistent across interactions.
+
+#### Creating a Prompt Template
+Create your own Prompt Template in just a few steps:
+
+- Navigate to the **Launchpad** section of your project.
+- Click on the **Templates** tab.
+- Hit the button to create a new Prompt Template.
+
+From here, you can either create your custom Prompt Template or choose one of our default presets.
+
+Within the template, you can include placeholders for dynamic content by using the `${placeholder_name}` syntax, as illustrated below:
+
+```markdown
+Summarize the following text:
+"""
+${text}
+"""
+```
+
+In this example, we have a placeholder named `text`. To use the template through our SDK, follow this sample code:
+
+```python
+# Text you want to summarize
+text_to_summarize = "This is the great tale of ... "
+# Construct the message with the template
+messages = [
+    {
+        "role": "user",
+        "template_id": TEMPLATE_ID,  # Your template's ID
+        "params": {"text": text_to_summarize}
+    }
+]
+
+response = client.chat.completions.create(
+    project_id=project_id,
+    messages=messages
+)
+```
+
+#### Key Points for Using Prompt Templates
+When using prompt templates, keep these guidelines in mind:
+- Replace the `content` field with `template_id` and `params`.
+- Both `template_id` (the unique ID of your prompt template) and `params` (a key-value object mapping your placeholders to their desired values) are required when using prompt templates.
+
+Any keys in `params` that do not match a placeholder are ignored, and any placeholder missing from `params` defaults to an empty string. For instance, with the following messages the `${text}` placeholder is left empty:
+
+```python
+messages = [
+    {
+        "role": "user",
+        "template_id": TEMPLATE_ID,
+        "params": {}  # No parameters provided for placeholders
+    }
+]
+```
+
 ### Optional parameters
 
 By default, the `chat.completions` module uses the default launchpad parameters. You can also specify the following optional parameters:
 
 - `session_id`: A unique identifier to maintain session context, useful for tracking conversations or data across multiple requests.
- `temperature`: The temperature to use for completion. If omitted, the default launchpad temperature will be used. - `max_tokens`: The maximum number of tokens to generate for completion. If omitted, the default launchpad max tokens will be used. -- `top_p`: The nucleus sampling probability to use for completion. If omitted, the default launchpad top p will be used. -- `frequency_penalty`: The frequency penalty to use for completion. If omitted, the default launchpad frequency penalty will be used. -- `presence_penalty`: The presence penalty to use for completion. If omitted, the default launchpad presence penalty will be used. Example: diff --git a/premai/models/__init__.py b/premai/models/__init__.py index e2fb47e..4c4c3a9 100644 --- a/premai/models/__init__.py +++ b/premai/models/__init__.py @@ -8,9 +8,6 @@ from .catch_all_error import CatchAllError from .catch_all_error_code_enum import CatchAllErrorCodeEnum from .chat_completion_input import ChatCompletionInput, ChatCompletionInputDict -from .chat_completion_input_logit_bias_type_0 import ChatCompletionInputLogitBiasType0 -from .chat_completion_input_response_format_type_0 import ChatCompletionInputResponseFormatType0 -from .chat_completion_input_tools_item import ChatCompletionInputToolsItem from .chat_completion_response import ChatCompletionResponse from .conflict_error import ConflictError from .conflict_error_code_enum import ConflictErrorCodeEnum @@ -26,6 +23,7 @@ from .feedback_create import FeedbackCreate, FeedbackCreateDict from .feedback_create_feedback import FeedbackCreateFeedback from .message import Message +from .message_params import MessageParams from .message_role_enum import MessageRoleEnum from .messages import Messages from .messages_role_enum import MessagesRoleEnum @@ -73,9 +71,6 @@ "CatchAllError", "CatchAllErrorCodeEnum", "ChatCompletionInput", - "ChatCompletionInputLogitBiasType0", - "ChatCompletionInputResponseFormatType0", - "ChatCompletionInputToolsItem", "ChatCompletionResponse", "ConflictError", "ConflictErrorCodeEnum", @@ -91,6 +86,7 @@ "FeedbackCreate", "FeedbackCreateFeedback", "Message", + "MessageParams", "MessageRoleEnum", "Messages", "MessagesRoleEnum", diff --git a/premai/models/chat_completion_input.py b/premai/models/chat_completion_input.py index bc0ea5a..e9cd854 100644 --- a/premai/models/chat_completion_input.py +++ b/premai/models/chat_completion_input.py @@ -5,9 +5,6 @@ from attrs import field as _attrs_field from typing_extensions import Any, NotRequired, TypedDict, TypeVar -from ..models.chat_completion_input_logit_bias_type_0 import ChatCompletionInputLogitBiasType0 -from ..models.chat_completion_input_response_format_type_0 import ChatCompletionInputResponseFormatType0 -from ..models.chat_completion_input_tools_item import ChatCompletionInputToolsItem from ..models.enhancement import Enhancement from ..models.message import Message from ..types import UNSET, Unset @@ -22,18 +19,9 @@ class ChatCompletionInputDict(TypedDict): repositories: NotRequired[Union[Unset, Enhancement]] model: NotRequired[Union[Unset, str]] system_prompt: NotRequired[Union[Unset, str]] - frequency_penalty: NotRequired[Union[Unset, float]] - logit_bias: NotRequired[Union["ChatCompletionInputLogitBiasType0", None, Unset]] max_tokens: NotRequired[Union[None, Unset, int]] - presence_penalty: NotRequired[Union[Unset, float]] - response_format: NotRequired[Union["ChatCompletionInputResponseFormatType0", None, Unset]] - seed: NotRequired[Union[None, Unset, int]] - stop: NotRequired[Union[None, Unset, str]] stream: 
NotRequired[Union[Unset, bool]] temperature: NotRequired[Union[None, Unset, float]] - top_p: NotRequired[Union[None, Unset, float]] - tools: NotRequired[Union[Unset, List["ChatCompletionInputToolsItem"]]] - user: NotRequired[Union[None, Unset, str]] pass @@ -47,24 +35,9 @@ class ChatCompletionInput: repositories (Union[Unset, Enhancement]): model (Union[Unset, str]): ID of the model to use. See the model endpoint compatibility table for details. system_prompt (Union[Unset, str]): The system prompt to use. - frequency_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based - on their existing frequency. - logit_bias (Union['ChatCompletionInputLogitBiasType0', None, Unset]): JSON object that maps tokens to an - associated bias value from -100 to 100. max_tokens (Union[None, Unset, int]): The maximum number of tokens to generate in the chat completion. - presence_penalty (Union[Unset, float]): Number between -2.0 and 2.0. Positive values penalize new tokens based - on whether they appear in the text so far. - response_format (Union['ChatCompletionInputResponseFormatType0', None, Unset]): An object specifying the format - that the model must output. - seed (Union[None, Unset, int]): This feature is in Beta. If specified, our system will make a best effort to - sample deterministically. - stop (Union[None, Unset, str]): Up to 4 sequences where the API will stop generating further tokens. stream (Union[Unset, bool]): If set, partial message deltas will be sent, like in ChatGPT. temperature (Union[None, Unset, float]): What sampling temperature to use, between 0 and 2. - top_p (Union[None, Unset, float]): An alternative to sampling with temperature, called nucleus sampling. - tools (Union[Unset, List['ChatCompletionInputToolsItem']]): A list of tools the model may call. Currently, only - functions are supported as a tool. - user (Union[None, Unset, str]): A unique identifier representing your end-user. 
""" project_id: int @@ -73,25 +46,13 @@ class ChatCompletionInput: repositories: Union[Unset, "Enhancement"] = UNSET model: Union[Unset, str] = UNSET system_prompt: Union[Unset, str] = UNSET - frequency_penalty: Union[Unset, float] = UNSET - logit_bias: Union["ChatCompletionInputLogitBiasType0", None, Unset] = UNSET max_tokens: Union[None, Unset, int] = UNSET - presence_penalty: Union[Unset, float] = UNSET - response_format: Union["ChatCompletionInputResponseFormatType0", None, Unset] = UNSET - seed: Union[None, Unset, int] = UNSET - stop: Union[None, Unset, str] = UNSET stream: Union[Unset, bool] = UNSET temperature: Union[None, Unset, float] = UNSET - top_p: Union[None, Unset, float] = UNSET - tools: Union[Unset, List["ChatCompletionInputToolsItem"]] = UNSET - user: Union[None, Unset, str] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) def to_dict(self) -> Dict[str, Any]: - from ..models.chat_completion_input_logit_bias_type_0 import ChatCompletionInputLogitBiasType0 - from ..models.chat_completion_input_response_format_type_0 import ChatCompletionInputResponseFormatType0 - project_id = self.project_id messages = [] @@ -109,44 +70,12 @@ def to_dict(self) -> Dict[str, Any]: system_prompt = self.system_prompt - frequency_penalty = self.frequency_penalty - - logit_bias: Union[Dict[str, Any], None, Unset] - if isinstance(self.logit_bias, Unset): - logit_bias = UNSET - elif isinstance(self.logit_bias, ChatCompletionInputLogitBiasType0): - logit_bias = self.logit_bias.to_dict() - else: - logit_bias = self.logit_bias - max_tokens: Union[None, Unset, int] if isinstance(self.max_tokens, Unset): max_tokens = UNSET else: max_tokens = self.max_tokens - presence_penalty = self.presence_penalty - - response_format: Union[Dict[str, Any], None, Unset] - if isinstance(self.response_format, Unset): - response_format = UNSET - elif isinstance(self.response_format, ChatCompletionInputResponseFormatType0): - response_format = self.response_format.to_dict() - else: - response_format = self.response_format - - seed: Union[None, Unset, int] - if isinstance(self.seed, Unset): - seed = UNSET - else: - seed = self.seed - - stop: Union[None, Unset, str] - if isinstance(self.stop, Unset): - stop = UNSET - else: - stop = self.stop - stream = self.stream temperature: Union[None, Unset, float] @@ -155,25 +84,6 @@ def to_dict(self) -> Dict[str, Any]: else: temperature = self.temperature - top_p: Union[None, Unset, float] - if isinstance(self.top_p, Unset): - top_p = UNSET - else: - top_p = self.top_p - - tools: Union[Unset, List[Dict[str, Any]]] = UNSET - if not isinstance(self.tools, Unset): - tools = [] - for tools_item_data in self.tools: - tools_item = tools_item_data.to_dict() - tools.append(tools_item) - - user: Union[None, Unset, str] - if isinstance(self.user, Unset): - user = UNSET - else: - user = self.user - field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( @@ -190,30 +100,12 @@ def to_dict(self) -> Dict[str, Any]: field_dict["model"] = model if system_prompt is not UNSET: field_dict["system_prompt"] = system_prompt - if frequency_penalty is not UNSET: - field_dict["frequency_penalty"] = frequency_penalty - if logit_bias is not UNSET: - field_dict["logit_bias"] = logit_bias if max_tokens is not UNSET: field_dict["max_tokens"] = max_tokens - if presence_penalty is not UNSET: - field_dict["presence_penalty"] = presence_penalty - if response_format is not UNSET: - field_dict["response_format"] = response_format - if seed is 
not UNSET: - field_dict["seed"] = seed - if stop is not UNSET: - field_dict["stop"] = stop if stream is not UNSET: field_dict["stream"] = stream if temperature is not UNSET: field_dict["temperature"] = temperature - if top_p is not UNSET: - field_dict["top_p"] = top_p - if tools is not UNSET: - field_dict["tools"] = tools - if user is not UNSET: - field_dict["user"] = user return field_dict @@ -248,52 +140,12 @@ def to_multipart(self) -> Dict[str, Any]: else (None, str(self.system_prompt).encode(), "text/plain") ) - frequency_penalty = ( - self.frequency_penalty - if isinstance(self.frequency_penalty, Unset) - else (None, str(self.frequency_penalty).encode(), "text/plain") - ) - - logit_bias: Union[None, Tuple[None, bytes, str], Unset] - if isinstance(self.logit_bias, Unset): - logit_bias = UNSET - elif isinstance(self.logit_bias, ChatCompletionInputLogitBiasType0): - logit_bias = (None, json.dumps(self.logit_bias.to_dict()).encode(), "application/json") - else: - logit_bias = self.logit_bias - max_tokens: Union[None, Unset, int] if isinstance(self.max_tokens, Unset): max_tokens = UNSET else: max_tokens = self.max_tokens - presence_penalty = ( - self.presence_penalty - if isinstance(self.presence_penalty, Unset) - else (None, str(self.presence_penalty).encode(), "text/plain") - ) - - response_format: Union[None, Tuple[None, bytes, str], Unset] - if isinstance(self.response_format, Unset): - response_format = UNSET - elif isinstance(self.response_format, ChatCompletionInputResponseFormatType0): - response_format = (None, json.dumps(self.response_format.to_dict()).encode(), "application/json") - else: - response_format = self.response_format - - seed: Union[None, Unset, int] - if isinstance(self.seed, Unset): - seed = UNSET - else: - seed = self.seed - - stop: Union[None, Unset, str] - if isinstance(self.stop, Unset): - stop = UNSET - else: - stop = self.stop - stream = self.stream if isinstance(self.stream, Unset) else (None, str(self.stream).encode(), "text/plain") temperature: Union[None, Unset, float] @@ -302,26 +154,6 @@ def to_multipart(self) -> Dict[str, Any]: else: temperature = self.temperature - top_p: Union[None, Unset, float] - if isinstance(self.top_p, Unset): - top_p = UNSET - else: - top_p = self.top_p - - tools: Union[Unset, Tuple[None, bytes, str]] = UNSET - if not isinstance(self.tools, Unset): - _temp_tools = [] - for tools_item_data in self.tools: - tools_item = tools_item_data.to_dict() - _temp_tools.append(tools_item) - tools = (None, json.dumps(_temp_tools).encode(), "application/json") - - user: Union[None, Unset, str] - if isinstance(self.user, Unset): - user = UNSET - else: - user = self.user - field_dict: Dict[str, Any] = {} field_dict.update( {key: (None, str(value).encode(), "text/plain") for key, value in self.additional_properties.items()} @@ -340,38 +172,17 @@ def to_multipart(self) -> Dict[str, Any]: field_dict["model"] = model if system_prompt is not UNSET: field_dict["system_prompt"] = system_prompt - if frequency_penalty is not UNSET: - field_dict["frequency_penalty"] = frequency_penalty - if logit_bias is not UNSET: - field_dict["logit_bias"] = logit_bias if max_tokens is not UNSET: field_dict["max_tokens"] = max_tokens - if presence_penalty is not UNSET: - field_dict["presence_penalty"] = presence_penalty - if response_format is not UNSET: - field_dict["response_format"] = response_format - if seed is not UNSET: - field_dict["seed"] = seed - if stop is not UNSET: - field_dict["stop"] = stop if stream is not UNSET: field_dict["stream"] = stream if 
temperature is not UNSET: field_dict["temperature"] = temperature - if top_p is not UNSET: - field_dict["top_p"] = top_p - if tools is not UNSET: - field_dict["tools"] = tools - if user is not UNSET: - field_dict["user"] = user return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - from ..models.chat_completion_input_logit_bias_type_0 import ChatCompletionInputLogitBiasType0 - from ..models.chat_completion_input_response_format_type_0 import ChatCompletionInputResponseFormatType0 - from ..models.chat_completion_input_tools_item import ChatCompletionInputToolsItem from ..models.enhancement import Enhancement from ..models.message import Message @@ -398,25 +209,6 @@ def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: system_prompt = d.pop("system_prompt", UNSET) - frequency_penalty = d.pop("frequency_penalty", UNSET) - - def _parse_logit_bias(data: object) -> Union["ChatCompletionInputLogitBiasType0", None, Unset]: - if data is None: - return data - if isinstance(data, Unset): - return data - try: - if not isinstance(data, dict): - raise TypeError() - logit_bias_type_0 = ChatCompletionInputLogitBiasType0.from_dict(data) - - return logit_bias_type_0 - except: # noqa: E722 - pass - return cast(Union["ChatCompletionInputLogitBiasType0", None, Unset], data) - - logit_bias = _parse_logit_bias(d.pop("logit_bias", UNSET)) - def _parse_max_tokens(data: object) -> Union[None, Unset, int]: if data is None: return data @@ -426,43 +218,6 @@ def _parse_max_tokens(data: object) -> Union[None, Unset, int]: max_tokens = _parse_max_tokens(d.pop("max_tokens", UNSET)) - presence_penalty = d.pop("presence_penalty", UNSET) - - def _parse_response_format(data: object) -> Union["ChatCompletionInputResponseFormatType0", None, Unset]: - if data is None: - return data - if isinstance(data, Unset): - return data - try: - if not isinstance(data, dict): - raise TypeError() - response_format_type_0 = ChatCompletionInputResponseFormatType0.from_dict(data) - - return response_format_type_0 - except: # noqa: E722 - pass - return cast(Union["ChatCompletionInputResponseFormatType0", None, Unset], data) - - response_format = _parse_response_format(d.pop("response_format", UNSET)) - - def _parse_seed(data: object) -> Union[None, Unset, int]: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(Union[None, Unset, int], data) - - seed = _parse_seed(d.pop("seed", UNSET)) - - def _parse_stop(data: object) -> Union[None, Unset, str]: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(Union[None, Unset, str], data) - - stop = _parse_stop(d.pop("stop", UNSET)) - stream = d.pop("stream", UNSET) def _parse_temperature(data: object) -> Union[None, Unset, float]: @@ -474,31 +229,6 @@ def _parse_temperature(data: object) -> Union[None, Unset, float]: temperature = _parse_temperature(d.pop("temperature", UNSET)) - def _parse_top_p(data: object) -> Union[None, Unset, float]: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(Union[None, Unset, float], data) - - top_p = _parse_top_p(d.pop("top_p", UNSET)) - - tools = [] - _tools = d.pop("tools", UNSET) - for tools_item_data in _tools or []: - tools_item = ChatCompletionInputToolsItem.from_dict(tools_item_data) - - tools.append(tools_item) - - def _parse_user(data: object) -> Union[None, Unset, str]: - if data is None: - return data - if isinstance(data, Unset): - return data - return cast(Union[None, Unset, str], data) - - user = 
_parse_user(d.pop("user", UNSET)) - chat_completion_input = cls( project_id=project_id, messages=messages, @@ -506,18 +236,9 @@ def _parse_user(data: object) -> Union[None, Unset, str]: repositories=repositories, model=model, system_prompt=system_prompt, - frequency_penalty=frequency_penalty, - logit_bias=logit_bias, max_tokens=max_tokens, - presence_penalty=presence_penalty, - response_format=response_format, - seed=seed, - stop=stop, stream=stream, temperature=temperature, - top_p=top_p, - tools=tools, - user=user, ) chat_completion_input.additional_properties = d diff --git a/premai/models/chat_completion_input_logit_bias_type_0.py b/premai/models/chat_completion_input_logit_bias_type_0.py deleted file mode 100644 index df57641..0000000 --- a/premai/models/chat_completion_input_logit_bias_type_0.py +++ /dev/null @@ -1,49 +0,0 @@ -from typing import Dict, List, Type - -from attrs import define as _attrs_define -from attrs import field as _attrs_field -from typing_extensions import Any, TypedDict, TypeVar - -T = TypeVar("T", bound="ChatCompletionInputLogitBiasType0") - - -class ChatCompletionInputLogitBiasType0Dict(TypedDict): - pass - - -@_attrs_define -class ChatCompletionInputLogitBiasType0: - """JSON object that maps tokens to an associated bias value from -100 to 100.""" - - additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> Dict[str, Any]: - field_dict: Dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update({}) - - return field_dict - - @classmethod - def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - d = src_dict.copy() if src_dict else {} - chat_completion_input_logit_bias_type_0 = cls() - - chat_completion_input_logit_bias_type_0.additional_properties = d - return chat_completion_input_logit_bias_type_0 - - @property - def additional_keys(self) -> List[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/premai/models/chat_completion_input_response_format_type_0.py b/premai/models/chat_completion_input_response_format_type_0.py deleted file mode 100644 index 7fc4731..0000000 --- a/premai/models/chat_completion_input_response_format_type_0.py +++ /dev/null @@ -1,49 +0,0 @@ -from typing import Dict, List, Type - -from attrs import define as _attrs_define -from attrs import field as _attrs_field -from typing_extensions import Any, TypedDict, TypeVar - -T = TypeVar("T", bound="ChatCompletionInputResponseFormatType0") - - -class ChatCompletionInputResponseFormatType0Dict(TypedDict): - pass - - -@_attrs_define -class ChatCompletionInputResponseFormatType0: - """An object specifying the format that the model must output.""" - - additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) - - def to_dict(self) -> Dict[str, Any]: - field_dict: Dict[str, Any] = {} - field_dict.update(self.additional_properties) - field_dict.update({}) - - return field_dict - - @classmethod - def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: - d = src_dict.copy() if src_dict else {} - chat_completion_input_response_format_type_0 = cls() - - chat_completion_input_response_format_type_0.additional_properties = d - return 
chat_completion_input_response_format_type_0 - - @property - def additional_keys(self) -> List[str]: - return list(self.additional_properties.keys()) - - def __getitem__(self, key: str) -> Any: - return self.additional_properties[key] - - def __setitem__(self, key: str, value: Any) -> None: - self.additional_properties[key] = value - - def __delitem__(self, key: str) -> None: - del self.additional_properties[key] - - def __contains__(self, key: str) -> bool: - return key in self.additional_properties diff --git a/premai/models/message.py b/premai/models/message.py index d8b845e..b905525 100644 --- a/premai/models/message.py +++ b/premai/models/message.py @@ -1,17 +1,21 @@ -from typing import Dict, List, Type +from typing import Dict, List, Type, Union from attrs import define as _attrs_define from attrs import field as _attrs_field -from typing_extensions import Any, TypedDict, TypeVar +from typing_extensions import Any, NotRequired, TypedDict, TypeVar +from ..models.message_params import MessageParams from ..models.message_role_enum import MessageRoleEnum +from ..types import UNSET, Unset T = TypeVar("T", bound="Message") class MessageDict(TypedDict): role: MessageRoleEnum - content: str + content: NotRequired[Union[Unset, str]] + template_id: NotRequired[Union[Unset, str]] + params: NotRequired[Union[Unset, MessageParams]] pass @@ -21,11 +25,15 @@ class Message: Attributes: role (MessageRoleEnum): * `user` - user * `assistant` - assistant - content (str): The content of the message. + content (Union[Unset, str]): The content of the message. + template_id (Union[Unset, str]): The ID of the template to use. + params (Union[Unset, MessageParams]): The parameters (key: value) to use with the given template. """ role: MessageRoleEnum - content: str + content: Union[Unset, str] = UNSET + template_id: Union[Unset, str] = UNSET + params: Union[Unset, "MessageParams"] = UNSET additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) @@ -34,27 +42,51 @@ def to_dict(self) -> Dict[str, Any]: content = self.content + template_id = self.template_id + + params: Union[Unset, Dict[str, Any]] = UNSET + if not isinstance(self.params, Unset): + params = self.params.to_dict() + field_dict: Dict[str, Any] = {} field_dict.update(self.additional_properties) field_dict.update( { "role": role, - "content": content, } ) + if content is not UNSET: + field_dict["content"] = content + if template_id is not UNSET: + field_dict["template_id"] = template_id + if params is not UNSET: + field_dict["params"] = params return field_dict @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: + from ..models.message_params import MessageParams + d = src_dict.copy() if src_dict else {} role = MessageRoleEnum(d.pop("role")) - content = d.pop("content") + content = d.pop("content", UNSET) + + template_id = d.pop("template_id", UNSET) + + _params = d.pop("params", UNSET) + params: Union[Unset, MessageParams] + if isinstance(_params, Unset): + params = UNSET + else: + params = MessageParams.from_dict(_params) message = cls( role=role, content=content, + template_id=template_id, + params=params, ) message.additional_properties = d diff --git a/premai/models/chat_completion_input_tools_item.py b/premai/models/message_params.py similarity index 77% rename from premai/models/chat_completion_input_tools_item.py rename to premai/models/message_params.py index f8520d0..ed839fd 100644 --- a/premai/models/chat_completion_input_tools_item.py +++ b/premai/models/message_params.py @@ -4,16 +4,16 @@ from 
attrs import field as _attrs_field from typing_extensions import Any, TypedDict, TypeVar -T = TypeVar("T", bound="ChatCompletionInputToolsItem") +T = TypeVar("T", bound="MessageParams") -class ChatCompletionInputToolsItemDict(TypedDict): +class MessageParamsDict(TypedDict): pass @_attrs_define -class ChatCompletionInputToolsItem: - """ """ +class MessageParams: + """The parameters (key: value) to use with the given template.""" additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict) @@ -27,10 +27,10 @@ def to_dict(self) -> Dict[str, Any]: @classmethod def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T: d = src_dict.copy() if src_dict else {} - chat_completion_input_tools_item = cls() + message_params = cls() - chat_completion_input_tools_item.additional_properties = d - return chat_completion_input_tools_item + message_params.additional_properties = d + return message_params @property def additional_keys(self) -> List[str]: diff --git a/pyproject.toml b/pyproject.toml index 7bca4f0..c38b35a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "premai" -version = "0.3.53" +version = "0.3.54" description = "A client library for accessing Prem APIs" authors = [] readme = "README.md"
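
For reference, below the patch: a minimal end-to-end sketch of the prompt-template flow documented above. It assumes the `premai` client used elsewhere in the README (`Prem`); `PREMAI_API_KEY`, `PROJECT_ID`, and `TEMPLATE_ID` are hypothetical placeholders you would substitute with your own values.

```python
import os

from premai import Prem

PROJECT_ID = 123                  # hypothetical: your project's ID
TEMPLATE_ID = "your-template-id"  # hypothetical: your template's ID

client = Prem(api_key=os.environ["PREMAI_API_KEY"])

# A template message carries `template_id` and `params` instead of `content`;
# `params` fills the template's ${...} placeholders (here, ${text}).
messages = [
    {
        "role": "user",
        "template_id": TEMPLATE_ID,
        "params": {"text": "This is the great tale of ..."},
    }
]

response = client.chat.completions.create(
    project_id=PROJECT_ID,
    messages=messages,
)
# Dict-style access mirrors the README's streaming example (delta["content"]).
print(response.choices[0].message["content"])
```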
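At the model layer, the patch makes `Message.content` optional and adds `template_id` plus `params`, where `params` is the open `MessageParams` container produced by the rename above; fields left unset are omitted from serialization. A small sketch of that behavior (the `MessageRoleEnum.USER` member name is an assumption about the generated enum):

```python
from premai.models import Message, MessageParams, MessageRoleEnum

# MessageParams is an open object: placeholder values are stored as
# additional properties via __setitem__.
params = MessageParams()
params["text"] = "This is the great tale of ..."

message = Message(
    role=MessageRoleEnum.USER,       # assumed member name for "user"
    template_id="your-template-id",  # hypothetical template ID
    params=params,
)

# `content` was left UNSET, so to_dict() omits it and emits only the
# template fields, roughly:
#   {"role": "user", "template_id": "your-template-id", "params": {"text": "..."}}
print(message.to_dict())
```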