diff --git a/OpenAI-DotNet/Chat/ChatRequest.cs b/OpenAI-DotNet/Chat/ChatRequest.cs
index 025cfc2d..6032085a 100644
--- a/OpenAI-DotNet/Chat/ChatRequest.cs
+++ b/OpenAI-DotNet/Chat/ChatRequest.cs
@@ -190,7 +190,7 @@ public ChatRequest(
}
else
{
- Modalities = Modality.Text;
+ Modalities = Modality.Text & Modality.Audio;
}
FrequencyPenalty = frequencyPenalty;
@@ -238,12 +238,14 @@ public ChatRequest(
/// Whether or not to store the output of this chat completion request for use in our model distillation or evals products.
///
[JsonPropertyName("store")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public bool? Store { get; set; }
///
/// Developer-defined tags and values used for filtering completions in the dashboard.
///
[JsonPropertyName("metadata")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public IReadOnlyDictionary<string, object> Metadata { get; set; }
///
@@ -253,6 +255,7 @@ public ChatRequest(
/// Defaults to 0
///
[JsonPropertyName("frequency_penalty")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? FrequencyPenalty { get; }
///
@@ -275,7 +278,7 @@ public ChatRequest(
/// This option is currently not available on the gpt-4-vision-preview model.
///
[JsonPropertyName("logprobs")]
- [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public bool? LogProbs { get; }
///
@@ -286,7 +289,7 @@ public ChatRequest(
/// must be set to true if this parameter is used.
///
[JsonPropertyName("top_logprobs")]
- [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public int? TopLogProbs { get; }
///
@@ -295,12 +298,14 @@ public ChatRequest(
///
[JsonPropertyName("max_tokens")]
[Obsolete("Use MaxCompletionTokens instead")]
- public int? MaxTokens { get; }
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
+ public int? MaxTokens => MaxCompletionTokens;
///
/// An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
///
[JsonPropertyName("max_completion_tokens")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public int? MaxCompletionTokens { get; }
///
@@ -312,6 +317,7 @@ public ChatRequest(
[JsonPropertyName("modalities")]
[JsonConverter(typeof(ModalityConverter))]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public Modality Modalities { get; }
///
@@ -319,6 +325,7 @@ public ChatRequest(
/// This is most common when you are regenerating a file with only minor changes to most of the content.
///
[JsonPropertyName("prediction")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public object Prediction { get; set; }
///
@@ -335,6 +342,7 @@ public ChatRequest(
/// Defaults to 0
///
[JsonPropertyName("presence_penalty")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? PresencePenalty { get; }
[JsonPropertyName("response_format")]
@@ -363,6 +371,7 @@ public ChatRequest(
/// monitor changes in the backend.
///
[JsonPropertyName("seed")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public int? Seed { get; }
///
@@ -374,12 +383,14 @@ public ChatRequest(
/// When this parameter is set, the response body will include the service_tier utilized.
///
[JsonPropertyName("service_tier")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public string ServiceTier { get; set; }
///
/// Up to 4 sequences where the API will stop generating further tokens.
///
[JsonPropertyName("stop")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public string[] Stops { get; }
///
@@ -403,6 +414,7 @@ public ChatRequest(
/// Defaults to 1
///
[JsonPropertyName("temperature")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? Temperature { get; }
///
@@ -413,6 +425,7 @@ public ChatRequest(
/// Defaults to 1
///
[JsonPropertyName("top_p")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public double? TopP { get; }
///
@@ -420,6 +433,7 @@ public ChatRequest(
/// Use this to provide a list of functions the model may generate JSON inputs for.
///
[JsonPropertyName("tools")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public IReadOnlyList<Tool> Tools { get; }
///
@@ -432,18 +446,21 @@ public ChatRequest(
/// 'auto' is the default if functions are present.
///
[JsonPropertyName("tool_choice")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public dynamic ToolChoice { get; }
///
/// Whether to enable parallel function calling during tool use.
///
[JsonPropertyName("parallel_tool_calls")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingNull)]
public bool? ParallelToolCalls { get; }
///
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
///
[JsonPropertyName("user")]
+ [JsonIgnore(Condition = JsonIgnoreCondition.WhenWritingDefault)]
public string User { get; }
///
diff --git a/OpenAI-DotNet/OpenAI-DotNet.csproj b/OpenAI-DotNet/OpenAI-DotNet.csproj
index 4e582d68..a47a6de0 100644
--- a/OpenAI-DotNet/OpenAI-DotNet.csproj
+++ b/OpenAI-DotNet/OpenAI-DotNet.csproj
@@ -29,8 +29,10 @@ More context [on Roger Pincombe's blog](https://rogerpincombe.com/openai-dotnet-
OpenAI-DotNet.pfx
true
true
- 8.4.0
+ 8.4.1
+Version 8.4.1
+- Fix ChatRequest serialization for Azure OpenAI
Version 8.4.0
- Add realtime support
- Added o1, o1-mini, gpt-4o-mini, and gpt-4o-realtime, gpt-4o-audio model convenience properties