Skip to content

Commit

Permalink
Documented helpers module
Browse files Browse the repository at this point in the history
  • Loading branch information
jpalvarezl committed Sep 19, 2024
1 parent 2a94ace commit e91b300
Show file tree
Hide file tree
Showing 2 changed files with 16 additions and 2 deletions.
10 changes: 10 additions & 0 deletions sdk/openai/inference/src/helpers/streaming.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,23 @@
use azure_core::{Error, Result};
use futures::{Stream, StreamExt};

/// A trait used to designate a type into which the streams will be deserialized.
///
/// Implementors take a raw HTTP `azure_core::ResponseBody` byte stream and
/// expose it as a typed stream of deserialized events.
///
/// # Type Parameters
/// * `T` - The event type produced by the stream; must be deserializable via
///   `serde` without borrowing from the input (`DeserializeOwned`).
pub(crate) trait EventStreamer<T>
where
T: serde::de::DeserializeOwned,
{
/// Converts the raw `response_body` byte stream into a stream of
/// `Result<T>` items, one per deserialized event.
///
/// NOTE(review): there is no `self` receiver, so this is an associated
/// function on the implementing type — confirm call sites use
/// `Impl::event_stream(body)` rather than method syntax.
fn event_stream(response_body: azure_core::ResponseBody) -> impl Stream<Item = Result<T>>;
}

/// A helper function to be used in streaming scenarios. The `response_body`, the input stream
/// is buffered until a `stream_event_delimiter` is found. This constitutes a single event.
/// These series of events are then returned as a stream.
///
/// # Arguments
/// * `response_body` - The response body stream of an HTTP request.
/// * `stream_event_delimiter` - The delimiter that separates events in the stream. In some cases `\n\n`, in other cases can be `\n\r\n\n`.
/// # Returns
/// The `response_body` stream segmented and streamed into String events demarcated by `stream_event_delimiter`.
pub(crate) fn string_chunks<'a>(
response_body: (impl Stream<Item = Result<bytes::Bytes>> + Unpin + 'a),
stream_event_delimiter: &'a str,
Expand Down
8 changes: 6 additions & 2 deletions sdk/openai/inference/src/models/chat_completions.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,9 @@ pub mod request {
/// # Example
///
/// ```rust
/// let request = azure_openai_inference::request::CreateChatCompletionsRequest::with_user_message("gpt-3.5-turbo-1106", "Why couldn't the eagles take Frodo directly to mount doom?");
/// let request = azure_openai_inference::request::CreateChatCompletionsRequest::with_user_message(
/// "gpt-3.5-turbo-1106",
/// "Why couldn't the eagles take Frodo directly to mount doom?");
/// ```
pub fn with_user_message(model: &str, prompt: &str) -> Self {
Self {
Expand All @@ -77,7 +79,9 @@ pub mod request {
/// # Example
///
/// ```rust
/// let request = azure_openai_inference::request::CreateChatCompletionsRequest::with_user_message_and_stream("gpt-3.5-turbo-1106", "Why couldn't the eagles take Frodo directly to mount doom?");
/// let request = azure_openai_inference::request::CreateChatCompletionsRequest::with_user_message_and_stream(
/// "gpt-3.5-turbo-1106",
/// "Why couldn't the eagles take Frodo directly to Mount Doom?");
/// ```
pub fn with_user_message_and_stream(
model: impl Into<String>,
Expand Down

0 comments on commit e91b300

Please sign in to comment.