Add polling to handle run state updates. #16

Closed
wants to merge 2 commits
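
This change replaces the previous tight retrieve loop in startRun with a pollRunState action that re-checks the run roughly every two seconds and branches on the completed, requires_action, and error states. A minimal sketch of that polling pattern, assuming an OpenAI client configured like this plugin's davai client (the helper name and terminal-state list below are illustrative, not the PR's exact code):

```typescript
// Illustrative sketch only: poll an Assistants API run until it reaches a terminal state.
// Assumes an OpenAI client set up like the `davai` client in this repo.
import OpenAI from "openai";

const davai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

const TERMINAL_STATES = ["completed", "requires_action", "failed", "cancelled", "incomplete", "expired"];

async function waitForRun(threadId: string, runId: string) {
  let run = await davai.beta.threads.runs.retrieve(threadId, runId);
  while (!TERMINAL_STATES.includes(run.status)) {
    // Sleep between polls rather than re-requesting in a tight loop.
    await new Promise((resolve) => setTimeout(resolve, 2000));
    run = await davai.beta.threads.runs.retrieve(threadId, runId);
  }
  return run;
}
```

Sleeping between retrieve calls keeps the plugin from hammering the Assistants API while a run is still queued or in progress.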
2 changes: 1 addition & 1 deletion src/app-config.json
@@ -4,7 +4,7 @@
"keyboard_shortcut": "ctrl+?"
},
"assistant": {
"existing_assistant_id": "asst_Af8jrKYOFP4MxA9nse61yFBq",
"existing_assistant_id": "asst_xmAX5oxByssXrkBymMbcsVEm",
"instructions": "You are DAVAI, an Data Analysis through Voice and Artificial Intelligence partner. You are an intermediary for a user who is blind who wants to interact with data tables in a data analysis app named CODAP.",
"model": "gpt-4o-mini",
"use_existing": true
130 changes: 75 additions & 55 deletions src/models/assistant-model.ts
@@ -2,11 +2,10 @@ import { types, flow } from "mobx-state-tree";
import { getTools, initLlmConnection } from "../utils/llm-utils";
import { ChatTranscriptModel, transcriptStore } from "./chat-transcript-model";
import { Message } from "openai/resources/beta/threads/messages";
import { getAttributeList, getDataContext, getListOfDataContexts } from "../utils/codap-api-helpers";
import { DAVAI_SPEAKER, DEBUG_SPEAKER } from "../constants";
import { createGraph } from "../utils/codap-utils";
import { formatMessage } from "../utils/utils";
import appConfigJson from "../app-config.json";
import { codapInterface } from "@concord-consortium/codap-plugin-api";

export const AssistantModel = types
.model("AssistantModel", {
@@ -69,86 +68,107 @@ export const AssistantModel = types

const startRun = flow(function* () {
try {
const run = yield davai.beta.threads.runs.create(self.thread.id, {
const currentRun = yield davai.beta.threads.runs.create(self.thread.id, {
assistant_id: self.assistant.id,
});
transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Run created",
content: formatMessage(currentRun),
});

// Wait for run completion and handle responses
let runState = yield davai.beta.threads.runs.retrieve(self.thread.id, run.id);
while (runState.status !== "completed" && runState.status !== "requires_action") {
runState = yield davai.beta.threads.runs.retrieve(self.thread.id, run.id);
}
yield pollRunState(currentRun.id);
} catch (err) {
console.error("Failed to complete run:", err);
transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Failed to complete run",
content: formatMessage(err),
});
}
});

if (runState.status === "requires_action") {
transcriptStore.addMessage(DEBUG_SPEAKER, {description: "User request requires action", content: formatMessage(runState)});
yield handleRequiredAction(runState, run.id);
}
const pollRunState: (currentRunId: string) => Promise<any> = flow(function* (currentRunId) {
let runState = yield davai.beta.threads.runs.retrieve(self.thread.id, currentRunId);
transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Run state status",
content: formatMessage(runState.status),
});

const errorStates = ["failed", "cancelled", "incomplete"];

while (runState.status !== "completed" && runState.status !== "requires_action" && !errorStates.includes(runState.status)) {
yield new Promise((resolve) => setTimeout(resolve, 2000));
runState = yield davai.beta.threads.runs.retrieve(self.thread.id, currentRunId);
transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Run state status",
content: formatMessage(runState.status),
});
}

// Get the last assistant message from the messages array
if (errorStates.includes(runState.status)) {
transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Run failed",
content: formatMessage(runState),
});
transcriptStore.addMessage(DAVAI_SPEAKER, {
content: "I'm sorry, I encountered an error. Please try again.",
});
}

if (runState.status === "requires_action") {
transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Run requires action",
content: formatMessage(runState),
});
yield handleRequiredAction(runState, currentRunId);
yield pollRunState(currentRunId);
}

if (runState.status === "completed") {
const messages = yield davai.beta.threads.messages.list(self.thread.id);
transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Updated thread messages list", content: formatMessage(messages)});

const lastMessageForRun = messages.data.filter(
(msg: Message) => msg.run_id === run.id && msg.role === "assistant"
).pop();
const lastMessageForRun = messages.data
.filter((msg: Message) => msg.run_id === currentRunId && msg.role === "assistant")
.pop();

transcriptStore.addMessage(DEBUG_SPEAKER, {
description: "Run completed, assistant response",
content: formatMessage(lastMessageForRun),
});

const lastMessageContent = lastMessageForRun?.content[0]?.text?.value;
if (lastMessageContent) {
transcriptStore.addMessage(DAVAI_SPEAKER, {content: lastMessageContent});
transcriptStore.addMessage(DAVAI_SPEAKER, { content: lastMessageContent });
} else {
transcriptStore.addMessage(DAVAI_SPEAKER, {content: "I'm sorry, I don't have a response for that."});
transcriptStore.addMessage(DEBUG_SPEAKER, {description: "No content in last message", content: formatMessage(lastMessageForRun)});
transcriptStore.addMessage(DAVAI_SPEAKER, {
content: "I'm sorry, I don't have a response for that.",
});
}

} catch (err) {
console.error("Failed to complete run:", err);
transcriptStore.addMessage(DEBUG_SPEAKER, {description: "Failed to complete run", content: formatMessage(err)});
}
});

const handleRequiredAction = flow(function* (runState, runId) {
try {
const toolOutputs = runState.required_action?.submit_tool_outputs.tool_calls
? yield Promise.all(
runState.required_action.submit_tool_outputs.tool_calls.map(async (toolCall: any) => {
if (toolCall.function.name === "get_attributes") {
const { dataset } = JSON.parse(toolCall.function.arguments);
// getting the root collection won't always work. what if a user wants the attributes
// in the Mammals dataset but there is a hierarchy?
const rootCollection = (await getDataContext(dataset)).values.collections[0];
const attributeListRes = await getAttributeList(dataset, rootCollection.name);
const { requestMessage, ...codapResponse } = attributeListRes;
transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Request sent to CODAP", content: formatMessage(requestMessage) });
transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Response from CODAP", content: formatMessage(codapResponse) });
return { tool_call_id: toolCall.id, output: JSON.stringify(attributeListRes) };
runState.required_action.submit_tool_outputs.tool_calls.map(flow(function* (toolCall: any) {
if (toolCall.function.name === "create_request") {
const { action, resource, values } = JSON.parse(toolCall.function.arguments);
const request = { action, resource, values };
transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Request sent to CODAP", content: formatMessage(request) });
const res = yield codapInterface.sendRequest(request);
transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Response from CODAP", content: formatMessage(res) });
return { tool_call_id: toolCall.id, output: JSON.stringify(res) };
} else {
const { dataset, name, xAttribute, yAttribute } = JSON.parse(toolCall.function.arguments);
const { requestMessage, ...codapResponse} = await createGraph(dataset, name, xAttribute, yAttribute);
transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Request sent to CODAP", content: formatMessage(requestMessage) });
transcriptStore.addMessage(DEBUG_SPEAKER, { description: "Response from CODAP", content: formatMessage(codapResponse) });
return { tool_call_id: toolCall.id, output: "Graph created." };
return { tool_call_id: toolCall.id, output: "Tool call not recognized." };
}
})
)
))
: [];

if (toolOutputs) {
davai.beta.threads.runs.submitToolOutputsStream(
yield davai.beta.threads.runs.submitToolOutputs(
self.thread.id, runId, { tool_outputs: toolOutputs }
);

const threadMessageList = yield davai.beta.threads.messages.list(self.thread.id);
const threadMessages = threadMessageList.data.map((msg: any) => ({
role: msg.role,
content: msg.content[0].text.value,
}));

yield davai.chat.completions.create({
model: "gpt-4o-mini",
messages: [
...threadMessages
],
});
}
} catch (err) {
console.error(err);
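
As a usage note, the new create_request tool call simply relays an action/resource/values request straight to CODAP via codapInterface.sendRequest and returns the stringified response as the tool output. A hypothetical example of the kind of request the assistant might produce (the resource here is illustrative, not taken from this PR):

```typescript
// Hypothetical example of a request the create_request tool call could forward to CODAP.
// The resource is illustrative; the action/resource/values shape matches what the handler parses.
import { codapInterface } from "@concord-consortium/codap-plugin-api";

const request = {
  action: "get",
  resource: "dataContextList",
};

// sendRequest resolves with CODAP's response, which the handler stringifies as the tool output.
codapInterface.sendRequest(request).then((res) => {
  console.log(JSON.stringify(res));
});
```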