Context limiter #343

Open
wants to merge 3 commits into master
15 changes: 3 additions & 12 deletions package.json
@@ -21,18 +21,9 @@
     "bot"
   ],
   "jest": {
-    "transform": {
-      "^.+\\.(ts|tsx)$": "babel-jest"
-    },
-    "testEnvironment": "node",
-    "moduleFileExtensions": [
-      "ts",
-      "tsx",
-      "js",
-      "jsx",
-      "json",
-      "node"
-    ]
+    "preset": "ts-jest",
+    "rootDir": "./src",
+    "testEnvironment": "node"
   },
   "author": "Harmony One",
   "license": "MIT",
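The jest block drops the hand-rolled babel-jest transform and moduleFileExtensions list in favor of the ts-jest preset, which handles TypeScript compilation itself, and scopes test discovery to ./src via rootDir. For reference, a minimal standalone equivalent of the new block, assuming ts-jest is installed as a devDependency and a Jest version that exports the Config type (the jest.config.ts file is illustrative; the PR keeps the config inline in package.json):

```ts
// jest.config.ts: illustrative standalone equivalent of the new "jest" block.
// Assumes ts-jest is present in devDependencies.
import type { Config } from 'jest'

const config: Config = {
  preset: 'ts-jest',        // compiles .ts/.tsx on the fly, replacing the babel-jest transform
  rootDir: './src',         // restrict test discovery to the src tree
  testEnvironment: 'node'
}

export default config
```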
11 changes: 9 additions & 2 deletions src/modules/open-ai/api/openAi.ts
@@ -18,6 +18,7 @@ import {
   DalleGPTModels
 } from '../types'
 import type fs from 'fs'
+import { limitConversationContext } from '../helpers'
 
 const openai = new OpenAI({ apiKey: config.openAiKey })
 
@@ -80,12 +81,15 @@ export async function chatCompletion (
   model = config.openAi.chatGpt.model,
   limitTokens = true
 ): Promise<ChatCompletion> {
+  const limitedConversation = limitConversationContext(conversation as OpenAI.Chat.Completions.CreateChatCompletionRequestMessage[], 512)
+
   const payload = {
     model,
     max_tokens: limitTokens ? config.openAi.chatGpt.maxTokens : undefined,
     temperature: config.openAi.dalle.completions.temperature,
-    messages: conversation
+    messages: limitedConversation
   }
+
   const response = await openai.chat.completions.create(
     payload as OpenAI.Chat.CompletionCreateParamsNonStreaming
   )
@@ -115,9 +119,12 @@ export const streamChatCompletion = async (
 ): Promise<string> => {
   let completion = ''
   let wordCountMinimum = 2
+
+  const limitedConversation = limitConversationContext(conversation as OpenAI.Chat.Completions.CreateChatCompletionRequestMessage[], 512)
+
   const stream = await openai.chat.completions.create({
     model,
-    messages: conversation as OpenAI.Chat.Completions.CreateChatCompletionRequestMessage[],
+    messages: limitedConversation,
     stream: true,
     max_tokens: limitTokens ? config.openAi.chatGpt.maxTokens : undefined,
     temperature: config.openAi.dalle.completions.temperature || 0.8
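Both chatCompletion and streamChatCompletion now clip the conversation to the most recent 512 characters before the API call; the limit is hardcoded at both call sites. A sketch of the resulting behavior on an oversized history (import path and message contents are illustrative):

```ts
import { limitConversationContext } from '../helpers'

// Two messages totalling 700 characters, clipped to a 512-character budget.
const history = [
  { role: 'user' as const, content: 'a'.repeat(400) },     // older message
  { role: 'assistant' as const, content: 'b'.repeat(300) } // newest message
]

const clipped = limitConversationContext(history, 512)
// clipped[1] keeps the newest message whole (300 chars);
// clipped[0] keeps only the trailing 212 chars of the older message.
```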
84 changes: 84 additions & 0 deletions src/modules/open-ai/helpers.test.ts
@@ -0,0 +1,84 @@
/* eslint-disable */
import { limitConversationContext } from './helpers'
import { describe, expect, it } from "@jest/globals";
import { CreateChatCompletionRequestMessage } from "openai/resources/chat";

describe('limitConversationContext', () => {
  it('shouldKeepOnlyTheNewestMessageThatFits', () => {
    let conversation: CreateChatCompletionRequestMessage[] = [
      { role: "assistant", content: "Welcome to the platform!" },
      { role: "user", content: "Your order has been confirmed." },
      { role: "assistant", content: "Please adhere to the community guidelines." },
    ];

    let limitedConversation = limitConversationContext(conversation, 42)

    expect(limitedConversation.length).toEqual(1)
  })

  it('shouldAttachPartOfAMessage', () => {
    let conversation: CreateChatCompletionRequestMessage[] = [
      { role: "assistant", content: "Welcome to the platform!" },
      { role: "user", content: "Your order has been confirmed." },
      { role: "assistant", content: "Please adhere to the community guidelines." },
    ];

    let limitedConversation = limitConversationContext(conversation, 52)

    expect(limitedConversation.length).toEqual(2)
    expect(limitedConversation[0].content).toEqual("confirmed.")
    expect(limitedConversation[1].content).toEqual("Please adhere to the community guidelines.")
  })

  it('shouldReturnAllMessages', () => {
    let conversation: CreateChatCompletionRequestMessage[] = [
      { role: "assistant", content: "Welcome to the platform!" },
      { role: "user", content: "Your order has been confirmed." },
      { role: "assistant", content: "Please adhere to the community guidelines." },
    ];

    let limitedConversation = limitConversationContext(conversation, 100)

    expect(limitedConversation.length).toEqual(3)
    expect(limitedConversation[0].content).toEqual("Welcome to the platform!")
    expect(limitedConversation[1].content).toEqual("Your order has been confirmed.")
    expect(limitedConversation[2].content).toEqual("Please adhere to the community guidelines.")
  })

  it('shouldFilterEmpty', () => {
    let emptyConversation: CreateChatCompletionRequestMessage[] = [];

    let limitedEmpty = limitConversationContext(emptyConversation, 100)

    expect(limitedEmpty.length).toEqual(0)
  })

  it('shouldFilterEmptyMessages', () => {
    let conversation: CreateChatCompletionRequestMessage[] = [
      { role: "assistant", content: "" },
      { role: "assistant", content: "Please adhere to the community guidelines." },
      { role: "assistant", content: "" },
      { role: "assistant", content: null },
    ];

    let cleanConversation = limitConversationContext(conversation, 100)

    expect(cleanConversation.length).toEqual(1)
    expect(cleanConversation[0].content).toEqual("Please adhere to the community guidelines.")
  })

  it('shouldPreserveOrderOfMessages', () => {
    let conversation: CreateChatCompletionRequestMessage[] = [
      { role: "assistant", content: "one" },
      { role: "assistant", content: "two" },
      { role: "assistant", content: "three" },
    ];

    let limitedConversation = limitConversationContext(conversation, 100)

    expect(limitedConversation[0].content).toEqual("one")
    expect(limitedConversation[1].content).toEqual("two")
    expect(limitedConversation[2].content).toEqual("three")
  })
})
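One case the suite does not cover: the newest message alone exceeds the budget, so the partial-attach branch fires on the very first iteration. A suggested addition in the file's own style (not part of the PR):

```ts
it('shouldTrimTheNewestMessageWhenItExceedsTheBudget', () => {
  let conversation: CreateChatCompletionRequestMessage[] = [
    { role: "user", content: "abcdefghijklmnopqrst" }, // 20 chars
  ];

  let limitedConversation = limitConversationContext(conversation, 5)

  // Only the trailing 5 characters of the single message survive.
  expect(limitedConversation.length).toEqual(1)
  expect(limitedConversation[0].content).toEqual("pqrst")
})
```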
38 changes: 38 additions & 0 deletions src/modules/open-ai/helpers.ts
@@ -4,6 +4,7 @@ import { type ParseMode } from 'grammy/types'
 import { getChatModel, getChatModelPrice, getTokenNumber } from './api/openAi'
 import { type Message, type InlineKeyboardMarkup } from 'grammy/out/types'
 import { isValidUrl } from './utils/web-crawler'
+import type OpenAI from 'openai/index'
 // import { llmAddUrlDocument } from '../llms/api/llmApi'
 
 export const SupportedCommands = {
@@ -290,3 +291,40 @@ export const getUrlFromText = (ctx: OnMessageContext | OnCallBackQueryData): str
   }
   return undefined
 }
+
+type ConversationItem = OpenAI.Chat.Completions.CreateChatCompletionRequestMessage
+
+export function limitConversationContext (conversation: ConversationItem[], charactersCount: number): ConversationItem[] {
+  const filteredConversation: ConversationItem[] = []
+  let totalContentLength = 0
+
+  for (let i = conversation.length - 1; i >= 0; i--) {
+    const message = conversation[i]
+
+    if (!message.content || message.content.length === 0) {
+      continue
+    }
+
+    if (totalContentLength + message.content.length <= charactersCount) {
+      filteredConversation.unshift(message)
+      totalContentLength += message.content.length
+      continue
+    }
+
+    const charsLeft = charactersCount - totalContentLength
+    if (charsLeft > 0) {
+      const length = Math.min(charsLeft, message.content.length)
+      const trimmedContent = message.content.substring(message.content.length - length)
+      const newMessage: ConversationItem = {
+        role: message.role,
+        content: trimmedContent
+      }
+
+      filteredConversation.unshift(newMessage)
+      totalContentLength += trimmedContent.length
+      break
+    }
+  }
+
+  return filteredConversation
+}
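Note that the budget is counted in characters, not tokens, and a partially kept message is cut at an arbitrary character offset (the shouldAttachPartOfAMessage test pins this down with "confirmed."). If a token budget is what is actually available, a character budget can be approximated with the common rough heuristic of about 4 characters per English token; the helper below is illustrative and not part of the PR:

```ts
import type OpenAI from 'openai/index'
import { limitConversationContext } from './helpers'

type Msg = OpenAI.Chat.Completions.CreateChatCompletionRequestMessage

// Illustrative only: derive a character budget from a token budget using
// the rough ~4-characters-per-token heuristic for English text.
const charBudgetForTokens = (maxTokens: number): number => maxTokens * 4

const conversation: Msg[] = [
  { role: 'user', content: 'How do I reset my password?' }
]

// Reserve roughly 128 tokens of context (~512 characters).
const limited = limitConversationContext(conversation, charBudgetForTokens(128))
```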
9 changes: 6 additions & 3 deletions src/modules/sd-images/api/helpers.test.ts
@@ -1,10 +1,13 @@
 /* eslint-disable */
 import { waitingExecute, IParams, getParamsFromPrompt } from './helpers'
 import { type IModel } from './models-config'
+import { describe, it } from "@jest/globals";
 
-export const test = () => {
-  return true
-}
+describe('', () => {
+  it('should ', () => {
+
+  });
+})
 
 // Mock the console.error method to prevent it from actually logging errors
 // console.error = jest.fn()