Commit ffd32e7: support for llamafile

pelikhan committed May 28, 2024
1 parent e861438

Showing 7 changed files with 55 additions and 3 deletions.
7 changes: 7 additions & 0 deletions docs/src/content/docs/getting-started/configuration.mdx
@@ -254,6 +254,13 @@ script({

</Steps>

### Llamafile

[llamafile](https://llamafile.ai/) is a single-file desktop application
that lets you run an LLM locally.

The provider is `llamafile`; the model name is ignored.
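
For example, a minimal script that targets a locally running llamafile server
(mirroring the sample added in this commit):

```js
script({
    // the model name after the provider is ignored for llamafile
    model: "llamafile",
})

$`Write a poem.`
```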

### Jan, LMStudio, LLaMA.cpp

[Jan](https://jan.ai/), [LMStudio](https://lmstudio.ai/),
19 changes: 19 additions & 0 deletions packages/core/src/connection.ts
@@ -3,14 +3,17 @@ import {
    DOCS_CONFIGURATION_AICI_URL,
    DOCS_CONFIGURATION_AZURE_OPENAI_URL,
    DOCS_CONFIGURATION_LITELLM_URL,
    DOCS_CONFIGURATION_LLAMAFILE_URL,
    DOCS_CONFIGURATION_LOCALAI_URL,
    DOCS_CONFIGURATION_OLLAMA_URL,
    DOCS_CONFIGURATION_OPENAI_URL,
    LITELLM_API_BASE,
    LLAMAFILE_API_BASE,
    LOCALAI_API_BASE,
    MODEL_PROVIDER_AICI,
    MODEL_PROVIDER_AZURE,
    MODEL_PROVIDER_LITELLM,
    MODEL_PROVIDER_LLAMAFILE,
    MODEL_PROVIDER_OLLAMA,
    MODEL_PROVIDER_OPENAI,
    OLLAMA_API_BASE,
@@ -175,6 +178,16 @@ export async function parseTokenFromEnv(
        }
    }

    if (provider === MODEL_PROVIDER_LLAMAFILE) {
        return {
            provider,
            base: LLAMAFILE_API_BASE,
            token: "llamafile",
            type: "openai",
            source: "default",
        }
    }
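
Unlike the other providers, the llamafile branch reads nothing from the
environment; resolution always yields the same local connection. With the
constant values substituted in, the returned object looks like this (a sketch
for illustration, values taken from constants.ts below):

```js
// What parseTokenFromEnv resolves for the "llamafile" provider:
const connection = {
    provider: "llamafile",
    base: "http://localhost:8080/v1", // LLAMAFILE_API_BASE
    token: "llamafile", // fixed placeholder, no real secret required
    type: "openai", // the server speaks the OpenAI wire protocol
    source: "default",
}
```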

    if (provider === MODEL_PROVIDER_LITELLM) {
        return {
            provider,
@@ -193,6 +206,12 @@ export function dotEnvTemplate(provider: string, apiType: APIType) {
        return `
## Ollama ${DOCS_CONFIGURATION_OLLAMA_URL}
# OLLAMA_API_BASE="<custom api base>" # uses ${OLLAMA_API_BASE} by default
`

    if (provider === MODEL_PROVIDER_LLAMAFILE)
        return `
## llamafile ${DOCS_CONFIGURATION_LLAMAFILE_URL}
# There is no configuration for llamafile
`

    if (provider === MODEL_PROVIDER_LITELLM)
9 changes: 9 additions & 0 deletions packages/core/src/constants.ts
@@ -67,6 +67,7 @@ export const MARKDOWN_PROMPT_FENCE = "`````"

export const OPENAI_API_BASE = "https://api.openai.com/v1"
export const OLLAMA_API_BASE = "http://localhost:11434/v1"
export const LLAMAFILE_API_BASE = "http://localhost:8080/v1"
export const LOCALAI_API_BASE = "http://localhost:8080/v1"
export const LITELLM_API_BASE = "http://localhost:4000"
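
llamafile serves an OpenAI-compatible API on port 8080 by default, which is
why its base URL matches LocalAI's. A quick smoke test against a running
server (a sketch, assuming the default port and no authentication; the bearer
token is just the placeholder from connection.ts):

```js
// Sketch: query a local llamafile server through its OpenAI-compatible API.
const res = await fetch("http://localhost:8080/v1/chat/completions", {
    method: "POST",
    headers: {
        "Content-Type": "application/json",
        Authorization: "Bearer llamafile", // placeholder, not a real secret
    },
    body: JSON.stringify({
        model: "llamafile", // the server ignores the model name
        messages: [{ role: "user", content: "Write a poem." }],
    }),
})
const data = await res.json()
console.log(data.choices[0].message.content)
```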

@@ -81,6 +82,7 @@ export const EMOJI_UNDEFINED = "?"
export const MODEL_PROVIDER_OPENAI = "openai"
export const MODEL_PROVIDER_AZURE = "azure"
export const MODEL_PROVIDER_OLLAMA = "ollama"
export const MODEL_PROVIDER_LLAMAFILE = "llamafile"
export const MODEL_PROVIDER_LITELLM = "litellm"
export const MODEL_PROVIDER_AICI = "aici"

@@ -94,6 +96,8 @@ export const DOCS_CONFIGURATION_AZURE_OPENAI_URL =
"https://microsoft.github.io/genaiscript/getting-started/configuration/#azure-openai"
export const DOCS_CONFIGURATION_OLLAMA_URL =
"https://microsoft.github.io/genaiscript/getting-started/configuration/#ollama"
export const DOCS_CONFIGURATION_LLAMAFILE_URL =
"https://microsoft.github.io/genaiscript/getting-started/configuration/#llamafile"
export const DOCS_CONFIGURATION_LITELLM_URL =
"https://microsoft.github.io/genaiscript/getting-started/configuration/#litellm"
export const DOCS_CONFIGURATION_LOCALAI_URL =
@@ -117,6 +121,11 @@ export const MODEL_PROVIDERS = Object.freeze([
detail: "Ollama local model",
url: DOCS_CONFIGURATION_OLLAMA_URL,
},
{
id: MODEL_PROVIDER_LLAMAFILE,
detail: "llamafile.ai local model",
url: DOCS_CONFIGURATION_LLAMAFILE_URL,
},
{
id: MODEL_PROVIDER_LITELLM,
detail: "LiteLLM proxy",
12 changes: 11 additions & 1 deletion packages/core/src/models.test.ts
@@ -1,7 +1,12 @@
import test, { describe } from "node:test"
import { parseModelIdentifier } from "./models"
import assert from "node:assert"
-import { MODEL_PROVIDER_AICI, MODEL_PROVIDER_OLLAMA, MODEL_PROVIDER_OPENAI } from "./constants"
+import {
+    MODEL_PROVIDER_AICI,
+    MODEL_PROVIDER_LLAMAFILE,
+    MODEL_PROVIDER_OLLAMA,
+    MODEL_PROVIDER_OPENAI,
+} from "./constants"

// generate unit tests for parseModelIdentifier
describe("parseModelIdentifier", () => {
@@ -20,6 +25,11 @@ describe("parseModelIdentifier", () => {
assert(model === "phi3")
assert(modelId === "phi3")
})
test("llamafile", () => {
const { provider, model } = parseModelIdentifier("llamafile")
assert(provider === MODEL_PROVIDER_LLAMAFILE)
assert(model === "*")
})
test("gpt4", () => {
const { provider, model, modelId } = parseModelIdentifier("gpt4")
assert(provider === MODEL_PROVIDER_OPENAI)
3 changes: 3 additions & 0 deletions packages/core/src/models.ts
@@ -4,6 +4,7 @@ import {
    DEFAULT_MODEL,
    MODEL_PROVIDER_AICI,
    MODEL_PROVIDER_AZURE,
    MODEL_PROVIDER_LLAMAFILE,
    MODEL_PROVIDER_OLLAMA,
    MODEL_PROVIDER_OPENAI,
} from "./constants"
@@ -43,6 +44,8 @@ export function parseModelIdentifier(id: string) {
    }
    else if (parts.length === 2)
        return { provider: parts[0], model: parts[1], modelId: parts[1] }
    else if (id === MODEL_PROVIDER_LLAMAFILE)
        return { provider: MODEL_PROVIDER_LLAMAFILE, model: "*", modelId: id }
    else return { provider: MODEL_PROVIDER_OPENAI, model: id, modelId: id }
}
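
A few worked resolutions of the branches above, consistent with the unit
tests in models.test.ts (a sketch for illustration):

```js
parseModelIdentifier("ollama:phi3")
// -> { provider: "ollama", model: "phi3", modelId: "phi3" }

parseModelIdentifier("llamafile")
// -> { provider: "llamafile", model: "*", modelId: "llamafile" }

parseModelIdentifier("gpt4")
// -> { provider: "openai", model: "gpt4", modelId: "gpt4" }
```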

6 changes: 4 additions & 2 deletions packages/sample/genaisrc/completion.genai.js
@@ -1,3 +1,5 @@
 script({
-    model:"ollama:phi3:latest"
-})
+    model:"llamafile"
+})
+
+$`Write a poem.`
2 changes: 2 additions & 0 deletions packages/vscode/src/lmaccess.ts
@@ -14,6 +14,7 @@ import {
    MODEL_PROVIDER_AZURE,
    parseModelIdentifier,
    MODEL_PROVIDER_LITELLM,
    MODEL_PROVIDER_LLAMAFILE,
} from "genaiscript-core"
import { isApiProposalEnabled } from "./proposals"

@@ -24,6 +25,7 @@ async function generateLanguageModelConfiguration(
    const { provider } = parseModelIdentifier(modelId)
    if (
        provider === MODEL_PROVIDER_OLLAMA ||
        provider === MODEL_PROVIDER_LLAMAFILE ||
        provider === MODEL_PROVIDER_AICI ||
        provider === MODEL_PROVIDER_AZURE ||
        provider === MODEL_PROVIDER_LITELLM
