Skip to content

Commit

Permalink
more endpoints (#1)
Browse files Browse the repository at this point in the history
  • Loading branch information
JBGruber committed Jan 6, 2024
1 parent 379e86c commit f589279
Show file tree
Hide file tree
Showing 10 changed files with 245 additions and 16 deletions.
3 changes: 3 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
# Generated by roxygen2: do not edit by hand

export(chat)
export(copy_model)
export(create_model)
export(delete_model)
export(embed_text)
export(list_models)
export(new_chat)
export(pull_model)
Expand Down
2 changes: 1 addition & 1 deletion R/chat.r
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
#' @param q the question as a character string or a conversation object.
#' @param model which model to use. See <https://ollama.ai/library> for options.
#' Default is "llama2". Set option(rollama_model = "modelname") to change
#' default for the current session.
#' default for the current session. See \link{pull_model} for more details.
#' @param screen Logical. Should the answer be printed to the screen.
#' @param server URL to an Ollama server (not the API). Defaults to
#' "http://localhost:11434".
Expand Down
55 changes: 55 additions & 0 deletions R/embedding.r
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
#' Generate Embeddings
#'
#' Sends each element of `text` to the Ollama embeddings endpoint and collects
#' the results in a single tibble with one row per input text and one
#' `dim_<i>` column per embedding dimension.
#'
#' @param text text vector to generate embeddings for.
#' @inheritParams query
#'
#' @return a tibble with embeddings (rows = texts, columns = dimensions).
#' @export
#'
#' @examples
#' \dontrun{
#' embed_text(c("Here is an article about llamas...",
#'              "R is a language and environment for statistical computing and graphics."))
#' }
embed_text <- function(text,
                       model = NULL,
                       server = NULL) {

  if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
  if (is.null(server)) server <- getOption("rollama_server",
                                           default = "http://localhost:11434")
  spinner <- getOption("rollama_spinner", default = interactive())

  purrr::map(seq_along(text), function(i) {
    req_data <- list(model = model,
                     prompt = text[i],
                     stream = FALSE)

    if (spinner) {
      # run the request in a background process so the spinner can animate
      # while we wait for the server
      cli::cli_progress_step("{cli::pb_spin} {model} is embedding text {i}")
      rp <- callr::r_bg(make_req,
                        args = list(req_data = req_data,
                                    server = server,
                                    endpoint = "/api/embeddings"),
                        package = TRUE)
      while (rp$is_alive()) {
        cli::cli_progress_update()
        Sys.sleep(2 / 100)
      }
      resp <- rp$get_result()
      cli::cli_progress_done()
    } else {
      resp <- make_req(req_data, server, "/api/embeddings")
    }

    if (!is.null(resp$error)) {
      # point users at pull_model() when the model is simply not downloaded yet
      if (grepl("model.+not found, try pulling it first", resp$error)) {
        resp$error <- paste(resp$error, "with {.code pull_model(\"{model}\")}")
      }
      cli::cli_abort(resp$error)
    }
    names(resp$embedding) <- paste0("dim_", seq_along(resp$embedding))
    tibble::as_tibble(resp$embedding)
  }) |>
    dplyr::bind_rows()
}
91 changes: 87 additions & 4 deletions R/models.r
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,17 @@
#' @details
#' - `pull_model()`: downloads model
#' - `show_model()`: displays information about a local model
#' - `copy_model()`: creates a model with another name from an existing model
#' - `delete_model()`: deletes local model
#'
#' **Model names**: Model names follow a model:tag format, where model can have
#' an optional namespace such as example/model. Some examples are
#' orca-mini:3b-q4_1 and llama2:70b. The tag is optional and, if not provided,
#' will default to latest. The tag is used to identify a specific version.
#'
#' @param model name of the model. Defaults to "llama2" when `NULL` (except in
#' `delete_model`).
#' @param destination name of the copied model.
#' @inheritParams query
#'
#' @return (invisible) a tibble with information about the model (except in
Expand All @@ -22,7 +29,8 @@
pull_model <- function(model = NULL, server = NULL) {

if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
if (is.null(server)) server <- getOption("rollama_server", default = "http://localhost:11434")
if (is.null(server)) server <- getOption("rollama_server",
default = "http://localhost:11434")

httr2::request(server) |>
httr2::req_url_path_append("/api/pull") |>
Expand All @@ -35,28 +43,80 @@ pull_model <- function(model = NULL, server = NULL) {
invisible(show_model(model))
}


#' @rdname pull_model
#' @export
show_model <- function(model = NULL, server = NULL) {

  if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
  if (is.null(server)) server <- getOption("rollama_server",
                                           default = "http://localhost:11434")

  # query the server for the model's metadata and flatten the nested JSON
  # answer into a one-row tibble
  httr2::request(server) |>
    httr2::req_url_path_append("/api/show") |>
    httr2::req_body_json(list(name = model)) |>
    httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |>
    httr2::req_perform() |>
    httr2::resp_body_json() |>
    purrr::list_flatten(name_spec = "{inner}") |>
    tibble::as_tibble()
}


#' Create a model from a Modelfile
#'
#' @param model name of the model to create.
#' @param modelfile either a path to a model file to be read or the contents of
#'   the model file as a character vector.
#' @inheritParams query
#'
#' @details Custom models are the way to change parameters in Ollama. If you
#'   use `show_model()`, you can look at the configuration of a model in the
#'   column modelfile. To get more information and a list of valid parameters,
#'   check out
#'   <https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md>.
#'
#' @return (invisible) a tibble with information about the created model.
#' @export
#'
#' @examples
#' \dontrun{
#' modelfile <- system.file("extdata", "modelfile.txt", package = "rollama")
#' create_model("mario", modelfile)
#' modelfile <- "FROM llama2\nSYSTEM You are mario from Super Mario Bros."
#' create_model("mario", modelfile)
#' }
create_model <- function(model, modelfile, server = NULL) {

  if (is.null(server)) server <- getOption("rollama_server",
                                           default = "http://localhost:11434")

  # only a length-1 input can be a file path; check the length first because
  # file.exists() on a longer vector returns a logical vector, which makes
  # `if` throw an error (R >= 4.2). Longer vectors are treated as the lines
  # of a modelfile.
  if (length(modelfile) == 1 && file.exists(modelfile)) {
    modelfile <- readChar(modelfile, file.size(modelfile))
  } else if (length(modelfile) > 1) {
    modelfile <- paste0(modelfile, collapse = "\n")
  }

  httr2::request(server) |>
    httr2::req_url_path_append("/api/create") |>
    httr2::req_method("POST") |>
    httr2::req_body_json(list(name = model, modelfile = modelfile)) |>
    httr2::req_perform_stream(callback = pgrs, buffer_kb = 0.1)

  cli::cli_process_done(.envir = the)
  the$str_prgs <- NULL

  model_info <- show_model(model) # also verifies the model was actually created
  cli::cli_progress_message(
    "{cli::col_green(cli::symbol$tick)} model {model} created"
  )
  invisible(model_info)
}


#' @rdname pull_model
#' @export
delete_model <- function(model, server = NULL) {

if (is.null(server)) server <- getOption("rollama_server", default = "http://localhost:11434")
if (is.null(server)) server <- getOption("rollama_server",
default = "http://localhost:11434")

httr2::request(server) |>
httr2::req_url_path_append("/api/delete") |>
Expand All @@ -65,7 +125,30 @@ delete_model <- function(model, server = NULL) {
httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |>
httr2::req_perform()

cli::cli_progress_message("{cli::col_green(cli::symbol$tick)} {model} removed")
cli::cli_progress_message(
"{cli::col_green(cli::symbol$tick)} model {model} removed"
)
}


#' @rdname pull_model
#' @export
copy_model <- function(model,
                       destination = paste0(model, "-copy"),
                       server = NULL) {

  if (is.null(server)) server <- getOption("rollama_server",
                                           default = "http://localhost:11434")

  # the /api/copy endpoint expects both names; previously only `name` was
  # sent, so `destination` was silently ignored and the request failed
  httr2::request(server) |>
    httr2::req_url_path_append("/api/copy") |>
    httr2::req_body_json(list(source = model, destination = destination)) |>
    httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |>
    httr2::req_perform()

  cli::cli_progress_message(
    "{cli::col_green(cli::symbol$tick)} model {model} copied to {destination}"
  )
}


Expand Down
13 changes: 7 additions & 6 deletions R/utils.r
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,17 @@ build_req <- function(model, msg, server) {
cli::cli_progress_step("{model} is thinking {cli::pb_spin}")
rp <- callr::r_bg(make_req,
args = list(req_data = req_data,
server = server),
server = server,
endpoint = "/api/chat"),
package = TRUE)
while (rp$is_alive()) {
if (interactive()) cli::cli_progress_update()
cli::cli_progress_update()
Sys.sleep(2 / 100)
}
resp <- rp$get_result()
if (interactive()) cli::cli_progress_done()
cli::cli_progress_done()
} else {
resp <- make_req(req_data, server)
resp <- make_req(req_data, server, "/api/chat")
}

if (!is.null(resp$error)) {
Expand All @@ -38,9 +39,9 @@ build_req <- function(model, msg, server) {
}


make_req <- function(req_data, server) {
make_req <- function(req_data, server, endpoint) {
httr2::request(server) |>
httr2::req_url_path_append("/api/chat") |>
httr2::req_url_path_append(endpoint) |>
httr2::req_body_json(req_data) |>
# turn off errors since error messages can't be seen in sub-process
httr2::req_error(is_error = function(resp) FALSE) |>
Expand Down
8 changes: 8 additions & 0 deletions inst/extdata/modelfile.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
FROM llama2
# sets the temperature to 1 [higher is more creative, lower is more coherent]
PARAMETER temperature 1
# sets the context window size to 4096, this controls how many tokens the LLM can use as context to generate the next token
PARAMETER num_ctx 4096

# sets a custom system message to specify the behavior of the chat assistant
SYSTEM You are Mario from Super Mario Bros., acting as an assistant.
35 changes: 35 additions & 0 deletions man/create_model.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

30 changes: 30 additions & 0 deletions man/embed_text.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

22 changes: 18 additions & 4 deletions man/pull_model.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion man/query.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit f589279

Please sign in to comment.