cover remaining parameters

JBGruber committed Jan 7, 2024
1 parent a8e2ca0 commit c4f672f
Showing 12 changed files with 220 additions and 53 deletions.
4 changes: 3 additions & 1 deletion DESCRIPTION
@@ -21,13 +21,15 @@ Imports:
methods,
prettyunits,
purrr,
rlang,
tibble
Suggests:
base64enc,
knitr,
rmarkdown,
spelling,
testthat (>= 3.0.0)
VignetteBuilder: knitr
Config/testthat/edition: 3
Language: en-US
URL: https://jbgruber.github.io/rollama/
URL: https://jbgruber.github.io/rollama/, https://github.com/JBGruber/rollama
115 changes: 98 additions & 17 deletions R/chat.r
@@ -12,6 +12,11 @@
#' @param screen Logical. Should the answer be printed to the screen?
#' @param server URL to an Ollama server (not the API). Defaults to
#' "http://localhost:11434".
#' @param images path(s) to images (for multimodal models such as llava).
#' @param model_params a named list of additional model parameters listed in the
#' documentation for the Modelfile such as temperature.
#' @param template the prompt template to use (overrides what is defined in the
#' Modelfile).
#'
#' @return an httr2 response
#' @export
@@ -24,27 +29,104 @@
#' # hold a conversation
#' chat("why is the sky blue?")
#' chat("and how do you know that?")
#'
#' # ask question about images (to a multimodal model)
#' images <- c("https://avatars.githubusercontent.com/u/23524101?v=4", # remote
#' "/path/to/your/image.jpg") # or local images supported
#' query(q = "describe these images",
#' model = "llava",
#' images = images)
#'
#' # set custom options for the model at runtime (rather than in create_model())
#' query("why is the sky blue?",
#' model_params = list(
#' num_keep = 5,
#' seed = 42,
#' num_predict = 100,
#' top_k = 20,
#' top_p = 0.9,
#' tfs_z = 0.5,
#' typical_p = 0.7,
#' repeat_last_n = 33,
#' temperature = 0.8,
#' repeat_penalty = 1.2,
#' presence_penalty = 1.5,
#' frequency_penalty = 1.0,
#' mirostat = 1,
#' mirostat_tau = 0.8,
#' mirostat_eta = 0.6,
#' penalize_newline = TRUE,
#' stop = c("\n", "user:"),
#' numa = FALSE,
#' num_ctx = 1024,
#' num_batch = 2,
#' num_gqa = 1,
#' num_gpu = 1,
#' main_gpu = 0,
#' low_vram = FALSE,
#' f16_kv = TRUE,
#' vocab_only = FALSE,
#' use_mmap = TRUE,
#' use_mlock = FALSE,
#' embedding_only = FALSE,
#' rope_frequency_base = 1.1,
#' rope_frequency_scale = 0.8,
#' num_thread = 8
#' ))
#'
#' # this is useful if you want to turn off the GPU and load the model into
#' # system memory (slower, but most people have more RAM than VRAM, which
#' # can help with larger models)
#' query("why is the sky blue?",
#' model_params = list(num_gpu = 0))
#'
#' # You can use a custom template to override the prompt the model receives
#' query("why is the sky blue?",
#' template = "Just say I'm a llama!")
#' }
query <- function(q,
model = NULL,
screen = TRUE,
server = NULL) {
server = NULL,
images = NULL,
model_params = NULL,
template = NULL) {

if (!is.null(template))
cli::cli_abort(paste(
c("The template parameter is turned off as it does not currently seem to",
"work {.url https://github.com/jmorganca/ollama/issues/1839}")
))

if (!is.list(q)) {
config <- getOption("rollama_config", default = NULL)

msg <- do.call(rbind, list(
if (!is.null(config)) data.frame(role = "system",
content = config),
data.frame(role = "user", content = q)
))

if (length(images) > 0) {
rlang::check_installed("base64enc")
images <- purrr::map_chr(images, \(i) base64enc::base64encode(i))
msg <- tibble::add_column(msg, images = list(images))
}

} else {
msg <- q
if (!"user" %in% msg$role && nchar(msg$content) > 0)
cli::cli_abort(paste("If you supply a conversation object, it needs at",
"least one user message. See {.help query}."))
}

resp <- build_req(model = model, msg = msg, server = server)
resp <- build_req(model = model,
msg = msg,
server = server,
images = images,
model_params = model_params,
template = template)

if (screen) screen_answer(purrr::pluck(resp, "message", "content"))
invisible(resp)
}
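
To illustrate the image handling added above: each image is base64-encoded and attached to the user message as a list-column. A minimal sketch of that step outside query() (the file path is illustrative; assumes base64enc is installed):

rlang::check_installed("base64enc")
img_enc <- base64enc::base64encode("/path/to/your/image.jpg")  # file -> base64 string
msg <- data.frame(role = "user", content = "describe this image")
msg <- tibble::add_column(msg, images = list(img_enc))  # same shape query() builds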
@@ -78,26 +160,25 @@ chat <- function(q,
}


#' Start a new conversation
#' Handle conversations
#'
#' Deletes the local prompt and response history to start a new conversation.
#' Shows the local prompt and response history; `new_chat()` deletes it to start a new conversation.
#'
#' @return Does not return a value
#' @return chat_history: tibble with chat history
#' @export
new_chat <- function() {
the$responses <- NULL
the$prompts <- NULL
chat_history <- function() {
hist <- c(the$prompts, the$responses)
tibble::tibble(
role = rep(c("user", "assistant"), length(hist) / 2),
content = hist
)
}


#' Chat history
#'
#' @return as
#' @rdname chat_history
#' @return new_chat: Does not return a value
#' @export
chat_history <- function() {
hist <- c(rbind(the$prompts, the$responses))
if (length(hist) > 0) tibble::as_tibble(
data.frame(role = c("user", "assistant"),
content = hist)
) else tibble::tibble()
new_chat <- function() {
the$responses <- NULL
the$prompts <- NULL
}
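
A short usage sketch of the two helpers above (the assistant's reply depends on the model):

chat("why is the sky blue?")   # stores one prompt and one response
chat_history()                 # tibble with columns role ("user"/"assistant") and content
new_chat()                     # clears the stored history
nrow(chat_history())           # 0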
7 changes: 5 additions & 2 deletions R/embedding.r
@@ -13,7 +13,8 @@
#' }
embed_text <- function(text,
model = NULL,
server = NULL) {
server = NULL,
model_params = NULL) {

if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
if (is.null(server)) server <- getOption("rollama_server", default = "http://localhost:11434")
@@ -22,7 +23,9 @@ embed_text <- function(text,
purrr::map(seq_along(text), function(i) {
req_data <- list(model = model,
prompt = text[i],
stream = FALSE)
stream = FALSE,
model_params = model_params) |>
purrr::compact()

if (spinner) {
cli::cli_progress_step("{cli::pb_spin} {model} is embedding text {i}")
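A usage sketch for the model_params pass-through added above (parameter values are illustrative):

embed_text(c("the sky is blue", "grass is green"),
           model = "llama2",
           model_params = list(num_gpu = 0))  # e.g. force CPU-only embedding

As in query(), a NULL model_params is dropped by purrr::compact() before the request is sent.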
19 changes: 13 additions & 6 deletions R/models.r
@@ -13,6 +13,8 @@
#'
#' @param model name of the model. Defaults to "llama2" when `NULL` (except in
#' `delete_model`).
#' @param insecure allow insecure connections to the library. Only use this if
#'   you are pulling from your own library during development.
#' @param destination name of the copied model.
#' @inheritParams query
#'
@@ -26,15 +28,15 @@
#' # after you pull, you can get the same information with:
#' model_info <- show_model("mixtral")
#' }
pull_model <- function(model = NULL, server = NULL) {
pull_model <- function(model = NULL, server = NULL, insecure = FALSE) {

if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
if (is.null(server)) server <- getOption("rollama_server",
default = "http://localhost:11434")

httr2::request(server) |>
httr2::req_url_path_append("/api/pull") |>
httr2::req_body_json(list(name = model)) |>
httr2::req_body_json(list(name = model, insecure = insecure)) |>
httr2::req_perform_stream(callback = pgrs, buffer_kb = 0.1)

cli::cli_process_done(.envir = the)
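
A sketch of the new insecure flag; only use it against a library you control during development (the model name is hypothetical):

pull_model("my-dev-model", insecure = TRUE)  # allows an insecure connection to the library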
@@ -51,6 +53,7 @@ show_model <- function(model = NULL, server = NULL) {
if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
if (is.null(server)) server <- getOption("rollama_server",
default = "http://localhost:11434")
if (length(model) != 1L) cli::cli_abort("model needs to be one model name.")

httr2::request(server) |>
httr2::req_url_path_append("/api/show") |>
@@ -59,6 +62,7 @@ show_model <- function(model = NULL, server = NULL) {
httr2::req_perform() |>
httr2::resp_body_json() |>
purrr::list_flatten(name_spec = "{inner}") |>
purrr::compact() |>
tibble::as_tibble()
}
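
The added length check makes show_model() fail early instead of sending a malformed request; a quick sketch:

show_model("llama2")                  # one name: returns model metadata as a tibble
# show_model(c("llama2", "mixtral")) # aborts: "model needs to be one model name."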

@@ -70,10 +74,13 @@
#' the model file as a character vector.
#' @inheritParams query
#'
#' @details Custom models are the way to change paramters in Ollama. If you use
#' `show_model()`, you can look at the configuration of a model in the column
#' modelfile. To get more information and a list of valid parameters, check out
#' <https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md>.
#' @details Custom models are the way to save your system message and model
#'   parameters in a dedicated, shareable way. If you use `show_model()`, you
#'   can look at the configuration of a model in the column modelfile. To get
#'   more information and a list of valid parameters, check out
#'   <https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md>. Most
#'   options are also available through the `query` and `chat` functions, but
#'   they are not persistent across sessions.
#'
#'
#' @return Nothing. Called to create a model on the Ollama server.
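
A sketch of persisting options in a custom model rather than passing them on every call (the Modelfile lines are illustrative; see the Modelfile documentation linked above):

modelfile <- c("FROM llama2",
               "PARAMETER temperature 0.8",
               "SYSTEM You are a concise assistant.")
create_model("llama2-concise", modelfile)
query("why is the sky blue?", model = "llama2-concise")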
8 changes: 6 additions & 2 deletions R/utils.r
@@ -34,15 +34,18 @@ ping_ollama <- function(server = NULL, silent = FALSE) {
}


build_req <- function(model, msg, server) {
build_req <- function(model, msg, server, images, model_params, template) {

if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
if (is.null(server)) server <- getOption("rollama_server",
default = "http://localhost:11434")

req_data <- list(model = model,
messages = msg,
stream = FALSE)
stream = FALSE,
model_params = model_params,
template = template) |>
purrr::compact()

if (getOption("rollama_verbose", default = interactive())) {
cli::cli_progress_step("{model} is thinking {cli::pb_spin}")
@@ -155,3 +158,4 @@ pgrs <- function(resp) {
}
TRUE
}
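
Why purrr::compact() in build_req(): optional fields left at NULL are dropped before the list is serialised to JSON, so the server only receives parameters the user actually set. A minimal sketch:

req_data <- list(model = "llama2",
                 stream = FALSE,
                 model_params = NULL,   # user set nothing extra
                 template = NULL) |>
  purrr::compact()
names(req_data)  # "model" "stream" -- the NULL entries are gone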

2 changes: 1 addition & 1 deletion inst/WORDLIST
@@ -14,8 +14,8 @@ config
embeddings
http
httr
llava
modelfile
modelname
orca
paramters
tibble
11 changes: 8 additions & 3 deletions man/chat_history.Rd


11 changes: 7 additions & 4 deletions man/create_model.Rd


5 changes: 4 additions & 1 deletion man/embed_text.Rd


14 changes: 0 additions & 14 deletions man/new_chat.Rd

This file was deleted.

5 changes: 4 additions & 1 deletion man/pull_model.Rd


