Skip to content

Commit

Permalink
adds tests
Browse files Browse the repository at this point in the history
  • Loading branch information
JBGruber committed Jan 6, 2024
1 parent f589279 commit 80f44fa
Show file tree
Hide file tree
Showing 23 changed files with 265 additions and 30 deletions.
1 change: 1 addition & 0 deletions .Rbuildignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@
^README\.Rmd$
^doc$
^Meta$
^\.github$
1 change: 1 addition & 0 deletions .github/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
*.html
49 changes: 49 additions & 0 deletions .github/workflows/R-CMD-check.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
# Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
# Runs R CMD check on every push / pull request targeting main or master.
on:
  push:
    branches: [main, master]
  pull_request:
    branches: [main, master]

name: R-CMD-check

jobs:
  R-CMD-check:
    runs-on: ${{ matrix.config.os }}

    name: ${{ matrix.config.os }} (${{ matrix.config.r }})

    strategy:
      # Keep running the remaining OS/R combinations if one of them fails.
      fail-fast: false
      matrix:
        config:
          # Checked on all three major OSes with released R, plus devel and
          # the previous release on Ubuntu (CRAN's minimum expectation).
          - {os: macos-latest, r: 'release'}
          - {os: windows-latest, r: 'release'}
          - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'}
          - {os: ubuntu-latest, r: 'release'}
          - {os: ubuntu-latest, r: 'oldrel-1'}

    env:
      # GITHUB_PAT raises GitHub API rate limits when installing dependencies.
      GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
      R_KEEP_PKG_SOURCE: yes

    steps:
      # NOTE(review): actions/checkout@v3 is outdated; v4 is current — consider updating.
      - uses: actions/checkout@v3

      # Pandoc is required to build the vignettes / README.
      - uses: r-lib/actions/setup-pandoc@v2

      - uses: r-lib/actions/setup-r@v2
        with:
          r-version: ${{ matrix.config.r }}
          http-user-agent: ${{ matrix.config.http-user-agent }}
          # Binary packages from Posit's public package manager speed up installs.
          use-public-rspm: true

      - uses: r-lib/actions/setup-r-dependencies@v2
        with:
          extra-packages: any::rcmdcheck
          needs: check

      - uses: r-lib/actions/check-r-package@v2
        with:
          # Upload testthat snapshot failures as workflow artifacts for debugging.
          upload-snapshots: true
6 changes: 5 additions & 1 deletion DESCRIPTION
Original file line number Diff line number Diff line change
Expand Up @@ -23,5 +23,9 @@ Imports:
utils
Suggests:
knitr,
rmarkdown
rmarkdown,
spelling,
testthat (>= 3.0.0)
VignetteBuilder: knitr
Config/testthat/edition: 3
Language: en-US
2 changes: 2 additions & 0 deletions NAMESPACE
Original file line number Diff line number Diff line change
@@ -1,12 +1,14 @@
# Generated by roxygen2: do not edit by hand

export(chat)
export(chat_history)
export(copy_model)
export(create_model)
export(delete_model)
export(embed_text)
export(list_models)
export(new_chat)
export(ping_ollama)
export(pull_model)
export(query)
export(show_model)
24 changes: 18 additions & 6 deletions R/chat.r
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#' Chat with a LLM through Oolama
#' Chat with a LLM through Ollama
#'
#' @details `query` sends a single question to the API, without knowledge about
#' previous questions (only the config message is relevant). `chat` treats new
Expand All @@ -10,7 +10,7 @@
#' Default is "llama2". Set option(rollama_model = "modelname") to change
#' default for the current session. See \link{pull_model} for more details.
#' @param screen Logical. Should the answer be printed to the screen.
#' @param server URL to an Oolama server (not the API). Defaults to
#' @param server URL to an Ollama server (not the API). Defaults to
#' "http://localhost:11434".
#'
#' @return an httr2 response
Expand Down Expand Up @@ -41,7 +41,7 @@ query <- function(q,
msg <- q
if (!"user" %in% msg$role && nchar(msg$content) > 0)
cli::cli_abort(paste("If you supply a conversation object, it needs at",
"least one user message. See {.help query}."))
"least one user message. See {.help query}."))
}

resp <- build_req(model = model, msg = msg, server = server)
Expand All @@ -58,16 +58,15 @@ chat <- function(q,
server = NULL) {

config <- getOption("rollama_config", default = NULL)
hist <- c(rbind(the$prompts, the$responses))
hist <- chat_history()

# save prompt
the$prompts <- c(the$prompts, q)

msg <- do.call(rbind, (list(
if (!is.null(config)) data.frame(role = "system",
content = config),
if (length(hist) > 0) data.frame(role = c("user", "assistant"),
content = hist),
if (length(hist) > 0) hist,
data.frame(role = "user", content = q)
)))
resp <- query(q = msg, model = model, screen = screen, server = server)
Expand All @@ -89,3 +88,16 @@ new_chat <- function() {
the$responses <- NULL
the$prompts <- NULL
}


#' Chat history
#'
#' Retrieve the messages exchanged in the current chat session (prompts sent
#' via \code{chat} and the model's responses) in chronological order.
#'
#' @return A tibble with one row per message and columns \code{role}
#'   (\code{"user"} or \code{"assistant"}) and \code{content} (the message
#'   text). An empty tibble with the same columns if no messages were
#'   exchanged yet.
#' @export
chat_history <- function() {
  # interleave prompts and responses: q1, a1, q2, a2, ...
  hist <- c(rbind(the$prompts, the$responses))
  if (length(hist) > 0L) {
    tibble::as_tibble(data.frame(
      # rep_len keeps roles aligned even when the history has an odd number
      # of entries (a prompt whose response has not arrived yet); plain
      # recycling in data.frame() would error in that case
      role = rep_len(c("user", "assistant"), length(hist)),
      content = hist
    ))
  } else {
    # return a consistent schema even when the history is empty, so callers
    # can always rely on the role/content columns being present
    tibble::tibble(role = character(), content = character())
  }
}
7 changes: 4 additions & 3 deletions R/models.r
Original file line number Diff line number Diff line change
Expand Up @@ -81,9 +81,9 @@ show_model <- function(model = NULL, server = NULL) {
#'
#' @examples
#' modelfile <- system.file("extdata", "modelfile.txt", package = "rollama")
#' \dontrun(create_model("mario", modelfile))
#' \dontrun{create_model("mario", modelfile)}
#' modelfile <- "FROM llama2\nSYSTEM You are mario from Super Mario Bros."
#' \dontrun(create_model("mario", modelfile))
#' \dontrun{create_model("mario", modelfile)}
create_model <- function(model, modelfile, server = NULL) {

if (is.null(server)) server <- getOption("rollama_server",
Expand Down Expand Up @@ -142,7 +142,8 @@ copy_model <- function(model,

httr2::request(server) |>
httr2::req_url_path_append("/api/copy") |>
httr2::req_body_json(list(name = model)) |>
httr2::req_body_json(list(source = model,
destination = destination)) |>
httr2::req_error(body = function(resp) httr2::resp_body_json(resp)$error) |>
httr2::req_perform()

Expand Down
52 changes: 45 additions & 7 deletions R/utils.r
Original file line number Diff line number Diff line change
@@ -1,11 +1,44 @@
# package environment
the <- new.env()

#' Ping server to see if Ollama is reachable
#'
#' @inheritParams query
#' @param silent Logical. Should status messages be suppressed? Defaults to
#'   \code{FALSE}.
#'
#' @return Invisibly, \code{TRUE} if the server responded, \code{FALSE}
#'   otherwise.
#' @export
ping_ollama <- function(server = NULL, silent = FALSE) {

  if (is.null(server)) server <- getOption("rollama_server",
                                           default = "http://localhost:11434")
  # wrap the request in try() so an unreachable server yields FALSE rather
  # than aborting the calling code
  res <- try({
    httr2::request(server) |>
      httr2::req_perform() |>
      httr2::resp_body_string()
  }, silent = TRUE)

  if (!inherits(res, "try-error")) {
    if (!silent) cli::cli_progress_message(
      "{cli::col_green(cli::symbol$play)} {res} at {.url {server}}!"
    )
    invisible(TRUE)
  } else {
    if (!silent) {
      cli::cli_alert_danger("Could not connect to Ollama at {.url {server}}")
      # print the captured error so users can see why the connection failed
      print(res)
    }
    invisible(FALSE)
  }

}


build_req <- function(model, msg, server) {

if (is.null(model)) model <- getOption("rollama_model", default = "llama2")
if (is.null(server)) server <- getOption("rollama_server", default = "http://localhost:11434")
spinner <- getOption("rollama_spinner", default = interactive())
if (is.null(server)) server <- getOption("rollama_server",
default = "http://localhost:11434")
spinner <- getOption("rollama_verbose", default = interactive())

req_data <- list(model = model,
messages = msg,
Expand Down Expand Up @@ -60,8 +93,9 @@ screen_answer <- function(x) {

# function to display progress in streaming operations
pgrs <- function(resp) {

if (!getOption("rollama_verbose", default = interactive())) return(TRUE)
the$str_prgs$stream_resp <- c(the$str_prgs$stream_resp, resp)
x <<- the$str_prgs$stream_resp
resp <- the$str_prgs$stream_resp

status <- strsplit(rawToChar(resp), "\n")[[1]] |>
Expand All @@ -85,10 +119,14 @@ pgrs <- function(resp) {
the$str_prgs$total <- purrr::pluck(s, "total", .default = 0L)
the$str_prgs$done_pct <-
paste(round(the$str_prgs$done / the$str_prgs$total * 100, 0), "%")
the$str_prgs$speed <-
prettyunits::pretty_bytes(
the$str_prgs$done /
(as.integer(Sys.time()) - as.integer(the$str_prgs$pb_start)))
if (the$str_prgs$done != the$str_prgs$total) {
the$str_prgs$speed <-
prettyunits::pretty_bytes(
the$str_prgs$done /
(as.integer(Sys.time()) - as.integer(the$str_prgs$pb_start))
)
} else the$str_prgs$speed <- 1L

if (!isTRUE(the$str_prgs$pb == the$str_prgs$f)) {
cli::cli_progress_bar(
name = the$str_prgs$f,
Expand Down
1 change: 1 addition & 0 deletions README.Rmd
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ knitr::opts_chunk$set(
<!-- badges: start -->
[![Lifecycle: experimental](https://img.shields.io/badge/lifecycle-experimental-orange.svg)](https://lifecycle.r-lib.org/articles/stages.html#experimental)
[![say-thanks](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/JBGruber)
[![R-CMD-check](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml)
<!-- badges: end -->

The goal of `rollama` is to wrap the Ollama API, which allows you to run different LLMs locally and create an experience similar to ChatGPT/OpenAI's API.
Expand Down
5 changes: 2 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,8 @@
# `rollama`

<!-- badges: start -->

[![Lifecycle:
experimental](https://img.shields.io/badge/lifecycle-experimental-orange.svg)](https://lifecycle.r-lib.org/articles/stages.html#experimental)
[![Lifecycle: experimental](https://img.shields.io/badge/lifecycle-experimental-orange.svg)](https://lifecycle.r-lib.org/articles/stages.html#experimental)
[![R-CMD-check](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/JBGruber/rollama/actions/workflows/R-CMD-check.yaml)
[![say-thanks](https://img.shields.io/badge/Say%20Thanks-!-1EAEDB.svg)](https://saythanks.io/to/JBGruber)
<!-- badges: end -->

Expand Down
19 changes: 19 additions & 0 deletions inst/WORDLIST
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
ChatGPT
Embeddings
LLM
LLMs
Lifecycle
Modelfile
Ollama
OpenAI's
OpenAI’s
Reichardt
config
embeddings
http
httr
modelfile
modelname
orca
paramters
tibble
14 changes: 14 additions & 0 deletions man/chat_history.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 3 additions & 3 deletions man/create_model.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion man/embed_text.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

18 changes: 18 additions & 0 deletions man/ping_ollama.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 1 addition & 3 deletions man/pull_model.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 3 additions & 3 deletions man/query.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions tests/spelling.R
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Spell-check the package documentation and vignettes as part of the test
# suite. Only runs when the optional 'spelling' package is installed;
# error = FALSE reports misspellings without failing R CMD check, and
# skip_on_cran = TRUE prevents the check from running on CRAN machines.
if (requireNamespace("spelling", quietly = TRUE)) {
  spelling::spell_check_test(
    vignettes = TRUE, error = FALSE,
    skip_on_cran = TRUE
  )
}
Loading

0 comments on commit 80f44fa

Please sign in to comment.