diff --git a/examples/llm_ollama.rs b/examples/llm_ollama.rs
index 8ccfda4c..b6b94878 100644
--- a/examples/llm_ollama.rs
+++ b/examples/llm_ollama.rs
@@ -4,7 +4,7 @@ use langchain_rust::{language_models::llm::LLM, llm::ollama::client::Ollama};
 #[cfg(feature = "ollama")]
 #[tokio::main]
 async fn main() {
-    let ollama = Ollama::default().with_model("llama3");
+    let ollama = Ollama::default().with_model("llama3.2");

     let response = ollama.invoke("Hi").await.unwrap();
     println!("{}", response);
diff --git a/src/llm/ollama/client.rs b/src/llm/ollama/client.rs
index 80a23151..a970d7fc 100644
--- a/src/llm/ollama/client.rs
+++ b/src/llm/ollama/client.rs
@@ -24,8 +24,8 @@ pub struct Ollama {
     pub(crate) options: Option<GenerationOptions>,
 }

-/// [llama3](https://ollama.com/library/llama3) is a 8B parameters, 4.7GB model.
-const DEFAULT_MODEL: &str = "llama3";
+/// [llama3.2](https://ollama.com/library/llama3.2) is a 3B-parameter, 2.0GB model.
+const DEFAULT_MODEL: &str = "llama3.2";

 impl Ollama {
     pub fn new<S: Into<String>>(
@@ -152,7 +152,7 @@ mod tests {
     #[tokio::test]
     #[ignore]
     async fn test_generate() {
-        let ollama = Ollama::default().with_model("llama3");
+        let ollama = Ollama::default().with_model("llama3.2");
         let response = ollama.invoke("Hey Macarena, ay").await.unwrap();
         println!("{}", response);
     }
@@ -160,7 +160,7 @@ mod tests {
     #[tokio::test]
     #[ignore]
     async fn test_stream() {
-        let ollama = Ollama::default().with_model("llama3");
+        let ollama = Ollama::default().with_model("llama3.2");
         let message = Message::new_human_message("Why does water boil at 100 degrees?");

         let mut stream = ollama.stream(&vec![message]).await.unwrap();
diff --git a/src/llm/ollama/openai.rs b/src/llm/ollama/openai.rs
index 0c4ea8dd..aaf3d10f 100644
--- a/src/llm/ollama/openai.rs
+++ b/src/llm/ollama/openai.rs
@@ -12,7 +12,7 @@ const OLLAMA_API_BASE: &str = "http://localhost:11434/v1";
 /// ## Example
 ///
 /// ```rs
-/// let ollama = OpenAI::new(OllamaConfig::default()).with_model("llama3");
+/// let ollama = OpenAI::new(OllamaConfig::default()).with_model("llama3.2");
 /// let response = ollama.invoke("Say hello!").await.unwrap();
 /// ```
 #[derive(Clone, Debug, Deserialize)]
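
For reference, a minimal end-to-end sketch of what the `DEFAULT_MODEL` bump means for callers (my own illustration, not part of the patch): since `Ollama::default()` now targets `llama3.2`, the explicit `with_model` call becomes optional and is only needed to override the default. This assumes a local Ollama daemon is running and the model has already been pulled, e.g. with `ollama pull llama3.2`.

```rust
use langchain_rust::{language_models::llm::LLM, llm::ollama::client::Ollama};

#[cfg(feature = "ollama")]
#[tokio::main]
async fn main() {
    // After this patch, the default model is "llama3.2", so no explicit
    // .with_model("llama3.2") call is required; with_model remains
    // available to select a different model.
    let ollama = Ollama::default();

    let response = ollama.invoke("Hi").await.unwrap();
    println!("{}", response);
}

// Fallback so the example still compiles when the "ollama" feature is off.
#[cfg(not(feature = "ollama"))]
fn main() {}
```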