initialize.py (forked from frdel/agent-zero)
import models
from agent import AgentConfig


def initialize():

    # main chat model used by agents (smarter, more accurate)
    chat_llm = models.get_openai_chat(model_name="gpt-4o-mini", temperature=0)
    # chat_llm = models.get_ollama_chat(model_name="gemma2:latest", temperature=0)
    # chat_llm = models.get_lmstudio_chat(model_name="lmstudio-community/Meta-Llama-3.1-8B-Instruct-GGUF", temperature=0)
    # chat_llm = models.get_openrouter_chat(model_name="mattshumer/reflection-70b:free")
    # chat_llm = models.get_azure_openai_chat(deployment_name="gpt-4o-mini", temperature=0)
    # chat_llm = models.get_anthropic_chat(model_name="claude-3-5-sonnet-20240620", temperature=0)
    # chat_llm = models.get_google_chat(model_name="gemini-1.5-flash", temperature=0)
    # chat_llm = models.get_groq_chat(model_name="llama-3.1-70b-versatile", temperature=0)

    # utility model used for helper functions (cheaper, faster)
    utility_llm = chat_llm  # change if you want to use a different utility model

    # embedding model used for memory
    embedding_llm = models.get_openai_embedding(model_name="text-embedding-3-small")
    # embedding_llm = models.get_ollama_embedding(model_name="nomic-embed-text")
    # embedding_llm = models.get_huggingface_embedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
    # embedding_llm = models.get_lmstudio_embedding(model_name="nomic-ai/nomic-embed-text-v1.5-GGUF")

    # agent configuration
    config = AgentConfig(
        chat_model = chat_llm,
        utility_model = utility_llm,
        embeddings_model = embedding_llm,
        # prompts_subdir = "",
        # memory_subdir = "",
        # knowledge_subdir = "",
        auto_memory_count = 0,
        # auto_memory_skip = 2,
        # rate_limit_seconds = 60,
        rate_limit_requests = 15,
        # rate_limit_input_tokens = 0,
        # rate_limit_output_tokens = 0,
        # msgs_keep_max = 25,
        # msgs_keep_start = 5,
        # msgs_keep_end = 10,
        max_tool_response_length = 3000,
        # response_timeout_seconds = 60,
        code_exec_docker_enabled = True,
        # code_exec_docker_name = "agent-zero-exe",
        # code_exec_docker_image = "frdel/agent-zero-exe:latest",
        # code_exec_docker_ports = { "22/tcp": 50022 },
        # code_exec_docker_volumes = { files.get_abs_path("work_dir"): {"bind": "/root", "mode": "rw"} },
        code_exec_ssh_enabled = True,
        # code_exec_ssh_addr = "localhost",
        # code_exec_ssh_port = 50022,
        # code_exec_ssh_user = "root",
        # code_exec_ssh_pass = "toor",
        # additional = {},
    )

    # return config object
    return config
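
For context, a minimal sketch of how the returned AgentConfig might be consumed elsewhere in the project. The Agent import and its constructor arguments (number, config) are assumptions for illustration, not defined in this file; adjust them to the actual agent module in the repository.

    # hypothetical usage sketch -- Agent and its constructor signature are assumed,
    # not taken from initialize.py
    from agent import Agent
    from initialize import initialize

    config = initialize()                    # build the AgentConfig shown above
    agent0 = Agent(number=0, config=config)  # hand the config to the first agent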