Skip to content
This repository has been archived by the owner on Sep 12, 2024. It is now read-only.

Commit

Permalink
some fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
SeeknnDestroy committed Nov 9, 2023
1 parent cd68030 commit cbd318d
Show file tree
Hide file tree
Showing 3 changed files with 16 additions and 24 deletions.
11 changes: 5 additions & 6 deletions autollm/auto/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,9 +12,9 @@ class AutoLiteLLM:
@staticmethod
def from_defaults(
model: str = "gpt-3.5-turbo",
api_base: Optional[str] = None,
max_tokens: Optional[int] = None,
temperature: float = 0.1) -> LLM:
max_tokens: Optional[int] = 256,
temperature: float = 0.1,
api_base: Optional[str] = None) -> LLM:
"""
Create any LLM by model name. Check https://docs.litellm.ai/docs/providers for a list of
supported models.
Expand All @@ -25,13 +25,12 @@ def from_defaults(
Parameters:
model: Name of the LLM model to be initialized. Check
https://docs.litellm.ai/docs/providers for a list of supported models.
api_base: The API base URL to use for the LLM.
max_tokens: The maximum number of tokens to generate by the LLM.
temperature: The temperature to use when sampling from the distribution.
**kwargs: Arbitrary keyword arguments.
api_base: The API base URL to use for the LLM.
Returns:
LLM: The initialized LiteLLM instance for given model name and parameter set.
"""

return LiteLLM(model=model, api_base=api_base, max_tokens=max_tokens, temperature=temperature)
return LiteLLM(model=model, max_tokens=max_tokens, temperature=temperature, api_base=api_base)
4 changes: 2 additions & 2 deletions autollm/auto/query_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,9 @@ def create_query_engine(
documents: Optional[Sequence[Document]] = None,
# llm_params
llm_model: str = "gpt-3.5-turbo",
llm_api_base: Optional[str] = None,
llm_max_tokens: Optional[int] = None,
llm_max_tokens: Optional[int] = 256,
llm_temperature: float = 0.1,
llm_api_base: Optional[str] = None,
# service_context_params
system_prompt: str = None,
query_wrapper_prompt: str = None,
Expand Down
25 changes: 9 additions & 16 deletions tests/config.yaml
Original file line number Diff line number Diff line change
@@ -1,24 +1,17 @@
# config.example.yaml
tasks:
- name: "summarize"
llm_model: "gpt-3.5-turbo"
llm_max_tokens: 1028
llm_temperature: 0.1
    system_prompt: "You are a friendly chatbot that can summarize documents." # System prompt for this task
vector_store_params:
vector_store_type: "SimpleVectorStore"
llm_params:
model: "gpt-3.5-turbo"
service_context_params:
chunk_size: 1024
query_engine_params:
similarity_top_k: 5
enable_cost_calculator: true
embed_model: "default"
chunk_size: 512
chunk_overlap: 64
context_window: 2048
similarity_top_k: 2
vector_store_type: "SimpleVectorStore"
- name: "qa"
system_prompt: "You are a friendly chatbot that can answer questions." # System prompt for this task
vector_store_params:
vector_store_type: "SimpleVectorStore"
llm_params:
model: "gpt-3.5-turbo"
service_context_params:
chunk_size: 1024
query_engine_params:
similarity_top_k: 3
enable_cost_calculator: false

0 comments on commit cbd318d

Please sign in to comment.