diff --git a/assistants/prospector-assistant/uv.lock b/assistants/prospector-assistant/uv.lock index bdfbd9dc..e4035b92 100644 --- a/assistants/prospector-assistant/uv.lock +++ b/assistants/prospector-assistant/uv.lock @@ -2191,6 +2191,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -2206,6 +2207,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/assistants/skill-assistant/assistant/skill_assistant.py b/assistants/skill-assistant/assistant/skill_assistant.py index 2e5d33cf..bf124fac 100644 --- a/assistants/skill-assistant/assistant/skill_assistant.py +++ b/assistants/skill-assistant/assistant/skill_assistant.py @@ -218,31 +218,31 @@ async def get_or_register_assistant( chat_driver_config=chat_driver_config, drive_root=assistant_drive_root, metadata_drive_root=assistant_metadata_drive_root, - skills={ - "common": CommonSkillDefinition( + skills=[ + CommonSkillDefinition( name="common", language_model=language_model, drive=assistant_drive.subdrive("common"), chat_driver_config=chat_driver_config, ), - "posix": PosixSkillDefinition( + PosixSkillDefinition( name="posix", sandbox_dir=Path(".data") / conversation_context.id, - chat_driver_config=chat_driver_config, mount_dir="/mnt/data", + chat_driver_config=chat_driver_config, ), - # "form_filler": FormFillerSkill( + # FormFillerSkill( # name="form_filler", # chat_driver_config=chat_driver_config, # language_model=language_model, # ), - "guided_conversation": GuidedConversationSkillDefinition( + GuidedConversationSkillDefinition( name="guided_conversation", language_model=language_model, drive=assistant_drive.subdrive("guided_conversation"), chat_driver_config=chat_driver_config, ), - }, + ], ) await assistant_registry.register_assistant(assistant, SkillEventMapper(conversation_context)) diff --git a/assistants/skill-assistant/uv.lock b/assistants/skill-assistant/uv.lock index 9d2fdf39..2884ea4d 100644 --- a/assistants/skill-assistant/uv.lock +++ b/assistants/skill-assistant/uv.lock @@ -809,6 +809,9 @@ requires-dist = [ dev = [ { name = "ipykernel", specifier = ">=6.29.4" }, { name = "pyright", specifier = ">=1.1.389" }, + { name = "pytest", specifier = ">=8.3.1" }, + { name = "pytest-asyncio", specifier = ">=0.23.8" }, + { name = "pytest-repeat", specifier = ">=0.9.3" }, ] [[package]] @@ -2219,6 +2222,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -2234,6 +2238,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/events/events/__init__.py b/libraries/python/events/events/__init__.py index 49b15a9b..977301bc 100644 --- a/libraries/python/events/events/__init__.py +++ b/libraries/python/events/events/__init__.py @@ -1,11 +1,21 @@ -from .events import EventProtocol, TEvent, BaseEvent, 
InformationEvent, ErrorEvent, StatusUpdatedEvent, MessageEvent +from .events import ( + BaseEvent, + ErrorEvent, + EventProtocol, + InformationEvent, + MessageEvent, + NoticeEvent, + StatusUpdatedEvent, + TEvent, +) __all__ = [ - "EventProtocol", - "TEvent", "BaseEvent", - "InformationEvent", "ErrorEvent", - "StatusUpdatedEvent", + "EventProtocol", + "InformationEvent", "MessageEvent", + "NoticeEvent", + "StatusUpdatedEvent", + "TEvent", ] diff --git a/libraries/python/openai-client/openai_client/chat_driver/chat_driver.py b/libraries/python/openai-client/openai_client/chat_driver/chat_driver.py index 634f5bd1..68905511 100644 --- a/libraries/python/openai-client/openai_client/chat_driver/chat_driver.py +++ b/libraries/python/openai-client/openai_client/chat_driver/chat_driver.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import Any, Callable, Union -from events import BaseEvent, ErrorEvent, MessageEvent +from events import BaseEvent, ErrorEvent, InformationEvent, MessageEvent from openai import AsyncAzureOpenAI, AsyncOpenAI from openai.types.chat import ( ChatCompletionMessageParam, @@ -153,9 +153,9 @@ async def respond( command_string = message[1:] try: results = await self.command_list.execute_function_string(command_string, string_response=True) - return MessageEvent(message=results) + return InformationEvent(message=results) except Exception as e: - return ErrorEvent(message=f"Error! {e}", metadata={"error": str(e)}) + return InformationEvent(message=f"Error! {e}", metadata={"error": str(e)}) # If not a command, add the message to the history. if message is not None: diff --git a/libraries/python/openai-client/openai_client/tools.py b/libraries/python/openai-client/openai_client/tools.py index e63eb13f..1ba01a9d 100644 --- a/libraries/python/openai-client/openai_client/tools.py +++ b/libraries/python/openai-client/openai_client/tools.py @@ -286,12 +286,12 @@ async def execute_function_string(self, function_string: str, string_response: b try: function, args, kwargs = self.parse_function_string(function_string) except ValueError as e: - raise ValueError(f"{e}. Type: `/help` for more information.") + raise ValueError(f"{e} Type: `/help` for more information.") if not function: raise ValueError("Function not found in registry. 
Type: `/help` for more information.") - response = await function.execute(*args, **kwargs) + result = await function.execute(*args, **kwargs) if string_response: - return to_string(response) + return to_string(result) def parse_function_string(self, function_string: str) -> tuple[ToolFunction | None, list[Any], dict[str, Any]]: """Parse a function call string into a function and its arguments.""" diff --git a/libraries/python/skills/notebooks/uv.lock b/libraries/python/skills/notebooks/uv.lock index 42c7c9d3..c46a20d2 100644 --- a/libraries/python/skills/notebooks/uv.lock +++ b/libraries/python/skills/notebooks/uv.lock @@ -1853,6 +1853,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -1868,6 +1869,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skill-library/README.md b/libraries/python/skills/skill-library/README.md index 1e58128a..a5ea8b5d 100644 --- a/libraries/python/skills/skill-library/README.md +++ b/libraries/python/skills/skill-library/README.md @@ -5,7 +5,7 @@ does this through the concept of a "skill". Think of a skill as a package of assistant capabilities. A skill can contain "actions" that an assistant can perform and "routines" that are entire -procedures that an assistant can run. +procedures, made up of actions, that an assistant can run. A demonstration [Posix skill](../skills/posix-skill/README.md) is provided that makes these more clear. Various actions are provided in the skill that provide @@ -18,34 +18,12 @@ cook you a meal. The chef would be skilled at actions in the kitchen (like chopping or mixing or frying) but would also be able to perform full routines (recipes), allowing them to make particular dishes according to your preferences. -In a way, this whole library was set up to be able to experiment with _routines_ -more easily: - -- This library hides a lot of the complexity of developing multi-layered - assistants by providing clearer purposeful abstractions and better defining or - disambiguating commonly confused terms. For example, we separate out a lot of - the complexity of interacting with the OpenAI Chat Completion API with the - [chat driver](../../openai-client/openai_client/chat_driver/README.md) - abstraction and we now distinguish between chat commands, chat tool functions, - and routine actions in a clear way, even though they're really all just - functions. -- Routines (formerly referred to as "Recipes") make it clear that what we are - developing agents that can automate productive work collaboratively with the - user. We have several ideas here, from simply following a set of steps, to - being able to run Pythonic programs skill actions, to much more fully managed - routine running with LLM-driven meta-cognitive execution (having the LLM - monitor progress and modify the routines as necessary). - -Currently we provide one functional routine runner implementation, the -[InstructionRoutineRunner](./skill_library/routine_runners/instruction_routine_runner.py), -but will be adding several more in the upcoming weeks. 
-
 ## Combining skills in the assistant
 
 This library provides an [Assistant](./skill_library/assistant.py) class that
 allows you to configure the conversational assistant (relying on our [chat
-driver](../../chat-driver/README.md) library) and the skills that the
-assistant should have.
+driver](../../openai-client/openai_client/chat_driver/README.md) library) and
+the skills that the assistant should have.
 
 Oftentimes, a truly capable assistant will need to have many skills.
 Additionally, some skills are dependent upon other skills. When you register
@@ -67,29 +45,25 @@ assistant allowing it to be exposed as an assistant in the workbench. See our
 Assistant](../../../../assistants/skill-assistant/README.md) package that does
 exactly this.
 
-In the future, individual conversations might be handled in this library as
-well.
+## Routines
 
-## Context
-
-This library uses the same [Context](../../context/README.md) library
-as the [chat driver](../../chat-driver/README.md) library. This allows
-you to instantiate a Context object for the assistant and have it automatically
-passed into all assistant's actions and routines. This is especially helpful in
-(1) setting the session id for all parts of the system (allowing them all to
-share state in external state stores) and (2) passing and `emit` function that
-all the parts can use to send events back up to the assistant for consistent
-handling.
-
-## More about Routines
-
-### Experimentation
-
-As mentioned above, one of the main purposes of this library is to make it
-possible for an assistant to run a routine.
+This whole library was set up to be able to experiment with _routines_
+more easily:
 
-We are currently investigating different kinds of routine specifications and
-ways of executing them.
+- This library hides a lot of the complexity of developing multi-layered
+  assistants by providing clearer purposeful abstractions and better defining or
+  disambiguating commonly confused terms. For example, we separate out a lot of
+  the complexity of interacting with the OpenAI Chat Completion API with the
+  [chat driver](../../openai-client/openai_client/chat_driver/README.md)
+  abstraction and we now distinguish between chat commands, chat tool functions,
+  and routine actions in a clear way, even though they're really all just
+  functions.
+- Routines make it clear that we are developing agents that can automate
+  productive work collaboratively with the user. We have several ideas here,
+  from simply following a set of steps, to being able to run Pythonic programs
+  of skill actions, to much more fully managed routine running with LLM-driven
+  meta-cognitive execution (having the LLM monitor progress and modify the
+  routines as necessary).
 
 Currently we provide one functional routine runner implementation, the
 [InstructionRoutineRunner](./skill_library/routine_runners/instruction_routine_runner.py),
@@ -103,3 +77,34 @@ is not possible to simply instantiate a skill and run a routine within it (like
 you can do with a skill's action). Routines can only be run from an
 [Assistant](./skill_library/assistant.py) that has all dependent skills
 registered to it.
+
+
+## Run Context
+
+This library uses the same [Context](../../context/README.md) library as the
+[chat driver](../../openai-client/openai_client/chat_driver/README.md) library.
+This allows you to instantiate a Context object for the assistant and have it
+automatically passed into all of the assistant's actions and routines. This is
+especially helpful in (1) setting the session id for all parts of the system
+(allowing them all to share state in external state stores) and (2) passing an
+`emit` function that all the parts can use to send events back up to the
+assistant for consistent handling.
+
+## State
+
+### Drives
+
+### Assistant drive
+
+### Routine Stack state
+
+```python
+async with context.stack_frame_state() as state:
+```
+
+
+
+- Natural language (user understandability, generatability)
+- Metacognitive runners
+- Skills/routines w/ subroutines (composability)
+- Facilities (run_context, storage, com)
\ No newline at end of file
diff --git a/libraries/python/skills/skill-library/pyproject.toml b/libraries/python/skills/skill-library/pyproject.toml
index aa53671c..1cde726d 100644
--- a/libraries/python/skills/skill-library/pyproject.toml
+++ b/libraries/python/skills/skill-library/pyproject.toml
@@ -15,6 +15,7 @@ dependencies = [
     "pydantic-settings>=2.3.4",
     "pydantic>=2.6.1",
     "python-dotenv>=1.0.1",
+    "python-liquid>=1.12.1",
     "requests>=2.32.0",
     "tiktoken>=0.7.0",
 ]
diff --git a/libraries/python/skills/skill-library/pytest.ini b/libraries/python/skills/skill-library/pytest.ini
deleted file mode 100644
index 02bae23f..00000000
--- a/libraries/python/skills/skill-library/pytest.ini
+++ /dev/null
@@ -1,13 +0,0 @@
-# pytest.ini
-[pytest]
-minversion = 6.0
-addopts = -vv -rP
-pythonpath = .
-testpaths = **/tests
-filterwarnings =
-    ignore::DeprecationWarning
-    ignore::PendingDeprecationWarning
-asyncio_mode = auto
-log_cli = true
-log_cli_level = INFO
-log_cli_format = %(asctime)s | %(levelname)-7s | %(name)s | %(message)s
diff --git a/libraries/python/skills/skill-library/skill_library/__init__.py b/libraries/python/skills/skill-library/skill_library/__init__.py
index 46112158..d157cebe 100644
--- a/libraries/python/skills/skill-library/skill_library/__init__.py
+++ b/libraries/python/skills/skill-library/skill_library/__init__.py
@@ -3,12 +3,13 @@
 from .actions import ActionCallable
 from .assistant import Assistant
 from .chat_driver_helpers import ChatDriverFunctions
-from .routine import InstructionRoutine, ProgramRoutine, RoutineTypes, StateMachineRoutine
+from .routine import ActionListRoutine, InstructionRoutine, ProgramRoutine, RoutineTypes, StateMachineRoutine
 from .run_context import RunContext, RunContextProvider
 from .skill import EmitterType, Skill, SkillDefinition
 
 __all__ = [
     "ActionCallable",
+    "ActionListRoutine",
     "Assistant",
     "ChatDriverFunctions",
     "Context",
diff --git a/libraries/python/skills/skill-library/skill_library/actions.py b/libraries/python/skills/skill-library/skill_library/actions.py
index ff843a8f..fca1b776 100644
--- a/libraries/python/skills/skill-library/skill_library/actions.py
+++ b/libraries/python/skills/skill-library/skill_library/actions.py
@@ -1,9 +1,9 @@
-import ast
 import inspect
 from dataclasses import dataclass
 from typing import Any, Protocol
 
 from .run_context import RunContext, RunContextProvider
+from .utilities import parse_command_string
 
 
 class ActionCallable(Protocol):
@@ -90,7 +90,7 @@ def usage(self) -> Usage:
         """
         return Usage(name=self.name, parameters=self.parameters(exclude=["run_context"]), description=self.description)
 
-    async def execute(self, run_context: RunContext, *args, **kwargs) -> Any:
+    async def execute(self, run_context: RunContext, *args: Any, **kwargs: Any) -> Any:
         """
         Run this action, and return its value. If the function is a coroutine,
         it will be awaited.
@@ -196,60 +196,10 @@ def parse_action_string(self, action_string: str) -> tuple[Action | None, list[A # TODO: If used in routines, need to handle skill namespacing (designations). - # As a convenience, add parentheses if they are missing. - if " " not in action_string and "(" not in action_string: - action_string += "()" - - # Parse the string into an AST (Abstract Syntax Tree) - try: - tree = ast.parse(action_string) - except SyntaxError: - raise ValueError("Invalid function call. Please check your syntax.") - - # Ensure the tree contains exactly one expression (the function call) - if not (isinstance(tree, ast.Module) and len(tree.body) == 1 and isinstance(tree.body[0], ast.Expr)): - raise ValueError("Expected a single function call.") - - # The function call is stored as a `Call` node within the expression - call_node = tree.body[0].value - if not isinstance(call_node, ast.Call): - raise ValueError("Invalid function call. Please check your syntax.") - - # Extract the function name - if isinstance(call_node.func, ast.Name): - action_name = call_node.func.id - else: - raise ValueError("Unsupported function format. Please check your syntax.") - - # Helper function to evaluate AST nodes to their Python equivalent - def eval_node(node): - if isinstance(node, ast.Constant): - return node.value - elif isinstance(node, ast.List): - return [eval_node(elem) for elem in node.elts] - elif isinstance(node, ast.Tuple): - return tuple(eval_node(elem) for elem in node.elts) - elif isinstance(node, ast.Dict): - return {eval_node(key): eval_node(value) for key, value in zip(node.keys, node.values)} - elif isinstance(node, ast.Name): - return node.id # This can return variable names, but we assume they're constants - elif isinstance(node, ast.BinOp): # Handling arithmetic expressions - return eval(compile(ast.Expression(node), filename="", mode="eval")) - elif isinstance(node, ast.Call): - raise ValueError("Nested function calls are not supported.") - else: - raise ValueError(f"Unsupported AST node type: {type(node).__name__}") - - # Extract positional arguments - args = [eval_node(arg) for arg in call_node.args] - - # Extract keyword arguments - kwargs = {} - for kw in call_node.keywords: - kwargs[kw.arg] = eval_node(kw.value) + action_name, args, kwargs = parse_command_string(action_string) action = self.get_action(action_name) if not action: return None, [], {} - return action, args, kwargs + return action, list(args), kwargs diff --git a/libraries/python/skills/skill-library/skill_library/assistant.py b/libraries/python/skills/skill-library/skill_library/assistant.py index 1eadc596..31a86332 100644 --- a/libraries/python/skills/skill-library/skill_library/assistant.py +++ b/libraries/python/skills/skill-library/skill_library/assistant.py @@ -34,7 +34,7 @@ def __init__( chat_driver_config: ChatDriverConfig, drive_root: PathLike | None = None, metadata_drive_root: PathLike | None = None, - skills: dict[str, SkillDefinition] | None = None, + skills: list[SkillDefinition] | None = None, startup_action: str | None = None, startup_routine: str | None = None, startup_args: tuple = (), @@ -245,7 +245,7 @@ def list_routines(self) -> list[str]: """Lists all the routines the assistant is able to perform.""" return self.skill_registry.list_routines() if self.skill_registry else [] - async def run_routine(self, name: str, *args, **kwargs) -> Any: + async def run_routine(self, name: str, *args: Any, **kwargs: Any) -> Any: """ Run an assistant routine by name (e.g. .). 
""" @@ -257,13 +257,15 @@ def list_actions(self) -> list[str]: """Lists all the actions the assistant is able to perform.""" return self.skill_registry.list_actions() if self.skill_registry else [] - def run_action(self, designation: str, *args, **kwargs) -> Any: + async def run_action(self, designation: str, *args: Any, **kwargs: Any) -> Any: """ Run an assistant action by name (e.g. .). """ if not self.skill_registry: raise ValueError("No skill registry registered for this assistant.") - return self.skill_registry.run_action_by_designation(self.create_run_context(), designation, *args, **kwargs) + return await self.skill_registry.run_action_by_designation( + self.create_run_context(), designation, *args, **kwargs + ) async def step_active_routine(self, message: str) -> None: """Run another step in the current routine.""" diff --git a/libraries/python/skills/skill-library/skill_library/llm/__init__.py b/libraries/python/skills/skill-library/skill_library/llm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libraries/python/skills/skill-library/skill_library/llm/prompt.py b/libraries/python/skills/skill-library/skill_library/llm/prompt.py deleted file mode 100644 index ce12f7a2..00000000 --- a/libraries/python/skills/skill-library/skill_library/llm/prompt.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -These functions allow you to define a language model prompt in a simple template -language. This makes it simpler to write a prompt in plain text vs assembling a -prompt message by message in code. - -Your prompt spec can have any number of `--SYSTEM--`, `--HUMAN--`, or -`--ASSISTANT--` sections representing prompt messages. The messages contain text -that will be used as the content of the messages. You can use variables in any -of these sections by putting the name of variables in curly brackets. The -[../local/llm](invoke_llm) method will replace these with the value of the -variables you pass in. - -Example text that can be parsed into a PromptSpec: - ---SYSTEM-- -The assistant responds to a user like a {personality}. 
- ---USER-- -{user_message} - -""" - -from pathlib import Path -from typing import Any - -from openai.types.chat import ChatCompletionMessageParam -from pydantic import BaseModel, Field - - -class PromptSpec(BaseModel): - meta: dict[str, str] = Field(description="The meta information of the prompt.") - messages: list[tuple[str, str]] = Field(description="The dialogue of the prompt.") - - -def to_chat_completion_messages(template: str | Path, variables: dict[str, Any]) -> list[ChatCompletionMessageParam]: - if isinstance(template, str): - prompt_spec = parse_prompt_spec(template) - else: - prompt_spec = parse_prompt_file(template) - messages: list[ChatCompletionMessageParam] = [] - for role, text in prompt_spec.messages: - formatted_text = text.format(**variables) - if role == "system": - messages.append({"role": "system", "content": formatted_text}) - if role == "human": - messages.append({"role": "user", "content": formatted_text}) - if role == "assistant": - messages.append({"role": "assistant", "content": formatted_text}) - return messages - - -def parse_prompt_spec(prompt_spec: str) -> PromptSpec: - result = PromptSpec(meta={}, messages=[]) - current_section = None - current_text = [] - - def save_section() -> None: - text = "\n".join(current_text).strip() - if current_section == "meta": - meta_values = {} - for line in text.split("\n"): - key, value = line.split(":", 1) - meta_values[key.strip()] = value.strip() - result.meta = meta_values - elif current_section == "system": - result.messages.append(("system", text)) - elif current_section == "human": - result.messages.append(("human", text)) - elif current_section == "assistant": - result.messages.append(("assistant", text)) - - for line in prompt_spec.split("\n"): - line = line.strip() - if line == "--META--": - save_section() - current_text = [] - current_section = "meta" - continue - elif line == "--SYSTEM--": - save_section() - current_text = [] - current_section = "system" - continue - elif line in ["--HUMAN--", "--USER--"]: - save_section() - current_text = [] - current_section = "human" - continue - elif line in ["--ASSISTANT--", "--BOT--"]: - save_section() - current_text = [] - current_section = "assistant" - continue - - if current_section: - current_text.append(line) - - # Capture the last section - if current_section and current_text: - save_section() - - return result - - -def parse_prompt_file(file_path: Path) -> PromptSpec: - with open(file_path, "r", encoding="utf-8") as f: - return parse_prompt_spec(f.read()) diff --git a/libraries/python/skills/skill-library/skill_library/llm/utility.py b/libraries/python/skills/skill-library/skill_library/llm/utility.py deleted file mode 100644 index 96eaf719..00000000 --- a/libraries/python/skills/skill-library/skill_library/llm/utility.py +++ /dev/null @@ -1,72 +0,0 @@ -import logging -from typing import Iterable - -import tiktoken -from openai.types.chat import ChatCompletionMessageParam - - -logger = logging.getLogger(__name__) - - -def get_token_count(model: str, string: str) -> int: - encoding = tiktoken.encoding_for_model(model) - return len(encoding.encode(string)) - - -async def limit_messages_by_token_count( - messages: list[ChatCompletionMessageParam], - model: str, - max_message_tokens: int, -) -> list[ChatCompletionMessageParam]: - if len(messages) == 0: - return [] - current_tokens = 0 - history_messages: list[ChatCompletionMessageParam] = [] - for message in reversed(messages): - message_tokens = get_token_count(model=model, string=str(message.get("content"))) - 
current_tokens += message_tokens - if current_tokens > max_message_tokens: - break - history_messages.append(message) - history_messages.reverse() - return history_messages - - -def num_tokens_from_messages(messages: Iterable[ChatCompletionMessageParam], model: str) -> int: - """Return the number of tokens used by a list of messages.""" - try: - encoding = tiktoken.encoding_for_model(model) - except KeyError: - print("Warning: model not found. Using cl100k_base encoding.") - encoding = tiktoken.get_encoding("cl100k_base") - - if model in { - "gpt-3.5-turbo-0613", - "gpt-3.5-turbo-16k-0613", - "gpt-4-0314", - "gpt-4-32k-0314", - "gpt-4-0613", - "gpt-4-32k-0613", - }: - tokens_per_message = 3 - tokens_per_name = 1 - elif model == "gpt-3.5-turbo-0301": - tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n - tokens_per_name = -1 # if there's a name, the role is omitted - elif "gpt-3.5-turbo" in model: - logger.warning("gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613.") - return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613") - elif "gpt-4" in model: - logger.warning("gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.") - return num_tokens_from_messages(messages, model="gpt-4-0613") - else: - raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}.""") - num_tokens = 0 - for message in messages: - num_tokens += tokens_per_message - for key, value in message.items(): - num_tokens += len(encoding.encode(str(value))) - if key == "name": - num_tokens += tokens_per_name - num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> - return num_tokens diff --git a/libraries/python/skills/skill-library/skill_library/routine.py b/libraries/python/skills/skill-library/skill_library/routine.py deleted file mode 100644 index 8358e996..00000000 --- a/libraries/python/skills/skill-library/skill_library/routine.py +++ /dev/null @@ -1,101 +0,0 @@ -import re -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, Union - -from skill_library.run_context import RunContext - -if TYPE_CHECKING: - pass - - -def find_template_vars(text: str) -> list[str]: - """ - Find mustache template variables in a string. Variables will be - de-duplicated and returned in order of first appearance. 
- """ - matches = re.compile(r"\{\{([a-zA-Z0-9_]+)\}\}") - return list(set(matches.findall(text))) - - -class Routine: - def __init__( - self, - name: str, - skill_name: str, - description: str, - ) -> None: - self.name = name - self.skill_name = skill_name - self.description = description - - def fullname(self) -> str: - return f"{self.skill_name}.{self.name}" - - def __str__(self) -> str: - return self.fullname() - - -class InstructionRoutine(Routine): - def __init__( - self, - name: str, - skill_name: str, - description: str, - routine: str, - ) -> None: - super().__init__( - name=name, - skill_name=skill_name, - description=description, - ) - self.routine = routine - - def template_vars(self) -> list[str]: - return find_template_vars(self.routine) - - def __str__(self) -> str: - template_vars = find_template_vars(self.routine) - return f"{self.name}(vars: {template_vars}): {self.description}" - - -class ProgramRoutine(Routine): - def __init__( - self, - name: str, - skill_name: str, - description: str, - program: str, - ) -> None: - super().__init__( - name=name, - skill_name=skill_name, - description=description, - ) - self.program = program - - def __str__(self) -> str: - template_vars = find_template_vars(self.program) - return f"{self.name}(vars: {template_vars}): {self.description}" - - -class StateMachineRoutine(Routine): - def __init__( - self, - name: str, - skill_name: str, - description: str, - init_function: Callable[[RunContext, Any], Awaitable[Tuple[bool, Any]]], - step_function: Callable[[RunContext, Optional[str]], Awaitable[Tuple[bool, Any]]], - ) -> None: - super().__init__( - name=name, - skill_name=skill_name, - description=description, - ) - self.init_function = init_function - self.step_function = step_function - - def __str__(self) -> str: - return f"{self.name}: {self.description}" - - -RoutineTypes = Union[InstructionRoutine, ProgramRoutine, StateMachineRoutine] diff --git a/libraries/python/skills/skill-library/skill_library/routine/__init__.py b/libraries/python/skills/skill-library/skill_library/routine/__init__.py new file mode 100644 index 00000000..d9708f21 --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/routine/__init__.py @@ -0,0 +1,18 @@ +from typing import Union + +from .action_list_routine import ActionListRoutine +from .instruction_routine import InstructionRoutine +from .program_routine import ProgramRoutine +from .routine import Routine +from .state_machine_routine import StateMachineRoutine + +RoutineTypes = Union[ActionListRoutine, InstructionRoutine, ProgramRoutine, StateMachineRoutine] + +__all__ = [ + "ActionListRoutine", + "InstructionRoutine", + "ProgramRoutine", + "StateMachineRoutine", + "Routine", + "RoutineTypes", +] diff --git a/libraries/python/skills/skill-library/skill_library/routine/action_list_routine.py b/libraries/python/skills/skill-library/skill_library/routine/action_list_routine.py new file mode 100644 index 00000000..8ddf898f --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/routine/action_list_routine.py @@ -0,0 +1,84 @@ +from typing import Any + +from liquid import Template + +from ..utilities import find_template_vars, parse_command_string, parse_template +from .routine import Routine + + +def format_with_liquid(template: str, vars: dict[str, Any]) -> str: + """ + Format a string with the given variables using the Liquid template engine. 
+ """ + if not vars: + return template + liquid_template = Template(template) + return liquid_template.render(**vars) + + +class ActionListRoutine(Routine): + def __init__( + self, + name: str, + skill_name: str, + description: str, + routine: str, + ) -> None: + super().__init__( + name=name, + skill_name=skill_name, + description=description, + ) + self.routine = routine + + def template_vars(self) -> list[str]: + return find_template_vars(self.routine) + + def validate(self, vars: dict[str, Any]): + """ + Validate the routine. In the case of an Action routine this means that: + - Each line should have an output variable name defined (the string + before the first colon). - For each line: + - Any template variables should have already been defined in either + vars, or in previous lines. + - The remaining line (after the colon), with replacements, should be + parseable as a command. + """ + # First, clean out any template variables that have been set. + parsed_routine = parse_template(self.routine, vars) + + # Gather up defined output variables as we go. + output_variables = {} + + for line in parsed_routine.split("\n"): + # Skip empty lines. + line = line.strip() + if not line: + continue + + # Check the line format (output_variable: command). + if ":" not in line: + raise ValueError(f"Invalid line in routine: {line}") + output_variable_name, command = line.split(":", 1) + output_variable_name = output_variable_name.strip() + command = command.strip() + if not command: + raise ValueError(f"Empty command in routine: {line}") + + # Check that no undefined variables are used. + if not all(variable in output_variables for variable in find_template_vars(command)): + raise ValueError(f"Unbound template variable in routine: {line}") + + # Check that the command string is valid. + try: + command = parse_template(command, dict(output_variables)) + parse_command_string(command) + except ValueError as e: + raise ValueError(f"Unparsable command in routine. {e} {command}") + + # Add this line's output variable for checking next line. 
+        output_variables[output_variable_name] = None
+
+    def __str__(self) -> str:
+        template_vars = find_template_vars(self.routine)
+        return f"{self.fullname()}(vars: {template_vars}): {self.description}"
diff --git a/libraries/python/skills/skill-library/skill_library/routine/instruction_routine.py b/libraries/python/skills/skill-library/skill_library/routine/instruction_routine.py
new file mode 100644
index 00000000..7234c10f
--- /dev/null
+++ b/libraries/python/skills/skill-library/skill_library/routine/instruction_routine.py
@@ -0,0 +1,25 @@
+from ..utilities import find_template_vars
+from .routine import Routine
+
+
+class InstructionRoutine(Routine):
+    def __init__(
+        self,
+        name: str,
+        skill_name: str,
+        description: str,
+        routine: str,
+    ) -> None:
+        super().__init__(
+            name=name,
+            skill_name=skill_name,
+            description=description,
+        )
+        self.routine = routine
+
+    def template_vars(self) -> list[str]:
+        return find_template_vars(self.routine)
+
+    def __str__(self) -> str:
+        template_vars = find_template_vars(self.routine)
+        return f"{self.fullname()}(vars: {template_vars}): {self.description}"
diff --git a/libraries/python/skills/skill-library/skill_library/routine/program_routine.py b/libraries/python/skills/skill-library/skill_library/routine/program_routine.py
new file mode 100644
index 00000000..d5d47efa
--- /dev/null
+++ b/libraries/python/skills/skill-library/skill_library/routine/program_routine.py
@@ -0,0 +1,22 @@
+from ..utilities import find_template_vars
+from .routine import Routine
+
+
+class ProgramRoutine(Routine):
+    def __init__(
+        self,
+        name: str,
+        skill_name: str,
+        description: str,
+        program: str,
+    ) -> None:
+        super().__init__(
+            name=name,
+            skill_name=skill_name,
+            description=description,
+        )
+        self.program = program
+
+    def __str__(self) -> str:
+        template_vars = find_template_vars(self.program)
+        return f"{self.name}(vars: {template_vars}): {self.description}"
diff --git a/libraries/python/skills/skill-library/skill_library/routine/routine.py b/libraries/python/skills/skill-library/skill_library/routine/routine.py
new file mode 100644
index 00000000..244cf2a1
--- /dev/null
+++ b/libraries/python/skills/skill-library/skill_library/routine/routine.py
@@ -0,0 +1,16 @@
+class Routine:
+    def __init__(
+        self,
+        name: str,
+        skill_name: str,
+        description: str,
+    ) -> None:
+        self.name = name
+        self.skill_name = skill_name
+        self.description = description
+
+    def fullname(self) -> str:
+        return f"{self.skill_name}.{self.name}"
+
+    def __str__(self) -> str:
+        return self.fullname()
diff --git a/libraries/python/skills/skill-library/skill_library/routine/state_machine_routine.py b/libraries/python/skills/skill-library/skill_library/routine/state_machine_routine.py
new file mode 100644
index 00000000..d64d39c5
--- /dev/null
+++ b/libraries/python/skills/skill-library/skill_library/routine/state_machine_routine.py
@@ -0,0 +1,26 @@
+from typing import Any, Awaitable, Callable, Optional, Tuple
+
+from skill_library.run_context import RunContext
+
+from .routine import Routine
+
+
+class StateMachineRoutine(Routine):
+    def __init__(
+        self,
+        name: str,
+        skill_name: str,
+        description: str,
+        init_function: Callable[[RunContext, Any], Awaitable[Tuple[bool, Any]]],
+        step_function: Callable[[RunContext, Optional[str]], Awaitable[Tuple[bool, Any]]],
+    ) -> None:
+        super().__init__(
+            name=name,
+            skill_name=skill_name,
+            description=description,
+        )
+        self.init_function = init_function
+        self.step_function = step_function
+
+    def __str__(self) -> str:
+        return
f"{self.name}: {self.description}" diff --git a/libraries/python/skills/skill-library/skill_library/routine_runners/__init__.py b/libraries/python/skills/skill-library/skill_library/routine_runners/__init__.py index 4a633b4c..9f752634 100644 --- a/libraries/python/skills/skill-library/skill_library/routine_runners/__init__.py +++ b/libraries/python/skills/skill-library/skill_library/routine_runners/__init__.py @@ -1,9 +1,16 @@ from typing import Union +from .action_list_routine_runner import ActionListRoutineRunner from .instruction_routine_runner import InstructionRoutineRunner from .program_routine_runner import ProgramRoutineRunner from .state_machine_routine_runner import StateMachineRoutineRunner -RunnerTypes = Union[InstructionRoutineRunner, ProgramRoutineRunner, StateMachineRoutineRunner] +RunnerTypes = Union[ActionListRoutineRunner, InstructionRoutineRunner, ProgramRoutineRunner, StateMachineRoutineRunner] -__all__ = ["InstructionRoutineRunner", "RunnerTypes"] +__all__ = [ + "ActionListRoutineRunner", + "InstructionRoutineRunner", + "ProgramRoutineRunner", + "RunnerTypes", + "StateMachineRoutineRunner", +] diff --git a/libraries/python/skills/skill-library/skill_library/routine_runners/action_list_routine_runner.py b/libraries/python/skills/skill-library/skill_library/routine_runners/action_list_routine_runner.py new file mode 100644 index 00000000..7e95c17d --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/routine_runners/action_list_routine_runner.py @@ -0,0 +1,99 @@ +from typing import TYPE_CHECKING, Any, Awaitable, Callable + +from events import BaseEvent, InformationEvent, NoticeEvent + +if TYPE_CHECKING: + pass + +from ..routine import ActionListRoutine +from ..run_context import RunContext +from ..utilities import find_template_vars, make_arg_set, parse_command_string, parse_template, to_string + +RespondFunction = Callable[[str], Awaitable[BaseEvent]] + + +class ActionListRoutineRunner: + async def run( + self, run_context: RunContext, routine: ActionListRoutine, *args: Any, **kwargs: Any + ) -> tuple[bool, Any]: + """ + Run an Action List routine. This just runs through the steps of a + routine, which should be actions, and executes each one. Note: It might + handle more configuration, manage results of steps, handle errors and + retries, etc. Also, we might add meta-cognitive functions, tracking + progress and changing steps as necessary. + """ + + # Make kwargs out of args (aligning them to the order of the mustache + # variables in the routine). + arg_set = make_arg_set(find_template_vars(routine.routine), args, kwargs) + + # Validate the routine. + try: + routine.validate(arg_set) + except ValueError as e: + run_context.emit(InformationEvent(message=f"Routine failed validation. {e}")) + return True, None + + # Replace mustache variables in the routine with the values from the arg + # set. + parsed_routine = parse_template(routine.routine, arg_set) + + # Get current step and locals from the stack frame state. + async with run_context.stack_frame_state() as state: + state["routine"] = parsed_routine + current_step = state.get("current_step", 0) + locals = state.get("locals", arg_set) + state["current_step"] = current_step + state["locals"] = locals + + await self.next(run_context, routine, "") + + return True, None + + async def next(self, run_context: RunContext, routine: ActionListRoutine, message: str) -> tuple[bool, Any]: + """ + Run the next step in the current routine. 
+ """ + + # Reload state + async with run_context.stack_frame_state() as state: + parsed_routine: str = state["routine"] + current_step: int = state["current_step"] + + # Run the remaining steps. + lines = [line.strip() for line in parsed_routine.split("\n") if line.strip()] + for line in lines[current_step:]: + async with run_context.stack_frame_state() as state: + run_context.emit(InformationEvent(message=f"Running step: {line}")) + + # Separate output variables from the action string. + output_variable_name, command_string = line.split(":", 1) + + # Replace mustache variables in the command string with locals. + command_string = parse_template(command_string, state["locals"]).strip() + + # Parse the command string into a command and args. + command, args, kwargs = parse_command_string(command_string) + + # Run the action! + match command: + case "run_routine": + # TODO: This is untested. + result: Any = await run_context.run_routine(command, *args, **kwargs) + case "print": + run_context.emit(InformationEvent(message=args[0])) + result = None + case _: + result: Any = await run_context.run_action(command, *args, **kwargs) + + # Save and report on the result. + result_string = to_string(result) + state["locals"][output_variable_name] = result_string + run_context.emit(NoticeEvent(message=result_string)) + + # Increment the current step. + current_step += 1 + state["current_step"] = current_step + + return True, None diff --git a/libraries/python/skills/skill-library/skill_library/routine_runners/instruction_routine_runner.py b/libraries/python/skills/skill-library/skill_library/routine_runners/instruction_routine_runner.py index 4543e1e0..cb6dc2ab 100644 --- a/libraries/python/skills/skill-library/skill_library/routine_runners/instruction_routine_runner.py +++ b/libraries/python/skills/skill-library/skill_library/routine_runners/instruction_routine_runner.py @@ -5,9 +5,9 @@ if TYPE_CHECKING: pass -from ..routine import InstructionRoutine, find_template_vars +from ..routine import InstructionRoutine from ..run_context import RunContext -from ..utilities import make_arg_set +from ..utilities import find_template_vars, make_arg_set RespondFunction = Callable[[str], Awaitable[BaseEvent]] @@ -21,12 +21,20 @@ async def run( ) -> tuple[bool, Any]: """ Run an Instruction routine. This just runs through the steps of a - routine, sending each one to a skill's response endpoint. Note, this - means this can only be used with skills that have been configured with a - chat driver. This could be much more sophisticated, though. It might - handle more configuration, manage results of steps, handle errors and - retries, etc. Also, we might add meta-cognitive functions, tracking - progress and changing steps as necessary. + routine, sending each one to its skill's response endpoint. + + Note, this means this can only be used with skills that have been + configured with a chat driver, and this currently doesn't work in a + "cross-skill" way. All actions must be accessible from the chat driver + of the skill that packages the routine. This is of limited use, but this + was the first routine runner we built as a demo and it may have some + utility in edge cases. + + This could be much more sophisticated, though. In addition to making it + cross-skill, it might handle more configuration, manage results of + steps, handle errors and retries, add meta-cognitive functions, tracking + progress and changing steps as necessary. Probably better to invest all + of these ideas in other routine runners, though. 
""" # Make kwargs out of args (aligning them to the order of the mustache diff --git a/libraries/python/skills/skill-library/skill_library/routine_runners/program_routine_runner.py b/libraries/python/skills/skill-library/skill_library/routine_runners/program_routine_runner.py index bea90a66..74dd2494 100644 --- a/libraries/python/skills/skill-library/skill_library/routine_runners/program_routine_runner.py +++ b/libraries/python/skills/skill-library/skill_library/routine_runners/program_routine_runner.py @@ -8,22 +8,15 @@ class ProgramRoutineRunner: def __init__(self) -> None: pass - async def run( - self, context: RunContext, routine: ProgramRoutine, vars: dict[str, Any] | None = None - ) -> tuple[bool, Any]: + async def run(self, context: RunContext, routine: ProgramRoutine, *args: Any, **kwargs: Any) -> tuple[bool, Any]: """ This implementation is not yet working. It is a placeholder for the future implementation of running a program routine. A program routine is a routine written in Python that can be executed by the assistant. The routine will refer to any skill.action(args) that it needs. """ - # Replace mustache variables in the routine with the values from the vars dict. - if vars: - for key, value in vars.items(): - routine.program = routine.program.replace(f"{{{{ {key} }}}}", value) - routine.program = routine.program.replace(f"{{{{{key}}}}}", value) - # TODO: execute the program. + # TODO: Unimplemented routine. return True, None diff --git a/libraries/python/skills/skill-library/skill_library/routine_runners/state_machine_routine_runner.py b/libraries/python/skills/skill-library/skill_library/routine_runners/state_machine_routine_runner.py index 3ade5f13..d585151e 100644 --- a/libraries/python/skills/skill-library/skill_library/routine_runners/state_machine_routine_runner.py +++ b/libraries/python/skills/skill-library/skill_library/routine_runners/state_machine_routine_runner.py @@ -8,7 +8,9 @@ class StateMachineRoutineRunner: def __init__(self) -> None: pass - async def run(self, context: RunContext, routine: StateMachineRoutine, *args, **kwargs) -> tuple[bool, Any]: + async def run( + self, context: RunContext, routine: StateMachineRoutine, *args: Any, **kwargs: Any + ) -> tuple[bool, Any]: return await routine.init_function(context, *args, **kwargs) async def next(self, context: RunContext, routine: StateMachineRoutine, message: str) -> tuple[bool, Any]: diff --git a/libraries/python/skills/skill-library/skill_library/run_context.py b/libraries/python/skills/skill-library/skill_library/run_context.py index cdfed331..650ad46c 100644 --- a/libraries/python/skills/skill-library/skill_library/run_context.py +++ b/libraries/python/skills/skill-library/skill_library/run_context.py @@ -1,6 +1,6 @@ import logging from contextlib import asynccontextmanager -from typing import Any, AsyncGenerator, Callable, Coroutine, Optional, Protocol, Union +from typing import Any, AsyncGenerator, Awaitable, Callable, Concatenate, ParamSpec, Protocol from uuid import uuid4 from assistant_drive import Drive @@ -27,6 +27,10 @@ async def unimplemented_action_runner(action_name: str, *args: Any, **kwargs: An logging.info("Action runner has not been implemented.") +# A typing spec for *args, **kwargs, used in run_action and run_routine sigs. 
+P = ParamSpec("P") + + class RunContext: """ "Run context" is passed to parts of the system (skill routines and @@ -40,8 +44,8 @@ def __init__( assistant_drive: Drive, emit: Callable[[EventProtocol], None], routine_stack: RoutineStack, - run_action: Callable[[str, Optional[dict[str, Any]]], Coroutine[Any, Any, Any]], - run_routine: Union[Callable[[str, Optional[dict[str, Any]]], Coroutine[Any, Any, Any]], None], + run_action: Callable[Concatenate[str, P], Awaitable[Any]], + run_routine: Callable[Concatenate[str, P], Awaitable[Any]], ) -> None: # A session id is useful for maintaining consistent session state across all # consumers of this context. For example, a session id can be set in an diff --git a/libraries/python/skills/skill-library/skill_library/skill_registry.py b/libraries/python/skills/skill-library/skill_library/skill_registry.py index edda8690..8f3d1c81 100644 --- a/libraries/python/skills/skill-library/skill_library/skill_registry.py +++ b/libraries/python/skills/skill-library/skill_library/skill_registry.py @@ -3,8 +3,9 @@ from typing import Any from skill_library.actions import Action +from skill_library.routine_runners.action_list_routine_runner import ActionListRoutineRunner -from .routine import InstructionRoutine, ProgramRoutine, RoutineTypes, StateMachineRoutine +from .routine import ActionListRoutine, InstructionRoutine, ProgramRoutine, RoutineTypes, StateMachineRoutine from .routine_runners import InstructionRoutineRunner, ProgramRoutineRunner, StateMachineRoutineRunner from .routine_stack import RoutineStack from .run_context import RunContext, RunContextProvider @@ -19,7 +20,7 @@ class SkillRegistry: def __init__( self, - skills: dict[str, SkillDefinition], + skills: list[SkillDefinition], run_context_provider: RunContextProvider, routine_stack: RoutineStack, ) -> None: @@ -30,7 +31,7 @@ def __init__( # natural language interface. This supports Instruction routines. self.skills = { skill_definition.name: skill_definition.skill_class(skill_definition, run_context_provider) - for skill_definition in skills.values() + for skill_definition in skills } self.run_context_provider = run_context_provider self.routine_stack = routine_stack @@ -66,7 +67,9 @@ def get_action_by_designation(self, designation: str) -> Action | None: return None return skill.get_action(action_name) - async def run_action_by_designation(self, run_context: RunContext, designation: str, *args, **kwargs) -> Any: + async def run_action_by_designation( + self, run_context: RunContext, designation: str, *args: Any, **kwargs: Any + ) -> Any: """ Run an action by designation (.). """ @@ -113,7 +116,9 @@ def get_routine_by_designation(self, designation: str) -> RoutineTypes | None: return None return skill.get_routine(routine_name) - async def run_routine_by_designation(self, run_context: RunContext, designation: str, *args, **kwargs) -> Any: + async def run_routine_by_designation( + self, run_context: RunContext, designation: str, *args: Any, **kwargs: Any + ) -> Any: """ Run an assistant routine by designation (.). """ @@ -123,7 +128,7 @@ async def run_routine_by_designation(self, run_context: RunContext, designation: response = await self.run_routine(run_context, routine, *args, **kwargs) return response - async def run_routine(self, run_context: RunContext, routine: RoutineTypes, *args, **kwargs) -> Any: + async def run_routine(self, run_context: RunContext, routine: RoutineTypes, *args: Any, **kwargs: Any) -> Any: """ Run an assistant routine. This is going to be much of the magic of the assistant. 
Currently, is just runs through the steps of a routine, but @@ -147,6 +152,9 @@ async def run_routine(self, run_context: RunContext, routine: RoutineTypes, *arg await self.routine_stack.push(routine.fullname()) match routine: + case ActionListRoutine(): + runner = ActionListRoutineRunner() + done, output = await runner.run(run_context, routine, *args, **kwargs) case InstructionRoutine(): skill = self.get_skill(routine.skill_name) if not skill: @@ -179,6 +187,9 @@ async def step_active_routine(self, run_context: RunContext, message: str) -> No raise ValueError(f"Routine {routine_frame.name} not found.") match routine: + case ActionListRoutine(): + runner = ActionListRoutineRunner() + done, output = await runner.next(run_context, routine, message) case InstructionRoutine(): # Instruction routines work by passing each line of the routine # to to the routine's skill's response function. This means any diff --git a/libraries/python/skills/skill-library/skill_library/tests/test_action_list_validation.py b/libraries/python/skills/skill-library/skill_library/tests/test_action_list_validation.py new file mode 100644 index 00000000..0bcc5152 --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/tests/test_action_list_validation.py @@ -0,0 +1,22 @@ +from textwrap import dedent + +from skill_library.routine.action_list_routine import ActionListRoutine + + +def test_action_list_routine_validation(): + routine = ActionListRoutine( + name="demo", + skill_name="skill", + description="A demo action list routine.", + routine=dedent(""" + 0: common.web_search("{{stock_ticker}} stock price") + 1: common.gpt_complete("Write this like a cowboy: {{0}}") + 2: posix.write_file("output.txt", {{1}}) + 3: print([{{1}}]) + """), + ) + + vars = {"stock_ticker": "MSFT"} + + # Assert no exception thrown. + assert routine.validate(vars) is None diff --git a/libraries/python/skills/skill-library/skill_library/tests/test_paramspec.py b/libraries/python/skills/skill-library/skill_library/tests/test_paramspec.py new file mode 100644 index 00000000..942d0c2a --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/tests/test_paramspec.py @@ -0,0 +1,45 @@ +from typing import Any, Awaitable, Callable, Concatenate, ParamSpec + +P = ParamSpec("P") +RunActionType = Callable[Concatenate[str, P], Awaitable[Any]] + + +# Implementation. +async def run_action(designation: str, *args: Any, **kwargs: Any) -> Any: + return designation, args, kwargs + + +# Assignment. 
+run_action_var: RunActionType = run_action + + +async def test_paramspec(): + result = await run_action_var( + "test_designation", + 1, + 2, + 3, + key1="value1", + key2=42, + ) + assert result == ("test_designation", (1, 2, 3), {"key1": "value1", "key2": 42}) + + +async def test_paramspec_with_dictionary(): + result = await run_action_var( + "test_designation", + 1, + 2, + 3, + **{"key1": "value1", "key2": 42}, + ) + assert result == ("test_designation", (1, 2, 3), {"key1": "value1", "key2": 42}) + + +async def test_paramspec_with_no_args(): + d: dict[str, Any] = {"key1": "value1", "key2": 42} + result = await run_action_var( + "test_designation", + **d, + ) + assert result == ("test_designation", (), {"key1": "value1", "key2": 42}) diff --git a/libraries/python/skills/skill-library/skill_library/tests/test_parse_command_string.py b/libraries/python/skills/skill-library/skill_library/tests/test_parse_command_string.py new file mode 100644 index 00000000..3dc081d9 --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/tests/test_parse_command_string.py @@ -0,0 +1,46 @@ +from skill_library.utilities import parse_command_string + + +def test_parse_command_string(): + command, args, kwargs = parse_command_string( + 'command(arg1, arg2, key1="val1", key2=True, key3=3, key4 = 3+2, key5=[1,2], key6=(1,2), key7={"a": 1, "b": 2})' + ) + assert command == "command" + assert args == ("arg1", "arg2") + assert kwargs == { + "key1": "val1", + "key2": True, + "key3": 3, + "key4": 5, + "key5": [1, 2], + "key6": (1, 2), + "key7": {"a": 1, "b": 2}, + } + + +def test_parse_command_string_no_args(): + command, args, kwargs = parse_command_string('command(key1="val1", key2="val2")') + assert command == "command" + assert args == () + assert kwargs == {"key1": "val1", "key2": "val2"} + + +def test_parse_command_string_no_kwargs(): + command, args, kwargs = parse_command_string("command(arg1, arg2)") + assert command == "command" + assert args == ("arg1", "arg2") + assert kwargs == {} + + +def test_parse_command_string_no_args_or_kwargs(): + command, args, kwargs = parse_command_string("command()") + assert command == "command" + assert args == () + assert kwargs == {} + + +def test_parse_command_string_no_parens(): + command, args, kwargs = parse_command_string("command") + assert command == "command" + assert args == () + assert kwargs == {} diff --git a/libraries/python/skills/skill-library/skill_library/tests/test_to_string.py b/libraries/python/skills/skill-library/skill_library/tests/test_to_string.py new file mode 100644 index 00000000..77ac0a67 --- /dev/null +++ b/libraries/python/skills/skill-library/skill_library/tests/test_to_string.py @@ -0,0 +1,46 @@ +from pydantic import BaseModel +from skill_library.utilities import to_string + + +def test_to_string_none(): + value = to_string(None) + assert value == "" + + +def test_to_string_str(): + value = to_string("hello") + assert value == "hello" + + +def test_to_string_int(): + value = to_string(42) + assert value == "42" + + +def test_to_string_float(): + value = to_string(3.14) + assert value == "3.14" + + +def test_to_string_dict(): + value = to_string({"key": "value"}) + assert value == '{\n "key": "value"\n}' + + +def test_to_string_list(): + value = to_string(["one", "two"]) + assert value == '[\n "one",\n "two"\n]' + + +def test_to_string_tuple(): + value = to_string(("one", "two")) + assert value == '[\n "one",\n "two"\n]' + + +def test_to_string_pydantic_model(): + class Model(BaseModel): + name: str = "base model" + + model = 
+    value = to_string(model)
+    assert value == '{\n  "name": "base model"\n}'
diff --git a/libraries/python/skills/skill-library/skill_library/utilities.py b/libraries/python/skills/skill-library/skill_library/utilities.py
index a311dc85..c66c7fa3 100644
--- a/libraries/python/skills/skill-library/skill_library/utilities.py
+++ b/libraries/python/skills/skill-library/skill_library/utilities.py
@@ -1,5 +1,53 @@
+import ast
+import json
+import re
 from typing import Any
+from pydantic import BaseModel
+
+
+def parse_template(template: str, vars: dict[str, Any]) -> str:
+    """
+    Replace mustache variables in the template with the values from the arg set.
+    """
+    parsed_template = template
+    for key, value in vars.items():
+        parsed_template = parsed_template.replace(f"{{{{ {key} }}}}", str(value))
+        parsed_template = parsed_template.replace(f"{{{{{key}}}}}", str(value))
+    return parsed_template
+
+
+def find_template_vars(text: str) -> list[str]:
+    """
+    Find mustache template variables in a string. Variables will be
+    de-duplicated and returned in order of first appearance.
+    """
+    pattern = re.compile(r"\{\{([a-zA-Z0-9_]+)\}\}")
+    return list(dict.fromkeys(pattern.findall(text)))
+
+
+def to_string(value: Any) -> str:
+    """
+    Convert a value to a string. This uses the json library or the Pydantic
+    library when possible and falls back to str.
+    """
+    if value is None:
+        return ""
+    elif isinstance(value, str):
+        return value
+    elif isinstance(value, (int, float)):
+        return str(value)
+    elif isinstance(value, dict):
+        return json.dumps(value, indent=2)
+    elif isinstance(value, list):
+        return json.dumps(value, indent=2)
+    elif isinstance(value, tuple):
+        return json.dumps(value, indent=2)
+    elif isinstance(value, BaseModel):
+        return value.model_dump_json(indent=2)
+    else:
+        return str(value)
+
 
 
 def make_arg_set(expected_variables: list[str], args: tuple, kwargs: dict[str, Any]) -> dict[str, Any]:
     """
@@ -21,3 +69,70 @@ def make_arg_set(expected_variables: list[str], args: tuple, kwargs: dict[str, A
     kwargs_set = {key: value for key, value in kwargs.items() if key in expected_variables}
     arg_set.update(kwargs_set)
     return arg_set
+
+
+def parse_command_string(command_string: str) -> tuple[str, tuple[Any, ...], dict[str, Any]]:
+    """
+    Parse a string representing a function call into its components (command,
+    args, and kwargs).
+    """
+
+    command_string = command_string.strip()
+
+    # As a convenience, add parentheses if they are missing.
+    if " " not in command_string and "(" not in command_string:
+        command_string += "()"
+
+    # Parse the string into an AST (Abstract Syntax Tree)
+    try:
+        tree = ast.parse(command_string)
+    except SyntaxError:
+        raise ValueError("Invalid function call. Please check your syntax.")
+
+    # Ensure the tree contains exactly one expression (the function call)
+    if not (isinstance(tree, ast.Module) and len(tree.body) == 1 and isinstance(tree.body[0], ast.Expr)):
+        raise ValueError("Expected a single function call.")
+
+    # The function call is stored as a `Call` node within the expression
+    call_node = tree.body[0].value
+    if not isinstance(call_node, ast.Call):
+        raise ValueError("Invalid function call. Please check your syntax.")
+
+    # Extract the function name
+    if isinstance(call_node.func, ast.Name):
+        command_name = call_node.func.id
+    elif isinstance(call_node.func, ast.Attribute):
+        if not isinstance(call_node.func.value, ast.Name):
+            raise ValueError("Unsupported function format. Please check your syntax.")
+        command_name = f"{call_node.func.value.id}.{call_node.func.attr}"
+    else:
+        raise ValueError("Unsupported function format. Please check your syntax.")
+
+    # Helper function to evaluate AST nodes to their Python equivalent
+    def eval_node(node):
+        if isinstance(node, ast.Constant):
+            return node.value
+        elif isinstance(node, ast.List):
+            return [eval_node(elem) for elem in node.elts]
+        elif isinstance(node, ast.Tuple):
+            return tuple(eval_node(elem) for elem in node.elts)
+        elif isinstance(node, ast.Dict):
+            return {eval_node(key): eval_node(value) for key, value in zip(node.keys, node.values)}
+        elif isinstance(node, ast.Name):
+            return node.id  # This can return variable names, but we assume they're constants
+        elif isinstance(node, ast.BinOp):  # Handling arithmetic expressions
+            return eval(compile(ast.Expression(node), filename="", mode="eval"))
+        elif isinstance(node, ast.Call):
+            raise ValueError("Nested function calls are not supported.")
+        else:
+            raise ValueError(f"Unsupported AST node type: {type(node).__name__}")
+
+    # Extract positional arguments
+    args: list[Any] = [eval_node(arg) for arg in call_node.args]
+
+    # Extract keyword arguments
+    kwargs = {}
+    for kw in call_node.keywords:
+        kwargs[kw.arg] = eval_node(kw.value)
+
+    return command_name, tuple(args), kwargs
diff --git a/libraries/python/skills/skill-library/uv.lock b/libraries/python/skills/skill-library/uv.lock
index 0efb0207..3faf256c 100644
--- a/libraries/python/skills/skill-library/uv.lock
+++ b/libraries/python/skills/skill-library/uv.lock
@@ -1634,6 +1634,7 @@ dependencies = [
     { name = "pydantic" },
     { name = "pydantic-settings" },
     { name = "python-dotenv" },
+    { name = "python-liquid" },
     { name = "requests" },
     { name = "tiktoken" },
 ]
@@ -1657,6 +1658,7 @@ requires-dist = [
     { name = "pydantic", specifier = ">=2.6.1" },
     { name = "pydantic-settings", specifier = ">=2.3.4" },
     { name = "python-dotenv", specifier = ">=1.0.1" },
+    { name = "python-liquid", specifier = ">=1.12.1" },
     { name = "requests", specifier = ">=2.32.0" },
     { name = "tiktoken", specifier = ">=0.7.0" },
 ]
diff --git a/libraries/python/skills/skills/common-skill/common_skill/common_skill.py b/libraries/python/skills/skills/common-skill/common_skill/common_skill.py
index 0e6d95a2..89c4b810 100644
--- a/libraries/python/skills/skills/common-skill/common_skill/common_skill.py
+++ b/libraries/python/skills/skills/common-skill/common_skill/common_skill.py
@@ -1,5 +1,3 @@
-from typing import Type
-
 from assistant_drive import Drive
 from openai_client.chat_driver import ChatDriverConfig
 from skill_library import ActionCallable, ChatDriverFunctions, RunContext, RunContextProvider, Skill, SkillDefinition
@@ -7,6 +5,7 @@ from skill_library.types import LanguageModel
 
 from .actions import gpt_complete, web_search
+from .routines.demo import get_demo_routine
 
 CLASS_NAME = "CommonSkill"
 DESCRIPTION = "Provides common actions and routines."
 
@@ -19,11 +18,17 @@ def __init__( skill_definition: "CommonSkillDefinition", run_context_provider: RunContextProvider, ) -> None: + self.skill_name = skill_definition.name self.language_model = skill_definition.language_model self.drive = skill_definition.drive - routines: list[RoutineTypes] = [] - action_functions: list[ActionCallable] = Actions(skill_definition.language_model).list_actions() + action_functions: list[ActionCallable] = ActionFunctions( + skill_definition.language_model + ).list_action_functions() + + routines: list[RoutineTypes] = [ + get_demo_routine(self.skill_name), + ] # Configure the skill's chat driver. This is just used for testing the # skill out directly, but won't be exposed in the assistant. @@ -42,18 +47,7 @@ def __init__( ) -""" -1. RunContext gets passed in during execution. -2. Other variables can be passed in during initialization. -3. When a person runs /list_actions, they shouldn't see RunContext or the initialization variables. -4. A person should be able to run an action with params excluding RunContext and inits. -5. The chatdriver should be able to have registered actions without runcontext or init variables. The function that is given to the chatdriver should not have these things. - A. A chat driver should be able to specify it wants to run a tool call, but the function that executes it should inject a run context?? Or not... the functions themselves might be defined within a context that has a run context. - -""" - - -class Actions: +class ActionFunctions: """ Using a class like this might be a good pattern for declaring actions. It allows the injection of things while allowing the method signature from @@ -73,7 +67,7 @@ async def web_search(self, run_context: RunContext, query: str) -> str: content, metadata = await web_search(self.language_model, query) return content - def list_actions(self) -> list[ActionCallable]: + def list_action_functions(self) -> list[ActionCallable]: return [self.gpt_complete, self.web_search] @@ -85,7 +79,6 @@ def __init__( drive: Drive, description: str | None = None, chat_driver_config: ChatDriverConfig | None = None, - skill_class: Type[Skill] = CommonSkill, ) -> None: self.language_model = language_model self.drive = drive @@ -96,7 +89,7 @@ def __init__( # Initialize the skill! 
super().__init__( name=name, - skill_class=skill_class, + skill_class=CommonSkill, description=description or DESCRIPTION, chat_driver_config=chat_driver_config, ) diff --git a/libraries/python/skills/skills/common-skill/common_skill/routines/demo.py b/libraries/python/skills/skills/common-skill/common_skill/routines/demo.py new file mode 100644 index 00000000..9ad4dd8b --- /dev/null +++ b/libraries/python/skills/skills/common-skill/common_skill/routines/demo.py @@ -0,0 +1,17 @@ +from textwrap import dedent + +from skill_library import ActionListRoutine + + +def get_demo_routine(skill_name: str) -> ActionListRoutine: + return ActionListRoutine( + name="demo", + skill_name=skill_name, + description="A demo action list routine.", + routine=dedent(""" + 0: common.web_search("{{stock_ticker}} stock price") + 1: common.gpt_complete("Write this like a cowboy: {{0}}") + 2: posix.write_file("output.txt", "{{1}}") + 3: print("{{1}}") + """), + ) diff --git a/libraries/python/skills/skills/common-skill/uv.lock b/libraries/python/skills/skills/common-skill/uv.lock index c3bd1fe3..3c06404b 100644 --- a/libraries/python/skills/skills/common-skill/uv.lock +++ b/libraries/python/skills/skills/common-skill/uv.lock @@ -2077,6 +2077,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -2092,6 +2093,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/document-skill/document_skill/document_skill.py b/libraries/python/skills/skills/document-skill/document_skill/document_skill.py index a566b359..437e3bf5 100644 --- a/libraries/python/skills/skills/document-skill/document_skill/document_skill.py +++ b/libraries/python/skills/skills/document-skill/document_skill/document_skill.py @@ -200,7 +200,6 @@ def __init__( language_model: LanguageModel, description: str | None = None, chat_driver_config: ChatDriverConfig | None = None, - skill_class: type[Skill] = DocumentSkill, ) -> None: self.language_model = language_model @@ -210,7 +209,7 @@ def __init__( # Initialize the skill! 
super().__init__( name=name, - skill_class=skill_class, + skill_class=DocumentSkill, description=description or DESCRIPTION, chat_driver_config=chat_driver_config, ) diff --git a/libraries/python/skills/skills/document-skill/uv.lock b/libraries/python/skills/skills/document-skill/uv.lock index e5351547..0122bc25 100644 --- a/libraries/python/skills/skills/document-skill/uv.lock +++ b/libraries/python/skills/skills/document-skill/uv.lock @@ -1574,6 +1574,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -1589,6 +1590,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/form-filler-skill/form_filler_skill/form_filler_skill.py b/libraries/python/skills/skills/form-filler-skill/form_filler_skill/form_filler_skill.py index 166c2d39..54467a27 100644 --- a/libraries/python/skills/skills/form-filler-skill/form_filler_skill/form_filler_skill.py +++ b/libraries/python/skills/skills/form-filler-skill/form_filler_skill/form_filler_skill.py @@ -4,12 +4,12 @@ from skill_library import ( ChatDriverFunctions, RoutineTypes, + RunContext, RunContextProvider, Skill, SkillDefinition, StateMachineRoutine, ) -from skill_library.run_context import RunContext from skill_library.types import LanguageModel CLASS_NAME = "FormFillerSkill" @@ -70,11 +70,11 @@ async def form_fill_init(self, context: RunContext, vars: dict[str, Any] | None async def form_fill_step( self, - context: RunContext, + run_context: RunContext, message: Optional[str] = None, ) -> tuple[bool, str | None]: FormFiller = self - state = await context.get_state() + state = await run_context.get_state() while True: match state.get("mode"): case None: @@ -84,14 +84,15 @@ async def form_fill_step( # How do we want to pass in all the GC definitions? Should they just be a simpler config object TYPE? if not state["artifact"]: if not state["gce_id"]: - guided_conversation_vars: dict[str, Any] = { - "definition_type": "upload_files", - "objective": "Upload a form to be filled out by the form filler recipe.", - } - gc_id = await context.run_routine( - "guided_conversation.doc_upload", guided_conversation_vars - ) - state["gc_id"] = gc_id + pass + # guided_conversation_vars: dict[str, Any] = { + # "definition_type": "upload_files", + # "objective": "Upload a form to be filled out by the form filler recipe.", + # } + # gc_id = await run_context.run_routine( + # "guided_conversation.doc_upload", **guided_conversation_vars + # ) + # state["gc_id"] = gc_id # TODO: What is the best way to subroutine? 
# artifact = GuidedConversation.run(state["gce_id"], message) # if artifact: @@ -100,25 +101,25 @@ async def form_fill_step( # await context.set_state(state) # return - agenda, is_done = FormFiller.update_agenda(context) + agenda, is_done = FormFiller.update_agenda(run_context) state["agenda"] = agenda if is_done: state["mode"] = "done" state["mode"] = "conversation" - await context.set_state(state) + await run_context.set_state(state) return is_done, agenda case "conversation": - state["form"] = FormFiller.update_form(context) - agenda, is_done = FormFiller.update_agenda(context) + state["form"] = FormFiller.update_form(run_context) + agenda, is_done = FormFiller.update_agenda(run_context) state["agenda"] = agenda if is_done: state["mode"] = "finalize" - await context.set_state(state) + await run_context.set_state(state) return is_done, agenda case "finalize": - message = FormFiller.generate_filled_form(context) + message = FormFiller.generate_filled_form(run_context) state["mode"] = "done" - await context.set_state(state) + await run_context.set_state(state) return False, message case "done": return True, None @@ -146,10 +147,9 @@ def __init__( language_model: LanguageModel, description: str | None = None, chat_driver_config: ChatDriverConfig | None = None, - skill_class: type[Skill] = FormFillerSkill, ) -> None: self.name = name self.language_model = language_model self.description = description or DESCRIPTION self.chat_driver_config = chat_driver_config - self.skill_class = skill_class + self.skill_class = FormFillerSkill diff --git a/libraries/python/skills/skills/form-filler-skill/pyproject.toml b/libraries/python/skills/skills/form-filler-skill/pyproject.toml index 93cfbd10..1cd75600 100644 --- a/libraries/python/skills/skills/form-filler-skill/pyproject.toml +++ b/libraries/python/skills/skills/form-filler-skill/pyproject.toml @@ -15,9 +15,9 @@ dependencies = [ [dependency-groups] dev = [ - # "pytest>=8.3.1", - # "pytest-asyncio>=0.23.8", - # "pytest-repeat>=0.9.3", + "pytest>=8.3.1", + "pytest-asyncio>=0.23.8", + "pytest-repeat>=0.9.3", "ipykernel>=6.29.4", "pyright>=1.1.389", ] @@ -41,6 +41,6 @@ addopts = "-vv" log_cli = true log_cli_level = "WARNING" log_cli_format = "%(asctime)s | %(levelname)-7s | %(name)s | %(message)s" -testpaths = ["tests"] +# testpaths = ["tests"] asyncio_mode = "auto" asyncio_default_fixture_loop_scope = "function" diff --git a/libraries/python/skills/skills/form-filler-skill/tests/test_setup.py b/libraries/python/skills/skills/form-filler-skill/tests/test_setup.py new file mode 100644 index 00000000..1bf716aa --- /dev/null +++ b/libraries/python/skills/skills/form-filler-skill/tests/test_setup.py @@ -0,0 +1,2 @@ +def test_setup(): + assert 1 == 1 diff --git a/libraries/python/skills/skills/form-filler-skill/uv.lock b/libraries/python/skills/skills/form-filler-skill/uv.lock index aceb8fe7..421b987d 100644 --- a/libraries/python/skills/skills/form-filler-skill/uv.lock +++ b/libraries/python/skills/skills/form-filler-skill/uv.lock @@ -673,6 +673,9 @@ dependencies = [ dev = [ { name = "ipykernel" }, { name = "pyright" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "pytest-repeat" }, ] [package.metadata] @@ -688,6 +691,9 @@ requires-dist = [ dev = [ { name = "ipykernel", specifier = ">=6.29.4" }, { name = "pyright", specifier = ">=1.1.389" }, + { name = "pytest", specifier = ">=8.3.1" }, + { name = "pytest-asyncio", specifier = ">=0.23.8" }, + { name = "pytest-repeat", specifier = ">=0.9.3" }, ] [[package]] @@ -828,6 +834,15 @@ wheels = [ 
{ url = "https://files.pythonhosted.org/packages/e1/6a/4604f9ae2fa62ef47b9de2fa5ad599589d28c9fd1d335f32759813dfa91e/importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717", size = 36115 }, ] +[[package]] +name = "iniconfig" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d7/4b/cbd8e699e64a6f16ca3a8220661b5f83792b3017d0f79807cb8708d33913/iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", size = 4646 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a6/62565a6e1cf69e10f5727360368e451d4b7f58beeac6173dc9db836a5b46/iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374", size = 5892 }, +] + [[package]] name = "ipykernel" version = "6.29.5" @@ -1334,6 +1349,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439 }, ] +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556 }, +] + [[package]] name = "portalocker" version = "2.10.1" @@ -1593,6 +1617,45 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/43/20/3f492ca789fb17962ad23619959c7fa642082621751514296c58de3bb801/pyright-1.1.390-py3-none-any.whl", hash = "sha256:ecebfba5b6b50af7c1a44c2ba144ba2ab542c227eb49bc1f16984ff714e0e110", size = 18579 }, ] +[[package]] +name = "pytest" +version = "8.3.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/05/35/30e0d83068951d90a01852cb1cef56e5d8a09d20c7f511634cc2f7e0372a/pytest-8.3.4.tar.gz", hash = "sha256:965370d062bce11e73868e0335abac31b4d3de0e82f4007408d242b4f8610761", size = 1445919 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/92/76a1c94d3afee238333bc0a42b82935dd8f9cf8ce9e336ff87ee14d9e1cf/pytest-8.3.4-py3-none-any.whl", hash = "sha256:50e16d954148559c9a74109af1eaf0c945ba2d8f30f0a3d3335edde19788b6f6", size = 343083 }, +] + +[[package]] +name = "pytest-asyncio" +version = "0.25.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/df/adcc0d60f1053d74717d21d58c0048479e9cab51464ce0d2965b086bd0e2/pytest_asyncio-0.25.2.tar.gz", hash = "sha256:3f8ef9a98f45948ea91a0ed3dc4268b5326c0e7bce73892acc654df4262ad45f", size = 53950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/d8/defa05ae50dcd6019a95527200d3b3980043df5aa445d40cb0ef9f7f98ab/pytest_asyncio-0.25.2-py3-none-any.whl", hash = 
"sha256:0d0bb693f7b99da304a0634afc0a4b19e49d5e0de2d670f38dc4bfa5727c5075", size = 19400 }, +] + +[[package]] +name = "pytest-repeat" +version = "0.9.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/86/5e/99365eb229efff0b1bd475886150fc6db9937ab7e1bd21f6f65c1279e0eb/pytest_repeat-0.9.3.tar.gz", hash = "sha256:ffd3836dfcd67bb270bec648b330e20be37d2966448c4148c4092d1e8aba8185", size = 6272 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/a8/0a0aec0c2541b8baf4a0b95af2ba99abce217ee43534adf9cb7c908cf184/pytest_repeat-0.9.3-py3-none-any.whl", hash = "sha256:26ab2df18226af9d5ce441c858f273121e92ff55f5bb311d25755b8d7abdd8ed", size = 4196 }, +] + [[package]] name = "python-dateutil" version = "2.9.0.post0" @@ -1946,6 +2009,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -1961,6 +2025,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/guided-conversation-skill/guided_conversation_skill/guided_conversation_skill.py b/libraries/python/skills/skills/guided-conversation-skill/guided_conversation_skill/guided_conversation_skill.py index 5ab01230..3d01df23 100644 --- a/libraries/python/skills/skills/guided-conversation-skill/guided_conversation_skill/guided_conversation_skill.py +++ b/libraries/python/skills/skills/guided-conversation-skill/guided_conversation_skill/guided_conversation_skill.py @@ -110,7 +110,7 @@ def conversation_routine(self) -> StateMachineRoutine: async def conversation_init_function( self, - context: RunContext, + run_context: RunContext, conversation_guide: ConversationGuide | str | None = None, conversation: Conversation | None = None, resource: ConversationResource | None = None, @@ -143,7 +143,7 @@ async def conversation_init_function( logger.debug( "Initializing guided conversation skill.", - add_serializable_data({"session_id": context.session_id, "vars": vars}), + add_serializable_data({"session_id": run_context.session_id, "vars": vars}), ) # The definition is required to run the conversation. It can be provided @@ -171,7 +171,7 @@ async def conversation_init_function( # We can put all this data in the routine frame, or we could also put it # in the skill drive. All of this intermediate state can just go in the # frame. Only the final artifact needs to be saved to the drive. - async with context.stack_frame_state() as state: + async with run_context.stack_frame_state() as state: state["guide"] = guide state["conversation"] = conversation or Conversation() state["agenda"] = agenda or Agenda() @@ -179,7 +179,7 @@ async def conversation_init_function( state["resource"] = resource or ConversationResource(resource_constraint=guide.resource_constraint) # For guided conversation, we want to go ahead and run the first step. 
- return await self.conversation_step_function(context) + return await self.conversation_step_function(run_context) async def conversation_step_function( self, diff --git a/libraries/python/skills/skills/guided-conversation-skill/uv.lock b/libraries/python/skills/skills/guided-conversation-skill/uv.lock index fd4c60b6..8f6d79cb 100644 --- a/libraries/python/skills/skills/guided-conversation-skill/uv.lock +++ b/libraries/python/skills/skills/guided-conversation-skill/uv.lock @@ -2051,6 +2051,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -2066,6 +2067,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/posix-skill/posix_skill/posix_skill.py b/libraries/python/skills/skills/posix-skill/posix_skill/posix_skill.py index 5bbfa17f..e19e986c 100644 --- a/libraries/python/skills/skills/posix-skill/posix_skill/posix_skill.py +++ b/libraries/python/skills/skills/posix-skill/posix_skill/posix_skill.py @@ -1,6 +1,5 @@ from pathlib import Path from textwrap import dedent -from typing import Type from openai_client.chat_driver import ChatDriverConfig from skill_library import ( @@ -173,11 +172,10 @@ def __init__( mount_dir: str = "/mnt/data", description: str | None = None, chat_driver_config: ChatDriverConfig | None = None, - skill_class: Type[Skill] = PosixSkill, ) -> None: self.name = name self.description = description or DESCRIPTION self.sandbox_dir = sandbox_dir self.mount_dir = mount_dir self.chat_driver_config = chat_driver_config - self.skill_class = skill_class + self.skill_class = PosixSkill diff --git a/libraries/python/skills/skills/posix-skill/posix_skill/skill_definition.py b/libraries/python/skills/skills/posix-skill/posix_skill/skill_definition.py deleted file mode 100644 index 65088c02..00000000 --- a/libraries/python/skills/skills/posix-skill/posix_skill/skill_definition.py +++ /dev/null @@ -1,35 +0,0 @@ -from pathlib import Path -from typing import TYPE_CHECKING, Type - -from openai_client.chat_driver import ChatDriverConfig -from skill_library import ( - Skill, - SkillDefinition, -) - -if TYPE_CHECKING: - from .posix_skill import PosixSkill - - -NAME = "posix" -CLASS_NAME = "PosixSkill" -DESCRIPTION = "Manages the filesystem using a sand-boxed Posix shell." -DEFAULT_MAX_RETRIES = 3 -INSTRUCTIONS = "You are an assistant that has access to a sand-boxed Posix shell." 
- -class PosixSkillDefinition(SkillDefinition): - def __init__( - self, - name: str, - sandbox_dir: Path, - mount_dir: str = "/mnt/data", - description: str | None = None, - chat_driver_config: ChatDriverConfig | None = None, - skill_class: Type[Skill] = PosixSkill, - ) -> None: - self.name = name - self.description = description or DESCRIPTION - self.sandbox_dir = sandbox_dir - self.mount_dir = mount_dir - self.chat_driver_config = chat_driver_config - self.skill_class = skill_class diff --git a/libraries/python/skills/skills/posix-skill/uv.lock b/libraries/python/skills/skills/posix-skill/uv.lock index 9a27cadb..fba38e4c 100644 --- a/libraries/python/skills/skills/posix-skill/uv.lock +++ b/libraries/python/skills/skills/posix-skill/uv.lock @@ -1574,6 +1574,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -1589,6 +1590,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/prospector-skill/prospector_skill/skill.py b/libraries/python/skills/skills/prospector-skill/prospector_skill/skill.py index 3a703b79..c724103b 100644 --- a/libraries/python/skills/skills/prospector-skill/prospector_skill/skill.py +++ b/libraries/python/skills/skills/prospector-skill/prospector_skill/skill.py @@ -114,7 +114,6 @@ def __init__( name: str, description: str | None = None, chat_driver_config: ChatDriverConfig | None = None, - skill_class: type[Skill] = Skill, ) -> None: if chat_driver_config: chat_driver_config.instructions = INSTRUCTIONS @@ -122,7 +121,7 @@ def __init__( # Initialize the skill! 
super().__init__( name=name, - skill_class=skill_class, + skill_class=Skill, description=description or DESCRIPTION, chat_driver_config=chat_driver_config, ) diff --git a/libraries/python/skills/skills/prospector-skill/uv.lock b/libraries/python/skills/skills/prospector-skill/uv.lock index ba243b78..feb0a080 100644 --- a/libraries/python/skills/skills/prospector-skill/uv.lock +++ b/libraries/python/skills/skills/prospector-skill/uv.lock @@ -1574,6 +1574,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -1589,6 +1590,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/skill-template/uv.lock b/libraries/python/skills/skills/skill-template/uv.lock index 79ea9d37..db23ae7a 100644 --- a/libraries/python/skills/skills/skill-template/uv.lock +++ b/libraries/python/skills/skills/skill-template/uv.lock @@ -1547,6 +1547,7 @@ dependencies = [ { name = "pydantic" }, { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "python-liquid" }, { name = "requests" }, { name = "tiktoken" }, ] @@ -1562,6 +1563,7 @@ requires-dist = [ { name = "pydantic", specifier = ">=2.6.1" }, { name = "pydantic-settings", specifier = ">=2.3.4" }, { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-liquid", specifier = ">=1.12.1" }, { name = "requests", specifier = ">=2.32.0" }, { name = "tiktoken", specifier = ">=0.7.0" }, ] diff --git a/libraries/python/skills/skills/skill-template/your_skill/skill.py b/libraries/python/skills/skills/skill-template/your_skill/skill.py index c5e19fb8..2afa487b 100644 --- a/libraries/python/skills/skills/skill-template/your_skill/skill.py +++ b/libraries/python/skills/skills/skill-template/your_skill/skill.py @@ -1,5 +1,3 @@ -from typing import Type - from openai_client.chat_driver import ChatDriverConfig from skill_library import ( ActionCallable, @@ -95,10 +93,9 @@ def __init__( name: str, description: str | None = None, chat_driver_config: ChatDriverConfig | None = None, - skill_class: Type[Skill] = YourSkill, # Any other parameters you want to pass to the skill. ) -> None: self.name = name self.description = description or DESCRIPTION self.chat_driver_config = chat_driver_config - self.skill_class = skill_class + self.skill_class = YourSkill
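
Taken together, the utilities added above appear to be the plumbing an action-list routine step goes through: find_template_vars and parse_template fill in the mustache variables, and parse_command_string turns the filled-in step into a command name, args, and kwargs. The sketch below is illustrative only and is not part of the patch; the "MSFT" ticker value is a made-up example.

from skill_library.utilities import find_template_vars, parse_command_string, parse_template

# One step from the demo action-list routine added in common_skill/routines/demo.py.
step = 'common.web_search("{{stock_ticker}} stock price")'

# Discover the mustache variable, fill it in, then parse the resulting call string.
assert find_template_vars(step) == ["stock_ticker"]
filled = parse_template(step, {"stock_ticker": "MSFT"})  # example value, not from the diff
command, args, kwargs = parse_command_string(filled)
assert command == "common.web_search"
assert args == ("MSFT stock price",)
assert kwargs == {}

How outputs of earlier steps (the {{0}} and {{1}} references in the demo routine) get substituted is not shown in this diff, so that part is left out of the sketch.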