Skip to content

Commit

Permalink
chore: cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
phil65 committed Nov 23, 2024
1 parent e96951f commit 29bac94
Show file tree
Hide file tree
Showing 4 changed files with 13 additions and 130 deletions.
17 changes: 7 additions & 10 deletions src/llmling/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,23 +23,20 @@
__version__ = "0.2.0"

# Public API of the llmling package, sorted alphabetically.
# (Previous revision listed several names twice, e.g. "ConfigError" and
# "LLMLingError"; duplicates removed.)
__all__ = [
    "ConfigError",
    "ContextError",
    "ContextLoader",
    "LLMError",
    "LLMLingError",
    "LoadedContext",
    "LoaderError",
    "ProcessorError",
    "ProcessorRegistry",
    "TaskError",
    "TaskExecutor",
    "TaskManager",
    "context_registry",
    "llm_registry",
]

# llmling/
Expand Down
Empty file.
67 changes: 3 additions & 64 deletions src/llmling/processors/__init__.py
Original file line number Diff line number Diff line change
@@ -1,62 +1,4 @@
"""Collection of content processors for text transformation pipelines.
This package provides a flexible framework for defining and executing content processing
pipelines. It includes base classes for processors, a registry system for managing
processor instances, and implementations for function-based and template-based
processing.
Key Components:
- ProcessorConfig: Configuration model for processors
- BaseProcessor: Abstract base class for all processors
- ChainableProcessor: Base class for processors that can be chained
- ProcessorRegistry: Central registry and execution manager
- FunctionProcessor: Executes Python functions on content
- TemplateProcessor: Applies Jinja2 templates to content
Interface Examples:
Creating and registering a processor:
```python
config = ProcessorConfig(
type="function",
name="my_processor",
import_path="my_module.process_func",
async_execution=True
)
registry = ProcessorRegistry()
registry.register("my_processor", config)
```
Processing content:
```python
steps = [
ProcessingStep(name="my_processor", required=True),
ProcessingStep(name="template_proc", parallel=True),
]
result = await registry.process("input text", steps)
```
Streaming processing:
```python
async for result in registry.process_stream("input text", steps):
print(result.content)
```
Creating a custom processor:
```python
class MyProcessor(ChainableProcessor):
async def _process_impl(self, context: ProcessingContext) -> ProcessorResult:
# Process content here
return ProcessorResult(
content="processed content",
original_content=context.current_content
)
```
The package supports both sequential and parallel processing steps, error handling,
and result validation. Processors can be configured via dependency injection and
support async operations.
"""
"""Collection of content processors for text transformation pipelines."""

from __future__ import annotations

Expand All @@ -73,15 +15,12 @@ async def _process_impl(self, context: ProcessingContext) -> ProcessorResult:


# Public API of the processors package, sorted alphabetically.
# (Previous revision listed "FunctionProcessor" and "ProcessorRegistry"
# twice; duplicates removed.)
__all__ = [
    "AsyncProcessor",
    "BaseProcessor",
    "ChainableProcessor",
    "FunctionProcessor",
    "ProcessorConfig",
    "ProcessorRegistry",
    "ProcessorResult",
    "TemplateProcessor",
]
59 changes: 3 additions & 56 deletions src/llmling/task/__init__.py
Original file line number Diff line number Diff line change
@@ -1,54 +1,4 @@
"""Task execution and management system for LLM interactions.
This package provides a framework for defining, managing, and executing tasks using
Large Language Models (LLMs). It handles context loading, processing, and interaction
with LLM providers in a configurable and extensible way.
Key Components:
- TaskManager: Central component for managing and executing task templates
- TaskExecutor: Handles the actual execution of tasks with LLMs
- TaskContext: Configuration for task context and processing
- TaskProvider: Configuration for LLM provider settings
- TaskResult: Result of task execution
Usage Examples:
Basic task execution:
```python
result = await task_manager.execute_template(
"my_template",
system_prompt="You are a helpful assistant",
)
print(result.content)
```
Streaming execution:
```python
async for result in task_manager.execute_template_stream("my_template"):
print(result.content)
```
Concurrent execution:
```python
results = await execute_concurrent(
task_manager,
templates=["template1", "template2"],
max_concurrent=3,
)
```
Template Configuration:
```yaml
task_templates:
my_template:
context: my_context
provider: gpt4
inherit_tools: true
settings:
temperature: 0.7
max_tokens: 1000
```
"""
"""Task execution and management system for LLM interactions."""

from __future__ import annotations

Expand All @@ -59,13 +9,10 @@


# Public API of the task package, sorted alphabetically.
# (Previous revision listed "TaskExecutor" and "TaskManager" twice;
# duplicates removed.)
__all__ = [
    "TaskContext",
    "TaskExecutor",
    "TaskManager",
    "TaskProvider",
    "TaskResult",
    "execute_concurrent",
]

0 comments on commit 29bac94

Please sign in to comment.