Skip to content

Commit

Permalink
Fix parallelism in Thinker
Browse files Browse the repository at this point in the history
  • Loading branch information
kongzii committed Sep 20, 2024
1 parent 6243451 commit de8626a
Show file tree
Hide file tree
Showing 4 changed files with 54 additions and 19 deletions.
27 changes: 26 additions & 1 deletion poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -5,15 +5,16 @@

import tenacity
from crewai import Agent, Crew, Process, Task
from langchain.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_community.utilities.tavily_search import TavilySearchAPIWrapper
from langchain_core.callbacks import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain_core.language_models import BaseChatModel
from langchain_core.pydantic_v1 import SecretStr
from langchain_openai import ChatOpenAI
from loky import get_reusable_executor
from openai import APIError
from prediction_market_agent_tooling.deploy.agent import initialize_langfuse
from prediction_market_agent_tooling.loggers import logger
Expand All @@ -22,11 +23,6 @@
OmenSubgraphHandler,
)
from prediction_market_agent_tooling.tools.langfuse_ import langfuse_context, observe
from prediction_market_agent_tooling.tools.parallelism import (
DEFAULT_PROCESSPOOL_EXECUTOR,
par_generator,
par_map,
)
from prediction_market_agent_tooling.tools.tavily_storage.tavily_models import (
TavilyStorage,
)
Expand Down Expand Up @@ -115,7 +111,14 @@ async def _arun(
class ThinkThoroughlyBase(ABC):
identifier: str

def __init__(self, model: str, enable_langfuse: bool, memory: bool = True) -> None:
def __init__(
self,
model: str,
enable_langfuse: bool,
memory: bool = True,
max_workers: int = 3,
worker_timeout: int = 5 * 60,
) -> None:
self.model = model
self.enable_langfuse = enable_langfuse
self.subgraph_handler = OmenSubgraphHandler()
Expand All @@ -124,6 +127,8 @@ def __init__(self, model: str, enable_langfuse: bool, memory: bool = True) -> No
self._long_term_memory = (
LongTermMemoryTableHandler(self.identifier) if self.memory else None
)
self.max_workers = max_workers
self.worker_timeout = worker_timeout

disable_crewai_telemetry() # To prevent telemetry from being sent to CrewAI

Expand Down Expand Up @@ -255,11 +260,13 @@ def get_correlated_markets(self, question: str) -> list[CorrelatedMarketInput]:
5, text=question
)

markets = par_map(
items=[q.market_address for q in nearest_questions],
func=lambda market_address: self.subgraph_handler.get_omen_market_by_market_id(
markets = get_reusable_executor(
max_workers=self.max_workers, timeout=self.worker_timeout
).map(
lambda market_address: self.subgraph_handler.get_omen_market_by_market_id(
market_id=market_address
),
[q.market_address for q in nearest_questions],
)
return [CorrelatedMarketInput.from_omen_market(market) for market in markets]

Expand Down Expand Up @@ -348,8 +355,12 @@ def answer_binary_market(
f"Starting to generate predictions for each scenario, iteration {iteration + 1} / {n_iterations}."
)

sub_predictions = par_generator(
items=[
sub_predictions = get_reusable_executor(
max_workers=self.max_workers,
timeout=self.worker_timeout,
).map(
process_scenarios,
[
(
self.enable_langfuse,
unique_id,
Expand All @@ -364,8 +375,6 @@ def answer_binary_market(
+ conditional_scenarios.scenarios
)
],
func=process_scenarios,
executor=DEFAULT_PROCESSPOOL_EXECUTOR,
)

scenarios_with_probs = []
Expand Down Expand Up @@ -577,7 +586,6 @@ def process_scenarios(
) -> tuple[str, AnswerWithScenario | None]:
# Needs to be a normal function outside of the class, because `lambda` and `self` aren't picklable for a process pool executor,
# and a process pool executor is required, because ChromaDB isn't thread-safe.
# Input arguments need to be passed as a single tuple, because par_generator requires a single argument.
(
enable_langfuse,
unique_id,
Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ plotly = "^5.22.0"
prediction-prophet = { git = "https://github.com/agentcoinorg/predictionprophet.git", rev = "93e052b37fa87573f5d06741ad86e184836977a0" }
transformers = "^4.43.3"
openfactverification-kongzii = "^0.2.0"
loky = "^3.4.1"

[tool.poetry.group.dev.dependencies]
langchain-chroma = "^0.1.2"
Expand Down
7 changes: 4 additions & 3 deletions scripts/replicator_stats.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,11 +2,11 @@
from pprint import pprint

import typer
from loky import get_reusable_executor
from prediction_market_agent_tooling.markets.omen.omen import OmenAgentMarket
from prediction_market_agent_tooling.markets.omen.omen_subgraph_handler import (
OmenSubgraphHandler,
)
from prediction_market_agent_tooling.tools.parallelism import par_generator
from tqdm import tqdm

from prediction_market_agent.agents.replicate_to_omen_agent.deploy import (
Expand All @@ -20,17 +20,18 @@ def main() -> None:
limit=None,
creator=REPLICATOR_ADDRESS,
)
executor = get_reusable_executor(max_workers=5, timeout=5 * 60)
bets_for_market = {
market.id: bets
for market, bets in tqdm(
par_generator(
markets,
executor.map(
lambda m: (
m,
OmenSubgraphHandler().get_bets(
market_id=m.market_maker_contract_address_checksummed
),
),
markets,
),
total=len(markets),
desc="Loading bets",
Expand Down

0 comments on commit de8626a

Please sign in to comment.