Commit

fix: update with firecrawl and openai new version
Wendong-Fan committed Sep 15, 2024
1 parent 8738dee commit bbf05f0
Showing 20 changed files with 53 additions and 102 deletions.
2 changes: 1 addition & 1 deletion .github/ISSUE_TEMPLATE/bug_report.yml
@@ -27,7 +27,7 @@ body:
attributes:
label: What version of camel are you using?
description: Run command `python3 -c 'print(__import__("camel").__version__)'` in your shell and paste the output here.
-placeholder: E.g., 0.2.0
+placeholder: E.g., 0.2.1
validations:
required: true

2 changes: 1 addition & 1 deletion README.md
@@ -119,7 +119,7 @@ conda create --name camel python=3.10
conda activate camel
# Clone github repo
-git clone -b v0.2.0 https://github.com/camel-ai/camel.git
+git clone -b v0.2.1 https://github.com/camel-ai/camel.git
# Change directory into project directory
cd camel
2 changes: 1 addition & 1 deletion camel/__init__.py
@@ -12,7 +12,7 @@
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========

-__version__ = '0.2.0'
+__version__ = '0.2.1'

__all__ = [
'__version__',
54 changes: 11 additions & 43 deletions camel/loaders/firecrawl_reader.py
@@ -49,7 +49,6 @@ def crawl(
self,
url: str,
params: Optional[Dict[str, Any]] = None,
-wait_until_done: bool = True,
**kwargs: Any,
) -> Any:
r"""Crawl a URL and all accessible subpages. Customize the crawl by
@@ -60,14 +59,12 @@ def crawl(
url (str): The URL to crawl.
params (Optional[Dict[str, Any]]): Additional parameters for the
crawl request. Defaults to `None`.
-wait_until_done (bool): Whether to wait until the crawl job is
-completed. Defaults to `True`.
**kwargs (Any): Additional keyword arguments, such as
-`poll_interval`, `idempotency_key`, etc.
+`poll_interval`, `idempotency_key`.
Returns:
-Any: The list content of the URL if `wait_until_done` is True;
-otherwise, a string job ID.
+Any: The crawl job ID or the crawl results if waiting until
+completion.
Raises:
RuntimeError: If the crawling process fails.
@@ -78,13 +75,8 @@
url=url,
params=params,
**kwargs,
-wait_until_done=wait_until_done,
)
-return (
-crawl_response
-if wait_until_done
-else crawl_response.get("jobId")
-)
+return crawl_response
except Exception as e:
raise RuntimeError(f"Failed to crawl the URL: {e}")
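A minimal usage sketch of the updated call follows, assuming the loader class is exposed as `Firecrawl` from `camel.loaders` and a FIRECRAWL_API_KEY is configured in the environment; the URL is illustrative only:

# Sketch only: crawl() no longer takes `wait_until_done` and simply returns
# the crawl response from the Firecrawl SDK.
from camel.loaders import Firecrawl

firecrawl = Firecrawl()
response = firecrawl.crawl(url="https://www.camel-ai.org")
print(response)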

@@ -103,7 +95,10 @@ def markdown_crawl(self, url: str) -> str:
"""

try:
-crawl_result = self.app.crawl_url(url=url)
+crawl_result = self.app.crawl_url(
+url,
+{'formats': ['markdown']},
+)
if not isinstance(crawl_result, list):
raise ValueError("Unexpected response format")
markdown_contents = [
@@ -180,41 +175,14 @@ def structured_scrape(self, url: str, output_schema: BaseModel) -> Dict:
data = self.app.scrape_url(
url,
{
-'extractorOptions': {
-"mode": "llm-extraction",
-"extractionPrompt": "Based on the information on "
-"the page, extract the information from the schema.",
-'extractionSchema': output_schema.model_json_schema(),
-},
-'pageOptions': {'onlyMainContent': True},
+'formats': ['extract'],
+'extract': {'schema': output_schema.model_json_schema()},
},
)
-return data.get("llm_extraction", {})
+return data.get("extract", {})
except Exception as e:
raise RuntimeError(f"Failed to perform structured scrape: {e}")

-def tidy_scrape(self, url: str) -> str:
-r"""Only return the main content of the page, excluding headers,
-navigation bars, footers, etc. in Markdown format.
-Args:
-url (str): The URL to read.
-Returns:
-str: The markdown content of the URL.
-Raises:
-RuntimeError: If the scrape process fails.
-"""
-
-try:
-scrape_result = self.app.scrape_url(
-url, {'pageOptions': {'onlyMainContent': True}}
-)
-return scrape_result.get("markdown", "")
-except Exception as e:
-raise RuntimeError(f"Failed to perform tidy scrape: {e}")

def map_site(
self, url: str, params: Optional[Dict[str, Any]] = None
) -> list:
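With `tidy_scrape` removed and scraping moved to the params-based format options (`'formats': ['markdown']` for markdown output, `'formats': ['extract']` plus an `'extract'` schema for structured output), here is a hedged sketch of `structured_scrape` usage; the schema model and URL are made up for illustration, and the class is assumed to be exposed as `Firecrawl` from `camel.loaders`:

# Sketch only: structured extraction via the new params format.
# ArticleSchema and the URL are illustrative, not part of the commit.
from pydantic import BaseModel

from camel.loaders import Firecrawl


class ArticleSchema(BaseModel):
    title: str
    summary: str


firecrawl = Firecrawl()
data = firecrawl.structured_scrape(
    url="https://www.camel-ai.org",
    output_schema=ArticleSchema,
)
print(data)  # the dict returned under Firecrawl's "extract" key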
2 changes: 1 addition & 1 deletion camel/models/mistral_model.py
@@ -93,7 +93,7 @@ def _to_openai_response(
"name": tool_call.function.name, # type: ignore[union-attr]
"arguments": tool_call.function.arguments, # type: ignore[union-attr]
},
-type=tool_call.TYPE, # type: ignore[union-attr]
+type=tool_call.type, # type: ignore[union-attr]
)
for tool_call in response.choices[0].message.tool_calls
]
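For reference, a self-contained sketch of what this hunk touches: the conversion now reads the lowercase `type` attribute from each Mistral tool call. The mock object below stands in for a real mistralai tool call and is purely illustrative:

# Sketch only: mock tool call showing the lowercase `type` attribute the
# commit switches to (tool_call.TYPE -> tool_call.type).
from types import SimpleNamespace

tool_call = SimpleNamespace(
    function=SimpleNamespace(name="search", arguments='{"query": "camel"}'),
    type="function",
)

openai_style_call = {
    "function": {
        "name": tool_call.function.name,
        "arguments": tool_call.function.arguments,
    },
    "type": tool_call.type,
}
print(openai_style_call)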
2 changes: 1 addition & 1 deletion docs/conf.py
@@ -27,7 +27,7 @@
project = 'CAMEL'
copyright = '2024, CAMEL-AI.org'
author = 'CAMEL-AI.org'
-release = '0.2.0'
+release = '0.2.1'

html_favicon = (
'https://raw.githubusercontent.com/camel-ai/camel/master/misc/favicon.png'
26 changes: 25 additions & 1 deletion docs/cookbooks/agents_message.ipynb
@@ -1,6 +1,7 @@
{
"cells": [
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "w5_pa5kKPzAE"
@@ -10,6 +11,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "1tGC1kVjPNX9"
@@ -19,6 +21,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "lSXa_rQQQzBd"
@@ -28,6 +31,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "OnmoAw2vQG8I"
@@ -43,6 +47,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "l4MP9uUNQ2kr"
@@ -53,6 +58,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "aYzY4OM6F3ay"
@@ -62,6 +68,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "kETDiaP2Rrdb"
@@ -71,6 +78,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "Cg96MkbcRtQR"
@@ -92,10 +100,11 @@
},
"outputs": [],
"source": [
-"pip install camel-ai==0.2.0"
+"pip install camel-ai==0.2.1"
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "MyTTCe3IR_Lr"
@@ -105,6 +114,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "REqzgGL9SEaD"
@@ -142,6 +152,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "LRojeqp7dP1m"
@@ -151,6 +162,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "_sJV9GFldTBh"
@@ -186,6 +198,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "MBx8Pje9ETAL"
@@ -213,6 +226,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "PCqjf_fWEdLD"
@@ -238,6 +252,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "yR5ZYr5yEkLL"
@@ -247,6 +262,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "Gfc6WNUOEktw"
@@ -282,6 +298,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "412MphuYErw_"
@@ -316,6 +333,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "TIpjz3G2E5dy"
@@ -349,6 +367,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "523x8XqLE8Qo"
@@ -382,6 +401,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "TURczNJNFA3J"
@@ -415,6 +435,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "eRhObgkxFFUi"
@@ -452,6 +473,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "seSeG3KAHbLI"
@@ -461,6 +483,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "kAyagBo0GusE"
@@ -514,6 +537,7 @@
]
},
{
+"attachments": {},
"cell_type": "markdown",
"metadata": {
"id": "HYNA7G06FJRq"
2 changes: 1 addition & 1 deletion docs/cookbooks/agents_prompting.ipynb
@@ -63,7 +63,7 @@
},
"outputs": [],
"source": [
-"!pip install camel-ai==0.2.0"
+"!pip install camel-ai==0.2.1"
]
},
{
2 changes: 1 addition & 1 deletion docs/cookbooks/agents_society.ipynb
@@ -192,7 +192,7 @@
},
"outputs": [],
"source": [
-"pip install camel-ai==0.2.0"
+"pip install camel-ai==0.2.1"
]
},
{
2 changes: 1 addition & 1 deletion docs/cookbooks/agents_tracking.ipynb
@@ -77,7 +77,7 @@
},
"outputs": [],
"source": [
-"%pip install camel-ai[all]==0.2.0\n",
+"%pip install camel-ai[all]==0.2.1\n",
"%pip install agentops==0.3.10"
]
},
2 changes: 1 addition & 1 deletion docs/cookbooks/agents_with_memory.ipynb
@@ -72,7 +72,7 @@
},
"outputs": [],
"source": [
-"pip install camel-ai[all]==0.2.0"
+"pip install camel-ai[all]==0.2.1"
]
},
{
2 changes: 1 addition & 1 deletion docs/cookbooks/agents_with_tools.ipynb
@@ -76,7 +76,7 @@
},
"outputs": [],
"source": [
-"!pip install camel-ai[all]==0.2.0"
+"!pip install camel-ai[all]==0.2.1"
]
},
{
2 changes: 1 addition & 1 deletion docs/cookbooks/create_your_first_agent.ipynb
@@ -76,7 +76,7 @@
{
"cell_type": "code",
"source": [
-"!pip install camel-ai[all]==0.2.0"
+"!pip install camel-ai[all]==0.2.1"
],
"metadata": {
"id": "UtcC3c-KVZmU"