camel-ai 0.2.42__py3-none-any.whl → 0.2.44__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/configs/__init__.py +3 -0
- camel/configs/anthropic_config.py +2 -24
- camel/configs/ppio_config.py +102 -0
- camel/configs/reka_config.py +1 -7
- camel/configs/samba_config.py +1 -7
- camel/configs/togetherai_config.py +1 -7
- camel/datasets/few_shot_generator.py +1 -0
- camel/embeddings/__init__.py +4 -0
- camel/embeddings/azure_embedding.py +119 -0
- camel/embeddings/together_embedding.py +136 -0
- camel/environments/__init__.py +3 -0
- camel/environments/multi_step.py +12 -10
- camel/environments/single_step.py +14 -2
- camel/environments/tic_tac_toe.py +518 -0
- camel/extractors/python_strategies.py +14 -5
- camel/loaders/__init__.py +2 -0
- camel/loaders/crawl4ai_reader.py +230 -0
- camel/models/__init__.py +2 -0
- camel/models/azure_openai_model.py +10 -2
- camel/models/base_model.py +111 -28
- camel/models/cohere_model.py +5 -1
- camel/models/deepseek_model.py +4 -0
- camel/models/gemini_model.py +8 -2
- camel/models/model_factory.py +3 -0
- camel/models/ollama_model.py +8 -2
- camel/models/openai_compatible_model.py +8 -2
- camel/models/openai_model.py +16 -4
- camel/models/ppio_model.py +184 -0
- camel/models/togetherai_model.py +106 -31
- camel/models/vllm_model.py +140 -57
- camel/societies/workforce/workforce.py +26 -3
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/browser_toolkit.py +11 -3
- camel/toolkits/google_calendar_toolkit.py +432 -0
- camel/toolkits/search_toolkit.py +119 -1
- camel/types/enums.py +74 -3
- camel/types/unified_model_type.py +5 -0
- camel/verifiers/python_verifier.py +93 -9
- {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/METADATA +21 -2
- {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/RECORD +43 -36
- {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.42.dist-info → camel_ai-0.2.44.dist-info}/licenses/LICENSE +0 -0
camel/loaders/crawl4ai_reader.py
ADDED

@@ -0,0 +1,230 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import asyncio
+import logging
+from typing import Any, Dict, List, Optional, Set
+
+from pydantic import BaseModel, ValidationError
+
+logger = logging.getLogger(__name__)
+
+
+class Crawl4AI:
+    r"""Class for converting websites into LLM-ready data.
+
+    This class uses asynchronous crawling with CSS selectors or LLM-based
+    extraction to convert entire websites into structured data.
+
+    References:
+        https://docs.crawl4ai.com/
+    """
+
+    def __init__(self) -> None:
+        from crawl4ai import AsyncWebCrawler
+
+        self.crawler_class = AsyncWebCrawler
+
+    async def _run_crawler(self, url: str, **kwargs) -> Any:
+        r"""Run the asynchronous web crawler on a given URL.
+
+        Args:
+            url (str): URL to crawl or scrape.
+            **kwargs: Additional keyword arguments for crawler configuration.
+
+        Returns:
+            Any: The result from the crawler.
+
+        Raises:
+            RuntimeError: If crawler execution fails.
+        """
+
+        try:
+            async with self.crawler_class() as c:
+                return await c.arun(url, **kwargs)
+        except Exception as e:
+            logger.error("Crawler run failed: %s", e)
+            raise RuntimeError(f"Crawler run failed: {e}") from e
+
+    async def crawl(
+        self,
+        start_url: str,
+        max_depth: int = 1,
+        extraction_strategy=None,
+        **kwargs,
+    ) -> List[Dict[str, Any]]:
+        r"""Crawl a URL and its subpages using breadth-first search.
+
+        Args:
+            start_url (str): URL to start crawling from.
+            max_depth (int, optional): Maximum depth of links to follow
+                (default: :obj:`1`)
+            extraction_strategy (ExtractionStrategy, optional): Strategy
+                for data extraction. (default: :obj:`None`)
+            **kwargs: Additional arguments for crawler configuration.
+
+        Returns:
+            List[Dict[str, Any]]: List of crawled page results.
+
+        Raises:
+            RuntimeError: If an error occurs during crawling.
+        """
+
+        all_results: List[Dict[str, Any]] = []
+        visited_urls: Set[str] = set()
+        queue: asyncio.Queue = asyncio.Queue()
+
+        await queue.put((start_url, 1))
+        visited_urls.add(start_url)
+
+        while not queue.empty():
+            url, depth = await queue.get()
+            try:
+                result = await self._run_crawler(
+                    url, extraction_strategy=extraction_strategy, **kwargs
+                )
+                all_results.append(
+                    {
+                        "url": url,
+                        "raw_result": result,
+                        "markdown": result.markdown,
+                        "cleaned_html": result.cleaned_html,
+                        "links": result.links,
+                    }
+                )
+
+                if depth < max_depth and result.links:
+                    for _, links in result.links.items():
+                        for link in links:
+                            if (
+                                'href' in link
+                                and link['href'] not in visited_urls
+                            ):
+                                visited_urls.add(link['href'])
+                                await queue.put((link['href'], depth + 1))
+
+            except Exception as e:
+                logger.error("Error crawling %s: %s", url, e)
+                raise RuntimeError(f"Error crawling {url}: {e}") from e
+
+            queue.task_done()
+
+        await queue.join()
+
+        return all_results
+
+    async def scrape(
+        self,
+        url: str,
+        extraction_strategy=None,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        r"""Scrape a single URL using CSS or LLM-based extraction.
+
+        Args:
+            url (str): URL to scrape.
+            extraction_strategy (ExtractionStrategy, optional): Extraction
+                strategy to use. (default: :obj:`None`)
+            **kwargs: Additional arguments for crawler configuration.
+
+        Returns:
+            Dict[str, Any]: Dictionary containing scraped data such as markdown
+                and HTML content.
+
+        Raises:
+            RuntimeError: If scraping fails.
+        """
+
+        result = await self._run_crawler(
+            url, extraction_strategy=extraction_strategy, **kwargs
+        )
+        return {
+            "url": url,
+            "raw_result": result,
+            "markdown": result.markdown,
+            "cleaned_html": result.cleaned_html,
+            "links": result.links,
+        }
+
+    async def structured_scrape(
+        self,
+        url: str,
+        response_format: BaseModel,
+        api_key: Optional[str] = None,
+        llm_provider: str = 'ollama/llama3',
+        **kwargs,
+    ) -> Any:
+        r"""Extract structured data from a URL using an LLM.
+
+        Args:
+            url (str): URL to scrape.
+            response_format (BaseModel): Model defining the expected output
+                schema.
+            api_key (str, optional): API key for the LLM provider
+                (default: :obj:`None`).
+            llm_provider (str, optional): Identifier for the LLM provider
+                (default: :obj:`'ollama/llama3'`).
+            **kwargs: Additional arguments for crawler configuration.
+
+        Returns:
+            Any: Crawl result containing the extracted data
+                structured according to the schema.
+
+        Raises:
+            ValidationError: If extracted data does not match the schema.
+            RuntimeError: If extraction fails.
+        """
+
+        from crawl4ai.extraction_strategy import (
+            LLMExtractionStrategy,
+        )
+
+        extraction_strategy = LLMExtractionStrategy(
+            provider=llm_provider,
+            api_token=api_key,
+            schema=response_format.model_json_schema(),
+            extraction_type="schema",
+            instruction="Extract the data according to the schema.",
+        )
+
+        try:
+            return await self._run_crawler(
+                url, extraction_strategy=extraction_strategy, **kwargs
+            )
+        except ValidationError as e:
+            raise ValidationError(
+                f"Extracted data does not match schema: {e}"
+            ) from e
+        except Exception as e:
+            raise RuntimeError(e) from e
+
+    async def map_site(self, start_url: str, **kwargs) -> List[str]:
+        r"""Map a website by extracting all accessible URLs.
+
+        Args:
+            start_url (str): Starting URL to map.
+            **kwargs: Additional configuration arguments.
+
+        Returns:
+            List[str]: List of URLs discovered on the website.
+
+        Raises:
+            RuntimeError: If mapping fails.
+        """
+
+        try:
+            result = await self.crawl(start_url, **kwargs)
+            return [page["url"] for page in result]
+        except Exception as e:
+            raise RuntimeError(f"Failed to map url: {e}") from e
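For orientation, a minimal usage sketch of the new loader. It assumes the crawl4ai package is installed and that Crawl4AI is exported from camel.loaders (the camel/loaders/__init__.py +2 entry above suggests this, but the export lines are not shown in this diff):

import asyncio

from camel.loaders import Crawl4AI  # assumed export location


async def main() -> None:
    loader = Crawl4AI()

    # Scrape one page: the dict carries the raw crawl4ai result plus
    # markdown, cleaned_html, and the links found on the page.
    page = await loader.scrape("https://docs.crawl4ai.com/")
    print(sorted(page.keys()))

    # Breadth-first crawl, following discovered links one level deeper.
    pages = await loader.crawl("https://docs.crawl4ai.com/", max_depth=2)
    print(f"crawled {len(pages)} page(s)")


asyncio.run(main())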
camel/models/__init__.py
CHANGED

@@ -35,6 +35,7 @@ from .openai_audio_models import OpenAIAudioModels
 from .openai_compatible_model import OpenAICompatibleModel
 from .openai_model import OpenAIModel
 from .openrouter_model import OpenRouterModel
+from .ppio_model import PPIOModel
 from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
@@ -72,6 +73,7 @@ __all__ = [
     'RekaModel',
     'SambaModel',
     'TogetherAIModel',
+    'PPIOModel',
     'YiModel',
     'QwenModel',
     'ModelProcessingError',
camel/models/azure_openai_model.py
CHANGED

@@ -219,9 +219,13 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)

         request_config["response_format"] = response_format
+        # Remove stream from request config since OpenAI does not support it
+        # with structured response
         request_config.pop("stream", None)
         if tools is not None:
             request_config["tools"] = tools
@@ -238,9 +242,13 @@ class AzureOpenAIModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)

         request_config["response_format"] = response_format
+        # Remove stream from request config since OpenAI does not support it
+        # with structured response
         request_config.pop("stream", None)
         if tools is not None:
             request_config["tools"] = tools
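The recurring change in this release is swapping a shallow copy of model_config_dict for copy.deepcopy. A small standalone sketch of why that matters, with a hypothetical config: a shallow copy shares nested objects, so per-request mutations would leak back into the backend's stored config.

import copy

# Hypothetical nested model config.
model_config_dict = {"temperature": 0.2, "extra_body": {"stream": True}}

shallow = model_config_dict.copy()
shallow["extra_body"].pop("stream")  # mutates the shared nested dict
assert "stream" not in model_config_dict["extra_body"]

model_config_dict["extra_body"]["stream"] = True
deep = copy.deepcopy(model_config_dict)
deep["extra_body"].pop("stream")  # isolated: the original is untouched
assert model_config_dict["extra_body"]["stream"] is True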
camel/models/base_model.py
CHANGED

@@ -108,42 +108,125 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
     ) -> List[OpenAIMessage]:
         r"""Preprocess messages before sending to model API.
         Removes thinking content from assistant and user messages.
+        Automatically formats messages for parallel tool calls if tools are
+        detected.

         Args:
-            messages (List[OpenAIMessage]): Original messages
+            messages (List[OpenAIMessage]): Original messages.

         Returns:
             List[OpenAIMessage]: Preprocessed messages
         """
-
-
-
-
-
+        # Process all messages in a single pass
+        processed_messages = []
+        tool_calls_buffer: List[OpenAIMessage] = []
+        tool_responses_buffer: Dict[str, OpenAIMessage] = {}
+        has_tool_calls = False
+
+        for msg in messages:
+            # Remove thinking content if needed
+            role = msg.get('role')
+            content = msg.get('content')
+            if role in ['assistant', 'user'] and isinstance(content, str):
+                if '<think>' in content and '</think>' in content:
+                    content = re.sub(
+                        r'<think>.*?</think>', '', content, flags=re.DOTALL
+                    ).strip()
+                processed_msg = dict(msg)
+                processed_msg['content'] = content
+            else:
+                processed_msg = dict(msg)
+
+            # Check and track tool calls/responses
+            is_tool_call = (
+                processed_msg.get("role") == "assistant"
+                and "tool_calls" in processed_msg
+            )
+            is_tool_response = (
+                processed_msg.get("role") == "tool"
+                and "tool_call_id" in processed_msg
             )

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if is_tool_call or is_tool_response:
+                has_tool_calls = True
+
+            # Store the processed message for later formatting if needed
+            processed_messages.append(processed_msg)
+
+        # If no tool calls detected, return the processed messages
+        if not has_tool_calls:
+            return processed_messages  # type: ignore[return-value]
+
+        # Format messages for parallel tool calls
+        formatted_messages = []
+        tool_calls_buffer = []
+        tool_responses_buffer = {}
+
+        for msg in processed_messages:  # type: ignore[assignment]
+            # If this is an assistant message with tool calls, add it to the
+            # buffer
+            if msg.get("role") == "assistant" and "tool_calls" in msg:
+                tool_calls_buffer.append(msg)
+                continue
+
+            # If this is a tool response, add it to the responses buffer
+            if msg.get("role") == "tool" and "tool_call_id" in msg:
+                tool_call_id = msg.get("tool_call_id")
+                if isinstance(tool_call_id, str):
+                    tool_responses_buffer[tool_call_id] = msg
+                continue
+
+            # Process any complete tool call + responses before adding regular
+            # messages
+            if tool_calls_buffer and tool_responses_buffer:
+                # Add the assistant message with tool calls
+                assistant_msg = tool_calls_buffer[0]
+                formatted_messages.append(assistant_msg)
+
+                # Add all matching tool responses for this assistant message
+                tool_calls = assistant_msg.get("tool_calls", [])
+                if isinstance(tool_calls, list):
+                    for tool_call in tool_calls:
+                        tool_call_id = tool_call.get("id")
+                        if (
+                            isinstance(tool_call_id, str)
+                            and tool_call_id in tool_responses_buffer
+                        ):
+                            formatted_messages.append(
+                                tool_responses_buffer[tool_call_id]
+                            )
+                            del tool_responses_buffer[tool_call_id]
+
+                tool_calls_buffer.pop(0)
+
+            # Add the current regular message
+            formatted_messages.append(msg)
+
+        # Process any remaining buffered tool calls and responses
+        while tool_calls_buffer:
+            assistant_msg = tool_calls_buffer[0]
+            formatted_messages.append(assistant_msg)
+
+            tool_calls = assistant_msg.get("tool_calls", [])
+            if isinstance(tool_calls, list):
+                for tool_call in tool_calls:
+                    tool_call_id = tool_call.get("id")
+                    if (
+                        isinstance(tool_call_id, str)
+                        and tool_call_id in tool_responses_buffer
+                    ):
+                        formatted_messages.append(
+                            tool_responses_buffer[tool_call_id]
+                        )
+                        del tool_responses_buffer[tool_call_id]
+
+            tool_calls_buffer.pop(0)
+
+        # Add any remaining tool responses
+        for response in tool_responses_buffer.values():
+            formatted_messages.append(response)
+
+        return formatted_messages

     @abstractmethod
     def _run(
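To illustrate the new parallel-tool-call formatting, here is a hand-built message list in OpenAI chat format (hypothetical data, not CAMEL internals) and the ordering the method now guarantees: each tool response is regrouped directly after the assistant message that issued it, in tool_calls order.

# Input order: the two tool responses arrive out of order.
messages = [
    {"role": "user", "content": "Weather and local time in Paris?"},
    {
        "role": "assistant",
        "tool_calls": [
            {"id": "call_1", "type": "function",
             "function": {"name": "get_weather", "arguments": "{}"}},
            {"id": "call_2", "type": "function",
             "function": {"name": "get_time", "arguments": "{}"}},
        ],
    },
    {"role": "tool", "tool_call_id": "call_2", "content": "14:05"},
    {"role": "tool", "tool_call_id": "call_1", "content": "18 C"},
]

# Expected output order after preprocessing: user message, assistant
# message, then the call_1 response followed by the call_2 response,
# matching the order of the assistant's tool_calls list. Any
# <think>...</think> spans in user/assistant content are also stripped.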
camel/models/cohere_model.py
CHANGED

@@ -227,7 +227,11 @@ class CohereModel(BaseModelBackend):
         response_format: Optional[Type[BaseModel]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Dict[str, Any]:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove strict from each tool's function parameters since Cohere does
+        # not support them
         if tools:
             for tool in tools:
                 function_dict = tool.get('function', {})
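A sketch of the cleanup this block implies for each tool definition. The tool dict below is hypothetical, and only the removal of "strict" is implied by the diff's comment; the exact key handling lives in lines not shown here.

# Hypothetical tool in OpenAI function-calling format.
tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "strict": True,  # accepted by OpenAI, not by Cohere
        "parameters": {"type": "object", "properties": {}},
    },
}

function_dict = tool.get('function', {})
function_dict.pop("strict", None)  # drop the unsupported key in place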
camel/models/deepseek_model.py
CHANGED

@@ -145,7 +145,11 @@ class DeepSeekModel(BaseModelBackend):
             for key, value in request_config.items()
             if key not in REASONSER_UNSUPPORTED_PARAMS
         }
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove strict from each tool's function parameters since DeepSeek
+        # does not support them
         if tools:
             for tool in tools:
                 function_dict = tool.get('function', {})
camel/models/gemini_model.py
CHANGED

@@ -172,8 +172,11 @@ class GeminiModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove strict and anyOf from each tool's function parameters since
+        # Gemini does not support them
         if tools:
             for tool in tools:
                 function_dict = tool.get('function', {})
@@ -209,8 +212,11 @@ class GeminiModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove strict and anyOf from each tool's function parameters since
+        # Gemini does not support them
         if tools:
             for tool in tools:
                 function_dict = tool.get('function', {})
camel/models/model_factory.py
CHANGED

@@ -34,6 +34,7 @@ from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.models.openai_model import OpenAIModel
 from camel.models.openrouter_model import OpenRouterModel
+from camel.models.ppio_model import PPIOModel
 from camel.models.qwen_model import QwenModel
 from camel.models.reka_model import RekaModel
 from camel.models.samba_model import SambaModel
@@ -145,6 +146,8 @@ class ModelFactory:
             model_class = QwenModel
         elif model_platform.is_deepseek:
             model_class = DeepSeekModel
+        elif model_platform.is_ppio:
+            model_class = PPIOModel
         elif model_platform.is_internlm and model_type.is_internlm:
             model_class = InternLMModel
         elif model_platform.is_moonshot and model_type.is_moonshot:
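With the factory wired up, creating a PPIO-backed model presumably follows the same pattern as the other platforms. A hedged sketch; ModelPlatformType.PPIO and the model identifier below are assumptions based on the camel/types/enums.py changes this diff lists but does not show in full:

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Hypothetical model identifier; a PPIO API key is presumably expected
# in the environment, per the usual CAMEL backend conventions.
model = ModelFactory.create(
    model_platform=ModelPlatformType.PPIO,
    model_type="deepseek/deepseek-r1-community",
)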
camel/models/ollama_model.py
CHANGED

@@ -250,8 +250,11 @@ class OllamaModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove stream from request_config since Ollama does not support it
+        # when structured response is used
         request_config["response_format"] = response_format
         request_config.pop("stream", None)
         if tools is not None:
@@ -269,8 +272,11 @@ class OllamaModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove stream from request_config since Ollama does not support it
+        # when structured response is used
         request_config["response_format"] = response_format
         request_config.pop("stream", None)
         if tools is not None:
camel/models/openai_compatible_model.py
CHANGED

@@ -177,8 +177,11 @@ class OpenAICompatibleModel(BaseModelBackend):
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove stream from request_config since OpenAI does not support it
+        # when structured response is used
         request_config["response_format"] = response_format
         request_config.pop("stream", None)
         if tools is not None:
@@ -196,8 +199,11 @@
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy

+        request_config = copy.deepcopy(self.model_config_dict)
+        # Remove stream from request_config since OpenAI does not support it
+        # when structured response is used
         request_config["response_format"] = response_format
         request_config.pop("stream", None)
         if tools is not None:
camel/models/openai_model.py
CHANGED

@@ -247,7 +247,9 @@ class OpenAIModel(BaseModelBackend):
         messages: List[OpenAIMessage],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)

         if tools:
             request_config["tools"] = tools
@@ -265,7 +267,9 @@
         messages: List[OpenAIMessage],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)

         if tools:
             request_config["tools"] = tools
@@ -284,9 +288,13 @@
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)

         request_config["response_format"] = response_format
+        # Remove stream from request config since OpenAI does not support it
+        # with structured response
         request_config.pop("stream", None)
         if tools is not None:
             request_config["tools"] = tools
@@ -305,9 +313,13 @@
         response_format: Type[BaseModel],
         tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
-
+        import copy
+
+        request_config = copy.deepcopy(self.model_config_dict)

         request_config["response_format"] = response_format
+        # Remove stream from request config since OpenAI does not support it
+        # with structured response
         request_config.pop("stream", None)
         if tools is not None:
             request_config["tools"] = tools
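Across backends the structured-output path now follows one pattern: deep-copy the stored config, attach response_format, and drop stream, since streaming is not supported together with structured responses. A standalone sketch of that per-request transformation, with hypothetical config values:

import copy

from pydantic import BaseModel


class CityInfo(BaseModel):
    name: str
    population: int


model_config_dict = {"temperature": 0.0, "stream": True}

request_config = copy.deepcopy(model_config_dict)
request_config["response_format"] = CityInfo
request_config.pop("stream", None)  # per-request only

# The backend's stored config keeps streaming enabled for normal runs.
assert model_config_dict["stream"] is True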