camel-ai 0.2.69a7__py3-none-any.whl → 0.2.70__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic. Click here for more details.
- camel/__init__.py +1 -1
- camel/societies/role_playing.py +26 -28
- camel/societies/workforce/workforce.py +226 -118
- camel/societies/workforce/workforce_logger.py +37 -23
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/pgvector.py +349 -0
- camel/toolkits/file_write_toolkit.py +21 -7
- camel/toolkits/terminal_toolkit.py +11 -4
- camel/types/enums.py +3 -0
- {camel_ai-0.2.69a7.dist-info → camel_ai-0.2.70.dist-info}/METADATA +5 -1
- {camel_ai-0.2.69a7.dist-info → camel_ai-0.2.70.dist-info}/RECORD +14 -13
- {camel_ai-0.2.69a7.dist-info → camel_ai-0.2.70.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.69a7.dist-info → camel_ai-0.2.70.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/societies/role_playing.py
CHANGED
|
@@ -556,13 +556,12 @@ class RolePlaying:
|
|
|
556
556
|
)
|
|
557
557
|
user_msg = self._reduce_message_options(user_response.msgs)
|
|
558
558
|
|
|
559
|
-
# To prevent recording
|
|
560
|
-
#
|
|
561
|
-
#
|
|
562
|
-
|
|
563
|
-
|
|
564
|
-
|
|
565
|
-
):
|
|
559
|
+
# To prevent recording missing messages: ChatAgent.step automatically
|
|
560
|
+
# saves the response to memory only when a single message is returned.
|
|
561
|
+
# When multi-response support is enabled (n > 1), it is the caller's
|
|
562
|
+
# responsibility to record the selected message. Therefore, we record
|
|
563
|
+
# it here after choosing one message via `_reduce_message_options()`.
|
|
564
|
+
if self._is_multi_response(self.user_agent):
|
|
566
565
|
self.user_agent.record_message(user_msg)
|
|
567
566
|
|
|
568
567
|
assistant_response = self.assistant_agent.step(user_msg)
|
|
@@ -579,13 +578,7 @@ class RolePlaying:
|
|
|
579
578
|
)
|
|
580
579
|
assistant_msg = self._reduce_message_options(assistant_response.msgs)
|
|
581
580
|
|
|
582
|
-
|
|
583
|
-
# step and once in role play), and the model generates only one
|
|
584
|
-
# response when multi-response support is enabled.
|
|
585
|
-
if (
|
|
586
|
-
'n' in self.assistant_agent.model_backend.model_config_dict.keys()
|
|
587
|
-
and self.assistant_agent.model_backend.model_config_dict['n'] > 1
|
|
588
|
-
):
|
|
581
|
+
if self._is_multi_response(self.assistant_agent):
|
|
589
582
|
self.assistant_agent.record_message(assistant_msg)
|
|
590
583
|
|
|
591
584
|
return (
|
|
@@ -639,13 +632,7 @@ class RolePlaying:
|
|
|
639
632
|
)
|
|
640
633
|
user_msg = self._reduce_message_options(user_response.msgs)
|
|
641
634
|
|
|
642
|
-
|
|
643
|
-
# step and once in role play), and the model generates only one
|
|
644
|
-
# response when multi-response support is enabled.
|
|
645
|
-
if (
|
|
646
|
-
'n' in self.user_agent.model_backend.model_config_dict.keys()
|
|
647
|
-
and self.user_agent.model_backend.model_config_dict['n'] > 1
|
|
648
|
-
):
|
|
635
|
+
if self._is_multi_response(self.user_agent):
|
|
649
636
|
self.user_agent.record_message(user_msg)
|
|
650
637
|
|
|
651
638
|
assistant_response = await self.assistant_agent.astep(user_msg)
|
|
@@ -662,13 +649,7 @@ class RolePlaying:
|
|
|
662
649
|
)
|
|
663
650
|
assistant_msg = self._reduce_message_options(assistant_response.msgs)
|
|
664
651
|
|
|
665
|
-
|
|
666
|
-
# step and once in role play), and the model generates only one
|
|
667
|
-
# response when multi-response support is enabled.
|
|
668
|
-
if (
|
|
669
|
-
'n' in self.assistant_agent.model_backend.model_config_dict.keys()
|
|
670
|
-
and self.assistant_agent.model_backend.model_config_dict['n'] > 1
|
|
671
|
-
):
|
|
652
|
+
if self._is_multi_response(self.assistant_agent):
|
|
672
653
|
self.assistant_agent.record_message(assistant_msg)
|
|
673
654
|
|
|
674
655
|
return (
|
|
@@ -730,3 +711,20 @@ class RolePlaying:
|
|
|
730
711
|
new_instance.critic = self.critic.clone(with_memory)
|
|
731
712
|
|
|
732
713
|
return new_instance
|
|
714
|
+
|
|
715
|
+
def _is_multi_response(self, agent: ChatAgent) -> bool:
|
|
716
|
+
r"""Checks if the given agent supports multi-response.
|
|
717
|
+
|
|
718
|
+
Args:
|
|
719
|
+
agent (ChatAgent): The agent to check for multi-response support.
|
|
720
|
+
|
|
721
|
+
Returns:
|
|
722
|
+
bool: True if the agent supports multi-response, False otherwise.
|
|
723
|
+
"""
|
|
724
|
+
if (
|
|
725
|
+
'n' in agent.model_backend.model_config_dict.keys()
|
|
726
|
+
and agent.model_backend.model_config_dict['n'] is not None
|
|
727
|
+
and agent.model_backend.model_config_dict['n'] > 1
|
|
728
|
+
):
|
|
729
|
+
return True
|
|
730
|
+
return False
|
|
@@ -111,27 +111,24 @@ class Workforce(BaseNode):
|
|
|
111
111
|
children (Optional[List[BaseNode]], optional): List of child nodes
|
|
112
112
|
under this node. Each child node can be a worker node or
|
|
113
113
|
another workforce node. (default: :obj:`None`)
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
(default: :obj:`None`
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
available parameters.
|
|
133
|
-
(default: :obj:`None` - creates workers with SearchToolkit,
|
|
134
|
-
CodeExecutionToolkit, and ThinkingToolkit)
|
|
114
|
+
coordinator_agent (Optional[ChatAgent], optional): A custom coordinator
|
|
115
|
+
agent instance for task assignment and worker creation. If
|
|
116
|
+
provided, the workforce will create a new agent using this agent's
|
|
117
|
+
model configuration but with the required system message and
|
|
118
|
+
functionality.
|
|
119
|
+
If None, a default agent will be created using DEFAULT model
|
|
120
|
+
settings. (default: :obj:`None`)
|
|
121
|
+
task_agent (Optional[ChatAgent], optional): A custom task planning
|
|
122
|
+
agent instance for task decomposition and composition. If
|
|
123
|
+
provided, the workforce will create a new agent using this agent's
|
|
124
|
+
model configuration but with the required system message and tools
|
|
125
|
+
(TaskPlanningToolkit). If None, a default agent will be created
|
|
126
|
+
using DEFAULT model settings. (default: :obj:`None`)
|
|
127
|
+
new_worker_agent (Optional[ChatAgent], optional): A template agent for
|
|
128
|
+
workers created dynamically at runtime when existing workers cannot
|
|
129
|
+
handle failed tasks. If None, workers will be created with default
|
|
130
|
+
settings including SearchToolkit, CodeExecutionToolkit, and
|
|
131
|
+
ThinkingToolkit. (default: :obj:`None`)
|
|
135
132
|
graceful_shutdown_timeout (float, optional): The timeout in seconds
|
|
136
133
|
for graceful shutdown when a task fails 3 times. During this
|
|
137
134
|
period, the workforce remains active for debugging.
|
|
@@ -147,40 +144,59 @@ class Workforce(BaseNode):
|
|
|
147
144
|
(default: :obj:`False`)
|
|
148
145
|
|
|
149
146
|
Example:
|
|
150
|
-
>>> # Configure with custom model and shared memory
|
|
151
147
|
>>> import asyncio
|
|
148
|
+
>>> from camel.agents import ChatAgent
|
|
149
|
+
>>> from camel.models import ModelFactory
|
|
150
|
+
>>> from camel.types import ModelPlatformType, ModelType
|
|
151
|
+
>>> from camel.tasks import Task
|
|
152
|
+
>>>
|
|
153
|
+
>>> # Simple workforce with default agents
|
|
154
|
+
>>> workforce = Workforce("Research Team")
|
|
155
|
+
>>>
|
|
156
|
+
>>> # Workforce with custom model configuration
|
|
152
157
|
>>> model = ModelFactory.create(
|
|
153
|
-
... ModelPlatformType.OPENAI, ModelType.GPT_4O
|
|
158
|
+
... ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O
|
|
154
159
|
... )
|
|
160
|
+
>>> coordinator_agent = ChatAgent(model=model)
|
|
161
|
+
>>> task_agent = ChatAgent(model=model)
|
|
162
|
+
>>>
|
|
155
163
|
>>> workforce = Workforce(
|
|
156
164
|
... "Research Team",
|
|
157
|
-
...
|
|
158
|
-
...
|
|
159
|
-
... share_memory=True # Enable shared memory
|
|
165
|
+
... coordinator_agent=coordinator_agent,
|
|
166
|
+
... task_agent=task_agent,
|
|
160
167
|
... )
|
|
161
168
|
>>>
|
|
162
169
|
>>> # Process a task
|
|
163
170
|
>>> async def main():
|
|
164
171
|
... task = Task(content="Research AI trends", id="1")
|
|
165
|
-
... result = workforce.
|
|
172
|
+
... result = await workforce.process_task_async(task)
|
|
166
173
|
... return result
|
|
167
|
-
>>>
|
|
174
|
+
>>>
|
|
175
|
+
>>> result_task = asyncio.run(main())
|
|
176
|
+
|
|
177
|
+
Note:
|
|
178
|
+
When custom coordinator_agent or task_agent are provided, the workforce
|
|
179
|
+
will preserve the user's system message and append the required
|
|
180
|
+
workforce coordination or task planning instructions to it. This
|
|
181
|
+
ensures both the user's intent is preserved and proper workforce
|
|
182
|
+
functionality is maintained. All other agent configurations (model,
|
|
183
|
+
memory, tools, etc.) will also be preserved.
|
|
168
184
|
"""
|
|
169
185
|
|
|
170
186
|
def __init__(
|
|
171
187
|
self,
|
|
172
188
|
description: str,
|
|
173
189
|
children: Optional[List[BaseNode]] = None,
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
190
|
+
coordinator_agent: Optional[ChatAgent] = None,
|
|
191
|
+
task_agent: Optional[ChatAgent] = None,
|
|
192
|
+
new_worker_agent: Optional[ChatAgent] = None, # TODO: use MCP Agent
|
|
177
193
|
graceful_shutdown_timeout: float = 15.0,
|
|
178
194
|
share_memory: bool = False,
|
|
179
195
|
) -> None:
|
|
180
196
|
super().__init__(description)
|
|
181
197
|
self._child_listening_tasks: Deque[asyncio.Task] = deque()
|
|
182
198
|
self._children = children or []
|
|
183
|
-
self.
|
|
199
|
+
self.new_worker_agent = new_worker_agent
|
|
184
200
|
self.graceful_shutdown_timeout = graceful_shutdown_timeout
|
|
185
201
|
self.share_memory = share_memory
|
|
186
202
|
self.metrics_logger = WorkforceLogger(workforce_id=self.node_id)
|
|
@@ -214,58 +230,72 @@ class Workforce(BaseNode):
|
|
|
214
230
|
role=role_or_desc,
|
|
215
231
|
)
|
|
216
232
|
|
|
217
|
-
#
|
|
218
|
-
if coordinator_agent_kwargs is None:
|
|
219
|
-
logger.warning(
|
|
220
|
-
"No coordinator_agent_kwargs provided. Using default "
|
|
221
|
-
"ChatAgent settings (ModelPlatformType.DEFAULT, "
|
|
222
|
-
"ModelType.DEFAULT). To customize the coordinator agent "
|
|
223
|
-
"that assigns tasks and handles failures, pass a dictionary "
|
|
224
|
-
"with ChatAgent parameters, e.g.: {'model': your_model, "
|
|
225
|
-
"'tools': your_tools, 'token_limit': 8000}. See ChatAgent "
|
|
226
|
-
"documentation for all available options."
|
|
227
|
-
)
|
|
228
|
-
if task_agent_kwargs is None:
|
|
229
|
-
logger.warning(
|
|
230
|
-
"No task_agent_kwargs provided. Using default ChatAgent "
|
|
231
|
-
"settings (ModelPlatformType.DEFAULT, ModelType.DEFAULT). "
|
|
232
|
-
"To customize the task planning agent that "
|
|
233
|
-
"decomposes/composes tasks, pass a dictionary with "
|
|
234
|
-
"ChatAgent parameters, e.g.: {'model': your_model, "
|
|
235
|
-
"'token_limit': 16000}. See ChatAgent documentation for "
|
|
236
|
-
"all available options."
|
|
237
|
-
)
|
|
238
|
-
if new_worker_agent_kwargs is None:
|
|
239
|
-
logger.warning(
|
|
240
|
-
"No new_worker_agent_kwargs provided. Workers created at "
|
|
241
|
-
"runtime will use default ChatAgent settings with "
|
|
242
|
-
"SearchToolkit, CodeExecutionToolkit, and ThinkingToolkit. "
|
|
243
|
-
"To customize runtime worker creation, pass a dictionary "
|
|
244
|
-
"with ChatAgent parameters, e.g.: {'model': your_model, "
|
|
245
|
-
"'tools': your_tools}. See ChatAgent documentation for all "
|
|
246
|
-
"available options."
|
|
247
|
-
)
|
|
248
|
-
|
|
249
|
-
if self.share_memory:
|
|
250
|
-
logger.info(
|
|
251
|
-
"Shared memory enabled. All agents will share their complete "
|
|
252
|
-
"conversation history and function-calling trajectory for "
|
|
253
|
-
"better context continuity during task handoffs."
|
|
254
|
-
)
|
|
255
|
-
|
|
233
|
+
# Set up coordinator agent with default system message
|
|
256
234
|
coord_agent_sys_msg = BaseMessage.make_assistant_message(
|
|
257
235
|
role_name="Workforce Manager",
|
|
258
|
-
content="You are coordinating a group of workers. A worker
|
|
259
|
-
"a group of agents or a single agent. Each worker is "
|
|
236
|
+
content="You are coordinating a group of workers. A worker "
|
|
237
|
+
"can be a group of agents or a single agent. Each worker is "
|
|
260
238
|
"created to solve a specific kind of task. Your job "
|
|
261
239
|
"includes assigning tasks to a existing worker, creating "
|
|
262
240
|
"a new worker for a task, etc.",
|
|
263
241
|
)
|
|
264
|
-
self.coordinator_agent = ChatAgent(
|
|
265
|
-
coord_agent_sys_msg,
|
|
266
|
-
**(coordinator_agent_kwargs or {}),
|
|
267
|
-
)
|
|
268
242
|
|
|
243
|
+
if coordinator_agent is None:
|
|
244
|
+
logger.warning(
|
|
245
|
+
"No coordinator_agent provided. Using default "
|
|
246
|
+
"ChatAgent settings (ModelPlatformType.DEFAULT, "
|
|
247
|
+
"ModelType.DEFAULT) with default system message."
|
|
248
|
+
)
|
|
249
|
+
self.coordinator_agent = ChatAgent(coord_agent_sys_msg)
|
|
250
|
+
else:
|
|
251
|
+
logger.info(
|
|
252
|
+
"Custom coordinator_agent provided. Preserving user's "
|
|
253
|
+
"system message and appending workforce coordination "
|
|
254
|
+
"instructions to ensure proper functionality."
|
|
255
|
+
)
|
|
256
|
+
|
|
257
|
+
if coordinator_agent.system_message is not None:
|
|
258
|
+
user_sys_msg_content = coordinator_agent.system_message.content
|
|
259
|
+
combined_content = (
|
|
260
|
+
f"{user_sys_msg_content}\n\n"
|
|
261
|
+
f"{coord_agent_sys_msg.content}"
|
|
262
|
+
)
|
|
263
|
+
combined_sys_msg = BaseMessage.make_assistant_message(
|
|
264
|
+
role_name=coordinator_agent.system_message.role_name,
|
|
265
|
+
content=combined_content,
|
|
266
|
+
)
|
|
267
|
+
else:
|
|
268
|
+
combined_sys_msg = coord_agent_sys_msg
|
|
269
|
+
|
|
270
|
+
# Create a new agent with the provided agent's configuration
|
|
271
|
+
# but with the combined system message
|
|
272
|
+
self.coordinator_agent = ChatAgent(
|
|
273
|
+
system_message=combined_sys_msg,
|
|
274
|
+
model=coordinator_agent.model_backend,
|
|
275
|
+
memory=coordinator_agent.memory,
|
|
276
|
+
message_window_size=getattr(
|
|
277
|
+
coordinator_agent.memory, "window_size", None
|
|
278
|
+
),
|
|
279
|
+
token_limit=getattr(
|
|
280
|
+
coordinator_agent.memory.get_context_creator(),
|
|
281
|
+
"token_limit",
|
|
282
|
+
None,
|
|
283
|
+
),
|
|
284
|
+
output_language=coordinator_agent.output_language,
|
|
285
|
+
tools=[
|
|
286
|
+
tool.func
|
|
287
|
+
for tool in coordinator_agent._internal_tools.values()
|
|
288
|
+
],
|
|
289
|
+
external_tools=[
|
|
290
|
+
schema
|
|
291
|
+
for schema in coordinator_agent._external_tool_schemas.values() # noqa: E501
|
|
292
|
+
],
|
|
293
|
+
response_terminators=coordinator_agent.response_terminators,
|
|
294
|
+
max_iteration=coordinator_agent.max_iteration,
|
|
295
|
+
stop_event=coordinator_agent.stop_event,
|
|
296
|
+
)
|
|
297
|
+
|
|
298
|
+
# Set up task agent with default system message and required tools
|
|
269
299
|
task_sys_msg = BaseMessage.make_assistant_message(
|
|
270
300
|
role_name="Task Planner",
|
|
271
301
|
content="You are going to compose and decompose tasks. Keep "
|
|
@@ -275,13 +305,83 @@ class Workforce(BaseNode):
|
|
|
275
305
|
"of agents. This ensures efficient execution by minimizing "
|
|
276
306
|
"context switching between agents.",
|
|
277
307
|
)
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
308
|
+
task_planning_tools = TaskPlanningToolkit().get_tools()
|
|
309
|
+
|
|
310
|
+
if task_agent is None:
|
|
311
|
+
logger.warning(
|
|
312
|
+
"No task_agent provided. Using default ChatAgent "
|
|
313
|
+
"settings (ModelPlatformType.DEFAULT, ModelType.DEFAULT) "
|
|
314
|
+
"with default system message and TaskPlanningToolkit."
|
|
315
|
+
)
|
|
316
|
+
self.task_agent = ChatAgent(
|
|
317
|
+
task_sys_msg,
|
|
318
|
+
tools=TaskPlanningToolkit().get_tools(), # type: ignore[arg-type]
|
|
319
|
+
)
|
|
320
|
+
else:
|
|
321
|
+
logger.info(
|
|
322
|
+
"Custom task_agent provided. Preserving user's "
|
|
323
|
+
"system message and appending task planning "
|
|
324
|
+
"instructions to ensure proper functionality."
|
|
325
|
+
)
|
|
326
|
+
|
|
327
|
+
if task_agent.system_message is not None:
|
|
328
|
+
user_task_sys_msg_content = task_agent.system_message.content
|
|
329
|
+
combined_task_content = (
|
|
330
|
+
f"{user_task_sys_msg_content}\n\n"
|
|
331
|
+
f"{task_sys_msg.content}"
|
|
332
|
+
)
|
|
333
|
+
combined_task_sys_msg = BaseMessage.make_assistant_message(
|
|
334
|
+
role_name=task_agent.system_message.role_name,
|
|
335
|
+
content=combined_task_content,
|
|
336
|
+
)
|
|
337
|
+
else:
|
|
338
|
+
combined_task_sys_msg = task_sys_msg
|
|
339
|
+
|
|
340
|
+
# Since ChatAgent constructor uses a dictionary with
|
|
341
|
+
# function names as keys, we don't need to manually deduplicate.
|
|
342
|
+
combined_tools = [
|
|
343
|
+
tool.func for tool in task_agent._internal_tools.values()
|
|
344
|
+
] + [tool.func for tool in task_planning_tools]
|
|
345
|
+
|
|
346
|
+
# Create a new agent with the provided agent's configuration
|
|
347
|
+
# but with the combined system message and tools
|
|
348
|
+
self.task_agent = ChatAgent(
|
|
349
|
+
system_message=combined_task_sys_msg,
|
|
350
|
+
model=task_agent.model_backend,
|
|
351
|
+
memory=task_agent.memory,
|
|
352
|
+
message_window_size=getattr(
|
|
353
|
+
task_agent.memory, "window_size", None
|
|
354
|
+
),
|
|
355
|
+
token_limit=getattr(
|
|
356
|
+
task_agent.memory.get_context_creator(),
|
|
357
|
+
"token_limit",
|
|
358
|
+
None,
|
|
359
|
+
),
|
|
360
|
+
output_language=task_agent.output_language,
|
|
361
|
+
tools=combined_tools,
|
|
362
|
+
external_tools=[
|
|
363
|
+
schema
|
|
364
|
+
for schema in task_agent._external_tool_schemas.values()
|
|
365
|
+
],
|
|
366
|
+
response_terminators=task_agent.response_terminators,
|
|
367
|
+
max_iteration=task_agent.max_iteration,
|
|
368
|
+
stop_event=task_agent.stop_event,
|
|
369
|
+
)
|
|
370
|
+
|
|
371
|
+
if new_worker_agent is None:
|
|
372
|
+
logger.info(
|
|
373
|
+
"No new_worker_agent provided. Workers created at runtime "
|
|
374
|
+
"will use default ChatAgent settings with SearchToolkit, "
|
|
375
|
+
"CodeExecutionToolkit, and ThinkingToolkit. To customize "
|
|
376
|
+
"runtime worker creation, pass a ChatAgent instance."
|
|
377
|
+
)
|
|
378
|
+
|
|
379
|
+
if self.share_memory:
|
|
380
|
+
logger.info(
|
|
381
|
+
"Shared memory enabled. All agents will share their complete "
|
|
382
|
+
"conversation history and function-calling trajectory for "
|
|
383
|
+
"better context continuity during task handoffs."
|
|
384
|
+
)
|
|
285
385
|
|
|
286
386
|
def __repr__(self):
|
|
287
387
|
return (
|
|
@@ -1225,8 +1325,16 @@ class Workforce(BaseNode):
|
|
|
1225
1325
|
)
|
|
1226
1326
|
return TaskAssignResult(assignments=[])
|
|
1227
1327
|
|
|
1228
|
-
|
|
1229
|
-
|
|
1328
|
+
try:
|
|
1329
|
+
result_dict = json.loads(response.msg.content, parse_int=str)
|
|
1330
|
+
return TaskAssignResult(**result_dict)
|
|
1331
|
+
except json.JSONDecodeError as e:
|
|
1332
|
+
logger.error(
|
|
1333
|
+
f"JSON parsing error in task assignment: Invalid response "
|
|
1334
|
+
f"format - {e}. Response content: "
|
|
1335
|
+
f"{response.msg.content[:50]}..."
|
|
1336
|
+
)
|
|
1337
|
+
return TaskAssignResult(assignments=[])
|
|
1230
1338
|
|
|
1231
1339
|
def _validate_assignments(
|
|
1232
1340
|
self, assignments: List[TaskAssignment], valid_ids: Set[str]
|
|
@@ -1450,8 +1558,19 @@ class Workforce(BaseNode):
|
|
|
1450
1558
|
"with various tasks.",
|
|
1451
1559
|
)
|
|
1452
1560
|
else:
|
|
1453
|
-
|
|
1454
|
-
|
|
1561
|
+
try:
|
|
1562
|
+
result_dict = json.loads(response.msg.content)
|
|
1563
|
+
new_node_conf = WorkerConf(**result_dict)
|
|
1564
|
+
except json.JSONDecodeError as e:
|
|
1565
|
+
logger.error(
|
|
1566
|
+
f"JSON parsing error in worker creation: Invalid response "
|
|
1567
|
+
f"format - {e}. Response content: "
|
|
1568
|
+
f"{response.msg.content[:100]}..."
|
|
1569
|
+
)
|
|
1570
|
+
raise RuntimeError(
|
|
1571
|
+
f"Failed to create worker for task {task.id}: "
|
|
1572
|
+
f"Coordinator agent returned malformed JSON response. "
|
|
1573
|
+
)
|
|
1455
1574
|
|
|
1456
1575
|
new_agent = self._create_new_agent(
|
|
1457
1576
|
new_node_conf.role,
|
|
@@ -1486,23 +1605,23 @@ class Workforce(BaseNode):
|
|
|
1486
1605
|
content=sys_msg,
|
|
1487
1606
|
)
|
|
1488
1607
|
|
|
1489
|
-
if self.
|
|
1490
|
-
return
|
|
1491
|
-
|
|
1492
|
-
|
|
1493
|
-
|
|
1494
|
-
|
|
1495
|
-
|
|
1496
|
-
|
|
1497
|
-
|
|
1608
|
+
if self.new_worker_agent is not None:
|
|
1609
|
+
return self.new_worker_agent
|
|
1610
|
+
else:
|
|
1611
|
+
# Default tools for a new agent
|
|
1612
|
+
function_list = [
|
|
1613
|
+
SearchToolkit().search_duckduckgo,
|
|
1614
|
+
*CodeExecutionToolkit().get_tools(),
|
|
1615
|
+
*ThinkingToolkit().get_tools(),
|
|
1616
|
+
]
|
|
1498
1617
|
|
|
1499
|
-
|
|
1500
|
-
|
|
1501
|
-
|
|
1502
|
-
|
|
1503
|
-
|
|
1618
|
+
model = ModelFactory.create(
|
|
1619
|
+
model_platform=ModelPlatformType.DEFAULT,
|
|
1620
|
+
model_type=ModelType.DEFAULT,
|
|
1621
|
+
model_config_dict={"temperature": 0},
|
|
1622
|
+
)
|
|
1504
1623
|
|
|
1505
|
-
|
|
1624
|
+
return ChatAgent(worker_sys_msg, model=model, tools=function_list) # type: ignore[arg-type]
|
|
1506
1625
|
|
|
1507
1626
|
async def _get_returned_task(self) -> Task:
|
|
1508
1627
|
r"""Get the task that's published by this node and just get returned
|
|
@@ -1988,28 +2107,17 @@ class Workforce(BaseNode):
|
|
|
1988
2107
|
"""
|
|
1989
2108
|
|
|
1990
2109
|
# Create a new instance with the same configuration
|
|
1991
|
-
# Extract the original kwargs from the agents to properly clone them
|
|
1992
|
-
coordinator_kwargs = (
|
|
1993
|
-
getattr(self.coordinator_agent, 'init_kwargs', {}) or {}
|
|
1994
|
-
)
|
|
1995
|
-
task_kwargs = getattr(self.task_agent, 'init_kwargs', {}) or {}
|
|
1996
|
-
|
|
1997
2110
|
new_instance = Workforce(
|
|
1998
2111
|
description=self.description,
|
|
1999
|
-
|
|
2000
|
-
|
|
2001
|
-
|
|
2002
|
-
if self.
|
|
2112
|
+
coordinator_agent=self.coordinator_agent.clone(with_memory),
|
|
2113
|
+
task_agent=self.task_agent.clone(with_memory),
|
|
2114
|
+
new_worker_agent=self.new_worker_agent.clone(with_memory)
|
|
2115
|
+
if self.new_worker_agent
|
|
2003
2116
|
else None,
|
|
2004
2117
|
graceful_shutdown_timeout=self.graceful_shutdown_timeout,
|
|
2005
2118
|
share_memory=self.share_memory,
|
|
2006
2119
|
)
|
|
2007
2120
|
|
|
2008
|
-
new_instance.task_agent = self.task_agent.clone(with_memory)
|
|
2009
|
-
new_instance.coordinator_agent = self.coordinator_agent.clone(
|
|
2010
|
-
with_memory
|
|
2011
|
-
)
|
|
2012
|
-
|
|
2013
2121
|
for child in self._children:
|
|
2014
2122
|
if isinstance(child, SingleAgentWorker):
|
|
2015
2123
|
cloned_worker = child.worker.clone(with_memory)
|
|
@@ -499,54 +499,68 @@ class WorkforceLogger:
|
|
|
499
499
|
|
|
500
500
|
tasks_handled_by_worker: Dict[str, int] = {}
|
|
501
501
|
|
|
502
|
+
# Helper function to check if a task is the main task (has no parent)
|
|
503
|
+
def is_main_task(task_id: str) -> bool:
|
|
504
|
+
return (
|
|
505
|
+
task_id in self._task_hierarchy
|
|
506
|
+
and self._task_hierarchy[task_id].get('parent') is None
|
|
507
|
+
)
|
|
508
|
+
|
|
502
509
|
for entry in self.log_entries:
|
|
503
510
|
event_type = entry['event_type']
|
|
504
511
|
timestamp = datetime.fromisoformat(entry['timestamp'])
|
|
512
|
+
task_id = entry.get('task_id', '')
|
|
513
|
+
|
|
505
514
|
if first_timestamp is None or timestamp < first_timestamp:
|
|
506
515
|
first_timestamp = timestamp
|
|
507
516
|
if last_timestamp is None or timestamp > last_timestamp:
|
|
508
517
|
last_timestamp = timestamp
|
|
509
518
|
|
|
510
519
|
if event_type == 'task_created':
|
|
511
|
-
|
|
512
|
-
|
|
520
|
+
# Exclude main task from total count
|
|
521
|
+
if not is_main_task(task_id):
|
|
522
|
+
kpis['total_tasks_created'] += 1
|
|
523
|
+
task_creation_timestamps[task_id] = timestamp
|
|
513
524
|
elif event_type == 'task_assigned':
|
|
514
|
-
task_assignment_timestamps[
|
|
525
|
+
task_assignment_timestamps[task_id] = timestamp
|
|
515
526
|
# Queue time tracking has been removed
|
|
516
527
|
|
|
517
528
|
elif event_type == 'task_started':
|
|
518
529
|
# Store start time for processing time calculation
|
|
519
|
-
task_start_times[
|
|
530
|
+
task_start_times[task_id] = timestamp.timestamp()
|
|
520
531
|
|
|
521
532
|
elif event_type == 'task_completed':
|
|
522
|
-
|
|
523
|
-
|
|
524
|
-
|
|
525
|
-
|
|
526
|
-
|
|
527
|
-
|
|
528
|
-
|
|
533
|
+
# Exclude main task from total count
|
|
534
|
+
if not is_main_task(task_id):
|
|
535
|
+
kpis['total_tasks_completed'] += 1
|
|
536
|
+
# Count tasks handled by worker (only for non-main tasks)
|
|
537
|
+
if 'worker_id' in entry and entry['worker_id'] is not None:
|
|
538
|
+
worker_id = entry['worker_id']
|
|
539
|
+
tasks_handled_by_worker[worker_id] = (
|
|
540
|
+
tasks_handled_by_worker.get(worker_id, 0) + 1
|
|
541
|
+
)
|
|
529
542
|
|
|
530
|
-
if
|
|
543
|
+
if task_id in task_assignment_timestamps:
|
|
531
544
|
completion_time = (
|
|
532
|
-
timestamp
|
|
533
|
-
- task_assignment_timestamps[entry['task_id']]
|
|
545
|
+
timestamp - task_assignment_timestamps[task_id]
|
|
534
546
|
).total_seconds()
|
|
535
547
|
# Store completion time in task hierarchy instead of KPIs
|
|
536
548
|
# array
|
|
537
|
-
if
|
|
538
|
-
self._task_hierarchy[
|
|
549
|
+
if task_id in self._task_hierarchy:
|
|
550
|
+
self._task_hierarchy[task_id][
|
|
539
551
|
'completion_time_seconds'
|
|
540
552
|
] = completion_time
|
|
541
553
|
|
|
542
554
|
elif event_type == 'task_failed':
|
|
543
|
-
|
|
544
|
-
|
|
545
|
-
|
|
546
|
-
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
|
|
555
|
+
# Exclude main task from total count
|
|
556
|
+
if not is_main_task(task_id):
|
|
557
|
+
kpis['total_tasks_failed'] += 1
|
|
558
|
+
# Count tasks handled by worker (only for non-main tasks)
|
|
559
|
+
if 'worker_id' in entry and entry['worker_id'] is not None:
|
|
560
|
+
worker_id = entry['worker_id']
|
|
561
|
+
tasks_handled_by_worker[worker_id] = (
|
|
562
|
+
tasks_handled_by_worker.get(worker_id, 0) + 1
|
|
563
|
+
)
|
|
550
564
|
error_type = entry['error_type']
|
|
551
565
|
kpis['error_types_count'][error_type] = (
|
|
552
566
|
kpis['error_types_count'].get(error_type, 0) + 1
|
camel/storages/__init__.py
CHANGED
|
@@ -30,6 +30,7 @@ from .vectordb_storages.chroma import ChromaStorage
|
|
|
30
30
|
from .vectordb_storages.faiss import FaissStorage
|
|
31
31
|
from .vectordb_storages.milvus import MilvusStorage
|
|
32
32
|
from .vectordb_storages.oceanbase import OceanBaseStorage
|
|
33
|
+
from .vectordb_storages.pgvector import PgVectorStorage
|
|
33
34
|
from .vectordb_storages.qdrant import QdrantStorage
|
|
34
35
|
from .vectordb_storages.tidb import TiDBStorage
|
|
35
36
|
from .vectordb_storages.weaviate import WeaviateStorage
|
|
@@ -53,5 +54,6 @@ __all__ = [
|
|
|
53
54
|
'Mem0Storage',
|
|
54
55
|
'OceanBaseStorage',
|
|
55
56
|
'WeaviateStorage',
|
|
57
|
+
'PgVectorStorage',
|
|
56
58
|
'ChromaStorage',
|
|
57
59
|
]
|
|
@@ -23,6 +23,7 @@ from .chroma import ChromaStorage
|
|
|
23
23
|
from .faiss import FaissStorage
|
|
24
24
|
from .milvus import MilvusStorage
|
|
25
25
|
from .oceanbase import OceanBaseStorage
|
|
26
|
+
from .pgvector import PgVectorStorage
|
|
26
27
|
from .qdrant import QdrantStorage
|
|
27
28
|
from .tidb import TiDBStorage
|
|
28
29
|
from .weaviate import WeaviateStorage
|
|
@@ -40,4 +41,5 @@ __all__ = [
|
|
|
40
41
|
'WeaviateStorage',
|
|
41
42
|
'VectorRecord',
|
|
42
43
|
'VectorDBStatus',
|
|
44
|
+
'PgVectorStorage',
|
|
43
45
|
]
|
|
@@ -0,0 +1,349 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
|
|
15
|
+
import json
|
|
16
|
+
from typing import Any, Dict, List, Optional
|
|
17
|
+
|
|
18
|
+
from camel.logger import get_logger
|
|
19
|
+
from camel.storages.vectordb_storages import (
|
|
20
|
+
BaseVectorStorage,
|
|
21
|
+
VectorDBQuery,
|
|
22
|
+
VectorDBQueryResult,
|
|
23
|
+
VectorDBStatus,
|
|
24
|
+
VectorRecord,
|
|
25
|
+
)
|
|
26
|
+
from camel.types import VectorDistance
|
|
27
|
+
from camel.utils import dependencies_required
|
|
28
|
+
|
|
29
|
+
logger = get_logger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class PgVectorStorage(BaseVectorStorage):
|
|
33
|
+
r"""PgVectorStorage is an implementation of BaseVectorStorage for
|
|
34
|
+
PostgreSQL with pgvector extension.
|
|
35
|
+
|
|
36
|
+
This class provides methods to add, delete, query, and manage vector
|
|
37
|
+
records in a PostgreSQL database using the pgvector extension.
|
|
38
|
+
It supports different distance metrics for similarity search.
|
|
39
|
+
|
|
40
|
+
Args:
|
|
41
|
+
vector_dim (int): The dimension of the vectors to be stored.
|
|
42
|
+
conn_info (Dict[str, Any]): Connection information for
|
|
43
|
+
psycopg2.connect.
|
|
44
|
+
table_name (str, optional): Name of the table to store vectors.
|
|
45
|
+
(default: :obj:`None`)
|
|
46
|
+
distance (VectorDistance, optional): Distance metric for vector
|
|
47
|
+
comparison. (default: :obj:`VectorDistance.COSINE`)
|
|
48
|
+
"""
|
|
49
|
+
|
|
50
|
+
@dependencies_required('psycopg', 'pgvector')
|
|
51
|
+
def __init__(
|
|
52
|
+
self,
|
|
53
|
+
vector_dim: int,
|
|
54
|
+
conn_info: Dict[str, Any],
|
|
55
|
+
table_name: Optional[str] = None,
|
|
56
|
+
distance: VectorDistance = VectorDistance.COSINE,
|
|
57
|
+
**kwargs: Any,
|
|
58
|
+
) -> None:
|
|
59
|
+
r"""Initialize PgVectorStorage.
|
|
60
|
+
|
|
61
|
+
Args:
|
|
62
|
+
vector_dim (int): The dimension of the vectors.
|
|
63
|
+
conn_info (Dict[str, Any]): Connection info for psycopg2.connect.
|
|
64
|
+
table_name (str, optional): Table name. (default: :obj:`None`)
|
|
65
|
+
distance (VectorDistance, optional): Distance metric.
|
|
66
|
+
(default: :obj:`VectorDistance.COSINE`)
|
|
67
|
+
"""
|
|
68
|
+
import psycopg
|
|
69
|
+
from pgvector.psycopg import register_vector
|
|
70
|
+
|
|
71
|
+
if vector_dim <= 0:
|
|
72
|
+
raise ValueError("vector_dim must be positive")
|
|
73
|
+
|
|
74
|
+
self.vector_dim = vector_dim
|
|
75
|
+
self.conn_info = conn_info
|
|
76
|
+
self.table_name = table_name or 'vectors'
|
|
77
|
+
self.distance = distance
|
|
78
|
+
|
|
79
|
+
try:
|
|
80
|
+
self._conn = psycopg.connect(**conn_info)
|
|
81
|
+
register_vector(self._conn)
|
|
82
|
+
self._ensure_table()
|
|
83
|
+
self._ensure_index()
|
|
84
|
+
except Exception as e:
|
|
85
|
+
logger.error(f"Failed to initialize PgVectorStorage: {e}")
|
|
86
|
+
raise
|
|
87
|
+
|
|
88
|
+
def _ensure_table(self) -> None:
    r"""Create the backing table if it does not already exist.

    The table layout is ``id VARCHAR PRIMARY KEY``, a pgvector column
    sized to ``self.vector_dim``, and a JSONB ``payload`` column.

    Raises:
        Exception: Re-raised after logging when the DDL fails.
    """
    try:
        from psycopg.sql import SQL, Identifier, Literal

        # Identifier/Literal composition keeps the dynamic table name
        # and dimension safely quoted.
        ddl = SQL("""
            CREATE TABLE IF NOT EXISTS {table} (
                id VARCHAR PRIMARY KEY,
                vector vector({dim}),
                payload JSONB
            )
        """).format(
            table=Identifier(self.table_name),
            dim=Literal(self.vector_dim),
        )
        with self._conn.cursor() as cur:
            cur.execute(ddl)
            self._conn.commit()
    except Exception as e:
        logger.error(f"Failed to create table {self.table_name}: {e}")
        raise
|
|
111
|
+
|
|
112
|
+
def _ensure_index(self) -> None:
    r"""Create an HNSW index matching the configured distance metric.

    The index operator class must correspond to the operator used at
    query time; a mismatched opclass (e.g. ``vector_cosine_ops`` with
    Euclidean queries) leaves the index unused by the planner. Index
    creation failures are logged as warnings and not raised, because
    queries still work (sequentially) without an index.
    """
    try:
        from psycopg.sql import SQL, Identifier

        # Map the storage's metric to the matching pgvector opclass.
        # Previously this was hard-coded to cosine, so EUCLIDEAN/DOT
        # searches could never be accelerated. Cosine remains the
        # fallback for unknown metrics, preserving prior behavior.
        opclass_by_distance = {
            VectorDistance.COSINE: 'vector_cosine_ops',
            VectorDistance.EUCLIDEAN: 'vector_l2_ops',
            VectorDistance.DOT: 'vector_ip_ops',
        }
        opclass = opclass_by_distance.get(
            self.distance, 'vector_cosine_ops'
        )

        with self._conn.cursor() as cur:
            index_name = f"{self.table_name}_vector_idx"
            query = SQL("""
                CREATE INDEX IF NOT EXISTS {index_name}
                ON {table}
                USING hnsw (vector {opclass})
            """).format(
                index_name=Identifier(index_name),
                table=Identifier(self.table_name),
                # Opclass names are SQL syntax drawn from the fixed
                # mapping above, so SQL() composition is safe here.
                opclass=SQL(opclass),
            )
            cur.execute(query)
            self._conn.commit()
    except Exception as e:
        logger.warning(f"Failed to create vector index: {e}")
|
|
133
|
+
|
|
134
|
+
def add(self, records: List[VectorRecord], **kwargs: Any) -> None:
    r"""Insert or upsert vector records in a single batch.

    Args:
        records (List[VectorRecord]): Records to write; an empty list
            is a no-op. Existing IDs have their vector and payload
            replaced.

    Raises:
        ValueError: If any record's vector length differs from
            ``self.vector_dim``.
        Exception: Re-raised after rollback and logging on DB errors.
    """
    if not records:
        return

    try:
        with self._conn.cursor() as cur:
            # Validate dimensions up front and serialize payloads,
            # accumulating parameter tuples for one batched statement.
            rows = []
            for record in records:
                if len(record.vector) != self.vector_dim:
                    raise ValueError(
                        f"Vector dimension mismatch: expected "
                        f"{self.vector_dim}, got {len(record.vector)}"
                    )
                payload_json = (
                    json.dumps(record.payload)
                    if record.payload is not None
                    else None
                )
                rows.append((record.id, record.vector, payload_json))

            from psycopg.sql import SQL, Identifier

            upsert = SQL("""
                INSERT INTO {table} (id, vector, payload)
                VALUES (%s, %s, %s)
                ON CONFLICT (id) DO UPDATE SET
                vector=EXCLUDED.vector,
                payload=EXCLUDED.payload
            """).format(table=Identifier(self.table_name))

            # One executemany round beats per-record execute calls.
            cur.executemany(upsert, rows)
            self._conn.commit()
    except Exception as e:
        self._conn.rollback()
        logger.error(f"Failed to add records: {e}")
        raise
|
|
182
|
+
|
|
183
|
+
def delete(self, ids: List[str], **kwargs: Any) -> None:
    r"""Remove the rows whose IDs appear in ``ids``.

    Args:
        ids (List[str]): Record IDs to delete; an empty list is a
            no-op.

    Raises:
        Exception: Re-raised after rollback and logging on DB errors.
    """
    from psycopg.sql import SQL, Identifier

    if not ids:
        return

    try:
        with self._conn.cursor() as cur:
            # ANY(%s) lets one statement cover the whole ID list.
            stmt = SQL("DELETE FROM {table} WHERE id = ANY(%s)").format(
                table=Identifier(self.table_name)
            )
            cur.execute(stmt, (ids,))
            self._conn.commit()
    except Exception as e:
        self._conn.rollback()
        logger.error(f"Failed to delete records: {e}")
        raise
|
|
205
|
+
|
|
206
|
+
def query(
    self, query: VectorDBQuery, **kwargs: Any
) -> List[VectorDBQueryResult]:
    r"""Search for the vectors most similar to the given query vector.

    Args:
        query (VectorDBQuery): Query object containing the query
            vector and top_k.
        **kwargs (Any): Additional keyword arguments for the query.

    Returns:
        List[VectorDBQueryResult]: Results ordered best-to-worst under
            the configured distance metric. Note the reported
            ``similarity`` is the raw operator output (a distance for
            COSINE/EUCLIDEAN, a negated inner product for DOT).

    Raises:
        ValueError: If the query vector's dimension does not match
            ``self.vector_dim``, or the metric is unsupported.
    """
    if len(query.query_vector) != self.vector_dim:
        raise ValueError(
            f"Query vector dimension mismatch: "
            f"expected {self.vector_dim}, got {len(query.query_vector)}"
        )

    try:
        with self._conn.cursor() as cur:
            # Operator and sort direction per metric: `<=>` and `<->`
            # are distances (smaller is better, so ASC); `<#>` is the
            # negated inner product (DESC as in the original mapping).
            metric_info = {
                VectorDistance.COSINE: ('<=>', 'ASC'),
                VectorDistance.EUCLIDEAN: ('<->', 'ASC'),
                VectorDistance.DOT: ('<#>', 'DESC'),
            }

            if self.distance not in metric_info:
                raise ValueError(
                    f"Unsupported distance metric: {self.distance}"
                )

            metric, order = metric_info[self.distance]

            from psycopg.sql import SQL, Identifier

            # BUG FIX: the operator and sort direction are SQL syntax,
            # not values. Composing them with Literal() rendered them
            # as quoted strings (e.g. `vector '<=>' %s`), which is
            # invalid SQL. They come only from the fixed mapping above,
            # so SQL() composition is safe.
            query_sql = SQL("""
                SELECT id, vector, payload, (vector {metric} %s::vector)
                AS similarity
                FROM {table}
                ORDER BY similarity {order}
                LIMIT %s
            """).format(
                metric=SQL(metric),
                table=Identifier(self.table_name),
                order=SQL(order),
            )

            cur.execute(query_sql, (query.query_vector, query.top_k))
            results = []
            for row in cur.fetchall():
                # `row_id` avoids shadowing the builtin `id`.
                row_id, vector, payload, similarity = row
                results.append(
                    VectorDBQueryResult.create(
                        similarity=float(similarity),
                        vector=list(vector),
                        id=row_id,
                        payload=payload,
                    )
                )
            return results
    except Exception as e:
        logger.error(f"Failed to query vectors: {e}")
        raise
|
|
279
|
+
|
|
280
|
+
def status(self, **kwargs: Any) -> VectorDBStatus:
    r"""Report the configured vector dimension and current row count.

    Args:
        **kwargs (Any): Additional keyword arguments for the query.

    Returns:
        VectorDBStatus: Status object with vector dimension and count.

    Raises:
        Exception: Re-raised after logging when the count query fails.
    """
    try:
        with self._conn.cursor() as cur:
            from psycopg.sql import SQL, Identifier

            count_sql = SQL('SELECT COUNT(*) FROM {}').format(
                Identifier(self.table_name)
            )
            cur.execute(count_sql)
            row = cur.fetchone()
            # fetchone() should always yield a row for COUNT(*); the
            # guard keeps us safe if the driver returns None anyway.
            total = row[0] if row else 0
            return VectorDBStatus(
                vector_dim=self.vector_dim, vector_count=total
            )
    except Exception as e:
        logger.error(f"Failed to get status: {e}")
        raise
|
|
306
|
+
|
|
307
|
+
def clear(self) -> None:
    r"""Remove every vector by truncating the backing table.

    Raises:
        Exception: Re-raised after rollback and logging on failure.
    """
    try:
        with self._conn.cursor() as cur:
            from psycopg.sql import SQL, Identifier

            truncate = SQL("TRUNCATE TABLE {table}").format(
                table=Identifier(self.table_name)
            )
            cur.execute(truncate)
            self._conn.commit()
    except Exception as e:
        self._conn.rollback()
        logger.error(f"Failed to clear table: {e}")
        raise
|
|
322
|
+
|
|
323
|
+
def load(self) -> None:
    r"""Interface-compatibility no-op.

    Other vector backends must load a cloud-hosted collection before
    use; a PostgreSQL table needs no such step.
    """
    pass
|
|
329
|
+
|
|
330
|
+
def close(self) -> None:
    r"""Close the database connection if one was ever established.

    Close-time errors are logged as warnings rather than raised, so
    teardown paths never fail here.
    """
    conn = getattr(self, '_conn', None)
    if conn:
        try:
            conn.close()
        except Exception as e:
            logger.warning(f"Error closing connection: {e}")
|
|
337
|
+
|
|
338
|
+
def __del__(self) -> None:
    r"""Destructor hook: make sure the connection gets released."""
    self.close()
|
|
341
|
+
|
|
342
|
+
@property
def client(self) -> Any:
    r"""Expose the underlying psycopg connection for advanced use.

    Returns:
        Any: The raw psycopg connection object.
    """
    return self._conn
|
|
@@ -176,26 +176,40 @@ class FileWriteToolkit(BaseToolkit):
|
|
|
176
176
|
|
|
177
177
|
doc = Document(documentclass="article")
|
|
178
178
|
doc.packages.append(Command('usepackage', 'amsmath'))
|
|
179
|
-
|
|
180
179
|
with doc.create(Section('Generated Content')):
|
|
181
180
|
for line in content.split('\n'):
|
|
182
|
-
# Remove leading whitespace
|
|
183
181
|
stripped_line = line.strip()
|
|
184
|
-
|
|
185
|
-
#
|
|
182
|
+
|
|
183
|
+
# Skip empty lines
|
|
184
|
+
if not stripped_line:
|
|
185
|
+
continue
|
|
186
|
+
|
|
187
|
+
# Convert Markdown-like headers
|
|
188
|
+
if stripped_line.startswith('## '):
|
|
189
|
+
header = stripped_line[3:]
|
|
190
|
+
doc.append(NoEscape(r'\subsection*{%s}' % header))
|
|
191
|
+
continue
|
|
192
|
+
elif stripped_line.startswith('# '):
|
|
193
|
+
header = stripped_line[2:]
|
|
194
|
+
doc.append(NoEscape(r'\section*{%s}' % header))
|
|
195
|
+
continue
|
|
196
|
+
elif stripped_line.strip() == '---':
|
|
197
|
+
doc.append(NoEscape(r'\hrule'))
|
|
198
|
+
continue
|
|
199
|
+
|
|
200
|
+
# Detect standalone math expressions like $...$
|
|
186
201
|
if (
|
|
187
202
|
stripped_line.startswith('$')
|
|
188
203
|
and stripped_line.endswith('$')
|
|
189
204
|
and len(stripped_line) > 1
|
|
190
205
|
):
|
|
191
|
-
# Extract content between the '$' delimiters
|
|
192
206
|
math_data = stripped_line[1:-1]
|
|
193
207
|
doc.append(Math(data=math_data))
|
|
194
208
|
else:
|
|
195
|
-
doc.append(NoEscape(
|
|
209
|
+
doc.append(NoEscape(stripped_line))
|
|
196
210
|
doc.append(NoEscape(r'\par'))
|
|
197
211
|
|
|
198
|
-
|
|
212
|
+
doc.generate_pdf(str(file_path), clean_tex=True)
|
|
199
213
|
|
|
200
214
|
logger.info(f"Wrote PDF (with LaTeX) to {file_path}")
|
|
201
215
|
else:
|
|
@@ -187,7 +187,8 @@ class TerminalToolkit(BaseToolkit):
|
|
|
187
187
|
logger.error(f"Failed to create environment: {e}")
|
|
188
188
|
|
|
189
189
|
def _create_terminal(self):
|
|
190
|
-
r"""Create a terminal GUI.
|
|
190
|
+
r"""Create a terminal GUI. If GUI creation fails, fallback
|
|
191
|
+
to file output."""
|
|
191
192
|
|
|
192
193
|
try:
|
|
193
194
|
import tkinter as tk
|
|
@@ -239,7 +240,12 @@ class TerminalToolkit(BaseToolkit):
|
|
|
239
240
|
self.root.mainloop()
|
|
240
241
|
|
|
241
242
|
except Exception as e:
|
|
242
|
-
logger.
|
|
243
|
+
logger.warning(
|
|
244
|
+
f"Failed to create GUI terminal: {e}, "
|
|
245
|
+
f"falling back to file output mode"
|
|
246
|
+
)
|
|
247
|
+
# Fallback to file output mode when GUI creation fails
|
|
248
|
+
self._setup_file_output()
|
|
243
249
|
self.terminal_ready.set()
|
|
244
250
|
|
|
245
251
|
def _update_terminal_output(self, output: str):
|
|
@@ -249,8 +255,9 @@ class TerminalToolkit(BaseToolkit):
|
|
|
249
255
|
output (str): The output to be sent to the agent
|
|
250
256
|
"""
|
|
251
257
|
try:
|
|
252
|
-
# If it is macOS
|
|
253
|
-
|
|
258
|
+
# If it is macOS or if we have a log_file (fallback mode),
|
|
259
|
+
# write to file
|
|
260
|
+
if self.is_macos or hasattr(self, 'log_file'):
|
|
254
261
|
if hasattr(self, 'log_file'):
|
|
255
262
|
with open(self.log_file, "a") as f:
|
|
256
263
|
f.write(output)
|
camel/types/enums.py
CHANGED
|
@@ -209,6 +209,7 @@ class ModelType(UnifiedModelType, Enum):
|
|
|
209
209
|
MISTRAL_PIXTRAL_12B = "pixtral-12b-2409"
|
|
210
210
|
MISTRAL_MEDIUM_3 = "mistral-medium-latest"
|
|
211
211
|
MAGISTRAL_MEDIUM = "magistral-medium-2506"
|
|
212
|
+
MISTRAL_SMALL_3_2 = "mistral-small-2506"
|
|
212
213
|
|
|
213
214
|
# Reka models
|
|
214
215
|
REKA_CORE = "reka-core"
|
|
@@ -646,6 +647,7 @@ class ModelType(UnifiedModelType, Enum):
|
|
|
646
647
|
ModelType.MISTRAL_3B,
|
|
647
648
|
ModelType.MISTRAL_MEDIUM_3,
|
|
648
649
|
ModelType.MAGISTRAL_MEDIUM,
|
|
650
|
+
ModelType.MISTRAL_SMALL_3_2,
|
|
649
651
|
}
|
|
650
652
|
|
|
651
653
|
@property
|
|
@@ -1166,6 +1168,7 @@ class ModelType(UnifiedModelType, Enum):
|
|
|
1166
1168
|
ModelType.MISTRAL_PIXTRAL_12B,
|
|
1167
1169
|
ModelType.MISTRAL_8B,
|
|
1168
1170
|
ModelType.MISTRAL_3B,
|
|
1171
|
+
ModelType.MISTRAL_SMALL_3_2,
|
|
1169
1172
|
ModelType.QWEN_2_5_CODER_32B,
|
|
1170
1173
|
ModelType.QWEN_2_5_VL_72B,
|
|
1171
1174
|
ModelType.QWEN_2_5_72B,
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: camel-ai
|
|
3
|
-
Version: 0.2.
|
|
3
|
+
Version: 0.2.70
|
|
4
4
|
Summary: Communicative Agents for AI Society Study
|
|
5
5
|
Project-URL: Homepage, https://www.camel-ai.org/
|
|
6
6
|
Project-URL: Repository, https://github.com/camel-ai/camel
|
|
@@ -87,10 +87,12 @@ Requires-Dist: openapi-spec-validator<0.8,>=0.7.1; extra == 'all'
|
|
|
87
87
|
Requires-Dist: openpyxl>=3.1.5; extra == 'all'
|
|
88
88
|
Requires-Dist: pandas<2,>=1.5.3; extra == 'all'
|
|
89
89
|
Requires-Dist: pandasai<3,>=2.3.0; extra == 'all'
|
|
90
|
+
Requires-Dist: pgvector<0.3,>=0.2.4; extra == 'all'
|
|
90
91
|
Requires-Dist: playwright>=1.50.0; extra == 'all'
|
|
91
92
|
Requires-Dist: prance<24,>=23.6.21.0; extra == 'all'
|
|
92
93
|
Requires-Dist: praw<8,>=7.7.1; extra == 'all'
|
|
93
94
|
Requires-Dist: pre-commit<4,>=3; extra == 'all'
|
|
95
|
+
Requires-Dist: psycopg[binary]<4,>=3.1.18; extra == 'all'
|
|
94
96
|
Requires-Dist: pyautogui<0.10,>=0.9.54; extra == 'all'
|
|
95
97
|
Requires-Dist: pydub<0.26,>=0.25.1; extra == 'all'
|
|
96
98
|
Requires-Dist: pygithub<3,>=2.6.0; extra == 'all'
|
|
@@ -319,6 +321,8 @@ Requires-Dist: google-cloud-storage<3,>=2.18.0; extra == 'storage'
|
|
|
319
321
|
Requires-Dist: mem0ai>=0.1.73; extra == 'storage'
|
|
320
322
|
Requires-Dist: nebula3-python==3.8.2; extra == 'storage'
|
|
321
323
|
Requires-Dist: neo4j<6,>=5.18.0; extra == 'storage'
|
|
324
|
+
Requires-Dist: pgvector<0.3,>=0.2.4; extra == 'storage'
|
|
325
|
+
Requires-Dist: psycopg[binary]<4,>=3.1.18; extra == 'storage'
|
|
322
326
|
Requires-Dist: pymilvus<3,>=2.4.0; extra == 'storage'
|
|
323
327
|
Requires-Dist: pyobvector>=0.1.18; extra == 'storage'
|
|
324
328
|
Requires-Dist: pytidb-experimental==0.0.1.dev4; extra == 'storage'
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
camel/__init__.py,sha256=
|
|
1
|
+
camel/__init__.py,sha256=rtQ7MDzPTxNXpydxnv9IwyW7crRkyL7PBwDGcQg017U,899
|
|
2
2
|
camel/generators.py,sha256=JRqj9_m1PF4qT6UtybzTQ-KBT9MJQt18OAAYvQ_fr2o,13844
|
|
3
3
|
camel/human.py,sha256=Xg8x1cS5KK4bQ1SDByiHZnzsRpvRP-KZViNvmu38xo4,5475
|
|
4
4
|
camel/logger.py,sha256=rZVeOVYuQ9RYJ5Tqyv0usqy0g4zaVEq4qSfZ9nd2640,5755
|
|
@@ -268,7 +268,7 @@ camel/schemas/openai_converter.py,sha256=SEnYsYcboZgVmjcC1YP5xke3c0MYPESPRmYQWsD
|
|
|
268
268
|
camel/schemas/outlines_converter.py,sha256=OYKPR1fNyrYs9eh5RiXEAccMbnRc9WTwSVJYbh9HkKE,8738
|
|
269
269
|
camel/societies/__init__.py,sha256=NOHjtlsY-gV9UCF2xXgcbG-xXyuigmbwbpLpNsDgEJ4,826
|
|
270
270
|
camel/societies/babyagi_playing.py,sha256=KbTdpHfZ2V8AripVck0bNTOyF-RSaMPCRARz3DvzWfQ,11855
|
|
271
|
-
camel/societies/role_playing.py,sha256=
|
|
271
|
+
camel/societies/role_playing.py,sha256=0XScr3WfxX1QOC71RhBLmrcS5y2c7DMQB_mAFOHU34M,31421
|
|
272
272
|
camel/societies/workforce/__init__.py,sha256=bkTI-PE-MSK9AQ2V2gR6cR2WY-R7Jqy_NmXRtAoqo8o,920
|
|
273
273
|
camel/societies/workforce/base.py,sha256=z2DmbTP5LL5-aCAAqglznQqCLfPmnyM5zD3w6jjtsb8,2175
|
|
274
274
|
camel/societies/workforce/prompts.py,sha256=_l7OUkzH5p7KOd8HMZle9zB9W3jKza_Yb_6elFKiZ2s,11813
|
|
@@ -277,9 +277,9 @@ camel/societies/workforce/single_agent_worker.py,sha256=ZDVq5doJUoUY0Uuvhkucf4U-
|
|
|
277
277
|
camel/societies/workforce/task_channel.py,sha256=uqQQI67Tr4awbR4bjZXdx8_4gL6-ON5IjQk_H_ryqT4,7431
|
|
278
278
|
camel/societies/workforce/utils.py,sha256=Gjlz7pLo4r1b6iNHtlIMxeEuat4d6tEEQMI40JAU3kY,6190
|
|
279
279
|
camel/societies/workforce/worker.py,sha256=36tkOyz4G2wfBdrFjt9NBPXsx4UbE6uL5on8sP2aoqk,6414
|
|
280
|
-
camel/societies/workforce/workforce.py,sha256=
|
|
281
|
-
camel/societies/workforce/workforce_logger.py,sha256=
|
|
282
|
-
camel/storages/__init__.py,sha256=
|
|
280
|
+
camel/societies/workforce/workforce.py,sha256=Nm7TML3I_rZVo22lLxt27fpBUopfFkG1MacVKCPJRIY,100497
|
|
281
|
+
camel/societies/workforce/workforce_logger.py,sha256=7xOnmsqeHNoNiwqhFyh3SUtfOHCslCoX6aB2MMCnb0M,24865
|
|
282
|
+
camel/storages/__init__.py,sha256=RwpEyvxpMbJzVDZJJygeBg4AzyYMkTjjkfB53hTuqGo,2141
|
|
283
283
|
camel/storages/graph_storages/__init__.py,sha256=G29BNn651C0WTOpjCl4QnVM-4B9tcNh8DdmsCiONH8Y,948
|
|
284
284
|
camel/storages/graph_storages/base.py,sha256=uSe9jWuLudfm5jtfo6E-L_kNzITwK1_Ef-6L4HWw-JM,2852
|
|
285
285
|
camel/storages/graph_storages/graph_element.py,sha256=X_2orbQOMaQd00xxzAoJLfEcrVNE1mgCqMJv0orMAKA,2733
|
|
@@ -296,12 +296,13 @@ camel/storages/object_storages/amazon_s3.py,sha256=9Yvyyyb1LGHxr8REEza7oGopbVtLE
|
|
|
296
296
|
camel/storages/object_storages/azure_blob.py,sha256=66dHcvjE2ZNdb339oBU6LbFiKzPYrnlb4tQ_3m2Yazc,5992
|
|
297
297
|
camel/storages/object_storages/base.py,sha256=pImWylYJm7Wt8q87lBE1Cxk26IJ9sRtXq_OJmV6bJlg,3796
|
|
298
298
|
camel/storages/object_storages/google_cloud.py,sha256=59AvGar_GDoGYHhzUi5KBtInv2KaUVnw8SalsL43410,5332
|
|
299
|
-
camel/storages/vectordb_storages/__init__.py,sha256=
|
|
299
|
+
camel/storages/vectordb_storages/__init__.py,sha256=ZxN9EWkhh4W9LGfpwmnQkpiaY1dEfvVERd6QZ-jN4Bs,1412
|
|
300
300
|
camel/storages/vectordb_storages/base.py,sha256=EP_WbEtI3SJPHro9rjNkIq9UDUP1AAHmxZgeya94Lgk,6738
|
|
301
301
|
camel/storages/vectordb_storages/chroma.py,sha256=wXuLUYsgkC2VvdyLrlL5VqEDVzJDBUo7OdimK8hBLmg,27553
|
|
302
302
|
camel/storages/vectordb_storages/faiss.py,sha256=MHE3db9kJmVuu0aScXsSo8p60TCtc2Ot0rO77zcPgt8,26760
|
|
303
303
|
camel/storages/vectordb_storages/milvus.py,sha256=ChQyEuaXCWCKxytLN2z4QrkEthx2xE6bQPO6KCS9RgQ,13535
|
|
304
304
|
camel/storages/vectordb_storages/oceanbase.py,sha256=eNBelw4D6r3OWlhHzGJ8Xw-ej9nU1uTZ6CYoXdbxDkI,17054
|
|
305
|
+
camel/storages/vectordb_storages/pgvector.py,sha256=p-5RGCVT46zP-Yop85thWi2m0ZnHILSJFpu2A-7qWnk,12438
|
|
305
306
|
camel/storages/vectordb_storages/qdrant.py,sha256=a_cT0buSCHQ2CPZy852-mdvMDwy5zodCvAKMaa4zIvg,18017
|
|
306
307
|
camel/storages/vectordb_storages/tidb.py,sha256=w83bxgKgso43MtHqlpf2EMSpn1_Nz6ZZtY4fPw_-vgs,11192
|
|
307
308
|
camel/storages/vectordb_storages/weaviate.py,sha256=wDUE4KvfmOl3DqHFU4uF0VKbHu-q9vKhZDe8FZ6QXsk,27888
|
|
@@ -329,7 +330,7 @@ camel/toolkits/dappier_toolkit.py,sha256=OEHOYXX_oXhgbVtWYAy13nO9uXf9i5qEXSwY4Pe
|
|
|
329
330
|
camel/toolkits/data_commons_toolkit.py,sha256=aHZUSL1ACpnYGaf1rE2csVKTmXTmN8lMGRUBYhZ_YEk,14168
|
|
330
331
|
camel/toolkits/edgeone_pages_mcp_toolkit.py,sha256=1TFpAGHUNLggFQeN1OEw7P5laijwnlrCkfxBtgxFuUY,2331
|
|
331
332
|
camel/toolkits/excel_toolkit.py,sha256=9Uk5GLWl719c4W-NcGPJTNMtodAbEE5gUgLsFkIInbk,32564
|
|
332
|
-
camel/toolkits/file_write_toolkit.py,sha256=
|
|
333
|
+
camel/toolkits/file_write_toolkit.py,sha256=8G7swo_9EXpbt5IO-hCy-Wf5BPd70ewfpCHqErPsCXU,17072
|
|
333
334
|
camel/toolkits/function_tool.py,sha256=xCDzjxTRCVj_kmbnMFBuwK-3NuzM4JNe_kv9HLtjnIA,33644
|
|
334
335
|
camel/toolkits/github_toolkit.py,sha256=iUyRrjWGAW_iljZVfNyfkm1Vi55wJxK6PsDAQs9pOag,13099
|
|
335
336
|
camel/toolkits/google_calendar_toolkit.py,sha256=E-sdgdbtNBs_CXbhht9t1dsVr4DsTr5NguHkx4fvSmc,15410
|
|
@@ -367,7 +368,7 @@ camel/toolkits/slack_toolkit.py,sha256=Nb3w-TbUmnUWEvHE9Hbf_wkpSepm_zKQj7m19UyoF
|
|
|
367
368
|
camel/toolkits/stripe_toolkit.py,sha256=07swo5znGTnorafC1uYLKB4NRcJIOPOx19J7tkpLYWk,10102
|
|
368
369
|
camel/toolkits/sympy_toolkit.py,sha256=BAQnI8EFJydNUpKQWXBdleQ1Cm-srDBhFlqp9V9pbPQ,33757
|
|
369
370
|
camel/toolkits/task_planning_toolkit.py,sha256=Ttw9fHae4omGC1SA-6uaeXVHJ1YkwiVloz_hO-fm1gw,4855
|
|
370
|
-
camel/toolkits/terminal_toolkit.py,sha256=
|
|
371
|
+
camel/toolkits/terminal_toolkit.py,sha256=15pEz281g_6sJq5xGPdFFonjjHEDPf-NGJtfM-csOu0,37559
|
|
371
372
|
camel/toolkits/thinking_toolkit.py,sha256=nZYLvKWIx2BM1DYu69I9B5EISAG7aYcLYXKv9663BVk,8000
|
|
372
373
|
camel/toolkits/twitter_toolkit.py,sha256=Px4N8aUxUzy01LhGSWkdrC2JgwKkrY3cvxgMeJ2XYfU,15939
|
|
373
374
|
camel/toolkits/video_analysis_toolkit.py,sha256=Wh08MAVvs3PtgXN88Sk0TXYaGfVmQAol8FPCXMPPpIM,23375
|
|
@@ -409,7 +410,7 @@ camel/toolkits/open_api_specs/web_scraper/openapi.yaml,sha256=u_WalQ01e8W1D27VnZ
|
|
|
409
410
|
camel/toolkits/open_api_specs/web_scraper/paths/__init__.py,sha256=OKCZrQCDwaWtXIN_2rA9FSqEvgpQRieRoHh7Ek6N16A,702
|
|
410
411
|
camel/toolkits/open_api_specs/web_scraper/paths/scraper.py,sha256=aWy1_ppV4NVVEZfnbN3tu9XA9yAPAC9bRStJ5JuXMRU,1117
|
|
411
412
|
camel/types/__init__.py,sha256=pFTg3CWGSCfwFdoxPDTf4dKV8DdJS1x-bBPuEOmtdf0,2549
|
|
412
|
-
camel/types/enums.py,sha256=
|
|
413
|
+
camel/types/enums.py,sha256=FsUh5p4xo_c-h1Z5xzE7HeSue-pYUR7s4zzkYEDRBwI,63014
|
|
413
414
|
camel/types/mcp_registries.py,sha256=dl4LgYtSaUhsqAKpz28k_SA9La11qxqBvDLaEuyzrFE,4971
|
|
414
415
|
camel/types/openai_types.py,sha256=8ZFzLe-zGmKNPfuVZFzxlxAX98lGf18gtrPhOgMmzus,2104
|
|
415
416
|
camel/types/unified_model_type.py,sha256=ZweHiS4MQ1QbDEX3a3oUc-pvgueYP27Zt0SlAPcYg_4,5623
|
|
@@ -436,7 +437,7 @@ camel/verifiers/math_verifier.py,sha256=tA1D4S0sm8nsWISevxSN0hvSVtIUpqmJhzqfbuMo
|
|
|
436
437
|
camel/verifiers/models.py,sha256=GdxYPr7UxNrR1577yW4kyroRcLGfd-H1GXgv8potDWU,2471
|
|
437
438
|
camel/verifiers/physics_verifier.py,sha256=c1grrRddcrVN7szkxhv2QirwY9viIRSITWeWFF5HmLs,30187
|
|
438
439
|
camel/verifiers/python_verifier.py,sha256=ogTz77wODfEcDN4tMVtiSkRQyoiZbHPY2fKybn59lHw,20558
|
|
439
|
-
camel_ai-0.2.
|
|
440
|
-
camel_ai-0.2.
|
|
441
|
-
camel_ai-0.2.
|
|
442
|
-
camel_ai-0.2.
|
|
440
|
+
camel_ai-0.2.70.dist-info/METADATA,sha256=rcJwA1uI37kPW3D5P9jRroWihc_DLTbLGmyksOqCDew,45274
|
|
441
|
+
camel_ai-0.2.70.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
442
|
+
camel_ai-0.2.70.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
|
|
443
|
+
camel_ai-0.2.70.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|