camel-ai 0.2.71a8__py3-none-any.whl → 0.2.71a9__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.

Potentially problematic release.

This version of camel-ai has been flagged as potentially problematic; consult the registry's advisory page for details.

camel/__init__.py CHANGED
@@ -14,7 +14,7 @@
14
14
 
15
15
  from camel.logger import disable_logging, enable_logging, set_log_level
16
16
 
17
- __version__ = '0.2.71a8'
17
+ __version__ = '0.2.71a9'
18
18
 
19
19
  __all__ = [
20
20
  '__version__',
@@ -22,6 +22,7 @@ from typing import Any, List, Optional
22
22
  from colorama import Fore
23
23
 
24
24
  from camel.agents import ChatAgent
25
+ from camel.agents.chat_agent import AsyncStreamingChatAgentResponse
25
26
  from camel.societies.workforce.prompts import PROCESS_TASK_PROMPT
26
27
  from camel.societies.workforce.structured_output_handler import (
27
28
  StructuredOutputHandler,
@@ -285,6 +286,7 @@ class SingleAgentWorker(Worker):
285
286
  """
286
287
  # Get agent efficiently (from pool or by cloning)
287
288
  worker_agent = await self._get_worker_agent()
289
+ response_content = ""
288
290
 
289
291
  try:
290
292
  dependency_tasks_info = self._get_dep_tasks_info(dependencies)
@@ -314,11 +316,23 @@ class SingleAgentWorker(Worker):
314
316
  )
315
317
  )
316
318
  response = await worker_agent.astep(enhanced_prompt)
319
+
320
+ # Handle streaming response
321
+ if isinstance(response, AsyncStreamingChatAgentResponse):
322
+ content = ""
323
+ async for chunk in response:
324
+ if chunk.msg:
325
+ content = chunk.msg.content
326
+ response_content = content
327
+ else:
328
+ # Regular ChatAgentResponse
329
+ response_content = (
330
+ response.msg.content if response.msg else ""
331
+ )
332
+
317
333
  task_result = (
318
334
  self.structured_handler.parse_structured_response(
319
- response_text=response.msg.content
320
- if response.msg
321
- else "",
335
+ response_text=response_content,
322
336
  schema=TaskResult,
323
337
  fallback_values={
324
338
  "content": "Task processing failed",
@@ -331,13 +345,41 @@ class SingleAgentWorker(Worker):
331
345
  response = await worker_agent.astep(
332
346
  prompt, response_format=TaskResult
333
347
  )
334
- task_result = response.msg.parsed
348
+
349
+ # Handle streaming response for native output
350
+ if isinstance(response, AsyncStreamingChatAgentResponse):
351
+ task_result = None
352
+ async for chunk in response:
353
+ if chunk.msg and chunk.msg.parsed:
354
+ task_result = chunk.msg.parsed
355
+ response_content = chunk.msg.content
356
+ # If no parsed result found in streaming, create fallback
357
+ if task_result is None:
358
+ task_result = TaskResult(
359
+ content="Failed to parse streaming response",
360
+ failed=True,
361
+ )
362
+ else:
363
+ # Regular ChatAgentResponse
364
+ task_result = response.msg.parsed
365
+ response_content = (
366
+ response.msg.content if response.msg else ""
367
+ )
335
368
 
336
369
  # Get token usage from the response
337
- usage_info = response.info.get("usage") or response.info.get(
338
- "token_usage"
370
+ if isinstance(response, AsyncStreamingChatAgentResponse):
371
+ # For streaming responses, get the final response info
372
+ final_response = await response
373
+ usage_info = final_response.info.get(
374
+ "usage"
375
+ ) or final_response.info.get("token_usage")
376
+ else:
377
+ usage_info = response.info.get("usage") or response.info.get(
378
+ "token_usage"
379
+ )
380
+ total_tokens = (
381
+ usage_info.get("total_tokens", 0) if usage_info else 0
339
382
  )
340
- total_tokens = usage_info.get("total_tokens", 0)
341
383
 
342
384
  except Exception as e:
343
385
  print(
@@ -369,8 +411,10 @@ class SingleAgentWorker(Worker):
369
411
  f"(from pool/clone of "
370
412
  f"{getattr(self.worker, 'agent_id', self.worker.role_name)}) "
371
413
  f"to process task {task.content}",
372
- "response_content": response.msg.content,
373
- "tool_calls": response.info.get("tool_calls"),
414
+ "response_content": response_content,
415
+ "tool_calls": final_response.info.get("tool_calls")
416
+ if isinstance(response, AsyncStreamingChatAgentResponse)
417
+ else response.info.get("tool_calls"),
374
418
  "total_tokens": total_tokens,
375
419
  }
376
420
 
@@ -25,6 +25,7 @@ from typing import (
25
25
  Coroutine,
26
26
  Deque,
27
27
  Dict,
28
+ Generator,
28
29
  List,
29
30
  Optional,
30
31
  Set,
@@ -420,6 +421,11 @@ class Workforce(BaseNode):
420
421
  "CodeExecutionToolkit, and ThinkingToolkit. To customize "
421
422
  "runtime worker creation, pass a ChatAgent instance."
422
423
  )
424
+ else:
425
+ # Validate new_worker_agent if provided
426
+ self._validate_agent_compatibility(
427
+ new_worker_agent, "new_worker_agent"
428
+ )
423
429
 
424
430
  if self.share_memory:
425
431
  logger.info(
@@ -432,6 +438,42 @@ class Workforce(BaseNode):
432
438
  # Helper for propagating pause control to externally supplied agents
433
439
  # ------------------------------------------------------------------
434
440
 
441
+ def _validate_agent_compatibility(
442
+ self, agent: ChatAgent, agent_context: str = "agent"
443
+ ) -> None:
444
+ r"""Validate that agent configuration is compatible with workforce
445
+ settings.
446
+
447
+ Args:
448
+ agent (ChatAgent): The agent to validate.
449
+ agent_context (str): Context description for error messages.
450
+
451
+ Raises:
452
+ ValueError: If agent has tools and stream mode enabled but
453
+ use_structured_output_handler is False.
454
+ """
455
+ agent_has_tools = (
456
+ bool(agent.tool_dict) if hasattr(agent, 'tool_dict') else False
457
+ )
458
+ agent_stream_mode = (
459
+ getattr(agent.model_backend, 'stream', False)
460
+ if hasattr(agent, 'model_backend')
461
+ else False
462
+ )
463
+
464
+ if (
465
+ agent_has_tools
466
+ and agent_stream_mode
467
+ and not self.use_structured_output_handler
468
+ ):
469
+ raise ValueError(
470
+ f"{agent_context} has tools and stream mode enabled, but "
471
+ "use_structured_output_handler is False. Native structured "
472
+ "output doesn't work with tool calls in stream mode. "
473
+ "Please set use_structured_output_handler=True when creating "
474
+ "the Workforce."
475
+ )
476
+
435
477
  def _attach_pause_event_to_agent(self, agent: ChatAgent) -> None:
436
478
  r"""Ensure the given ChatAgent shares this workforce's pause_event.
437
479
 
@@ -678,12 +720,15 @@ class Workforce(BaseNode):
678
720
  if task_id in self._assignees:
679
721
  del self._assignees[task_id]
680
722
 
681
- def _decompose_task(self, task: Task) -> List[Task]:
723
+ def _decompose_task(
724
+ self, task: Task
725
+ ) -> Union[List[Task], Generator[List[Task], None, None]]:
682
726
  r"""Decompose the task into subtasks. This method will also set the
683
727
  relationship between the task and its subtasks.
684
728
 
685
729
  Returns:
686
- List[Task]: The subtasks.
730
+ Union[List[Task], Generator[List[Task], None, None]]:
731
+ The subtasks or generator of subtasks.
687
732
  """
688
733
  decompose_prompt = WF_TASK_DECOMPOSE_PROMPT.format(
689
734
  content=task.content,
@@ -691,13 +736,30 @@ class Workforce(BaseNode):
691
736
  additional_info=task.additional_info,
692
737
  )
693
738
  self.task_agent.reset()
694
- subtasks = task.decompose(self.task_agent, decompose_prompt)
695
-
696
- # Update dependency tracking for decomposed task
697
- if subtasks:
698
- self._update_dependencies_for_decomposition(task, subtasks)
739
+ result = task.decompose(self.task_agent, decompose_prompt)
740
+
741
+ # Handle both streaming and non-streaming results
742
+ if isinstance(result, Generator):
743
+ # This is a generator (streaming mode)
744
+ def streaming_with_dependencies():
745
+ all_subtasks = []
746
+ for new_tasks in result:
747
+ all_subtasks.extend(new_tasks)
748
+ # Update dependency tracking for each batch of new tasks
749
+ if new_tasks:
750
+ self._update_dependencies_for_decomposition(
751
+ task, all_subtasks
752
+ )
753
+ yield new_tasks
699
754
 
700
- return subtasks
755
+ return streaming_with_dependencies()
756
+ else:
757
+ # This is a regular list (non-streaming mode)
758
+ subtasks = result
759
+ # Update dependency tracking for decomposed task
760
+ if subtasks:
761
+ self._update_dependencies_for_decomposition(task, subtasks)
762
+ return subtasks
701
763
 
702
764
  def _analyze_failure(
703
765
  self, task: Task, error_message: str
@@ -1148,7 +1210,17 @@ class Workforce(BaseNode):
1148
1210
  task.state = TaskState.FAILED
1149
1211
  # The agent tend to be overconfident on the whole task, so we
1150
1212
  # decompose the task into subtasks first
1151
- subtasks = self._decompose_task(task)
1213
+ subtasks_result = self._decompose_task(task)
1214
+
1215
+ # Handle both streaming and non-streaming results
1216
+ if isinstance(subtasks_result, Generator):
1217
+ # This is a generator (streaming mode)
1218
+ subtasks = []
1219
+ for new_tasks in subtasks_result:
1220
+ subtasks.extend(new_tasks)
1221
+ else:
1222
+ # This is a regular list (non-streaming mode)
1223
+ subtasks = subtasks_result
1152
1224
  if self.metrics_logger and subtasks:
1153
1225
  self.metrics_logger.log_task_decomposed(
1154
1226
  parent_task_id=task.id, subtask_ids=[st.id for st in subtasks]
@@ -1265,7 +1337,17 @@ class Workforce(BaseNode):
1265
1337
  task.state = TaskState.FAILED # TODO: Add logic for OPEN
1266
1338
 
1267
1339
  # Decompose the task into subtasks first
1268
- subtasks = self._decompose_task(task)
1340
+ subtasks_result = self._decompose_task(task)
1341
+
1342
+ # Handle both streaming and non-streaming results
1343
+ if isinstance(subtasks_result, Generator):
1344
+ # This is a generator (streaming mode)
1345
+ subtasks = []
1346
+ for new_tasks in subtasks_result:
1347
+ subtasks.extend(new_tasks)
1348
+ else:
1349
+ # This is a regular list (non-streaming mode)
1350
+ subtasks = subtasks_result
1269
1351
  if subtasks:
1270
1352
  # If decomposition happened, the original task becomes a container.
1271
1353
  # We only execute its subtasks.
@@ -1435,12 +1517,18 @@ class Workforce(BaseNode):
1435
1517
 
1436
1518
  Raises:
1437
1519
  RuntimeError: If called while workforce is running (not paused).
1520
+ ValueError: If worker has tools and stream mode enabled but
1521
+ use_structured_output_handler is False.
1438
1522
  """
1439
1523
  if self._state == WorkforceState.RUNNING:
1440
1524
  raise RuntimeError(
1441
1525
  "Cannot add workers while workforce is running. "
1442
1526
  "Pause the workforce first."
1443
1527
  )
1528
+
1529
+ # Validate worker agent compatibility
1530
+ self._validate_agent_compatibility(worker, "Worker agent")
1531
+
1444
1532
  # Ensure the worker agent shares this workforce's pause control
1445
1533
  self._attach_pause_event_to_agent(worker)
1446
1534
 
@@ -2084,6 +2172,14 @@ class Workforce(BaseNode):
2084
2172
  new_node_conf.sys_msg,
2085
2173
  )
2086
2174
 
2175
+ # Validate the new agent compatibility before creating worker
2176
+ try:
2177
+ self._validate_agent_compatibility(
2178
+ new_agent, f"Agent for task {task.id}"
2179
+ )
2180
+ except ValueError as e:
2181
+ raise ValueError(f"Cannot create worker for task {task.id}: {e!s}")
2182
+
2087
2183
  new_node = SingleAgentWorker(
2088
2184
  description=new_node_conf.description,
2089
2185
  worker=new_agent,
@@ -2349,7 +2445,17 @@ class Workforce(BaseNode):
2349
2445
 
2350
2446
  elif recovery_decision.strategy == RecoveryStrategy.DECOMPOSE:
2351
2447
  # Decompose the task into subtasks
2352
- subtasks = self._decompose_task(task)
2448
+ subtasks_result = self._decompose_task(task)
2449
+
2450
+ # Handle both streaming and non-streaming results
2451
+ if isinstance(subtasks_result, Generator):
2452
+ # This is a generator (streaming mode)
2453
+ subtasks = []
2454
+ for new_tasks in subtasks_result:
2455
+ subtasks.extend(new_tasks)
2456
+ else:
2457
+ # This is a regular list (non-streaming mode)
2458
+ subtasks = subtasks_result
2353
2459
  if self.metrics_logger and subtasks:
2354
2460
  self.metrics_logger.log_task_decomposed(
2355
2461
  parent_task_id=task.id,
@@ -3203,6 +3309,18 @@ class Workforce(BaseNode):
3203
3309
  )
3204
3310
 
3205
3311
  agent = ChatAgent(sys_msg, **(agent_kwargs or {}))
3312
+
3313
+ # Validate agent compatibility
3314
+ try:
3315
+ workforce_instance._validate_agent_compatibility(
3316
+ agent, "Worker agent"
3317
+ )
3318
+ except ValueError as e:
3319
+ return {
3320
+ "status": "error",
3321
+ "message": str(e),
3322
+ }
3323
+
3206
3324
  workforce_instance.add_single_agent_worker(description, agent)
3207
3325
 
3208
3326
  return {
camel/tasks/task.py CHANGED
@@ -19,6 +19,7 @@ from typing import (
19
19
  Any,
20
20
  Callable,
21
21
  Dict,
22
+ Generator,
22
23
  List,
23
24
  Literal,
24
25
  Optional,
@@ -30,6 +31,7 @@ from pydantic import BaseModel, ConfigDict, Field
30
31
 
31
32
  if TYPE_CHECKING:
32
33
  from camel.agents import ChatAgent
34
+ from camel.agents.chat_agent import StreamingChatAgentResponse
33
35
  import uuid
34
36
 
35
37
  from camel.logger import get_logger
@@ -402,9 +404,9 @@ class Task(BaseModel):
402
404
  agent: "ChatAgent",
403
405
  prompt: Optional[str] = None,
404
406
  task_parser: Callable[[str, str], List["Task"]] = parse_response,
405
- ) -> List["Task"]:
406
- r"""Decompose a task to a list of sub-tasks. It can be used for data
407
- generation and planner of agent.
407
+ ) -> Union[List["Task"], Generator[List["Task"], None, None]]:
408
+ r"""Decompose a task to a list of sub-tasks. Automatically detects
409
+ streaming or non-streaming based on agent configuration.
408
410
 
409
411
  Args:
410
412
  agent (ChatAgent): An agent that used to decompose the task.
@@ -415,7 +417,10 @@ class Task(BaseModel):
415
417
  the default parse_response will be used.
416
418
 
417
419
  Returns:
418
- List[Task]: A list of tasks which are :obj:`Task` instances.
420
+ Union[List[Task], Generator[List[Task], None, None]]: If agent is
421
+ configured for streaming, returns a generator that yields lists
422
+ of new tasks as they are parsed. Otherwise returns a list of
423
+ all tasks.
419
424
  """
420
425
 
421
426
  role_name = agent.role_name
@@ -427,6 +432,72 @@ class Task(BaseModel):
427
432
  role_name=role_name, content=content
428
433
  )
429
434
  response = agent.step(msg)
435
+
436
+ # Auto-detect streaming based on response type
437
+ from camel.agents.chat_agent import StreamingChatAgentResponse
438
+
439
+ if isinstance(response, StreamingChatAgentResponse):
440
+ return self._decompose_streaming(response, task_parser)
441
+ else:
442
+ return self._decompose_non_streaming(response, task_parser)
443
+
444
+ def _decompose_streaming(
445
+ self,
446
+ response: "StreamingChatAgentResponse",
447
+ task_parser: Callable[[str, str], List["Task"]],
448
+ ) -> Generator[List["Task"], None, None]:
449
+ r"""Handle streaming response for task decomposition.
450
+
451
+ Args:
452
+ response: Streaming response from agent
453
+ task_parser: Function to parse tasks from response
454
+
455
+ Yields:
456
+ List[Task]: New tasks as they are parsed from streaming response
457
+ """
458
+ accumulated_content = ""
459
+ yielded_count = 0
460
+
461
+ # Process streaming response
462
+ for chunk in response:
463
+ accumulated_content = chunk.msg.content
464
+
465
+ # Try to parse partial tasks from accumulated content
466
+ try:
467
+ current_tasks = self._parse_partial_tasks(accumulated_content)
468
+
469
+ # Yield new tasks if we have more than previously yielded
470
+ if len(current_tasks) > yielded_count:
471
+ new_tasks = current_tasks[yielded_count:]
472
+ for task in new_tasks:
473
+ task.additional_info = self.additional_info
474
+ task.parent = self
475
+ yield new_tasks
476
+ yielded_count = len(current_tasks)
477
+
478
+ except Exception:
479
+ # If parsing fails, continue accumulating
480
+ continue
481
+
482
+ # Final complete parsing
483
+ final_tasks = task_parser(accumulated_content, self.id)
484
+ for task in final_tasks:
485
+ task.additional_info = self.additional_info
486
+ task.parent = self
487
+ self.subtasks = final_tasks
488
+
489
+ def _decompose_non_streaming(
490
+ self, response, task_parser: Callable[[str, str], List["Task"]]
491
+ ) -> List["Task"]:
492
+ r"""Handle non-streaming response for task decomposition.
493
+
494
+ Args:
495
+ response: Regular response from agent
496
+ task_parser: Function to parse tasks from response
497
+
498
+ Returns:
499
+ List[Task]: All parsed tasks
500
+ """
430
501
  tasks = task_parser(response.msg.content, self.id)
431
502
  for task in tasks:
432
503
  task.additional_info = self.additional_info
@@ -434,6 +505,35 @@ class Task(BaseModel):
434
505
  self.subtasks = tasks
435
506
  return tasks
436
507
 
508
def _parse_partial_tasks(self, response: str) -> List["Task"]:
    r"""Extract subtasks from a possibly incomplete response.

    Only fully closed ``<task>...</task>`` blocks are considered, so
    this is safe to call on partial streaming output.

    Args:
        response (str): Partial response content.

    Returns:
        List[Task]: Tasks built from the complete ``<task>`` blocks
            that passed validation.
    """
    parent_id = self.id or "0"
    blocks = re.findall(r"<task>(.*?)</task>", response, re.DOTALL)

    parsed: List["Task"] = []
    for index, raw in enumerate(blocks, 1):
        text = raw.strip()
        child_id = f"{parent_id}.{index}"
        if validate_task_content(text, child_id):
            parsed.append(Task(content=text, id=child_id))
        else:
            # Invalid blocks are logged and dropped, never raised.
            logger.warning(
                f"Skipping invalid subtask {child_id} "
                f"during streaming decomposition: "
                f"Content '{text[:50]}...' failed validation"
            )
    return parsed
536
+
437
537
  def compose(
438
538
  self,
439
539
  agent: "ChatAgent",
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: camel-ai
3
- Version: 0.2.71a8
3
+ Version: 0.2.71a9
4
4
  Summary: Communicative Agents for AI Society Study
5
5
  Project-URL: Homepage, https://www.camel-ai.org/
6
6
  Project-URL: Repository, https://github.com/camel-ai/camel
@@ -1,4 +1,4 @@
1
- camel/__init__.py,sha256=TUs23EA2p98OC0WV8dpKGkATsBnuXjPHnsVhnXlDUPk,901
1
+ camel/__init__.py,sha256=5n7NZMAxb749oqgkyLbTX4gLlHYBd4c8HTvC0fzotXg,901
2
2
  camel/generators.py,sha256=JRqj9_m1PF4qT6UtybzTQ-KBT9MJQt18OAAYvQ_fr2o,13844
3
3
  camel/human.py,sha256=Xg8x1cS5KK4bQ1SDByiHZnzsRpvRP-KZViNvmu38xo4,5475
4
4
  camel/logger.py,sha256=WgEwael_eT6D-lVAKHpKIpwXSTjvLbny5jbV1Ab8lnA,5760
@@ -275,12 +275,12 @@ camel/societies/workforce/__init__.py,sha256=bkTI-PE-MSK9AQ2V2gR6cR2WY-R7Jqy_NmX
275
275
  camel/societies/workforce/base.py,sha256=z2DmbTP5LL5-aCAAqglznQqCLfPmnyM5zD3w6jjtsb8,2175
276
276
  camel/societies/workforce/prompts.py,sha256=gobVelz7rRdReogFG2QCfFy21GfhaQJyoiNnKWo4EHE,14391
277
277
  camel/societies/workforce/role_playing_worker.py,sha256=z5-OcNiaNEaoC206_3HD7xeonuUkD-XTxYbD3AqoNC8,10319
278
- camel/societies/workforce/single_agent_worker.py,sha256=seWeFR-scpaTSS0yXwqJSJkUWaNuiYyuxDTfbpebRN8,17314
278
+ camel/societies/workforce/single_agent_worker.py,sha256=fXgAEkDMhvTd-nbwy5Gw3BOaRAiPsL8mf3AW1aaNqKU,19342
279
279
  camel/societies/workforce/structured_output_handler.py,sha256=xr8szFN86hg3jQ825aEkJTjkSFQnTlbinVg4j1vZJVI,17870
280
280
  camel/societies/workforce/task_channel.py,sha256=GWHaGQBpjTzdKbWoBYAQzzAiLKRKRXa6beqtc4cg-Is,7611
281
281
  camel/societies/workforce/utils.py,sha256=THgNHSeZsNVnjTzQTur3qCJhi72MrDS8X2gPET174cI,8434
282
282
  camel/societies/workforce/worker.py,sha256=k_AokiF-HAfq6Cd5SQYb0uaoJsA_kSwV_n5o1fxvzmo,6522
283
- camel/societies/workforce/workforce.py,sha256=u-AVADZFYWLQMB6WjB8gsDcVE3z48LYpGDwdkhlIut0,133692
283
+ camel/societies/workforce/workforce.py,sha256=HD9Q5U4hqtTYewNWbYxa7u6ojW6lyGX_o_K7alm4S6E,138306
284
284
  camel/societies/workforce/workforce_logger.py,sha256=0YT__ys48Bgn0IICKIZBmSWhON-eA1KShebjCdn5ppE,24525
285
285
  camel/storages/__init__.py,sha256=RwpEyvxpMbJzVDZJJygeBg4AzyYMkTjjkfB53hTuqGo,2141
286
286
  camel/storages/graph_storages/__init__.py,sha256=G29BNn651C0WTOpjCl4QnVM-4B9tcNh8DdmsCiONH8Y,948
@@ -310,7 +310,7 @@ camel/storages/vectordb_storages/qdrant.py,sha256=a_cT0buSCHQ2CPZy852-mdvMDwy5zo
310
310
  camel/storages/vectordb_storages/tidb.py,sha256=w83bxgKgso43MtHqlpf2EMSpn1_Nz6ZZtY4fPw_-vgs,11192
311
311
  camel/storages/vectordb_storages/weaviate.py,sha256=wDUE4KvfmOl3DqHFU4uF0VKbHu-q9vKhZDe8FZ6QXsk,27888
312
312
  camel/tasks/__init__.py,sha256=MuHwkw5GRQc8NOCzj8tjtBrr2Xg9KrcKp-ed_-2ZGIM,906
313
- camel/tasks/task.py,sha256=-TqoSIrWk_BaocNUqr3RVowA9JBe2OO_fAXTdp6nXAU,20366
313
+ camel/tasks/task.py,sha256=GNkySKVtX8h6kCgSA9AIT5OQNlt4v87oxyNiYWmHebE,24118
314
314
  camel/tasks/task_prompt.py,sha256=3KZmKYKUPcTKe8EAZOZBN3G05JHRVt7oHY9ORzLVu1g,2150
315
315
  camel/terminators/__init__.py,sha256=t8uqrkUnXEOYMXQDgaBkMFJ0EXFKI0kmx4cUimli3Ls,991
316
316
  camel/terminators/base.py,sha256=xmJzERX7GdSXcxZjAHHODa0rOxRChMSRboDCNHWSscs,1511
@@ -446,7 +446,7 @@ camel/verifiers/math_verifier.py,sha256=tA1D4S0sm8nsWISevxSN0hvSVtIUpqmJhzqfbuMo
446
446
  camel/verifiers/models.py,sha256=GdxYPr7UxNrR1577yW4kyroRcLGfd-H1GXgv8potDWU,2471
447
447
  camel/verifiers/physics_verifier.py,sha256=c1grrRddcrVN7szkxhv2QirwY9viIRSITWeWFF5HmLs,30187
448
448
  camel/verifiers/python_verifier.py,sha256=ogTz77wODfEcDN4tMVtiSkRQyoiZbHPY2fKybn59lHw,20558
449
- camel_ai-0.2.71a8.dist-info/METADATA,sha256=v1GZ0ttSR8kcp2jXjYxmz0SUh6EOf_eaR7QdoTP8BcY,50002
450
- camel_ai-0.2.71a8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
451
- camel_ai-0.2.71a8.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
452
- camel_ai-0.2.71a8.dist-info/RECORD,,
449
+ camel_ai-0.2.71a9.dist-info/METADATA,sha256=3IG3o0Zfp79IqN8Y6I6N5jJsjw0-A9A_gqwDszF8qLw,50002
450
+ camel_ai-0.2.71a9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
451
+ camel_ai-0.2.71a9.dist-info/licenses/LICENSE,sha256=id0nB2my5kG0xXeimIu5zZrbHLS6EQvxvkKkzIHaT2k,11343
452
+ camel_ai-0.2.71a9.dist-info/RECORD,,