camel-ai 0.2.78__py3-none-any.whl → 0.2.79a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (39):
  1. camel/__init__.py +1 -1
  2. camel/agents/_utils.py +38 -0
  3. camel/agents/chat_agent.py +1112 -287
  4. camel/datasets/base_generator.py +39 -10
  5. camel/environments/single_step.py +28 -3
  6. camel/memories/__init__.py +1 -2
  7. camel/memories/agent_memories.py +34 -0
  8. camel/memories/base.py +26 -0
  9. camel/memories/blocks/chat_history_block.py +117 -17
  10. camel/memories/context_creators/score_based.py +25 -384
  11. camel/messages/base.py +26 -0
  12. camel/models/aws_bedrock_model.py +1 -17
  13. camel/models/azure_openai_model.py +113 -67
  14. camel/models/model_factory.py +17 -1
  15. camel/models/moonshot_model.py +102 -5
  16. camel/models/openai_compatible_model.py +62 -32
  17. camel/models/openai_model.py +61 -35
  18. camel/models/samba_model.py +34 -15
  19. camel/models/sglang_model.py +41 -11
  20. camel/societies/workforce/__init__.py +2 -0
  21. camel/societies/workforce/events.py +122 -0
  22. camel/societies/workforce/role_playing_worker.py +15 -11
  23. camel/societies/workforce/single_agent_worker.py +143 -291
  24. camel/societies/workforce/utils.py +2 -1
  25. camel/societies/workforce/workflow_memory_manager.py +772 -0
  26. camel/societies/workforce/workforce.py +513 -188
  27. camel/societies/workforce/workforce_callback.py +74 -0
  28. camel/societies/workforce/workforce_logger.py +144 -140
  29. camel/societies/workforce/workforce_metrics.py +33 -0
  30. camel/storages/vectordb_storages/oceanbase.py +5 -4
  31. camel/toolkits/file_toolkit.py +166 -0
  32. camel/toolkits/message_integration.py +15 -13
  33. camel/toolkits/terminal_toolkit/terminal_toolkit.py +112 -79
  34. camel/types/enums.py +1 -0
  35. camel/utils/context_utils.py +201 -2
  36. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/METADATA +14 -13
  37. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/RECORD +39 -35
  38. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/WHEEL +0 -0
  39. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/licenses/LICENSE +0 -0

camel/societies/workforce/single_agent_worker.py

@@ -15,12 +15,8 @@ from __future__ import annotations
 
 import asyncio
 import datetime
-import glob
-import os
-import re
 import time
 from collections import deque
-from pathlib import Path
 from typing import Any, Dict, List, Optional
 
 from colorama import Fore
@@ -34,8 +30,11 @@ from camel.societies.workforce.structured_output_handler import (
 )
 from camel.societies.workforce.utils import TaskResult
 from camel.societies.workforce.worker import Worker
+from camel.societies.workforce.workflow_memory_manager import (
+    WorkflowMemoryManager,
+)
 from camel.tasks.task import Task, TaskState, is_task_result_insufficient
-from camel.utils.context_utils import ContextUtility, WorkflowSummary
+from camel.utils.context_utils import ContextUtility
 
 logger = get_logger(__name__)
 
@@ -80,6 +79,7 @@ class AgentPool:
         self._in_use_agents: set = set()
         self._agent_last_used: dict = {}
         self._lock = asyncio.Lock()
+        self._condition = asyncio.Condition(self._lock)
 
         # Statistics
         self._total_borrows = 0
@@ -105,36 +105,31 @@ class AgentPool:
 
     async def get_agent(self) -> ChatAgent:
         r"""Get an agent from the pool, creating one if necessary."""
-        async with self._lock:
+        async with self._condition:
             self._total_borrows += 1
 
-            if self._available_agents:
-                agent = self._available_agents.popleft()
-                self._in_use_agents.add(id(agent))
-                self._pool_hits += 1
-                return agent
-
-            # Check if we can create a new agent
-            if len(self._in_use_agents) < self.max_size or self.auto_scale:
-                agent = self._create_fresh_agent()
-                self._in_use_agents.add(id(agent))
-                return agent
-
-        # Wait for available agent
-        while True:
-            async with self._lock:
+            # Try to get available agent or create new one
+            while True:
                 if self._available_agents:
                     agent = self._available_agents.popleft()
                     self._in_use_agents.add(id(agent))
                     self._pool_hits += 1
                     return agent
-            await asyncio.sleep(0.05)
+
+                # Check if we can create a new agent
+                if len(self._in_use_agents) < self.max_size or self.auto_scale:
+                    agent = self._create_fresh_agent()
+                    self._in_use_agents.add(id(agent))
+                    return agent
+
+                # Wait for an agent to be returned
+                await self._condition.wait()
 
     async def return_agent(self, agent: ChatAgent) -> None:
         r"""Return an agent to the pool."""
         agent_id = id(agent)
 
-        async with self._lock:
+        async with self._condition:
             if agent_id not in self._in_use_agents:
                 return
 
@@ -145,6 +140,8 @@ class AgentPool:
             agent.reset()
             self._agent_last_used[agent_id] = time.time()
             self._available_agents.append(agent)
+            # Notify one waiting coroutine that an agent is available
+            self._condition.notify()
         else:
             # Remove tracking for agents not returned to pool
             self._agent_last_used.pop(agent_id, None)
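
Note on the AgentPool hunks above: the old implementation polled for a free agent by re-acquiring the lock and sleeping 50 ms between checks, while the new one builds an asyncio.Condition on the same lock so get_agent() blocks in wait() until return_agent() calls notify(). A minimal standalone sketch of that pattern, using a hypothetical TinyPool rather than CAMEL's actual API:

    import asyncio
    from collections import deque


    class TinyPool:
        """Minimal sketch of the condition-variable pattern (hypothetical class)."""

        def __init__(self, items):
            self._items = deque(items)
            self._lock = asyncio.Lock()
            self._condition = asyncio.Condition(self._lock)

        async def acquire(self):
            async with self._condition:
                # wait() releases the lock while suspended and re-acquires it
                # after another coroutine calls notify().
                while not self._items:
                    await self._condition.wait()
                return self._items.popleft()

        async def release(self, item):
            async with self._condition:
                self._items.append(item)
                self._condition.notify()  # wake exactly one waiting borrower


    async def main():
        pool = TinyPool(["agent-1"])
        first = await pool.acquire()

        async def return_later():
            await asyncio.sleep(0.1)
            await pool.release(first)

        returner = asyncio.create_task(return_later())
        second = await pool.acquire()  # blocks here until release() notifies
        await returner
        print("re-acquired", second)


    asyncio.run(main())

Unlike sleep-based polling, wait() releases the underlying lock while suspended and is woken as soon as an agent comes back, so borrowers neither busy-wait nor contend for the lock on a fixed interval.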
@@ -154,7 +151,7 @@ class AgentPool:
         if not self.auto_scale:
             return
 
-        async with self._lock:
+        async with self._condition:
             if not self._available_agents:
                 return
 
@@ -256,6 +253,9 @@ class SingleAgentWorker(Worker):
         # from all task processing
         self._conversation_accumulator: Optional[ChatAgent] = None
 
+        # workflow memory manager for handling workflow operations
+        self._workflow_manager: Optional[WorkflowMemoryManager] = None
+
         # note: context utility is set on the worker agent during save/load
         # operations to avoid creating session folders during initialization
 
@@ -318,6 +318,21 @@ class SingleAgentWorker(Worker):
             )
         return self._conversation_accumulator
 
+    def _get_workflow_manager(self) -> WorkflowMemoryManager:
+        r"""Get or create the workflow memory manager."""
+        if self._workflow_manager is None:
+            context_util = (
+                self._shared_context_utility
+                if self._shared_context_utility is not None
+                else None
+            )
+            self._workflow_manager = WorkflowMemoryManager(
+                worker=self.worker,
+                description=self.description,
+                context_utility=context_util,
+            )
+        return self._workflow_manager
+
     async def _process_task(
         self, task: Task, dependencies: List[Task]
     ) -> TaskState:
@@ -344,11 +359,15 @@ class SingleAgentWorker(Worker):
 
         try:
             dependency_tasks_info = self._get_dep_tasks_info(dependencies)
-            prompt = PROCESS_TASK_PROMPT.format(
-                content=task.content,
-                parent_task_content=task.parent.content if task.parent else "",
-                dependency_tasks_info=dependency_tasks_info,
-                additional_info=task.additional_info,
+            prompt = str(
+                PROCESS_TASK_PROMPT.format(
+                    content=task.content,
+                    parent_task_content=task.parent.content
+                    if task.parent
+                    else "",
+                    dependency_tasks_info=dependency_tasks_info,
+                    additional_info=task.additional_info,
+                )
             )
 
             if self.use_structured_output_handler and self.structured_handler:
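
The prompt built above is now wrapped in str(). PROCESS_TASK_PROMPT appears to be a CAMEL TextPrompt, a str subclass whose format() returns another TextPrompt, so the explicit str() call normalizes the result to a plain built-in string before it reaches the agent. A simplified stand-in class (not CAMEL's actual implementation) illustrating the difference:

    class DemoPrompt(str):
        """Simplified stand-in for a str subclass such as camel.prompts.TextPrompt."""

        def format(self, *args, **kwargs):
            # Returns an instance of the subclass rather than a plain str.
            return DemoPrompt(super().format(*args, **kwargs))


    template = DemoPrompt("Task: {content}")
    filled = template.format(content="index the repository")
    print(type(filled).__name__)       # DemoPrompt
    print(type(str(filled)).__name__)  # str -- what _process_task now passes along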
@@ -428,6 +447,7 @@ class SingleAgentWorker(Worker):
                     "usage"
                 ) or final_response.info.get("token_usage")
             else:
+                final_response = response
                 usage_info = response.info.get("usage") or response.info.get(
                     "token_usage"
                 )
@@ -562,10 +582,11 @@ class SingleAgentWorker(Worker):
         while True:
             try:
                 # Fixed interval cleanup
-                await asyncio.sleep(self.agent_pool.cleanup_interval)
-
                 if self.agent_pool:
+                    await asyncio.sleep(self.agent_pool.cleanup_interval)
                     await self.agent_pool.cleanup_idle_agents()
+                else:
+                    break
             except asyncio.CancelledError:
                 break
             except Exception as e:
@@ -581,9 +602,16 @@ class SingleAgentWorker(Worker):
         r"""Save the worker's current workflow memories using agent
         summarization.
 
+        .. deprecated:: 0.2.80
+            Use :meth:`save_workflow_memories_async` for async/await support
+            and better integration with parallel workflow saving.
+
         This method generates a workflow summary from the worker agent's
         conversation history and saves it to a markdown file. The filename
-        is based on the worker's description for easy loading later.
+        is based on either the worker's explicit role_name or the generated
+        task_title from the summary.
+
+        Delegates to WorkflowMemoryManager for all workflow operations.
 
         Returns:
             Dict[str, Any]: Result dictionary with keys:
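
Since save_workflow_memories() now also emits a DeprecationWarning at call time (see the next hunk), callers migrating incrementally can control how that warning surfaces with the standard warnings module. For example, a test suite might promote it to an error so stray synchronous calls are caught early (generic Python, not CAMEL-specific):

    import warnings

    # Turn the new deprecation into a hard error during test runs.
    warnings.filterwarnings(
        "error",
        message=r"save_workflow_memories\(\) is synchronous.*",
        category=DeprecationWarning,
    )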
@@ -591,287 +619,111 @@
                 - summary (str): Generated workflow summary
                 - file_path (str): Path to saved file
                 - worker_description (str): Worker description used
+
+        See Also:
+            :meth:`save_workflow_memories_async`: Async version for better
+                performance in parallel workflows.
         """
-        try:
-            # validate requirements
-            validation_error = self._validate_workflow_save_requirements()
-            if validation_error:
-                return validation_error
-
-            # setup context utility and agent
-            context_util = self._get_context_utility()
-            self.worker.set_context_utility(context_util)
-
-            # prepare workflow summarization components
-            filename = self._generate_workflow_filename()
-            structured_prompt = self._prepare_workflow_prompt()
-            agent_to_summarize = self._select_agent_for_summarization(
-                context_util
-            )
+        import warnings
+
+        warnings.warn(
+            "save_workflow_memories() is synchronous. Consider using "
+            "save_workflow_memories_async() for async/await support.",
+            DeprecationWarning,
+            stacklevel=2,
+        )
+
+        manager = self._get_workflow_manager()
+        result = manager.save_workflow(
+            conversation_accumulator=self._conversation_accumulator
+        )
 
-            # generate and save workflow summary
-            result = agent_to_summarize.summarize(
-                filename=filename,
-                summary_prompt=structured_prompt,
-                response_format=WorkflowSummary,
+        # clean up accumulator after successful save
+        if (
+            result.get("status") == "success"
+            and self._conversation_accumulator is not None
+        ):
+            logger.info(
+                "Cleaning up conversation accumulator after workflow "
+                "summarization"
             )
+            self._conversation_accumulator = None
 
-            # add worker metadata and cleanup
-            result["worker_description"] = self.description
-            if self._conversation_accumulator is not None:
-                logger.info(
-                    "Cleaning up conversation accumulator after workflow "
-                    "summarization"
-                )
-                self._conversation_accumulator = None
+        return result
 
-            return result
+    async def save_workflow_memories_async(self) -> Dict[str, Any]:
+        r"""Asynchronously save the worker's current workflow memories using
+        agent summarization.
 
-        except Exception as e:
-            return {
-                "status": "error",
-                "summary": "",
-                "file_path": None,
-                "worker_description": self.description,
-                "message": f"Failed to save workflow memories: {e!s}",
-            }
+        This is the async version of save_workflow_memories() that uses
+        asummarize() for non-blocking LLM calls, enabling parallel
+        summarization of multiple workers.
+
+        Delegates to WorkflowMemoryManager for all workflow operations.
+
+        Returns:
+            Dict[str, Any]: Result dictionary with keys:
+                - status (str): "success" or "error"
+                - summary (str): Generated workflow summary
+                - file_path (str): Path to saved file
+                - worker_description (str): Worker description used
+        """
+        manager = self._get_workflow_manager()
+        result = await manager.save_workflow_async(
+            conversation_accumulator=self._conversation_accumulator
+        )
+
+        # clean up accumulator after successful save
+        if (
+            result.get("status") == "success"
+            and self._conversation_accumulator is not None
+        ):
+            logger.info(
+                "Cleaning up conversation accumulator after workflow "
+                "summarization"
+            )
+            self._conversation_accumulator = None
+
+        return result
 
     def load_workflow_memories(
         self,
         pattern: Optional[str] = None,
-        max_files_to_load: int = 3,
+        max_workflows: int = 3,
         session_id: Optional[str] = None,
+        use_smart_selection: bool = True,
     ) -> bool:
-        r"""Load workflow memories matching worker description
-        from saved files.
+        r"""Load workflow memories using intelligent agent-based selection.
+
+        This method uses the worker agent to intelligently select the most
+        relevant workflows based on metadata (title, description, tags)
+        rather than simple filename pattern matching.
 
-        This method searches for workflow memory files that match the worker's
-        description and loads them into the agent's memory using
-        ContextUtility.
+        Delegates to WorkflowMemoryManager for all workflow operations.
 
         Args:
-            pattern (Optional[str]): Custom search pattern for workflow
-                memory files.
-                If None, uses worker description to generate pattern.
-            max_files_to_load (int): Maximum number of workflow files to load.
+            pattern (Optional[str]): Legacy parameter for backward
+                compatibility. When use_smart_selection=False, uses this
+                pattern for file matching. Ignored when smart selection
+                is enabled.
+            max_workflows (int): Maximum number of workflow files to load.
                 (default: :obj:`3`)
             session_id (Optional[str]): Specific workforce session ID to load
                 from. If None, searches across all sessions.
                 (default: :obj:`None`)
+            use_smart_selection (bool): Whether to use agent-based intelligent
+                workflow selection. When True, uses metadata and LLM to select
+                most relevant workflows. When False, falls back to pattern
+                matching. (default: :obj:`True`)
 
         Returns:
             bool: True if workflow memories were successfully loaded, False
                 otherwise.
         """
-        try:
-            # reset system message to original state before loading
-            # this prevents duplicate workflow context on multiple calls
-            if isinstance(self.worker, ChatAgent):
-                self.worker.reset_to_original_system_message()
-
-            # Find workflow memory files matching the pattern
-            workflow_files = self._find_workflow_files(pattern, session_id)
-            if not workflow_files:
-                return False
-
-            # Load the workflow memory files
-            loaded_count = self._load_workflow_files(
-                workflow_files, max_files_to_load
-            )
-
-            # Report results
-            logger.info(
-                f"Successfully loaded {loaded_count} workflow file(s) for "
-                f"{self.description}"
-            )
-            return loaded_count > 0
-
-        except Exception as e:
-            logger.warning(
-                f"Error loading workflow memories for {self.description}: "
-                f"{e!s}"
-            )
-            return False
-
-    def _find_workflow_files(
-        self, pattern: Optional[str], session_id: Optional[str] = None
-    ) -> List[str]:
-        r"""Find and return sorted workflow files matching the pattern.
-
-        Args:
-            pattern (Optional[str]): Custom search pattern for workflow files.
-                If None, uses worker description to generate pattern.
-            session_id (Optional[str]): Specific session ID to search in.
-                If None, searches across all sessions.
-
-        Returns:
-            List[str]: Sorted list of workflow file paths (empty if
-                validation fails).
-        """
-        # Ensure we have a ChatAgent worker
-        if not isinstance(self.worker, ChatAgent):
-            logger.warning(
-                f"Cannot load workflow: {self.description} worker is not "
-                "a ChatAgent"
-            )
-            return []
-
-        # generate filename-safe search pattern from worker description
-        if pattern is None:
-            # sanitize description: spaces to underscores, remove special chars
-            clean_desc = self.description.lower().replace(" ", "_")
-            clean_desc = re.sub(r'[^a-z0-9_]', '', clean_desc)
-            pattern = f"{clean_desc}_workflow*.md"
-
-        # Get the base workforce_workflows directory
-        camel_workdir = os.environ.get("CAMEL_WORKDIR")
-        if camel_workdir:
-            base_dir = os.path.join(camel_workdir, "workforce_workflows")
-        else:
-            base_dir = "workforce_workflows"
-
-        # search for workflow files in specified or all session directories
-        if session_id:
-            search_path = str(Path(base_dir) / session_id / pattern)
-        else:
-            # search across all session directories using wildcard pattern
-            search_path = str(Path(base_dir) / "*" / pattern)
-        workflow_files = glob.glob(search_path)
-
-        if not workflow_files:
-            logger.info(f"No workflow files found for pattern: {pattern}")
-            return []
-
-        # prioritize most recent sessions by session timestamp in
-        # directory name
-        def extract_session_timestamp(filepath: str) -> str:
-            match = re.search(r'session_(\d{8}_\d{6}_\d{6})', filepath)
-            return match.group(1) if match else ""
-
-        workflow_files.sort(key=extract_session_timestamp, reverse=True)
-        return workflow_files
-
-    def _load_workflow_files(
-        self, workflow_files: List[str], max_files_to_load: int
-    ) -> int:
-        r"""Load workflow files and return count of successful loads.
-
-        Args:
-            workflow_files (List[str]): List of workflow file paths to load.
-
-        Returns:
-            int: Number of successfully loaded workflow files.
-        """
-        loaded_count = 0
-        # limit loading to prevent context overflow
-        for file_path in workflow_files[:max_files_to_load]:
-            try:
-                # extract file and session info from full path
-                filename = os.path.basename(file_path).replace('.md', '')
-                session_dir = os.path.dirname(file_path)
-                session_id = os.path.basename(session_dir)
-
-                # create context utility for the specific session
-                # where file exists
-                temp_utility = ContextUtility.get_workforce_shared(session_id)
-
-                status = temp_utility.load_markdown_context_to_memory(
-                    self.worker, filename
-                )
-
-                if "Context appended" in status:
-                    loaded_count += 1
-                    logger.info(f"Loaded workflow: {filename}")
-                else:
-                    logger.warning(
-                        f"Failed to load workflow {filename}: {status}"
-                    )
-
-            except Exception as e:
-                logger.warning(
-                    f"Failed to load workflow file {file_path}: {e!s}"
-                )
-                continue
-
-        return loaded_count
-
-    def _validate_workflow_save_requirements(self) -> Optional[Dict[str, Any]]:
-        r"""Validate requirements for workflow saving.
-
-        Returns:
-            Optional[Dict[str, Any]]: Error result dict if validation fails,
-                None if validation passes.
-        """
-        if not isinstance(self.worker, ChatAgent):
-            return {
-                "status": "error",
-                "summary": "",
-                "file_path": None,
-                "worker_description": self.description,
-                "message": (
-                    "Worker must be a ChatAgent instance to save workflow "
-                    "memories"
-                ),
-            }
-        return None
-
-    def _generate_workflow_filename(self) -> str:
-        r"""Generate a filename for the workflow based on worker description.
-
-        Returns:
-            str: Sanitized filename without timestamp (session already has
-                timestamp).
-        """
-        clean_desc = self.description.lower().replace(" ", "_")
-        clean_desc = re.sub(r'[^a-z0-9_]', '', clean_desc)
-        return f"{clean_desc}_workflow"
-
-    def _prepare_workflow_prompt(self) -> str:
-        r"""Prepare the structured prompt for workflow summarization.
-
-        Returns:
-            str: Structured prompt for workflow summary.
-        """
-        workflow_prompt = WorkflowSummary.get_instruction_prompt()
-        return StructuredOutputHandler.generate_structured_prompt(
-            base_prompt=workflow_prompt, schema=WorkflowSummary
+        manager = self._get_workflow_manager()
+        return manager.load_workflows(
+            pattern=pattern,
+            max_files_to_load=max_workflows,
+            session_id=session_id,
+            use_smart_selection=use_smart_selection,
         )
-
-    def _select_agent_for_summarization(
-        self, context_util: ContextUtility
-    ) -> ChatAgent:
-        r"""Select the best agent for workflow summarization.
-
-        Args:
-            context_util: Context utility to set on selected agent.
-
-        Returns:
-            ChatAgent: Agent to use for summarization.
-        """
-        agent_to_summarize = self.worker
-
-        if self._conversation_accumulator is not None:
-            accumulator_messages, _ = (
-                self._conversation_accumulator.memory.get_context()
-            )
-            if accumulator_messages:
-                self._conversation_accumulator.set_context_utility(
-                    context_util
-                )
-                agent_to_summarize = self._conversation_accumulator
-                logger.info(
-                    f"Using conversation accumulator with "
-                    f"{len(accumulator_messages)} messages for workflow "
-                    f"summary"
-                )
-            else:
-                logger.info(
-                    "Using original worker for workflow summary (no "
-                    "accumulated conversations)"
-                )
-        else:
-            logger.info(
-                "Using original worker for workflow summary (no accumulator)"
-            )
-
-        return agent_to_summarize
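
Taken together, the reworked workflow-memory surface on SingleAgentWorker looks roughly like this from a caller's point of view. This is a sketch based only on the signatures visible in this diff; the Workforce setup is elided and `worker` is assumed to be an already-constructed SingleAgentWorker:

    import asyncio


    async def persist_and_reload(worker):
        # Non-blocking save; several workers could be summarized concurrently
        # with asyncio.gather() since the underlying LLM call is awaited.
        result = await worker.save_workflow_memories_async()
        if result.get("status") == "success":
            print("workflow saved to", result.get("file_path"))

        # Later (e.g. in a fresh session), reload the most relevant workflows.
        loaded = worker.load_workflow_memories(
            max_workflows=3,            # renamed from max_files_to_load
            use_smart_selection=True,   # agent-based selection instead of glob patterns
        )
        print("memories loaded:", loaded)


    # asyncio.run(persist_and_reload(worker))  # `worker` built elsewhere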

camel/societies/workforce/utils.py

@@ -38,7 +38,8 @@ class TaskResult(BaseModel):
 
     content: str = Field(description="The result of the task.")
    failed: bool = Field(
-        description="Flag indicating whether the task processing failed."
+        default=False,
+        description="Flag indicating whether the task processing failed.",
     )
 
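
With the added default, TaskResult can now be constructed (or parsed from structured model output) without an explicit failed flag. A quick Pydantic illustration using a local stand-in model that mirrors the two fields shown above:

    from pydantic import BaseModel, Field


    class TaskResultDemo(BaseModel):
        # Local stand-in mirroring the fields shown in the hunk above.
        content: str = Field(description="The result of the task.")
        failed: bool = Field(
            default=False,
            description="Flag indicating whether the task processing failed.",
        )


    # Before this change, omitting `failed` raised a ValidationError; it now
    # defaults to False, so outputs that omit the flag still validate.
    print(TaskResultDemo(content="done").failed)  # False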