camel-ai 0.2.76a14__py3-none-any.whl → 0.2.78__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic. Click here for more details.

@@ -15,14 +15,19 @@ from __future__ import annotations
15
15
 
16
16
  import asyncio
17
17
  import datetime
18
+ import glob
19
+ import os
20
+ import re
18
21
  import time
19
22
  from collections import deque
20
- from typing import Any, List, Optional
23
+ from pathlib import Path
24
+ from typing import Any, Dict, List, Optional
21
25
 
22
26
  from colorama import Fore
23
27
 
24
28
  from camel.agents import ChatAgent
25
29
  from camel.agents.chat_agent import AsyncStreamingChatAgentResponse
30
+ from camel.logger import get_logger
26
31
  from camel.societies.workforce.prompts import PROCESS_TASK_PROMPT
27
32
  from camel.societies.workforce.structured_output_handler import (
28
33
  StructuredOutputHandler,
@@ -30,6 +35,9 @@ from camel.societies.workforce.structured_output_handler import (
30
35
  from camel.societies.workforce.utils import TaskResult
31
36
  from camel.societies.workforce.worker import Worker
32
37
  from camel.tasks.task import Task, TaskState, is_task_result_insufficient
38
+ from camel.utils.context_utils import ContextUtility, WorkflowSummary
39
+
40
+ logger = get_logger(__name__)
33
41
 
34
42
 
35
43
  class AgentPool:
@@ -201,6 +209,16 @@ class SingleAgentWorker(Worker):
201
209
  support native structured output. When disabled, the workforce
202
210
  uses the native response_format parameter.
203
211
  (default: :obj:`True`)
212
+ context_utility (ContextUtility, optional): Shared context utility
213
+ instance for workflow management. If provided, all workflow
214
+ operations will use this shared instance instead of creating
215
+ a new one. This ensures multiple workers share the same session
216
+ directory. (default: :obj:`None`)
217
+ enable_workflow_memory (bool, optional): Whether to enable workflow
218
+ memory accumulation during task execution. When enabled,
219
+ conversations from all task executions are accumulated for
220
+ potential workflow saving. Set to True if you plan to call
221
+ save_workflow_memories(). (default: :obj:`False`)
204
222
  """
205
223
 
206
224
  def __init__(
@@ -212,6 +230,8 @@ class SingleAgentWorker(Worker):
212
230
  pool_max_size: int = 10,
213
231
  auto_scale_pool: bool = True,
214
232
  use_structured_output_handler: bool = True,
233
+ context_utility: Optional[ContextUtility] = None,
234
+ enable_workflow_memory: bool = False,
215
235
  ) -> None:
216
236
  node_id = worker.agent_id
217
237
  super().__init__(
@@ -226,6 +246,18 @@ class SingleAgentWorker(Worker):
226
246
  )
227
247
  self.worker = worker
228
248
  self.use_agent_pool = use_agent_pool
249
+ self.enable_workflow_memory = enable_workflow_memory
250
+ self._shared_context_utility = context_utility
251
+ self._context_utility: Optional[ContextUtility] = (
252
+ None # Will be initialized when needed
253
+ )
254
+
255
+ # accumulator agent for collecting conversations
256
+ # from all task processing
257
+ self._conversation_accumulator: Optional[ChatAgent] = None
258
+
259
+ # note: context utility is set on the worker agent during save/load
260
+ # operations to avoid creating session folders during initialization
229
261
 
230
262
  self.agent_pool: Optional[AgentPool] = None
231
263
  self._cleanup_task: Optional[asyncio.Task] = None
@@ -268,6 +300,24 @@ class SingleAgentWorker(Worker):
268
300
  await self.agent_pool.return_agent(agent)
269
301
  # If not using pool, agent will be garbage collected
270
302
 
303
+ def _get_context_utility(self) -> ContextUtility:
304
+ r"""Get context utility with lazy initialization."""
305
+ if self._context_utility is None:
306
+ self._context_utility = (
307
+ self._shared_context_utility
308
+ or ContextUtility.get_workforce_shared()
309
+ )
310
+ return self._context_utility
311
+
312
+ def _get_conversation_accumulator(self) -> ChatAgent:
313
+ r"""Get or create the conversation accumulator agent."""
314
+ if self._conversation_accumulator is None:
315
+ # create a clone of the original worker to serve as accumulator
316
+ self._conversation_accumulator = self.worker.clone(
317
+ with_memory=False
318
+ )
319
+ return self._conversation_accumulator
320
+
271
321
  async def _process_task(
272
322
  self, task: Task, dependencies: List[Task]
273
323
  ) -> TaskState:
@@ -385,10 +435,36 @@ class SingleAgentWorker(Worker):
385
435
  usage_info.get("total_tokens", 0) if usage_info else 0
386
436
  )
387
437
 
438
+ # collect conversation from working agent to
439
+ # accumulator for workflow memory
440
+ # Only transfer memory if workflow memory is enabled
441
+ if self.enable_workflow_memory:
442
+ accumulator = self._get_conversation_accumulator()
443
+
444
+ # transfer all memory records from working agent to accumulator
445
+ try:
446
+ # retrieve all context records from the working agent
447
+ work_records = worker_agent.memory.retrieve()
448
+
449
+ # write these records to the accumulator's memory
450
+ memory_records = [
451
+ record.memory_record for record in work_records
452
+ ]
453
+ accumulator.memory.write_records(memory_records)
454
+
455
+ logger.debug(
456
+ f"Transferred {len(memory_records)} memory records to "
457
+ f"accumulator"
458
+ )
459
+
460
+ except Exception as e:
461
+ logger.warning(
462
+ f"Failed to transfer conversation to accumulator: {e}"
463
+ )
464
+
388
465
  except Exception as e:
389
- print(
390
- f"{Fore.RED}Error processing task {task.id}: "
391
- f"{type(e).__name__}: {e}{Fore.RESET}"
466
+ logger.error(
467
+ f"Error processing task {task.id}: {type(e).__name__}: {e}"
392
468
  )
393
469
  # Store error information in task result
394
470
  task.result = f"{type(e).__name__}: {e!s}"
@@ -433,13 +509,13 @@ class SingleAgentWorker(Worker):
433
509
  task.additional_info["token_usage"] = {"total_tokens": total_tokens}
434
510
 
435
511
  print(f"======\n{Fore.GREEN}Response from {self}:{Fore.RESET}")
512
+ logger.info(f"Response from {self}:")
436
513
 
437
514
  if not self.use_structured_output_handler:
438
515
  # Handle native structured output parsing
439
516
  if task_result is None:
440
- print(
441
- f"{Fore.RED}Error in worker step execution: Invalid "
442
- f"task result{Fore.RESET}"
517
+ logger.error(
518
+ "Error in worker step execution: Invalid task result"
443
519
  )
444
520
  task_result = TaskResult(
445
521
  content="Failed to generate valid task result.",
@@ -450,6 +526,10 @@ class SingleAgentWorker(Worker):
450
526
  print(
451
527
  f"\n{color}{task_result.content}{Fore.RESET}\n======", # type: ignore[union-attr]
452
528
  )
529
+ if task_result.failed: # type: ignore[union-attr]
530
+ logger.error(f"{task_result.content}") # type: ignore[union-attr]
531
+ else:
532
+ logger.info(f"{task_result.content}") # type: ignore[union-attr]
453
533
 
454
534
  task.result = task_result.content # type: ignore[union-attr]
455
535
 
@@ -457,9 +537,9 @@ class SingleAgentWorker(Worker):
457
537
  return TaskState.FAILED
458
538
 
459
539
  if is_task_result_insufficient(task):
460
- print(
461
- f"{Fore.RED}Task {task.id}: Content validation failed - "
462
- f"task marked as failed{Fore.RESET}"
540
+ logger.warning(
541
+ f"Task {task.id}: Content validation failed - "
542
+ f"task marked as failed"
463
543
  )
464
544
  return TaskState.FAILED
465
545
  return TaskState.DONE
@@ -489,10 +569,309 @@ class SingleAgentWorker(Worker):
489
569
  except asyncio.CancelledError:
490
570
  break
491
571
  except Exception as e:
492
- print(f"Error in pool cleanup: {e}")
572
+ logger.warning(f"Error in pool cleanup: {e}")
493
573
 
494
574
  def get_pool_stats(self) -> Optional[dict]:
495
575
  r"""Get agent pool statistics if pool is enabled."""
496
576
  if self.use_agent_pool and self.agent_pool:
497
577
  return self.agent_pool.get_stats()
498
578
  return None
579
+
580
+ def save_workflow_memories(self) -> Dict[str, Any]:
581
+ r"""Save the worker's current workflow memories using agent
582
+ summarization.
583
+
584
+ This method generates a workflow summary from the worker agent's
585
+ conversation history and saves it to a markdown file. The filename
586
+ is based on the worker's description for easy loading later.
587
+
588
+ Returns:
589
+ Dict[str, Any]: Result dictionary with keys:
590
+ - status (str): "success" or "error"
591
+ - summary (str): Generated workflow summary
592
+ - file_path (str): Path to saved file
593
+ - worker_description (str): Worker description used
594
+ """
595
+ try:
596
+ # validate requirements
597
+ validation_error = self._validate_workflow_save_requirements()
598
+ if validation_error:
599
+ return validation_error
600
+
601
+ # setup context utility and agent
602
+ context_util = self._get_context_utility()
603
+ self.worker.set_context_utility(context_util)
604
+
605
+ # prepare workflow summarization components
606
+ filename = self._generate_workflow_filename()
607
+ structured_prompt = self._prepare_workflow_prompt()
608
+ agent_to_summarize = self._select_agent_for_summarization(
609
+ context_util
610
+ )
611
+
612
+ # generate and save workflow summary
613
+ result = agent_to_summarize.summarize(
614
+ filename=filename,
615
+ summary_prompt=structured_prompt,
616
+ response_format=WorkflowSummary,
617
+ )
618
+
619
+ # add worker metadata and cleanup
620
+ result["worker_description"] = self.description
621
+ if self._conversation_accumulator is not None:
622
+ logger.info(
623
+ "Cleaning up conversation accumulator after workflow "
624
+ "summarization"
625
+ )
626
+ self._conversation_accumulator = None
627
+
628
+ return result
629
+
630
+ except Exception as e:
631
+ return {
632
+ "status": "error",
633
+ "summary": "",
634
+ "file_path": None,
635
+ "worker_description": self.description,
636
+ "message": f"Failed to save workflow memories: {e!s}",
637
+ }
638
+
639
+ def load_workflow_memories(
640
+ self,
641
+ pattern: Optional[str] = None,
642
+ max_files_to_load: int = 3,
643
+ session_id: Optional[str] = None,
644
+ ) -> bool:
645
+ r"""Load workflow memories matching worker description
646
+ from saved files.
647
+
648
+ This method searches for workflow memory files that match the worker's
649
+ description and loads them into the agent's memory using
650
+ ContextUtility.
651
+
652
+ Args:
653
+ pattern (Optional[str]): Custom search pattern for workflow
654
+ memory files.
655
+ If None, uses worker description to generate pattern.
656
+ max_files_to_load (int): Maximum number of workflow files to load.
657
+ (default: :obj:`3`)
658
+ session_id (Optional[str]): Specific workforce session ID to load
659
+ from. If None, searches across all sessions.
660
+ (default: :obj:`None`)
661
+
662
+ Returns:
663
+ bool: True if workflow memories were successfully loaded, False
664
+ otherwise.
665
+ """
666
+ try:
667
+ # reset system message to original state before loading
668
+ # this prevents duplicate workflow context on multiple calls
669
+ if isinstance(self.worker, ChatAgent):
670
+ self.worker.reset_to_original_system_message()
671
+
672
+ # Find workflow memory files matching the pattern
673
+ workflow_files = self._find_workflow_files(pattern, session_id)
674
+ if not workflow_files:
675
+ return False
676
+
677
+ # Load the workflow memory files
678
+ loaded_count = self._load_workflow_files(
679
+ workflow_files, max_files_to_load
680
+ )
681
+
682
+ # Report results
683
+ logger.info(
684
+ f"Successfully loaded {loaded_count} workflow file(s) for "
685
+ f"{self.description}"
686
+ )
687
+ return loaded_count > 0
688
+
689
+ except Exception as e:
690
+ logger.warning(
691
+ f"Error loading workflow memories for {self.description}: "
692
+ f"{e!s}"
693
+ )
694
+ return False
695
+
696
+ def _find_workflow_files(
697
+ self, pattern: Optional[str], session_id: Optional[str] = None
698
+ ) -> List[str]:
699
+ r"""Find and return sorted workflow files matching the pattern.
700
+
701
+ Args:
702
+ pattern (Optional[str]): Custom search pattern for workflow files.
703
+ If None, uses worker description to generate pattern.
704
+ session_id (Optional[str]): Specific session ID to search in.
705
+ If None, searches across all sessions.
706
+
707
+ Returns:
708
+ List[str]: Sorted list of workflow file paths (empty if
709
+ validation fails).
710
+ """
711
+ # Ensure we have a ChatAgent worker
712
+ if not isinstance(self.worker, ChatAgent):
713
+ logger.warning(
714
+ f"Cannot load workflow: {self.description} worker is not "
715
+ "a ChatAgent"
716
+ )
717
+ return []
718
+
719
+ # generate filename-safe search pattern from worker description
720
+ if pattern is None:
721
+ # sanitize description: spaces to underscores, remove special chars
722
+ clean_desc = self.description.lower().replace(" ", "_")
723
+ clean_desc = re.sub(r'[^a-z0-9_]', '', clean_desc)
724
+ pattern = f"{clean_desc}_workflow*.md"
725
+
726
+ # Get the base workforce_workflows directory
727
+ camel_workdir = os.environ.get("CAMEL_WORKDIR")
728
+ if camel_workdir:
729
+ base_dir = os.path.join(camel_workdir, "workforce_workflows")
730
+ else:
731
+ base_dir = "workforce_workflows"
732
+
733
+ # search for workflow files in specified or all session directories
734
+ if session_id:
735
+ search_path = str(Path(base_dir) / session_id / pattern)
736
+ else:
737
+ # search across all session directories using wildcard pattern
738
+ search_path = str(Path(base_dir) / "*" / pattern)
739
+ workflow_files = glob.glob(search_path)
740
+
741
+ if not workflow_files:
742
+ logger.info(f"No workflow files found for pattern: {pattern}")
743
+ return []
744
+
745
+ # prioritize most recent sessions by session timestamp in
746
+ # directory name
747
+ def extract_session_timestamp(filepath: str) -> str:
748
+ match = re.search(r'session_(\d{8}_\d{6}_\d{6})', filepath)
749
+ return match.group(1) if match else ""
750
+
751
+ workflow_files.sort(key=extract_session_timestamp, reverse=True)
752
+ return workflow_files
753
+
754
+ def _load_workflow_files(
755
+ self, workflow_files: List[str], max_files_to_load: int
756
+ ) -> int:
757
+ r"""Load workflow files and return count of successful loads.
758
+
759
+ Args:
760
+ workflow_files (List[str]): List of workflow file paths to load.
761
+
762
+ Returns:
763
+ int: Number of successfully loaded workflow files.
764
+ """
765
+ loaded_count = 0
766
+ # limit loading to prevent context overflow
767
+ for file_path in workflow_files[:max_files_to_load]:
768
+ try:
769
+ # extract file and session info from full path
770
+ filename = os.path.basename(file_path).replace('.md', '')
771
+ session_dir = os.path.dirname(file_path)
772
+ session_id = os.path.basename(session_dir)
773
+
774
+ # create context utility for the specific session
775
+ # where file exists
776
+ temp_utility = ContextUtility.get_workforce_shared(session_id)
777
+
778
+ status = temp_utility.load_markdown_context_to_memory(
779
+ self.worker, filename
780
+ )
781
+
782
+ if "Context appended" in status:
783
+ loaded_count += 1
784
+ logger.info(f"Loaded workflow: {filename}")
785
+ else:
786
+ logger.warning(
787
+ f"Failed to load workflow {filename}: {status}"
788
+ )
789
+
790
+ except Exception as e:
791
+ logger.warning(
792
+ f"Failed to load workflow file {file_path}: {e!s}"
793
+ )
794
+ continue
795
+
796
+ return loaded_count
797
+
798
+ def _validate_workflow_save_requirements(self) -> Optional[Dict[str, Any]]:
799
+ r"""Validate requirements for workflow saving.
800
+
801
+ Returns:
802
+ Optional[Dict[str, Any]]: Error result dict if validation fails,
803
+ None if validation passes.
804
+ """
805
+ if not isinstance(self.worker, ChatAgent):
806
+ return {
807
+ "status": "error",
808
+ "summary": "",
809
+ "file_path": None,
810
+ "worker_description": self.description,
811
+ "message": (
812
+ "Worker must be a ChatAgent instance to save workflow "
813
+ "memories"
814
+ ),
815
+ }
816
+ return None
817
+
818
+ def _generate_workflow_filename(self) -> str:
819
+ r"""Generate a filename for the workflow based on worker description.
820
+
821
+ Returns:
822
+ str: Sanitized filename without timestamp (session already has
823
+ timestamp).
824
+ """
825
+ clean_desc = self.description.lower().replace(" ", "_")
826
+ clean_desc = re.sub(r'[^a-z0-9_]', '', clean_desc)
827
+ return f"{clean_desc}_workflow"
828
+
829
+ def _prepare_workflow_prompt(self) -> str:
830
+ r"""Prepare the structured prompt for workflow summarization.
831
+
832
+ Returns:
833
+ str: Structured prompt for workflow summary.
834
+ """
835
+ workflow_prompt = WorkflowSummary.get_instruction_prompt()
836
+ return StructuredOutputHandler.generate_structured_prompt(
837
+ base_prompt=workflow_prompt, schema=WorkflowSummary
838
+ )
839
+
840
+ def _select_agent_for_summarization(
841
+ self, context_util: ContextUtility
842
+ ) -> ChatAgent:
843
+ r"""Select the best agent for workflow summarization.
844
+
845
+ Args:
846
+ context_util: Context utility to set on selected agent.
847
+
848
+ Returns:
849
+ ChatAgent: Agent to use for summarization.
850
+ """
851
+ agent_to_summarize = self.worker
852
+
853
+ if self._conversation_accumulator is not None:
854
+ accumulator_messages, _ = (
855
+ self._conversation_accumulator.memory.get_context()
856
+ )
857
+ if accumulator_messages:
858
+ self._conversation_accumulator.set_context_utility(
859
+ context_util
860
+ )
861
+ agent_to_summarize = self._conversation_accumulator
862
+ logger.info(
863
+ f"Using conversation accumulator with "
864
+ f"{len(accumulator_messages)} messages for workflow "
865
+ f"summary"
866
+ )
867
+ else:
868
+ logger.info(
869
+ "Using original worker for workflow summary (no "
870
+ "accumulated conversations)"
871
+ )
872
+ else:
873
+ logger.info(
874
+ "Using original worker for workflow summary (no accumulator)"
875
+ )
876
+
877
+ return agent_to_summarize
@@ -19,8 +19,8 @@ from pydantic import BaseModel, ValidationError
19
19
 
20
20
  from camel.logger import get_logger
21
21
  from camel.societies.workforce.utils import (
22
- RecoveryDecision,
23
22
  RecoveryStrategy,
23
+ TaskAnalysisResult,
24
24
  TaskAssignResult,
25
25
  WorkerConf,
26
26
  )
@@ -65,9 +65,9 @@ class StructuredOutputHandler:
65
65
  r'description.*?:\s*"([^"]+)"'
66
66
  ),
67
67
  ],
68
- 'RecoveryDecision': [
69
- r'"strategy"\s*:\s*"([^"]+)".*?"reasoning"\s*:\s*"([^"]+)"',
70
- r'strategy.*?:\s*"([^"]+)".*?reasoning.*?:\s*"([^"]+)"',
68
+ 'TaskAnalysisResult': [
69
+ r'"recovery_strategy"\s*:\s*"([^"]+)".*?"reasoning"\s*:\s*"([^"]+)"',
70
+ r'recovery_strategy.*?:\s*"([^"]+)".*?reasoning.*?:\s*"([^"]+)"',
71
71
  ],
72
72
  }
73
73
 
@@ -239,12 +239,12 @@ Ensure the JSON is valid and properly formatted.
239
239
  except (IndexError, AttributeError):
240
240
  continue
241
241
 
242
- elif schema_name == 'RecoveryDecision':
242
+ elif schema_name == 'TaskAnalysisResult':
243
243
  for pattern in patterns:
244
244
  match = re.search(pattern, text, re.DOTALL | re.IGNORECASE)
245
245
  if match:
246
246
  try:
247
- strategy = match.group(1)
247
+ recovery_strategy = match.group(1)
248
248
  reasoning = match.group(2)
249
249
  # Look for modified_task_content
250
250
  content_match = re.search(
@@ -252,14 +252,25 @@ Ensure the JSON is valid and properly formatted.
252
252
  text,
253
253
  re.IGNORECASE,
254
254
  )
255
+ # Look for quality_score (for quality evaluation)
256
+ score_match = re.search(
257
+ r'"quality_score"\s*:\s*(\d+)',
258
+ text,
259
+ re.IGNORECASE,
260
+ )
255
261
  return {
256
- 'strategy': strategy,
262
+ 'recovery_strategy': recovery_strategy,
257
263
  'reasoning': reasoning,
258
264
  'modified_task_content': (
259
265
  content_match.group(1)
260
266
  if content_match
261
267
  else None
262
268
  ),
269
+ 'quality_score': (
270
+ int(score_match.group(1))
271
+ if score_match
272
+ else None
273
+ ),
263
274
  }
264
275
  except (IndexError, AttributeError):
265
276
  continue
@@ -370,21 +381,22 @@ Ensure the JSON is valid and properly formatted.
370
381
  else:
371
382
  assignment['dependencies'] = []
372
383
 
373
- elif schema_name == 'RecoveryDecision':
374
- # Ensure strategy is valid
375
- if 'strategy' in fixed_data:
376
- strategy = fixed_data['strategy'].lower()
384
+ elif schema_name == 'TaskAnalysisResult':
385
+ # Ensure recovery_strategy is valid
386
+ if 'recovery_strategy' in fixed_data:
387
+ strategy = fixed_data['recovery_strategy'].lower()
377
388
  valid_strategies = [
378
389
  'retry',
379
390
  'replan',
380
391
  'decompose',
381
392
  'create_worker',
393
+ 'reassign',
382
394
  ]
383
395
  if strategy not in valid_strategies:
384
396
  # Try to match partial
385
397
  for valid in valid_strategies:
386
398
  if valid.startswith(strategy) or strategy in valid:
387
- fixed_data['strategy'] = valid
399
+ fixed_data['recovery_strategy'] = valid
388
400
  break
389
401
 
390
402
  return fixed_data
@@ -410,10 +422,10 @@ Ensure the JSON is valid and properly formatted.
410
422
  sys_msg="You are a helpful assistant.",
411
423
  description="A general-purpose worker",
412
424
  )
413
- elif schema_name == 'RecoveryDecision':
414
- return RecoveryDecision(
415
- strategy=RecoveryStrategy.RETRY,
425
+ elif schema_name == 'TaskAnalysisResult':
426
+ return TaskAnalysisResult(
416
427
  reasoning="Unable to parse response, defaulting to retry",
428
+ recovery_strategy=RecoveryStrategy.RETRY,
417
429
  modified_task_content=None,
418
430
  )
419
431
  else:
@@ -482,11 +494,11 @@ Ensure the JSON is valid and properly formatted.
482
494
  description=f"Fallback worker for task: {task_content}...",
483
495
  )
484
496
 
485
- elif schema_name == 'RecoveryDecision':
497
+ elif schema_name == 'TaskAnalysisResult':
486
498
  # Default to retry strategy
487
- return RecoveryDecision(
488
- strategy=RecoveryStrategy.RETRY,
499
+ return TaskAnalysisResult(
489
500
  reasoning=f"Fallback decision due to: {error_message}",
501
+ recovery_strategy=RecoveryStrategy.RETRY,
490
502
  modified_task_content=None,
491
503
  )
492
504