camel-ai 0.2.75a5__py3-none-any.whl → 0.2.76__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (103)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +1148 -298
  3. camel/agents/mcp_agent.py +30 -27
  4. camel/configs/__init__.py +9 -0
  5. camel/configs/amd_config.py +70 -0
  6. camel/configs/cometapi_config.py +104 -0
  7. camel/configs/nebius_config.py +103 -0
  8. camel/data_collectors/alpaca_collector.py +15 -6
  9. camel/environments/tic_tac_toe.py +1 -1
  10. camel/interpreters/__init__.py +2 -0
  11. camel/interpreters/docker/Dockerfile +3 -12
  12. camel/interpreters/microsandbox_interpreter.py +395 -0
  13. camel/loaders/__init__.py +11 -2
  14. camel/loaders/chunkr_reader.py +9 -0
  15. camel/memories/__init__.py +2 -1
  16. camel/memories/agent_memories.py +3 -1
  17. camel/memories/blocks/chat_history_block.py +21 -3
  18. camel/memories/records.py +88 -8
  19. camel/messages/base.py +127 -34
  20. camel/models/__init__.py +6 -0
  21. camel/models/amd_model.py +101 -0
  22. camel/models/azure_openai_model.py +0 -6
  23. camel/models/base_model.py +30 -0
  24. camel/models/cometapi_model.py +83 -0
  25. camel/models/model_factory.py +6 -0
  26. camel/models/nebius_model.py +83 -0
  27. camel/models/ollama_model.py +3 -3
  28. camel/models/openai_compatible_model.py +0 -6
  29. camel/models/openai_model.py +0 -6
  30. camel/models/zhipuai_model.py +61 -2
  31. camel/parsers/__init__.py +18 -0
  32. camel/parsers/mcp_tool_call_parser.py +176 -0
  33. camel/retrievers/auto_retriever.py +1 -0
  34. camel/runtimes/daytona_runtime.py +11 -12
  35. camel/societies/workforce/prompts.py +131 -50
  36. camel/societies/workforce/single_agent_worker.py +434 -49
  37. camel/societies/workforce/structured_output_handler.py +30 -18
  38. camel/societies/workforce/task_channel.py +163 -27
  39. camel/societies/workforce/utils.py +105 -12
  40. camel/societies/workforce/workforce.py +1357 -314
  41. camel/societies/workforce/workforce_logger.py +24 -5
  42. camel/storages/key_value_storages/json.py +15 -2
  43. camel/storages/object_storages/google_cloud.py +1 -1
  44. camel/storages/vectordb_storages/oceanbase.py +10 -11
  45. camel/storages/vectordb_storages/tidb.py +8 -6
  46. camel/tasks/task.py +4 -3
  47. camel/toolkits/__init__.py +18 -5
  48. camel/toolkits/aci_toolkit.py +45 -0
  49. camel/toolkits/code_execution.py +28 -1
  50. camel/toolkits/context_summarizer_toolkit.py +684 -0
  51. camel/toolkits/dingtalk.py +1135 -0
  52. camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
  53. camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +194 -34
  54. camel/toolkits/function_tool.py +6 -1
  55. camel/toolkits/github_toolkit.py +104 -17
  56. camel/toolkits/google_drive_mcp_toolkit.py +12 -31
  57. camel/toolkits/hybrid_browser_toolkit/config_loader.py +12 -0
  58. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +79 -2
  59. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +95 -59
  60. camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
  61. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
  62. camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
  63. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +619 -95
  64. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +7 -2
  65. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +115 -219
  66. camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
  67. camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
  68. camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
  69. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +1 -0
  70. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
  71. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +412 -133
  72. camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +9 -5
  73. camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
  74. camel/toolkits/markitdown_toolkit.py +27 -1
  75. camel/toolkits/math_toolkit.py +64 -10
  76. camel/toolkits/mcp_toolkit.py +348 -348
  77. camel/toolkits/message_integration.py +3 -0
  78. camel/toolkits/minimax_mcp_toolkit.py +195 -0
  79. camel/toolkits/note_taking_toolkit.py +18 -8
  80. camel/toolkits/notion_mcp_toolkit.py +16 -26
  81. camel/toolkits/origene_mcp_toolkit.py +8 -49
  82. camel/toolkits/playwright_mcp_toolkit.py +12 -31
  83. camel/toolkits/resend_toolkit.py +168 -0
  84. camel/toolkits/search_toolkit.py +13 -2
  85. camel/toolkits/slack_toolkit.py +50 -1
  86. camel/toolkits/terminal_toolkit/__init__.py +18 -0
  87. camel/toolkits/terminal_toolkit/terminal_toolkit.py +924 -0
  88. camel/toolkits/terminal_toolkit/utils.py +532 -0
  89. camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
  90. camel/toolkits/video_analysis_toolkit.py +17 -11
  91. camel/toolkits/wechat_official_toolkit.py +483 -0
  92. camel/types/enums.py +155 -1
  93. camel/types/unified_model_type.py +10 -0
  94. camel/utils/commons.py +17 -0
  95. camel/utils/context_utils.py +804 -0
  96. camel/utils/mcp.py +136 -2
  97. camel/utils/token_counting.py +25 -17
  98. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76.dist-info}/METADATA +158 -67
  99. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76.dist-info}/RECORD +101 -80
  100. camel/loaders/pandas_reader.py +0 -368
  101. camel/toolkits/terminal_toolkit.py +0 -1788
  102. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76.dist-info}/WHEEL +0 -0
  103. {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76.dist-info}/licenses/LICENSE +0 -0
camel/utils/context_utils.py (new file)
@@ -0,0 +1,804 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import os
+ from datetime import datetime
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional
+
+ from pydantic import BaseModel, Field
+
+ from camel.logger import get_logger
+
+ if TYPE_CHECKING:
+     from camel.agents import ChatAgent
+     from camel.memories.records import MemoryRecord
+
+ logger = get_logger(__name__)
+
+
+ class WorkflowSummary(BaseModel):
+     r"""Pydantic model for structured workflow summaries.
+
+     This model defines the schema for workflow memories that can be reused
+     by future agents for similar tasks.
+     """
+
+     task_title: str = Field(
+         description="A short, generic title of the main task (≤ 10 words). "
+         "Avoid product- or case-specific names. "
+         "Example: 'List GitHub stargazers', "
+         "'Remind weekly meetings on Slack', "
+         "'Find best leads and turn them into a table on Notion'."
+     )
+     task_description: str = Field(
+         description="One-paragraph summary of what the user asked for "
+         "(≤ 80 words). "
+         "No implementation details; just the outcome the user wants. "
+         "Example: Find academic professors who might be interested in the "
+         "upcoming research paper on Graph-based Agentic Memory, extract "
+         "their email addresses, affiliations, and research interests, "
+         "and create a table on Notion with this information."
+     )
+     tools: List[str] = Field(
+         description="Bullet list of tool or function calls used. "
+         "For each: name → what it did → why it was useful (one line each). "
+         "This field is explicitly for tool call messages or the MCP "
+         "servers used. "
+         "Example: - ArxivToolkit: get authors from a paper title, "
+         "it helped find academic professors who authored a particular "
+         "paper, and then get their email addresses, affiliations, and "
+         "research interests.",
+         default_factory=list,
+     )
+     steps: List[str] = Field(
+         description="Numbered, ordered actions the agent took to complete "
+         "the task. Each step starts with a verb and is generic "
+         "enough to be repeatable. "
+         "Example: 1. Find the upcoming meetings on Google Calendar "
+         "today. 2. Send participants a reminder on Slack...",
+         default_factory=list,
+     )
+     failure_and_recovery_strategies: List[str] = Field(
+         description="[Optional] Bullet each incident with symptom, "
+         "cause (if known), fix/workaround, and verification of "
+         "recovery. Leave empty if no failures. "
+         "Example: Running the script for consumer data "
+         "analysis failed since the Pandas package was not installed. "
+         "Fixed by running 'pip install pandas'.",
+         default_factory=list,
+     )
+     notes_and_observations: str = Field(
+         description="[Optional] Anything not covered in previous fields "
+         "that is critical to know for future executions of the task. "
+         "Leave empty if no notes. Do not repeat any information, or "
+         "mention trivial details. Only what is essential. "
+         "Example: The user likes to be in the "
+         "loop of the task execution, make sure to check with them the "
+         "plan before starting to work, and ask them for approval "
+         "mid-task by using the HumanToolkit.",
+         default="",
+     )
+
+     @classmethod
+     def get_instruction_prompt(cls) -> str:
+         r"""Get the instruction prompt for this model.
+
+         Returns:
+             str: The instruction prompt that guides agents to produce
+                 structured output matching this schema.
+         """
+         return (
+             'You are writing a compact "workflow memory" so future agents '
+             'can reuse what you just did for future tasks. '
+             'Be concise, precise, and action-oriented. Analyze the '
+             'conversation and extract the key workflow information '
+             'following the provided schema structure. If a field has no '
+             'content, still include it per the schema, but keep it empty. '
+             'The length of your workflow must be proportional to the '
+             'complexity of the task. Example: If the task is simply '
+             'about a simple math problem, the workflow must be short, '
+             'e.g. <60 words. By contrast, if the task is complex and '
+             'multi-step, such as finding particular job applications based '
+             'on user CV, the workflow must be longer, e.g. about 120 words.'
+         )
+
+
+ class ContextUtility:
+     r"""Utility class for context management and file operations.
+
+     This utility provides generic functionality for managing context files,
+     markdown generation, and session management that can be used by
+     context-related features.
+
+     Key features:
+     - Session-based directory management
+     - Generic markdown file operations
+     - Text-based search through files
+     - File metadata handling
+     - Agent memory record retrieval
+     - Shared session management for workforce workflows
+     """
+
+     # Class variables for shared session management
+     _shared_sessions: ClassVar[Dict[str, 'ContextUtility']] = {}
+     _default_workforce_session: ClassVar[Optional['ContextUtility']] = None
+
+     def __init__(
+         self,
+         working_directory: Optional[str] = None,
+         session_id: Optional[str] = None,
+         create_folder: bool = True,
+     ):
+         r"""Initialize the ContextUtility.
+
+         Args:
+             working_directory (str, optional): The directory path where files
+                 will be stored. If not provided, a default directory will be
+                 used.
+             session_id (str, optional): The session ID to use. If provided,
+                 this instance will use the same session folder as other
+                 instances with the same session_id. If not provided, a new
+                 session ID will be generated.
+             create_folder (bool): Whether to create the session folder
+                 immediately. If False, the folder will be created only when
+                 needed (e.g., when saving files). Default is True for
+                 backward compatibility.
+         """
+         self.working_directory_param = working_directory
+         self._setup_storage(working_directory, session_id, create_folder)
+
+     def _setup_storage(
+         self,
+         working_directory: Optional[str],
+         session_id: Optional[str] = None,
+         create_folder: bool = True,
+     ) -> None:
+         r"""Initialize session-specific storage paths and optionally create
+         directory structure for context file management."""
+         self.session_id = session_id or self._generate_session_id()
+
+         if working_directory:
+             self.working_directory = Path(working_directory).resolve()
+         else:
+             camel_workdir = os.environ.get("CAMEL_WORKDIR")
+             if camel_workdir:
+                 self.working_directory = Path(camel_workdir) / "context_files"
+             else:
+                 self.working_directory = Path("context_files")
+
+         # Create session-specific directory
+         self.working_directory = self.working_directory / self.session_id
+
+         # Only create directory if requested
+         if create_folder:
+             self.working_directory.mkdir(parents=True, exist_ok=True)
+
+     def _generate_session_id(self) -> str:
+         r"""Create timestamp-based unique identifier for isolating
+         current session files from other sessions."""
+         timestamp = datetime.now().strftime('%Y%m%d_%H%M%S_%f')
+         return f"session_{timestamp}"
+
+     # ========= GENERIC FILE MANAGEMENT METHODS =========
+
+     def _ensure_directory_exists(self) -> None:
+         r"""Ensure the working directory exists, creating it if necessary."""
+         self.working_directory.mkdir(parents=True, exist_ok=True)
+
+     def _create_or_update_note(self, note_name: str, content: str) -> str:
+         r"""Write content to markdown file, creating new file or
+         overwriting existing one with UTF-8 encoding.
+
+         Args:
+             note_name (str): Name of the note (without .md extension).
+             content (str): Content to write to the note.
+
+         Returns:
+             str: Success message.
+         """
+         try:
+             # Ensure directory exists before writing
+             self._ensure_directory_exists()
+             file_path = self.working_directory / f"{note_name}.md"
+             with open(file_path, 'w', encoding='utf-8') as f:
+                 f.write(content)
+             return f"Note '{note_name}.md' created successfully"
+         except Exception as e:
+             logger.error(f"Error creating note {note_name}: {e}")
+             return f"Error creating note: {e}"
+
+     def save_markdown_file(
+         self,
+         filename: str,
+         content: str,
+         title: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> str:
+         r"""Generic method to save any markdown content to a file.
+
+         Args:
+             filename (str): Name without .md extension.
+             content (str): Main content to save.
+             title (str, optional): Title for the markdown file.
+             metadata (Dict, optional): Additional metadata to include.
+
+         Returns:
+             str: "success" on success, error message starting with "Error:"
+                 on failure.
+         """
+         try:
+             markdown_content = ""
+
+             # Add title if provided
+             if title:
+                 markdown_content += f"# {title}\n\n"
+
+             # Add metadata section if provided
+             if metadata:
+                 markdown_content += "## Metadata\n\n"
+                 for key, value in metadata.items():
+                     markdown_content += f"- {key}: {value}\n"
+                 markdown_content += "\n"
+
+             # Add main content
+             markdown_content += content
+
+             self._create_or_update_note(filename, markdown_content)
+             logger.info(
+                 f"Markdown file '{filename}.md' saved successfully to "
+                 f"{self.working_directory / f'{filename}.md'}"
+             )
+             return "success"
+
+         except Exception as e:
+             logger.error(f"Error saving markdown file {filename}: {e}")
+             return f"Error: {e}"
+
+     def structured_output_to_markdown(
+         self,
+         structured_data: BaseModel,
+         metadata: Optional[Dict[str, Any]] = None,
+         title: Optional[str] = None,
+         field_mappings: Optional[Dict[str, str]] = None,
+     ) -> str:
+         r"""Convert any Pydantic BaseModel instance to markdown format.
+
+         Args:
+             structured_data: Any Pydantic BaseModel instance
+             metadata: Optional metadata to include in the markdown
+             title: Optional custom title, defaults to model class name
+             field_mappings: Optional mapping of field names to custom
+                 section titles
+
+         Returns:
+             str: Markdown formatted content
+         """
+         markdown_content = []
+
+         # Add metadata if provided
+         if metadata:
+             markdown_content.append("## Metadata\n")
+             for key, value in metadata.items():
+                 markdown_content.append(f"- {key}: {value}")
+             markdown_content.append("")
+
+         # Add title
+         if title:
+             markdown_content.extend([f"## {title}", ""])
+         else:
+             model_name = structured_data.__class__.__name__
+             markdown_content.extend([f"## {model_name}", ""])
+
+         # Get model fields and values
+         model_dict = structured_data.model_dump()
+
+         for field_name, field_value in model_dict.items():
+             # Use custom mapping or convert field name to title case
+             if field_mappings and field_name in field_mappings:
+                 section_title = field_mappings[field_name]
+             else:
+                 # Convert snake_case to Title Case
+                 section_title = field_name.replace('_', ' ').title()
+
+             markdown_content.append(f"### {section_title}")
+
+             # Handle different data types
+             if isinstance(field_value, list):
+                 if field_value:
+                     for i, item in enumerate(field_value):
+                         if isinstance(item, str):
+                             # Check if it looks like a numbered item already
+                             if item.strip() and not item.strip()[0].isdigit():
+                                 # For steps or numbered lists, add numbers
+                                 if 'step' in field_name.lower():
+                                     markdown_content.append(f"{i + 1}. {item}")
+                                 else:
+                                     markdown_content.append(f"- {item}")
+                             else:
+                                 markdown_content.append(f"- {item}")
+                         else:
+                             markdown_content.append(f"- {item!s}")
+                 else:
+                     markdown_content.append(
+                         f"(No {section_title.lower()} recorded)"
+                     )
+             elif isinstance(field_value, str):
+                 if field_value.strip():
+                     markdown_content.append(field_value)
+                 else:
+                     markdown_content.append(
+                         f"(No {section_title.lower()} provided)"
+                     )
+             elif isinstance(field_value, dict):
+                 for k, v in field_value.items():
+                     markdown_content.append(f"- **{k}**: {v}")
+             else:
+                 markdown_content.append(str(field_value))
+
+             markdown_content.append("")
+
+         return "\n".join(markdown_content)
+
+     def load_markdown_file(self, filename: str) -> str:
+         r"""Generic method to load any markdown file.
+
+         Args:
+             filename (str): Name without .md extension.
+
+         Returns:
+             str: File content or empty string if not found.
+         """
+         try:
+             file_path = self.working_directory / f"{filename}.md"
+             if file_path.exists():
+                 return file_path.read_text(encoding="utf-8")
+             return ""
+         except Exception as e:
+             logger.error(f"Error loading markdown file {filename}: {e}")
+             return ""
+
+     def file_exists(self, filename: str) -> bool:
+         r"""Verify presence of markdown file in current session directory.
+
+         Args:
+             filename (str): Name without .md extension.
+
+         Returns:
+             bool: True if file exists, False otherwise.
+         """
+         file_path = self.working_directory / f"{filename}.md"
+         return file_path.exists()
+
+     def list_markdown_files(self) -> List[str]:
+         r"""Discover all markdown files in current session directory
+         and return their base names for reference.
+
+         Returns:
+             List[str]: List of filenames without .md extension.
+         """
+         try:
+             md_files = list(self.working_directory.glob("*.md"))
+             return [f.stem for f in md_files]
+         except Exception as e:
+             logger.error(f"Error listing markdown files: {e}")
+             return []
+
+     # ========= GENERIC AGENT MEMORY METHODS =========
+
+     def get_agent_memory_records(
+         self, agent: "ChatAgent"
+     ) -> List["MemoryRecord"]:
+         r"""Retrieve conversation history from agent's memory system.
+
+         Args:
+             agent (ChatAgent): The agent to extract memory records from.
+
+         Returns:
+             List[MemoryRecord]: List of memory records from the agent.
+         """
+         try:
+             context_records = agent.memory.retrieve()
+             return [cr.memory_record for cr in context_records]
+         except Exception as e:
+             logger.error(f"Error extracting memory records: {e}")
+             return []
+
+     def format_memory_as_conversation(
+         self, memory_records: List["MemoryRecord"]
+     ) -> str:
+         r"""Transform structured memory records into human-readable
+         conversation format with role labels and message content.
+
+         Args:
+             memory_records (List[MemoryRecord]): Memory records to format.
+
+         Returns:
+             str: Formatted conversation text.
+         """
+         conversation_lines = []
+
+         for record in memory_records:
+             role = (
+                 record.role_at_backend.value
+                 if hasattr(record.role_at_backend, 'value')
+                 else str(record.role_at_backend)
+             )
+             content = record.message.content
+             conversation_lines.append(f"{role}: {content}")
+
+         return "\n".join(conversation_lines)
+
+     # ========= SESSION MANAGEMENT METHODS =========
+
+     def create_session_directory(
+         self, base_dir: Optional[str] = None, session_id: Optional[str] = None
+     ) -> Path:
+         r"""Create a session-specific directory.
+
+         Args:
+             base_dir (str, optional): Base directory. If None, uses current
+                 working directory.
+             session_id (str, optional): Custom session ID. If None, generates
+                 new one.
+
+         Returns:
+             Path: The created session directory path.
+         """
+         if session_id is None:
+             session_id = self._generate_session_id()
+
+         if base_dir:
+             base_path = Path(base_dir).resolve()
+         else:
+             base_path = self.working_directory.parent
+
+         session_dir = base_path / session_id
+         session_dir.mkdir(parents=True, exist_ok=True)
+         return session_dir
+
+     def get_session_metadata(self) -> Dict[str, Any]:
+         r"""Collect comprehensive session information including identifiers,
+         timestamps, and directory paths for tracking and reference.
+
+         Returns:
+             Dict[str, Any]: Session metadata including ID, timestamp,
+                 directory.
+         """
+         return {
+             'session_id': self.session_id,
+             'working_directory': str(self.working_directory),
+             'created_at': datetime.now().isoformat(),
+             'base_directory': str(self.working_directory.parent),
+         }
+
+     def list_sessions(self, base_dir: Optional[str] = None) -> List[str]:
+         r"""Discover all available session directories for browsing
+         historical conversations and context files.
+
+         Args:
+             base_dir (str, optional): Base directory to search. If None, uses
+                 parent of working directory.
+
+         Returns:
+             List[str]: List of session directory names.
+         """
+         try:
+             if base_dir:
+                 search_dir = Path(base_dir)
+             else:
+                 search_dir = self.working_directory.parent
+
+             session_dirs = [
+                 d.name
+                 for d in search_dir.iterdir()
+                 if d.is_dir() and d.name.startswith('session_')
+             ]
+             return sorted(session_dirs)
+         except Exception as e:
+             logger.error(f"Error listing sessions: {e}")
+             return []
+
+     # ========= GENERIC SEARCH METHODS =========
+
+     def search_in_file(
+         self, file_path: Path, keywords: List[str], top_k: int = 4
+     ) -> str:
+         r"""Perform keyword-based search through file sections,
+         ranking results by keyword frequency and returning top matches.
+
+         Args:
+             file_path (Path): Path to the file to search.
+             keywords (List[str]): Keywords to search for.
+             top_k (int): Maximum number of results to return.
+
+         Returns:
+             str: Formatted search results.
+         """
+         results: List[Dict[str, Any]] = []
+         keyword_terms = [keyword.lower() for keyword in keywords]
+
+         try:
+             if not file_path.exists():
+                 return ""
+
+             with open(file_path, 'r', encoding='utf-8') as f:
+                 content = f.read()
+
+             # Split content into sections (assuming ### headers)
+             sections = content.split('### ')[1:]  # Skip the header part
+
+             for i, section in enumerate(sections):
+                 if not section.strip():
+                     continue
+
+                 section_lower = section.lower()
+
+                 # count how many keywords appear in this section
+                 keyword_matches = sum(
+                     1 for keyword in keyword_terms if keyword in section_lower
+                 )
+
+                 if keyword_matches > 0:
+                     results.append(
+                         {
+                             'content': f"### {section.strip()}",
+                             'keyword_count': keyword_matches,
+                             'section_num': i + 1,
+                         }
+                     )
+
+         except Exception as e:
+             logger.warning(f"Error reading file {file_path}: {e}")
+             return ""
+
+         # sort by keyword count and limit results
+         results.sort(key=lambda x: x['keyword_count'], reverse=True)
+         results = results[:top_k]
+
+         if not results:
+             return ""
+
+         # format results
+         formatted_sections = []
+         for result in results:
+             formatted_sections.append(
+                 f"Section {result['section_num']} "
+                 f"(keyword matches: {result['keyword_count']}):\n"
+                 f"{result['content']}\n"
+             )
+
+         return "\n---\n".join(formatted_sections)
+
+     # ========= UTILITY METHODS =========
+
+     def get_working_directory(self) -> Path:
+         r"""Retrieve the session-specific directory path where
+         all context files are stored.
+
+         Returns:
+             Path: The working directory path.
+         """
+         return self.working_directory
+
+     def get_session_id(self) -> str:
+         r"""Retrieve the unique identifier for the current session
+         used for file organization and tracking.
+
+         Returns:
+             str: The session ID.
+         """
+         return self.session_id
+
+     def set_session_id(self, session_id: str) -> None:
+         r"""Set a new session ID and update the working directory accordingly.
+
+         This allows sharing session directories between multiple ContextUtility
+         instances by using the same session_id.
+
+         Args:
+             session_id (str): The session ID to use.
+         """
+         self.session_id = session_id
+
+         # Update working directory with new session_id
+         if self.working_directory_param:
+             base_dir = Path(self.working_directory_param).resolve()
+         else:
+             camel_workdir = os.environ.get("CAMEL_WORKDIR")
+             if camel_workdir:
+                 base_dir = Path(camel_workdir) / "context_files"
+             else:
+                 base_dir = Path("context_files")
+
+         self.working_directory = base_dir / self.session_id
+         self.working_directory.mkdir(parents=True, exist_ok=True)
+
+     def load_markdown_context_to_memory(
+         self, agent: "ChatAgent", filename: str, include_metadata: bool = False
+     ) -> str:
+         r"""Load context from a markdown file and append it to agent memory.
+
+         Args:
+             agent (ChatAgent): The agent to append context to.
+             filename (str): Name of the markdown file (without .md extension).
+             include_metadata (bool): Whether to include metadata section in the
+                 loaded content. Defaults to False.
+
+         Returns:
+             str: Status message indicating success or failure with details.
+         """
+         try:
+             content = self.load_markdown_file(filename)
+
+             if not content.strip():
+                 return f"Context file not found or empty: {filename}"
+
+             # Filter out metadata section if not requested
+             if not include_metadata:
+                 content = self._filter_metadata_from_content(content)
+
+             from camel.types import OpenAIBackendRole
+
+             prefix_prompt = (
+                 "The following is the context from a previous "
+                 "session or workflow which might be useful for "
+                 "the current task. This information might help you "
+                 "understand the background, choose which tools to use, "
+                 "and plan your next steps."
+             )
+
+             # Append workflow content to the agent's system message
+             # This ensures the context persists when agents are cloned
+             workflow_content = (
+                 f"\n\n--- Workflow Memory ---\n{prefix_prompt}\n\n{content}"
+             )
+
+             # Update the original system message to include workflow
+             if agent._original_system_message is None:
+                 logger.error(
+                     f"Agent {agent.agent_id} has no system message. "
+                     "Cannot append workflow memory to system message."
+                 )
+                 return (
+                     "Error: Agent has no system message to append workflow to"
+                 )
+
+             # Update the current system message
+             current_system_message = agent._system_message
+             if current_system_message is not None:
+                 new_sys_content = (
+                     current_system_message.content + workflow_content
+                 )
+                 agent._system_message = (
+                     current_system_message.create_new_instance(new_sys_content)
+                 )
+
+             # Replace the system message in memory
+             # Clear and re-initialize with updated system message
+             agent.memory.clear()
+             agent.update_memory(
+                 agent._system_message, OpenAIBackendRole.SYSTEM
+             )
+
+             char_count = len(content)
+             log_msg = (
+                 f"Context appended to agent {agent.agent_id} "
+                 f"({char_count} characters)"
+             )
+             logger.info(log_msg)
+
+             return log_msg
+
+         except Exception as e:
+             error_msg = f"Failed to load markdown context to memory: {e}"
+             logger.error(error_msg)
+             return error_msg
+
+     def _filter_metadata_from_content(self, content: str) -> str:
+         r"""Filter out metadata section from markdown content.
+
+         Args:
+             content (str): The full markdown content including metadata.
+
+         Returns:
+             str: Content with metadata section removed.
+         """
+         lines = content.split('\n')
+         filtered_lines = []
+         skip_metadata = False
+
+         for line in lines:
+             # Check if we're starting a metadata section
+             if line.strip() == "## Metadata":
+                 skip_metadata = True
+                 continue
+
+             # Check if we're starting a new section after metadata
+             if (
+                 skip_metadata
+                 and line.startswith("## ")
+                 and "Metadata" not in line
+             ):
+                 skip_metadata = False
+
+             # Add line if we're not in metadata section
+             if not skip_metadata:
+                 filtered_lines.append(line)
+
+         # Clean up any extra whitespace at the beginning
+         result = '\n'.join(filtered_lines).strip()
+         return result
+
+     # ========= SHARED SESSION MANAGEMENT METHODS =========
+
+     @classmethod
+     def get_workforce_shared(
+         cls, session_id: Optional[str] = None
+     ) -> 'ContextUtility':
+         r"""Get or create shared workforce context utility with lazy init.
+
+         This method provides a centralized way to access shared context
+         utilities for workforce workflows, ensuring all workforce components
+         use the same session directory.
+
+         Args:
+             session_id (str, optional): Custom session ID. If None, uses the
+                 default workforce session.
+
+         Returns:
+             ContextUtility: Shared context utility instance for workforce.
+         """
+         if session_id is None:
+             # Use default workforce session
+             if cls._default_workforce_session is None:
+                 camel_workdir = os.environ.get("CAMEL_WORKDIR")
+                 if camel_workdir:
+                     base_path = os.path.join(
+                         camel_workdir, "workforce_workflows"
+                     )
+                 else:
+                     base_path = "workforce_workflows"
+
+                 cls._default_workforce_session = cls(
+                     working_directory=base_path,
+                     create_folder=False,  # Don't create folder until needed
+                 )
+             return cls._default_workforce_session
+
+         # Use specific session
+         if session_id not in cls._shared_sessions:
+             camel_workdir = os.environ.get("CAMEL_WORKDIR")
+             if camel_workdir:
+                 base_path = os.path.join(camel_workdir, "workforce_workflows")
+             else:
+                 base_path = "workforce_workflows"
+
+             cls._shared_sessions[session_id] = cls(
+                 working_directory=base_path,
+                 session_id=session_id,
+                 create_folder=False,  # Don't create folder until needed
+             )
+         return cls._shared_sessions[session_id]
+
+     @classmethod
+     def reset_shared_sessions(cls) -> None:
+         r"""Reset shared sessions (useful for testing).
+
+         This method clears all shared session instances, forcing new ones
+         to be created on next access. Primarily used for testing to ensure
+         clean state between tests.
+         """
+         cls._shared_sessions.clear()
+         cls._default_workforce_session = None
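
For orientation, the following is a minimal usage sketch of the new module (not part of the diff above): it exercises WorkflowSummary and ContextUtility only through the methods defined in camel/utils/context_utils.py. The file name, field values, and the commented paths are illustrative assumptions, not values taken from the release.

# Minimal usage sketch; values are illustrative, API as defined above.
from camel.utils.context_utils import ContextUtility, WorkflowSummary

# A timestamped session directory is created under ./context_files/ by
# default (or under $CAMEL_WORKDIR/context_files when that variable is set).
ctx = ContextUtility()
print(ctx.get_session_id())         # e.g. session_20250101_120000_000000
print(ctx.get_working_directory())  # .../context_files/session_...

# Build a structured workflow summary and persist it as markdown.
summary = WorkflowSummary(
    task_title="List GitHub stargazers",
    task_description="Collect stargazers of a repository into a table.",
    tools=["GithubToolkit: fetched stargazers for the target repository"],
    steps=[
        "Fetch stargazers via the GitHub API",
        "Write the results to a table",
    ],
)
markdown = ctx.structured_output_to_markdown(
    structured_data=summary,
    metadata=ctx.get_session_metadata(),
)
ctx.save_markdown_file("workflow_memory", markdown, title="Workflow Memory")

# Later: reload the file, or search its "### ..." sections by keyword.
text = ctx.load_markdown_file("workflow_memory")
hits = ctx.search_in_file(
    ctx.get_working_directory() / "workflow_memory.md",
    keywords=["stargazers"],
    top_k=2,
)

The workforce-oriented helper ContextUtility.get_workforce_shared() follows the same pattern but is constructed with create_folder=False, so the shared session directory is only created once a file is actually written.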