camel-ai 0.2.79a0__py3-none-any.whl → 0.2.79a1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

@@ -863,10 +863,12 @@ class Workforce(BaseNode):
             Union[List[Task], Generator[List[Task], None, None]]:
                 The subtasks or generator of subtasks.
         """
-        decompose_prompt = TASK_DECOMPOSE_PROMPT.format(
-            content=task.content,
-            child_nodes_info=self._get_child_nodes_info(),
-            additional_info=task.additional_info,
+        decompose_prompt = str(
+            TASK_DECOMPOSE_PROMPT.format(
+                content=task.content,
+                child_nodes_info=self._get_child_nodes_info(),
+                additional_info=task.additional_info,
+            )
         )
         self.task_agent.reset()
         result = task.decompose(self.task_agent, decompose_prompt)
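This release wraps several prompt constructions in `str(...)` (here and in two later hunks). A plausible reading, inferred from the diff rather than stated in it: CAMEL's prompt templates behave like `str` subclasses whose `format` returns another template object, and coercing with `str()` guarantees downstream consumers receive a plain string. A self-contained sketch of that pattern, using a hypothetical `Prompt` class rather than the library's actual `TextPrompt`:

    class Prompt(str):
        """Toy stand-in for a str-subclass prompt template."""

        def format(self, *args, **kwargs) -> "Prompt":
            # format() on the subclass returns another Prompt, not a plain str
            return Prompt(super().format(*args, **kwargs))

    template = Prompt("Decompose the task: {content}")
    filled = template.format(content="write tests")
    print(type(filled).__name__)       # Prompt
    print(type(str(filled)).__name__)  # str -- what the wrapped code passes on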
@@ -993,16 +995,18 @@ class Workforce(BaseNode):
         ]
 
         # Format the unified analysis prompt
-        analysis_prompt = TASK_ANALYSIS_PROMPT.format(
-            task_id=task.id,
-            task_content=task.content,
-            task_result=task_result,
-            failure_count=task.failure_count,
-            task_depth=task.get_depth(),
-            assigned_worker=task.assigned_worker_id or "unknown",
-            issue_type=issue_type,
-            issue_specific_analysis=issue_analysis,
-            response_format=response_format,
+        analysis_prompt = str(
+            TASK_ANALYSIS_PROMPT.format(
+                task_id=task.id,
+                task_content=task.content,
+                task_result=task_result,
+                failure_count=task.failure_count,
+                task_depth=task.get_depth(),
+                assigned_worker=task.assigned_worker_id or "unknown",
+                issue_type=issue_type,
+                issue_specific_analysis=issue_analysis,
+                response_format=response_format,
+            )
         )
 
         try:
@@ -1415,6 +1419,20 @@ class Workforce(BaseNode):
             logger.warning(f"Task {task_id} not found in pending tasks.")
             return False
 
+    def get_main_task_queue(self) -> List[Task]:
+        r"""Get current main task queue for human review.
+        Returns:
+            List[Task]: List of main tasks waiting to be decomposed
+                and executed.
+        """
+        # Return tasks from pending queue that need decomposition
+        return [
+            t
+            for t in self._pending_tasks
+            if t.additional_info
+            and t.additional_info.get('_needs_decomposition')
+        ]
+
     def add_task(
         self,
         content: str,
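The new `get_main_task_queue` helper filters the pending queue by an internal `_needs_decomposition` flag set elsewhere in the class. A standalone sketch of that filter, with a simplified stand-in for the real `Task` class from `camel.tasks`:

    from dataclasses import dataclass
    from typing import Dict, List, Optional

    @dataclass
    class Task:
        id: str
        content: str
        additional_info: Optional[Dict] = None

    # _pending_tasks mixes undecomposed main tasks and already-split subtasks
    pending: List[Task] = [
        Task("1", "main goal", {"_needs_decomposition": True}),
        Task("1.1", "subtask of 1"),  # no flag: not a main task
    ]

    main_queue = [
        t for t in pending
        if t.additional_info and t.additional_info.get("_needs_decomposition")
    ]
    print([t.id for t in main_queue])  # ['1']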
@@ -2439,8 +2457,10 @@ class Workforce(BaseNode):
 
     def load_workflow_memories(
         self,
-        max_files_to_load: int = 3,
         session_id: Optional[str] = None,
+        worker_max_workflows: int = 3,
+        coordinator_max_workflows: int = 5,
+        task_agent_max_workflows: int = 3,
     ) -> Dict[str, bool]:
         r"""Load workflow memories for all SingleAgentWorker instances in the
         workforce.
@@ -2451,11 +2471,15 @@ class Workforce(BaseNode):
         method. Workers match files based on their description names.
 
         Args:
-            max_files_to_load (int): Maximum number of workflow files to load
-                per worker. (default: :obj:`3`)
             session_id (Optional[str]): Specific workforce session ID to load
                 from. If None, searches across all sessions.
                 (default: :obj:`None`)
+            worker_max_workflows (int): Maximum number of workflow files to
+                load per worker agent. (default: :obj:`3`)
+            coordinator_max_workflows (int): Maximum number of workflow files
+                to load for the coordinator agent. (default: :obj:`5`)
+            task_agent_max_workflows (int): Maximum number of workflow files
+                to load for the task planning agent. (default: :obj:`3`)
 
         Returns:
             Dict[str, bool]: Dictionary mapping worker node IDs to load
@@ -2467,7 +2491,11 @@ class Workforce(BaseNode):
             >>> workforce.add_single_agent_worker(
             ...     "data_analyst", analyst_agent
             ... )
-            >>> success_status = workforce.load_workflows()
+            >>> success_status = workforce.load_workflow_memories(
+            ...     worker_max_workflows=5,
+            ...     coordinator_max_workflows=10,
+            ...     task_agent_max_workflows=5
+            ... )
             >>> print(success_status)
             {'worker_123': True}  # Successfully loaded workflows for
                                   # data_analyst
@@ -2485,7 +2513,7 @@ class Workforce(BaseNode):
                 # For loading, don't set shared context utility
                 # Let each worker search across existing sessions
                 success = child.load_workflow_memories(
-                    max_files_to_load=max_files_to_load,
+                    max_workflows=worker_max_workflows,
                     session_id=session_id,
                 )
                 results[child.node_id] = success
@@ -2500,13 +2528,18 @@ class Workforce(BaseNode):
                 results[child.node_id] = False
 
         # Load aggregated workflow summaries for coordinator and task agents
-        self._load_management_agent_workflows(max_files_to_load, session_id)
+        self._load_management_agent_workflows(
+            coordinator_max_workflows, task_agent_max_workflows, session_id
+        )
 
         logger.info(f"Workflow load completed for {len(results)} workers")
         return results
 
     def _load_management_agent_workflows(
-        self, max_files_to_load: int, session_id: Optional[str] = None
+        self,
+        coordinator_max_workflows: int,
+        task_agent_max_workflows: int,
+        session_id: Optional[str] = None,
     ) -> None:
         r"""Load workflow summaries for coordinator and task planning agents.
 
@@ -2517,7 +2550,10 @@ class Workforce(BaseNode):
             successful strategies
 
         Args:
-            max_files_to_load (int): Maximum number of workflow files to load.
+            coordinator_max_workflows (int): Maximum number of workflow files
+                to load for the coordinator agent.
+            task_agent_max_workflows (int): Maximum number of workflow files
+                to load for the task planning agent.
             session_id (Optional[str]): Specific session ID to load from.
                 If None, searches across all sessions.
         """
@@ -2555,9 +2591,9 @@ class Workforce(BaseNode):
             key=lambda x: os.path.getmtime(x), reverse=True
         )
 
-        # Load workflows for coordinator agent (up to 5 most recent)
+        # Load workflows for coordinator agent
         coordinator_loaded = 0
-        for file_path in workflow_files[:max_files_to_load]:
+        for file_path in workflow_files[:coordinator_max_workflows]:
             try:
                 filename = os.path.basename(file_path).replace('.md', '')
                 session_dir = os.path.dirname(file_path)
@@ -2578,9 +2614,9 @@ class Workforce(BaseNode):
                     f"Failed to load coordinator workflow {file_path}: {e}"
                 )
 
-        # Load workflows for task agent (up to 3 most recent)
+        # Load workflows for task agent
         task_agent_loaded = 0
-        for file_path in workflow_files[:max_files_to_load]:
+        for file_path in workflow_files[:task_agent_max_workflows]:
             try:
                 filename = os.path.basename(file_path).replace('.md', '')
                 session_dir = os.path.dirname(file_path)
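With the split parameters, the coordinator and task agents now take independent prefixes of the same recency-sorted file list instead of sharing one `max_files_to_load` cap. A minimal illustration (file names are hypothetical):

    # newest first, as after the os.path.getmtime sort above
    workflow_files = ["wf_newest.md", "wf_middle.md", "wf_oldest.md"]

    coordinator_max_workflows = 2
    task_agent_max_workflows = 1

    print(workflow_files[:coordinator_max_workflows])  # two newest files
    print(workflow_files[:task_agent_max_workflows])   # only the newest file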
@@ -3089,10 +3125,12 @@ class Workforce(BaseNode):
         Returns:
             Worker: The created worker node.
         """
-        prompt = CREATE_NODE_PROMPT.format(
-            content=task.content,
-            child_nodes_info=self._get_child_nodes_info(),
-            additional_info=task.additional_info,
+        prompt = str(
+            CREATE_NODE_PROMPT.format(
+                content=task.content,
+                child_nodes_info=self._get_child_nodes_info(),
+                additional_info=task.additional_info,
+            )
         )
         # Check if we should use structured handler
         if self.use_structured_output_handler:
@@ -3892,8 +3930,7 @@ class Workforce(BaseNode):
         # Reset in-flight counter to prevent hanging
         self._in_flight_tasks = 0
 
-        # Check if there are any pending tasks (including those needing
-        # decomposition)
+        # Check if there are any main pending tasks after filtering
         if self._pending_tasks:
             # Check if the first pending task needs decomposition
             next_task = self._pending_tasks[0]
@@ -4102,6 +4139,20 @@ class Workforce(BaseNode):
                         )
                         if not halt:
                             continue
+
+                        # Do not halt if we have main tasks in queue
+                        if len(self.get_main_task_queue()) > 0:
+                            print(
+                                f"{Fore.RED}Task {returned_task.id} has "
+                                f"failed for {MAX_TASK_RETRIES} times "
+                                f"after insufficient results, skipping "
+                                f"that task. Final error: "
+                                f"{returned_task.result or 'Unknown err'}"
+                                f"{Fore.RESET}"
+                            )
+                            self._skip_requested = True
+                            continue
+
                         print(
                             f"{Fore.RED}Task {returned_task.id} has "
                             f"failed for {MAX_TASK_RETRIES} times after "
@@ -4207,6 +4258,19 @@ class Workforce(BaseNode):
                     halt = await self._handle_failed_task(returned_task)
                     if not halt:
                         continue
+
+                    # Do not halt if we have main tasks in queue
+                    if len(self.get_main_task_queue()) > 0:
+                        print(
+                            f"{Fore.RED}Task {returned_task.id} has "
+                            f"failed for {MAX_TASK_RETRIES} times, "
+                            f"skipping that task. Final error: "
+                            f"{returned_task.result or 'Unknown error'}"
+                            f"{Fore.RESET}"
+                        )
+                        self._skip_requested = True
+                        continue
+
                     print(
                         f"{Fore.RED}Task {returned_task.id} has failed "
                         f"for {MAX_TASK_RETRIES} times, halting "
@@ -121,10 +121,11 @@ class OceanBaseStorage(BaseVectorStorage):
             )
 
             # Get the first index parameter
-            first_index_param = next(iter(index_params))
-            self._client.create_vidx_with_vec_index_param(
-                table_name=self.table_name, vidx_param=first_index_param
-            )
+            first_index_param = next(iter(index_params), None)
+            if first_index_param is not None:
+                self._client.create_vidx_with_vec_index_param(
+                    table_name=self.table_name, vidx_param=first_index_param
+                )
 
             logger.info(f"Created table {self.table_name} with vector index")
         else:
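The OceanBase change guards against an empty `index_params`: `next(iter(x))` raises `StopIteration` on an empty iterable, while the two-argument form returns a default instead. A standalone demonstration:

    index_params: list = []

    try:
        next(iter(index_params))
    except StopIteration:
        print("0.2.79a0 behavior: StopIteration escapes table setup")

    first = next(iter(index_params), None)  # 0.2.79a1 behavior
    if first is None:
        print("index creation skipped; table is still created")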
@@ -1201,6 +1201,171 @@ class FileToolkit(BaseToolkit):
         except Exception as e:
             return f"Error editing file: {e}"
 
+    def search_files(
+        self,
+        pattern: str,
+        file_types: Optional[List[str]] = None,
+        file_pattern: Optional[str] = None,
+        path: Optional[str] = None,
+    ) -> str:
+        r"""Search for a text pattern in files with specified extensions or
+        file patterns.
+
+        This method searches for a text pattern (case-insensitive substring
+        match) in files matching either the specified file types or a file
+        pattern. It returns structured results showing which files contain
+        the pattern, along with line numbers and matching content.
+
+        Args:
+            pattern (str): The text pattern to search for (case-insensitive
+                string match).
+            file_types (Optional[List[str]]): List of file extensions to
+                search (e.g., ["md", "txt", "py"]). Do not include the dot.
+                If not provided and file_pattern is also not provided,
+                defaults to ["md"] (markdown files). Ignored if file_pattern
+                is provided. (default: :obj:`None`)
+            file_pattern (Optional[str]): Glob pattern for matching files
+                (e.g., "*_workflow.md", "test_*.py"). If provided, this
+                overrides file_types. (default: :obj:`None`)
+            path (Optional[str]): Directory to search in. If not provided,
+                uses the working_directory. Can be relative or absolute.
+                (default: :obj:`None`)
+
+        Returns:
+            str: JSON-formatted string containing search results with the
+                structure:
+                {
+                    "pattern": "search_pattern",
+                    "searched_path": "/absolute/path",
+                    "file_types": ["md", "txt"],
+                    "file_pattern": "*_workflow.md",
+                    "matches": [
+                        {
+                            "file": "relative/path/to/file.md",
+                            "line": 42,
+                            "content": "matching line content"
+                        },
+                        ...
+                    ],
+                    "total_matches": 10,
+                    "files_searched": 5
+                }
+                If an error occurs, returns a JSON string with an "error" key.
+        """
+        import json
+
+        try:
+            # resolve search path
+            if path:
+                path_obj = Path(path)
+                if not path_obj.is_absolute():
+                    search_path = (self.working_directory / path_obj).resolve()
+                else:
+                    search_path = path_obj.resolve()
+            else:
+                search_path = self.working_directory
+
+            # validate that search path exists
+            if not search_path.exists():
+                return json.dumps(
+                    {"error": f"Search path does not exist: {search_path}"}
+                )
+
+            if not search_path.is_dir():
+                return json.dumps(
+                    {"error": f"Search path is not a directory: {search_path}"}
+                )
+
+            # collect all matching files
+            matching_files: List[Path] = []
+
+            if file_pattern:
+                # use file_pattern if provided (overrides file_types)
+                pattern_glob = f"**/{file_pattern}"
+                matching_files.extend(search_path.rglob(pattern_glob))
+            else:
+                # use file_types if file_pattern not provided
+                if file_types is None:
+                    file_types = ["md"]
+
+                # normalize and deduplicate file types
+                normalized_types = set()
+                for file_type in file_types:
+                    file_type = file_type.lstrip('.')
+                    if file_type:  # skip empty strings
+                        normalized_types.add(file_type)
+
+                for file_type in normalized_types:
+                    # use rglob for recursive search
+                    pattern_glob = f"**/*.{file_type}"
+                    matching_files.extend(search_path.rglob(pattern_glob))
+
+            # search through files (case-insensitive)
+            matches = []
+            files_searched = 0
+            pattern_lower = pattern.lower()
+
+            for file_path in matching_files:
+                files_searched += 1
+                try:
+                    # read file content
+                    content = file_path.read_text(
+                        encoding=self.default_encoding
+                    )
+                    lines = content.splitlines()
+
+                    # search each line for pattern (case-insensitive)
+                    for line_num, line in enumerate(lines, start=1):
+                        if pattern_lower in line.lower():
+                            # get relative path for cleaner output
+                            try:
+                                relative_path = file_path.relative_to(
+                                    search_path
+                                )
+                            except ValueError:
+                                relative_path = file_path
+
+                            matches.append(
+                                {
+                                    "file": str(relative_path),
+                                    "line": line_num,
+                                    "content": line.strip(),
+                                }
+                            )
+
+                except (UnicodeDecodeError, PermissionError) as e:
+                    # skip files that can't be read
+                    logger.debug(f"Skipping file {file_path}: {e}")
+                    continue
+
+            # build result
+            result = {
+                "pattern": pattern,
+                "searched_path": str(search_path),
+                "matches": matches,
+                "total_matches": len(matches),
+                "files_searched": files_searched,
+            }
+
+            # include file_pattern or file_types in result
+            if file_pattern:
+                result["file_pattern"] = file_pattern
+            else:
+                result["file_types"] = (
+                    sorted(normalized_types) if normalized_types else ["md"]
+                )
+
+            logger.info(
+                f"Search completed: found {len(matches)} matches "
+                f"in {files_searched} files"
+            )
+            return json.dumps(result, indent=2)
+
+        except Exception as e:
+            error_msg = f"Error during file search: {e}"
+            logger.error(error_msg)
+            return json.dumps({"error": error_msg})
+
     def get_tools(self) -> List[FunctionTool]:
         r"""Return a list of FunctionTool objects representing the functions
         in the toolkit.
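A possible call pattern for the new tool (a sketch, not from the package docs; the import path and the `working_directory` constructor argument are assumptions based on this diff):

    import json

    from camel.toolkits import FileToolkit

    toolkit = FileToolkit(working_directory="./notes")
    raw = toolkit.search_files(
        pattern="retry",               # case-insensitive substring match
        file_pattern="*_workflow.md",  # overrides file_types when given
    )
    result = json.loads(raw)
    print(result["total_matches"], "matches in",
          result["files_searched"], "files")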
@@ -1213,6 +1378,7 @@ class FileToolkit(BaseToolkit):
             FunctionTool(self.write_to_file),
             FunctionTool(self.read_file),
             FunctionTool(self.edit_file),
+            FunctionTool(self.search_files),
         ]
 
 
@@ -148,12 +148,10 @@ class ToolkitMessageIntegration:
         """
         return FunctionTool(self.send_message_to_user)
 
-    def register_toolkits(
-        self, toolkit: BaseToolkit, tool_names: Optional[List[str]] = None
-    ) -> BaseToolkit:
-        r"""Add messaging capabilities to toolkit methods.
+    def register_toolkits(self, toolkit: BaseToolkit) -> BaseToolkit:
+        r"""Add messaging capabilities to all toolkit methods.
 
-        This method modifies a toolkit so that specified tools can send
+        This method modifies a toolkit so that all its tools can send
         status messages to users while executing their primary function.
         The tools will accept optional messaging parameters:
         - message_title: Title of the status message
@@ -162,20 +160,18 @@ class ToolkitMessageIntegration:
 
         Args:
             toolkit: The toolkit to add messaging capabilities to
-            tool_names: List of specific tool names to modify.
-                If None, messaging is added to all tools.
 
         Returns:
-            The toolkit with messaging capabilities added
+            The same toolkit instance with messaging capabilities added to
+            all methods.
         """
         original_tools = toolkit.get_tools()
         enhanced_methods = {}
         for tool in original_tools:
             method_name = tool.func.__name__
-            if tool_names is None or method_name in tool_names:
-                enhanced_func = self._add_messaging_to_tool(tool.func)
-                enhanced_methods[method_name] = enhanced_func
-                setattr(toolkit, method_name, enhanced_func)
+            enhanced_func = self._add_messaging_to_tool(tool.func)
+            enhanced_methods[method_name] = enhanced_func
+            setattr(toolkit, method_name, enhanced_func)
         original_get_tools_method = toolkit.get_tools
 
         def enhanced_get_tools() -> List[FunctionTool]:
@@ -201,7 +197,7 @@ class ToolkitMessageIntegration:
         def enhanced_clone_for_new_session(new_session_id=None):
             cloned_toolkit = original_clone_method(new_session_id)
             return message_integration_instance.register_toolkits(
-                cloned_toolkit, tool_names
+                cloned_toolkit
             )
 
         toolkit.clone_for_new_session = enhanced_clone_for_new_session
@@ -300,6 +296,12 @@ class ToolkitMessageIntegration:
         This internal method modifies the function signature and docstring
         to include optional messaging parameters that trigger status updates.
         """
+        if getattr(func, "__message_integration_enhanced__", False):
+            logger.debug(
+                f"Function {func.__name__} already enhanced, skipping"
+            )
+            return func
+
         # Get the original signature
         original_sig = inspect.signature(func)
 
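The new early return makes the enhancement idempotent, which matters now that `register_toolkits` unconditionally wraps every tool and re-runs on cloned sessions: without the marker attribute, a clone's tools would be wrapped twice. A self-contained sketch of the marker pattern, with generic names rather than CAMEL's internals:

    import functools

    def add_messaging(func):
        # Skip functions that were already wrapped (mirrors the new guard).
        if getattr(func, "__message_integration_enhanced__", False):
            return func

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            print(f"[status] running {func.__name__}")
            return func(*args, **kwargs)

        wrapper.__message_integration_enhanced__ = True
        return wrapper

    def greet(name: str) -> str:
        return f"hello {name}"

    once = add_messaging(greet)
    twice = add_messaging(once)  # returns the same wrapper, not a new layer
    print(once is twice)         # True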