camel-ai 0.2.66__py3-none-any.whl → 0.2.67__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +3 -0
  3. camel/configs/qianfan_config.py +85 -0
  4. camel/models/__init__.py +2 -0
  5. camel/models/aiml_model.py +8 -0
  6. camel/models/anthropic_model.py +8 -0
  7. camel/models/aws_bedrock_model.py +8 -0
  8. camel/models/azure_openai_model.py +14 -5
  9. camel/models/base_model.py +4 -0
  10. camel/models/cohere_model.py +9 -2
  11. camel/models/crynux_model.py +8 -0
  12. camel/models/deepseek_model.py +8 -0
  13. camel/models/gemini_model.py +8 -0
  14. camel/models/groq_model.py +8 -0
  15. camel/models/internlm_model.py +8 -0
  16. camel/models/litellm_model.py +5 -0
  17. camel/models/lmstudio_model.py +14 -1
  18. camel/models/mistral_model.py +15 -1
  19. camel/models/model_factory.py +6 -0
  20. camel/models/modelscope_model.py +8 -0
  21. camel/models/moonshot_model.py +8 -0
  22. camel/models/nemotron_model.py +17 -2
  23. camel/models/netmind_model.py +8 -0
  24. camel/models/novita_model.py +8 -0
  25. camel/models/nvidia_model.py +8 -0
  26. camel/models/ollama_model.py +8 -0
  27. camel/models/openai_compatible_model.py +23 -5
  28. camel/models/openai_model.py +21 -4
  29. camel/models/openrouter_model.py +8 -0
  30. camel/models/ppio_model.py +8 -0
  31. camel/models/qianfan_model.py +104 -0
  32. camel/models/qwen_model.py +8 -0
  33. camel/models/reka_model.py +18 -3
  34. camel/models/samba_model.py +17 -3
  35. camel/models/sglang_model.py +20 -5
  36. camel/models/siliconflow_model.py +8 -0
  37. camel/models/stub_model.py +8 -1
  38. camel/models/togetherai_model.py +8 -0
  39. camel/models/vllm_model.py +7 -0
  40. camel/models/volcano_model.py +14 -1
  41. camel/models/watsonx_model.py +4 -1
  42. camel/models/yi_model.py +8 -0
  43. camel/models/zhipuai_model.py +8 -0
  44. camel/societies/workforce/prompts.py +33 -17
  45. camel/societies/workforce/role_playing_worker.py +3 -8
  46. camel/societies/workforce/single_agent_worker.py +1 -3
  47. camel/societies/workforce/task_channel.py +16 -18
  48. camel/societies/workforce/utils.py +104 -14
  49. camel/societies/workforce/workforce.py +1253 -99
  50. camel/societies/workforce/workforce_logger.py +613 -0
  51. camel/tasks/task.py +16 -5
  52. camel/toolkits/__init__.py +2 -0
  53. camel/toolkits/code_execution.py +1 -1
  54. camel/toolkits/playwright_mcp_toolkit.py +2 -1
  55. camel/toolkits/pptx_toolkit.py +4 -4
  56. camel/types/enums.py +32 -0
  57. camel/types/unified_model_type.py +5 -0
  58. {camel_ai-0.2.66.dist-info → camel_ai-0.2.67.dist-info}/METADATA +3 -3
  59. {camel_ai-0.2.66.dist-info → camel_ai-0.2.67.dist-info}/RECORD +61 -58
  60. {camel_ai-0.2.66.dist-info → camel_ai-0.2.67.dist-info}/WHEEL +0 -0
  61. {camel_ai-0.2.66.dist-info → camel_ai-0.2.67.dist-info}/licenses/LICENSE +0 -0
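
The headline changes in this release are a new Qianfan model backend (config class, model class, and the accompanying factory/enum wiring) and a substantially expanded workforce module with structured logging. As a loosely hedged sketch only: the snippet below shows how such a backend would typically be constructed through CAMEL's existing ModelFactory API. The enum member name, model identifier, and the QianfanConfig export are assumptions, since this diff lists the new files but does not show their contents.

from camel.configs import QianfanConfig  # assumed to be re-exported in 0.2.67
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Hypothetical usage; ModelPlatformType.QIANFAN and the model name are
# assumed, not confirmed by this diff.
qianfan_model = ModelFactory.create(
    model_platform=ModelPlatformType.QIANFAN,
    model_type="ernie-4.0-turbo-8k",
    model_config_dict=QianfanConfig().as_dict(),
)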
camel/societies/workforce/workforce_logger.py ADDED
@@ -0,0 +1,613 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import json
+ from datetime import datetime, timezone
+ from typing import Any, Dict, List, Optional
+ 
+ from camel.logger import get_logger
+ from camel.types.agents import ToolCallingRecord
+ 
+ logger = get_logger(__name__)
+ 
+ 
+ class WorkforceLogger:
+     r"""Logs events and metrics for a Workforce instance."""
+ 
+     def __init__(self, workforce_id: str):
+         """Initializes the WorkforceLogger.
+ 
+         Args:
+             workforce_id (str): The unique identifier for the workforce.
+         """
+         self.workforce_id: str = workforce_id
+         self.log_entries: List[Dict[str, Any]] = []
+         self._task_hierarchy: Dict[str, Dict[str, Any]] = {}
+         self._worker_information: Dict[str, Dict[str, Any]] = {}
+         self._initial_worker_logs: List[Dict[str, Any]] = []
+ 
+     def _log_event(self, event_type: str, **kwargs: Any) -> None:
+         r"""Internal method to create and store a log entry.
+ 
+         Args:
+             event_type (str): The type of event being logged.
+             **kwargs: Additional data associated with the event.
+         """
+         log_entry = {
+             'timestamp': datetime.now(timezone.utc).isoformat(),
+             'workforce_id': self.workforce_id,
+             'event_type': event_type,
+             **kwargs,
+         }
+         self.log_entries.append(log_entry)
+         if event_type == 'worker_created':
+             self._initial_worker_logs.append(log_entry)
+ 
+     def log_task_created(
+         self,
+         task_id: str,
+         description: str,
+         parent_task_id: Optional[str] = None,
+         task_type: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the creation of a new task."""
+         self._log_event(
+             'task_created',
+             task_id=task_id,
+             description=description,
+             parent_task_id=parent_task_id,
+             task_type=task_type,
+             metadata=metadata or {},
+         )
+         self._task_hierarchy[task_id] = {
+             'parent': parent_task_id,
+             'children': [],
+             'status': 'created',
+             'description': description,
+             'assigned_to': None,
+             **(metadata or {}),
+         }
+         if parent_task_id and parent_task_id in self._task_hierarchy:
+             self._task_hierarchy[parent_task_id]['children'].append(task_id)
+ 
+     def log_task_decomposed(
+         self,
+         parent_task_id: str,
+         subtask_ids: List[str],
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the decomposition of a task into subtasks."""
+         self._log_event(
+             'task_decomposed',
+             parent_task_id=parent_task_id,
+             subtask_ids=subtask_ids,
+             metadata=metadata or {},
+         )
+         if parent_task_id in self._task_hierarchy:
+             self._task_hierarchy[parent_task_id]['status'] = "decomposed"
+ 
+     def log_task_assigned(
+         self,
+         task_id: str,
+         worker_id: str,
+         queue_time_seconds: Optional[float] = None,
+         dependencies: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the assignment of a task to a worker."""
+         self._log_event(
+             'task_assigned',
+             task_id=task_id,
+             worker_id=worker_id,
+             queue_time_seconds=queue_time_seconds,
+             dependencies=dependencies or [],
+             metadata=metadata or {},
+         )
+         if task_id in self._task_hierarchy:
+             self._task_hierarchy[task_id]['status'] = 'assigned'
+             self._task_hierarchy[task_id]['assigned_to'] = worker_id
+             self._task_hierarchy[task_id]['dependencies'] = dependencies or []
+         if worker_id in self._worker_information:
+             self._worker_information[worker_id]['current_task_id'] = task_id
+             self._worker_information[worker_id]['status'] = 'busy'
+ 
+     def log_task_started(
+         self,
+         task_id: str,
+         worker_id: str,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs when a worker starts processing a task."""
+         self._log_event(
+             'task_started',
+             task_id=task_id,
+             worker_id=worker_id,
+             metadata=metadata or {},
+         )
+         if task_id in self._task_hierarchy:
+             self._task_hierarchy[task_id]['status'] = 'processing'
+ 
+     def log_task_completed(
+         self,
+         task_id: str,
+         worker_id: str,
+         result_summary: Optional[str] = None,
+         processing_time_seconds: Optional[float] = None,
+         token_usage: Optional[Dict[str, int]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the successful completion of a task."""
+         self._log_event(
+             'task_completed',
+             task_id=task_id,
+             worker_id=worker_id,
+             result_summary=result_summary,
+             processing_time_seconds=processing_time_seconds,
+             token_usage=token_usage or {},
+             metadata=metadata or {},
+         )
+         if task_id in self._task_hierarchy:
+             self._task_hierarchy[task_id]['status'] = 'completed'
+             self._task_hierarchy[task_id]['assigned_to'] = None
+             # Store processing time in task hierarchy for display in tree
+             if processing_time_seconds is not None:
+                 self._task_hierarchy[task_id]['completion_time_seconds'] = (
+                     processing_time_seconds
+                 )
+             # Store token usage in task hierarchy for display in tree
+             if token_usage is not None:
+                 self._task_hierarchy[task_id]['token_usage'] = token_usage
+         if worker_id in self._worker_information:
+             self._worker_information[worker_id]['current_task_id'] = None
+             self._worker_information[worker_id]['status'] = 'idle'
+             self._worker_information[worker_id]['tasks_completed'] = (
+                 self._worker_information[worker_id].get('tasks_completed', 0)
+                 + 1
+             )
+ 
+     def log_task_failed(
+         self,
+         task_id: str,
+         error_message: str,
+         error_type: str,
+         worker_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the failure of a task."""
+         self._log_event(
+             'task_failed',
+             task_id=task_id,
+             worker_id=worker_id,
+             error_message=error_message,
+             error_type=error_type,
+             metadata=metadata or {},
+         )
+         if task_id in self._task_hierarchy:
+             self._task_hierarchy[task_id]['status'] = 'failed'
+             self._task_hierarchy[task_id]['error'] = error_message
+             self._task_hierarchy[task_id]['assigned_to'] = None
+         if worker_id and worker_id in self._worker_information:
+             self._worker_information[worker_id]['current_task_id'] = None
+             self._worker_information[worker_id]['status'] = 'idle'
+             self._worker_information[worker_id]['tasks_failed'] = (
+                 self._worker_information[worker_id].get('tasks_failed', 0) + 1
+             )
+ 
+     def log_worker_created(
+         self,
+         worker_id: str,
+         worker_type: str,
+         role: str,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the creation of a new worker."""
+         self._log_event(
+             'worker_created',
+             worker_id=worker_id,
+             worker_type=worker_type,
+             role=role,
+             metadata=metadata or {},
+         )
+         self._worker_information[worker_id] = {
+             'type': worker_type,
+             'role': role,
+             'status': 'idle',
+             'current_task_id': None,
+             'tasks_completed': 0,
+             'tasks_failed': 0,
+             **(metadata or {}),
+         }
+ 
+     def log_worker_deleted(
+         self,
+         worker_id: str,
+         reason: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the deletion of a worker."""
+         self._log_event(
+             'worker_deleted',
+             worker_id=worker_id,
+             reason=reason,
+             metadata=metadata or {},
+         )
+         if worker_id in self._worker_information:
+             self._worker_information[worker_id]['status'] = 'deleted'
+             # Or del self._worker_information[worker_id]
+ 
+     def reset_task_data(self) -> None:
+         r"""Resets logs and data related to tasks, preserving worker
+         information.
+         """
+         # Restore log entries from the initial worker logs
+         self.log_entries = list(self._initial_worker_logs)  # Make a copy
+ 
+         self._task_hierarchy.clear()
+         for worker_id in self._worker_information:
+             if (
+                 self._worker_information[worker_id].get('status') != 'deleted'
+             ):  # Don't revive deleted workers
+                 self._worker_information[worker_id]['current_task_id'] = None
+                 self._worker_information[worker_id]['status'] = 'idle'
+         logger.info(
+             f"WorkforceLogger: Task data reset for workforce "
+             f"{self.workforce_id}"
+         )
+ 
+     def log_queue_status(
+         self,
+         queue_name: str,
+         length: int,
+         pending_task_ids: Optional[List[str]] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+     ) -> None:
+         r"""Logs the status of a task queue."""
+         self._log_event(
+             'queue_status',
+             queue_name=queue_name,
+             length=length,
+             pending_task_ids=pending_task_ids or [],
+             metadata=metadata or {},
+         )
+ 
+     def dump_to_json(self, file_path: str) -> None:
+         r"""Dumps all log entries to a JSON file.
+ 
+         Args:
+             file_path (str): The path to the JSON file.
+         """
+ 
+         def json_serializer_default(o: Any) -> Any:
+             if isinstance(o, ToolCallingRecord):
+                 return o.as_dict()
+             # Let the default encoder raise the TypeError for other types
+             raise TypeError(
+                 f"Object of type {o.__class__.__name__} is not "
+                 f"JSON serializable"
+             )
+ 
+         try:
+             with open(file_path, 'w') as f:
+                 json.dump(
+                     self.log_entries,
+                     f,
+                     indent=4,
+                     default=json_serializer_default,
+                 )
+         except IOError as e:
+             # Consider using camel.logger for this kind of internal error
+             logger.error(f"Error dumping logs to JSON: {e}")
+ 
+     def _get_all_tasks_in_hierarchy(
+         self, task_id: str
+     ) -> Dict[str, Dict[str, Any]]:
+         r"""Recursively collect all tasks in the hierarchy starting from
+         task_id.
+         """
+         result: Dict[str, Dict[str, Any]] = {}
+         if task_id not in self._task_hierarchy:
+             return result
+ 
+         # Add the current task
+         result[task_id] = self._task_hierarchy[task_id]
+ 
+         # Add all children recursively
+         children = self._task_hierarchy[task_id].get('children', [])
+         for child_id in children:
+             result.update(self._get_all_tasks_in_hierarchy(child_id))
+ 
+         return result
+ 
+     def _get_task_tree_string(
+         self, task_id: str, prefix: str = "", is_last: bool = True
+     ) -> str:
+         r"""Generate a string representation of the task tree."""
+         if task_id not in self._task_hierarchy:
+             return ""
+ 
+         task_info = self._task_hierarchy[task_id]
+         description = task_info.get('description', '')
+         status = task_info.get('status', 'unknown')
+         assignee = task_info.get('assigned_to')
+         assignee_str = f" [assigned to: {assignee}]" if assignee else ""
+         dependencies = task_info.get('dependencies', [])
+         dependencies_list = [
+             dep for dep in dependencies if dep in self._task_hierarchy
+         ]
+         dependencies_str = (
+             f" (dependencies: {', '.join(dependencies_list)})"
+             if dependencies_list
+             else ""
+         )
+         error_str = (
+             f" [ERROR: {task_info.get('error', '')}]"
+             if status == 'failed'
+             else ""
+         )
+ 
+         # Add completion time and token usage for completed tasks
+         completion_time_str = ""
+         token_usage_str = ""
+ 
+         if status == 'completed':
+             # For the root task (typically task_id = '0'), calculate total
+             # tokens and time
+             if task_id == '0':
+                 # Calculate total tokens from all child tasks
+                 total_tokens = 0
+                 total_time = 0.0
+ 
+                 # Recursively get all tasks in the hierarchy
+                 all_tasks = self._get_all_tasks_in_hierarchy(task_id)
+ 
+                 # Sum up tokens and time from all tasks
+                 for child_id, child_info in all_tasks.items():
+                     if (
+                         child_id != task_id
+                     ):  # Skip the root task itself to avoid double counting
+                         # Add tokens
+                         if (
+                             'token_usage' in child_info
+                             and child_info['token_usage'] is not None
+                         ):
+                             child_tokens = child_info['token_usage']
+                             if (
+                                 isinstance(child_tokens, dict)
+                                 and 'total_tokens' in child_tokens
+                             ):
+                                 total_tokens += child_tokens['total_tokens']
+                             elif isinstance(child_tokens, int):
+                                 total_tokens += child_tokens
+ 
+                         # Add completion time
+                         if (
+                             'completion_time_seconds' in child_info
+                             and child_info['completion_time_seconds']
+                             is not None
+                         ):
+                             total_time += child_info['completion_time_seconds']
+ 
+                 # Format the strings for the root task
+                 completion_time_str = (
+                     f" (completed in {total_time:.2f} seconds total)"
+                 )
+                 token_usage_str = f" [total tokens: {total_tokens}]"
+             else:
+                 # Regular task (not root) - show its own completion time and
+                 # tokens
+                 if (
+                     'completion_time_seconds' in task_info
+                     and task_info['completion_time_seconds'] is not None
+                 ):
+                     completion_time = task_info['completion_time_seconds']
+                     completion_time_str = (
+                         f" (completed in {completion_time:.2f} seconds)"
+                     )
+                 else:
+                     # Add a default message when completion time is not
+                     # available
+                     completion_time_str = " (completed)"
+ 
+                 # Add token usage if available
+                 if (
+                     'token_usage' in task_info
+                     and task_info['token_usage'] is not None
+                 ):
+                     token_usage = task_info['token_usage']
+                     if (
+                         isinstance(token_usage, dict)
+                         and 'total_tokens' in token_usage
+                     ):
+                         token_usage_str = (
+                             f" [tokens: {token_usage['total_tokens']}]"
+                         )
+                     elif isinstance(token_usage, int):
+                         token_usage_str = f" [tokens: {token_usage}]"
+ 
+         tree_str = f"{prefix}{'`-- ' if is_last else '|-- '}[{task_id}] {description} [{status}]{completion_time_str}{token_usage_str}{assignee_str}{dependencies_str}{error_str}\n"  # noqa: E501
+ 
+         children = task_info.get('children', [])
+         for i, child_id in enumerate(children):
+             new_prefix = prefix + ("    " if is_last else "|   ")
+             tree_str += self._get_task_tree_string(
+                 child_id, new_prefix, i == len(children) - 1
+             )
+         return tree_str
+ 
+     def get_ascii_tree_representation(self) -> str:
+         r"""Generates an ASCII tree representation of the current task
+         hierarchy and worker status.
+         """
+         output_str = "=== Task Hierarchy ===\n"
+         root_tasks = [
+             task_id
+             for task_id, info in self._task_hierarchy.items()
+             if info.get('parent') is None
+         ]
+         if not root_tasks:
+             output_str += "No tasks recorded.\n"
+         else:
+             for i, task_id in enumerate(root_tasks):
+                 output_str += self._get_task_tree_string(
+                     task_id, "", i == len(root_tasks) - 1
+                 )
+ 
+         output_str += "\n=== Worker Information ===\n"
+         if not self._worker_information:
+             output_str += "No workers recorded.\n"
+         else:
+             for worker_id, info in self._worker_information.items():
+                 role = info.get('role', 'N/A')
+                 completed = info.get('tasks_completed', 0)
+                 failed = info.get('tasks_failed', 0)
+                 output_str += (
+                     f"- Worker ID: {worker_id} (Role: {role})\n"
+                     f"  Tasks Completed: {completed}, Tasks "
+                     f"Failed: {failed}\n"
+                 )
+         return output_str
+ 
+     def get_kpis(self) -> Dict[str, Any]:
+         r"""Calculates and returns key performance indicators from the logs."""
+         kpis: Dict[str, Any] = {
+             'total_tasks_created': 0,
+             'total_tasks_completed': 0,
+             'total_tasks_failed': 0,
+             'error_types_count': {},
+             'worker_utilization': {},
+             'current_pending_tasks': 0,
+             'total_workforce_running_time_seconds': 0.0,
+             'avg_task_queue_time_seconds': 0.0,
+         }
+ 
+         task_start_times: Dict[str, float] = {}
+         task_creation_timestamps: Dict[str, datetime] = {}
+         task_assignment_timestamps: Dict[str, datetime] = {}
+         first_timestamp: Optional[datetime] = None
+         last_timestamp: Optional[datetime] = None
+ 
+         tasks_handled_by_worker: Dict[str, int] = {}
+ 
+         for entry in self.log_entries:
+             event_type = entry['event_type']
+             timestamp = datetime.fromisoformat(entry['timestamp'])
+             if first_timestamp is None or timestamp < first_timestamp:
+                 first_timestamp = timestamp
+             if last_timestamp is None or timestamp > last_timestamp:
+                 last_timestamp = timestamp
+ 
+             if event_type == 'task_created':
+                 kpis['total_tasks_created'] += 1
+                 task_creation_timestamps[entry['task_id']] = timestamp
+             elif event_type == 'task_assigned':
+                 task_assignment_timestamps[entry['task_id']] = timestamp
+                 # Queue time tracking has been removed
+ 
+             elif event_type == 'task_started':
+                 # Store start time for processing time calculation
+                 task_start_times[entry['task_id']] = timestamp.timestamp()
+ 
+             elif event_type == 'task_completed':
+                 kpis['total_tasks_completed'] += 1
+                 # Count tasks handled by worker
+                 if 'worker_id' in entry and entry['worker_id'] is not None:
+                     worker_id = entry['worker_id']
+                     tasks_handled_by_worker[worker_id] = (
+                         tasks_handled_by_worker.get(worker_id, 0) + 1
+                     )
+ 
+                 if entry['task_id'] in task_assignment_timestamps:
+                     completion_time = (
+                         timestamp
+                         - task_assignment_timestamps[entry['task_id']]
+                     ).total_seconds()
+                     # Store completion time in task hierarchy instead of KPIs
+                     # array
+                     if entry['task_id'] in self._task_hierarchy:
+                         self._task_hierarchy[entry['task_id']][
+                             'completion_time_seconds'
+                         ] = completion_time
+ 
+             elif event_type == 'task_failed':
+                 kpis['total_tasks_failed'] += 1
+                 # Count tasks handled by worker (also for failed tasks)
+                 if 'worker_id' in entry and entry['worker_id'] is not None:
+                     worker_id = entry['worker_id']
+                     tasks_handled_by_worker[worker_id] = (
+                         tasks_handled_by_worker.get(worker_id, 0) + 1
+                     )
+                 error_type = entry['error_type']
+                 kpis['error_types_count'][error_type] = (
+                     kpis['error_types_count'].get(error_type, 0) + 1
+                 )
+ 
+             elif event_type == 'queue_status':
+                 pass  # Placeholder for now
+ 
+         # Calculate total workforce running time
+         if first_timestamp and last_timestamp and self.log_entries:
+             kpis['total_workforce_running_time_seconds'] = (
+                 last_timestamp - first_timestamp
+             ).total_seconds()
+         # Calculate worker utilization based on proportion of tasks handled
+         total_tasks_processed_for_utilization = (
+             kpis['total_tasks_completed'] + kpis['total_tasks_failed']
+         )
+         if total_tasks_processed_for_utilization > 0:
+             for (
+                 worker_id_key,
+                 num_tasks_handled,
+             ) in tasks_handled_by_worker.items():
+                 percentage = (
+                     num_tasks_handled / total_tasks_processed_for_utilization
+                 ) * 100
+                 kpis['worker_utilization'][worker_id_key] = (
+                     f"{percentage:.2f}%"
+                 )
+         else:
+             for worker_id_key in (
+                 tasks_handled_by_worker
+             ):  # Ensure all workers who handled tasks are listed, even if 0%
+                 kpis['worker_utilization'][worker_id_key] = "0.00%"
+             # If no tasks were processed, but workers exist (e.g. from
+             # _initial_worker_logs), list them with 0%
+             for worker_id_key in self._worker_information:
+                 if worker_id_key not in kpis['worker_utilization']:
+                     kpis['worker_utilization'][worker_id_key] = "0.00%"
+ 
+         # Task throughput (completed tasks per minute, for example)
+         if self.log_entries:
+             first_log_time = datetime.fromisoformat(
+                 self.log_entries[0]['timestamp']
+             )
+             last_log_time = datetime.fromisoformat(
+                 self.log_entries[-1]['timestamp']
+             )
+             duration_seconds = (last_log_time - first_log_time).total_seconds()
+             if duration_seconds > 0:
+                 kpis['task_throughput_per_second'] = (
+                     kpis['total_tasks_completed'] / duration_seconds
+                 )
+                 kpis['task_throughput_per_minute'] = (
+                     kpis['task_throughput_per_second'] * 60
+                 )
+ 
+         kpis['total_workers_created'] = len(self._worker_information)
+ 
+         # Current pending tasks (simplified)
+         kpis['current_pending_tasks'] = kpis['total_tasks_created'] - (
+             kpis['total_tasks_completed'] + kpis['total_tasks_failed']
+         )
+ 
+         return kpis
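
The new WorkforceLogger is driven entirely through the log_* methods above and keeps its own task hierarchy and worker table, so it can be exercised in isolation. A minimal sketch, using only the methods shown in this file and hypothetical IDs; in normal use the Workforce class presumably drives the logger itself, and this only illustrates the event lifecycle that the tree and KPI methods consume.

from camel.societies.workforce.workforce_logger import WorkforceLogger

wf_logger = WorkforceLogger(workforce_id="wf-demo")  # hypothetical ID
wf_logger.log_worker_created(
    worker_id="worker-1", worker_type="SingleAgentWorker", role="researcher"
)
wf_logger.log_task_created(task_id="0", description="Write a report")
wf_logger.log_task_created(
    task_id="0.1", description="Collect sources", parent_task_id="0"
)
wf_logger.log_task_assigned(task_id="0.1", worker_id="worker-1")
wf_logger.log_task_started(task_id="0.1", worker_id="worker-1")
wf_logger.log_task_completed(
    task_id="0.1",
    worker_id="worker-1",
    processing_time_seconds=2.5,
    token_usage={"total_tokens": 1234},
)

print(wf_logger.get_ascii_tree_representation())
print(wf_logger.get_kpis())
wf_logger.dump_to_json("workforce_log.json")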
camel/tasks/task.py CHANGED
@@ -14,11 +14,21 @@
  
  import re
  from enum import Enum
- from typing import Any, Callable, Dict, List, Literal, Optional, Union
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Dict,
+     List,
+     Literal,
+     Optional,
+     Union,
+ )
  
  from pydantic import BaseModel
  
- from camel.agents import ChatAgent
+ if TYPE_CHECKING:
+     from camel.agents import ChatAgent
  from camel.logger import get_logger
  from camel.messages import BaseMessage
  from camel.prompts import TextPrompt
@@ -288,7 +298,7 @@ class Task(BaseModel):
  
      def decompose(
          self,
-         agent: ChatAgent,
+         agent: "ChatAgent",
          prompt: Optional[str] = None,
          task_parser: Callable[[str, str], List["Task"]] = parse_response,
      ) -> List["Task"]:
@@ -323,7 +333,7 @@ class Task(BaseModel):
  
      def compose(
          self,
-         agent: ChatAgent,
+         agent: "ChatAgent",
          template: TextPrompt = TASK_COMPOSE_PROMPT,
          result_parser: Optional[Callable[[str], str]] = None,
      ):
@@ -472,12 +482,13 @@ class TaskManager:
      def evolve(
          self,
          task: Task,
-         agent: ChatAgent,
+         agent: "ChatAgent",
          template: Optional[TextPrompt] = None,
          task_parser: Optional[Callable[[str, str], List[Task]]] = None,
      ) -> Optional[Task]:
          r"""Evolve a task to a new task.
          Evolve is only used for data generation.
+ 
          Args:
              task (Task): A given task.
              agent (ChatAgent): An agent that used to evolve the task.
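
The task.py change moves the ChatAgent import behind typing.TYPE_CHECKING and turns the ChatAgent annotations into strings, so the type information stays available to type checkers while the runtime import (and any circular-import risk between camel.tasks and camel.agents) is deferred. A minimal illustration of the pattern, not taken from the CAMEL source:

from typing import TYPE_CHECKING, List

if TYPE_CHECKING:
    # Only evaluated by static type checkers, never at runtime.
    from camel.agents import ChatAgent


def decompose(agent: "ChatAgent") -> List[str]:
    # The quoted annotation is resolved lazily, so importing this module
    # does not import camel.agents.
    return []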
camel/toolkits/__init__.py CHANGED
@@ -76,6 +76,7 @@ from .klavis_toolkit import KlavisToolkit
  from .aci_toolkit import ACIToolkit
  from .playwright_mcp_toolkit import PlaywrightMCPToolkit
  from .wolfram_alpha_toolkit import WolframAlphaToolkit
+ from .task_planning_toolkit import TaskPlanningToolkit
  
  
  __all__ = [
@@ -140,4 +141,5 @@ __all__ = [
      'PlaywrightMCPToolkit',
      'WolframAlphaToolkit',
      'BohriumToolkit',
+     'TaskPlanningToolkit',
  ]
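
With this __init__.py change, the new toolkit is re-exported from the package root and can be imported directly; its constructor and tool surface are not shown in this diff, so only the import is sketched here.

from camel.toolkits import TaskPlanningToolkit  # available as of 0.2.67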
camel/toolkits/code_execution.py CHANGED
@@ -122,7 +122,7 @@ class CodeExecutionToolkit(BaseToolkit):
  
      def execute_command(self, command: str) -> Union[str, tuple[str, str]]:
          r"""Execute a command can be used to resolve the dependency of the
-         code.
+         code. Useful if there's dependency issues when you try to execute code.
  
          Args:
              command (str): The command to execute.
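
For context, execute_command is the method whose docstring was extended above; only its signature is visible in this hunk. A minimal sketch, assuming the toolkit's default constructor is sufficient:

from camel.toolkits import CodeExecutionToolkit

toolkit = CodeExecutionToolkit()  # default construction assumed
# Resolve a missing dependency before running generated code.
result = toolkit.execute_command("pip install numpy")
print(result)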