camel-ai 0.2.78__py3-none-any.whl → 0.2.79a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai has been flagged as possibly problematic by the registry. See the registry's advisory for details.

Files changed (39)
  1. camel/__init__.py +1 -1
  2. camel/agents/_utils.py +38 -0
  3. camel/agents/chat_agent.py +1112 -287
  4. camel/datasets/base_generator.py +39 -10
  5. camel/environments/single_step.py +28 -3
  6. camel/memories/__init__.py +1 -2
  7. camel/memories/agent_memories.py +34 -0
  8. camel/memories/base.py +26 -0
  9. camel/memories/blocks/chat_history_block.py +117 -17
  10. camel/memories/context_creators/score_based.py +25 -384
  11. camel/messages/base.py +26 -0
  12. camel/models/aws_bedrock_model.py +1 -17
  13. camel/models/azure_openai_model.py +113 -67
  14. camel/models/model_factory.py +17 -1
  15. camel/models/moonshot_model.py +102 -5
  16. camel/models/openai_compatible_model.py +62 -32
  17. camel/models/openai_model.py +61 -35
  18. camel/models/samba_model.py +34 -15
  19. camel/models/sglang_model.py +41 -11
  20. camel/societies/workforce/__init__.py +2 -0
  21. camel/societies/workforce/events.py +122 -0
  22. camel/societies/workforce/role_playing_worker.py +15 -11
  23. camel/societies/workforce/single_agent_worker.py +143 -291
  24. camel/societies/workforce/utils.py +2 -1
  25. camel/societies/workforce/workflow_memory_manager.py +772 -0
  26. camel/societies/workforce/workforce.py +513 -188
  27. camel/societies/workforce/workforce_callback.py +74 -0
  28. camel/societies/workforce/workforce_logger.py +144 -140
  29. camel/societies/workforce/workforce_metrics.py +33 -0
  30. camel/storages/vectordb_storages/oceanbase.py +5 -4
  31. camel/toolkits/file_toolkit.py +166 -0
  32. camel/toolkits/message_integration.py +15 -13
  33. camel/toolkits/terminal_toolkit/terminal_toolkit.py +112 -79
  34. camel/types/enums.py +1 -0
  35. camel/utils/context_utils.py +201 -2
  36. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/METADATA +14 -13
  37. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/RECORD +39 -35
  38. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/WHEEL +0 -0
  39. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/licenses/LICENSE +0 -0
camel/memories/context_creators/score_based.py CHANGED
@@ -11,41 +11,24 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
- from collections import defaultdict
- from typing import Dict, List, Optional, Tuple

- from pydantic import BaseModel
+ from typing import List, Optional, Tuple

- from camel.logger import get_logger
  from camel.memories.base import BaseContextCreator
  from camel.memories.records import ContextRecord
- from camel.messages import FunctionCallingMessage, OpenAIMessage
+ from camel.messages import OpenAIMessage
  from camel.types.enums import OpenAIBackendRole
  from camel.utils import BaseTokenCounter

- logger = get_logger(__name__)
-
-
- class _ContextUnit(BaseModel):
-     idx: int
-     record: ContextRecord
-     num_tokens: int
-

  class ScoreBasedContextCreator(BaseContextCreator):
-     r"""A default implementation of context creation strategy, which inherits
-     from :obj:`BaseContextCreator`.
-
-     This class provides a strategy to generate a conversational context from
-     a list of chat history records while ensuring the total token count of
-     the context does not exceed a specified limit. It prunes messages based
-     on their score if the total token count exceeds the limit.
+     r"""A context creation strategy that orders records chronologically.

      Args:
-         token_counter (BaseTokenCounter): An instance responsible for counting
-             tokens in a message.
-         token_limit (int): The maximum number of tokens allowed in the
-             generated context.
+         token_counter (BaseTokenCounter): Token counter instance used to
+             compute the combined token count of the returned messages.
+         token_limit (int): Retained for API compatibility. No longer used to
+             filter records.
      """

      def __init__(
@@ -66,376 +49,34 @@ class ScoreBasedContextCreator(BaseContextCreator):
          self,
          records: List[ContextRecord],
      ) -> Tuple[List[OpenAIMessage], int]:
-         r"""Constructs conversation context from chat history while respecting
-         token limits.
-
-         Key strategies:
-         1. System message is always prioritized and preserved
-         2. Truncation removes low-score messages first
-         3. Final output maintains chronological order and in history memory,
-            the score of each message decreases according to keep_rate. The
-            newer the message, the higher the score.
-         4. Tool calls and their responses are kept together to maintain
-            API compatibility
-
-         Args:
-             records (List[ContextRecord]): List of context records with scores
-                 and timestamps.
-
-         Returns:
-             Tuple[List[OpenAIMessage], int]:
-                 - Ordered list of OpenAI messages
-                 - Total token count of the final context
-
-         Raises:
-             RuntimeError: If system message alone exceeds token limit
-         """
-         # ======================
-         # 1. System Message Handling
-         # ======================
-         system_unit, regular_units = self._extract_system_message(records)
-         system_tokens = system_unit.num_tokens if system_unit else 0
+         """Returns messages sorted by timestamp and their total token count."""

-         # Check early if system message alone exceeds token limit
-         if system_tokens > self.token_limit:
-             raise RuntimeError(
-                 f"System message alone exceeds token limit"
-                 f": {system_tokens} > {self.token_limit}",
-                 system_tokens,
-             )
+         system_record: Optional[ContextRecord] = None
+         remaining_records: List[ContextRecord] = []

-         # ======================
-         # 2. Deduplication & Initial Processing
-         # ======================
-         seen_uuids = set()
-         if system_unit:
-             seen_uuids.add(system_unit.record.memory_record.uuid)
-
-         # Process non-system messages with deduplication
-         for idx, record in enumerate(records):
+         for record in records:
              if (
-                 record.memory_record.role_at_backend
+                 system_record is None
+                 and record.memory_record.role_at_backend
                  == OpenAIBackendRole.SYSTEM
              ):
+                 system_record = record
                  continue
-             if record.memory_record.uuid in seen_uuids:
-                 continue
-             seen_uuids.add(record.memory_record.uuid)
-
-             token_count = self.token_counter.count_tokens_from_messages(
-                 [record.memory_record.to_openai_message()]
-             )
-             regular_units.append(
-                 _ContextUnit(
-                     idx=idx,
-                     record=record,
-                     num_tokens=token_count,
-                 )
-             )
-
-         # ======================
-         # 3. Tool Call Relationship Mapping
-         # ======================
-         tool_call_groups = self._group_tool_calls_and_responses(regular_units)
-
-         # ======================
-         # 4. Token Calculation
-         # ======================
-         total_tokens = system_tokens + sum(u.num_tokens for u in regular_units)
-
-         # ======================
-         # 5. Early Return if Within Limit
-         # ======================
-         if total_tokens <= self.token_limit:
-             sorted_units = sorted(
-                 regular_units, key=self._conversation_sort_key
-             )
-             return self._assemble_output(sorted_units, system_unit)
-
-         # ======================
-         # 6. Truncation Logic with Tool Call Awareness
-         # ======================
-         remaining_units = self._truncate_with_tool_call_awareness(
-             regular_units, tool_call_groups, system_tokens
-         )
-
-         # Log only after truncation is actually performed so that both
-         # the original and the final token counts are visible.
-         tokens_after = system_tokens + sum(
-             u.num_tokens for u in remaining_units
-         )
-         logger.warning(
-             "Context truncation performed: "
-             f"before={total_tokens}, after={tokens_after}, "
-             f"limit={self.token_limit}"
-         )
-
-         # ======================
-         # 7. Output Assembly
-         # ======================
-
-         # In case system message is the only message in memory when sorted
-         # units are empty, raise an error
-         if system_unit and len(remaining_units) == 0 and len(records) > 1:
-             raise RuntimeError(
-                 "System message and current message exceeds token limit ",
-                 total_tokens,
-             )
-
-         # Sort remaining units chronologically
-         final_units = sorted(remaining_units, key=self._conversation_sort_key)
-         return self._assemble_output(final_units, system_unit)
-
-     def _group_tool_calls_and_responses(
-         self, units: List[_ContextUnit]
-     ) -> Dict[str, List[_ContextUnit]]:
-         r"""Groups tool calls with their corresponding responses based on
-         `tool_call_id`.
-
-         This improved logic robustly gathers all messages (assistant requests
-         and tool responses, including chunks) that share a `tool_call_id`.
-
-         Args:
-             units (List[_ContextUnit]): List of context units to analyze.
-
-         Returns:
-             Dict[str, List[_ContextUnit]]: Mapping from `tool_call_id` to a
-                 list of related units.
-         """
-         tool_call_groups: Dict[str, List[_ContextUnit]] = defaultdict(list)
-
-         for unit in units:
-             # FunctionCallingMessage stores tool_call_id.
-             message = unit.record.memory_record.message
-             tool_call_id = getattr(message, 'tool_call_id', None)
-
-             if tool_call_id:
-                 tool_call_groups[tool_call_id].append(unit)
-
-         # Filter out empty or incomplete groups if necessary,
-         # though defaultdict and getattr handle this gracefully.
-         return dict(tool_call_groups)
-
-     def _truncate_with_tool_call_awareness(
-         self,
-         regular_units: List[_ContextUnit],
-         tool_call_groups: Dict[str, List[_ContextUnit]],
-         system_tokens: int,
-     ) -> List[_ContextUnit]:
-         r"""Truncates messages while preserving tool call-response pairs.
-         This method implements a more sophisticated truncation strategy:
-         1. It treats tool call groups (request + responses) and standalone
-            messages as individual items to be included.
-         2. It sorts all items by score and greedily adds them to the context.
-         3. **Partial Truncation**: If a complete tool group is too large to
-            fit, it attempts to add the request message and as many of the most
-            recent response chunks as the token budget allows.
-
-         Args:
-             regular_units (List[_ContextUnit]): All regular message units.
-             tool_call_groups (Dict[str, List[_ContextUnit]]): Grouped tool
-                 calls.
-             system_tokens (int): Tokens used by the system message.
-
-         Returns:
-             List[_ContextUnit]: A list of units that fit within the token
-                 limit.
-         """
-
-         # Create a set for quick lookup of units belonging to any tool call
-         tool_call_unit_ids = {
-             unit.record.memory_record.uuid
-             for group in tool_call_groups.values()
-             for unit in group
-         }
-
-         # Separate standalone units from tool call groups
-         standalone_units = [
-             u
-             for u in regular_units
-             if u.record.memory_record.uuid not in tool_call_unit_ids
-         ]
-
-         # Prepare all items (standalone units and groups) for sorting
-         all_potential_items: List[Dict] = []
-         for unit in standalone_units:
-             all_potential_items.append(
-                 {
-                     "type": "standalone",
-                     "score": unit.record.score,
-                     "timestamp": unit.record.timestamp,
-                     "tokens": unit.num_tokens,
-                     "item": unit,
-                 }
-             )
-         for group in tool_call_groups.values():
-             all_potential_items.append(
-                 {
-                     "type": "group",
-                     "score": max(u.record.score for u in group),
-                     "timestamp": max(u.record.timestamp for u in group),
-                     "tokens": sum(u.num_tokens for u in group),
-                     "item": group,
-                 }
-             )
-
-         # Sort all potential items by score (high to low), then timestamp
-         all_potential_items.sort(key=lambda x: (-x["score"], -x["timestamp"]))
-
-         remaining_units: List[_ContextUnit] = []
-         current_tokens = system_tokens
-
-         for item_dict in all_potential_items:
-             item_type = item_dict["type"]
-             item = item_dict["item"]
-             item_tokens = item_dict["tokens"]
-
-             if current_tokens + item_tokens <= self.token_limit:
-                 # The whole item (standalone or group) fits, so add it
-                 if item_type == "standalone":
-                     remaining_units.append(item)
-                 else:  # item_type == "group"
-                     remaining_units.extend(item)
-                 current_tokens += item_tokens
-
-             elif item_type == "group":
-                 # The group does not fit completely; try partial inclusion.
-                 request_unit: Optional[_ContextUnit] = None
-                 response_units: List[_ContextUnit] = []
-
-                 for unit in item:
-                     # Assistant msg with `args` is the request
-                     if (
-                         isinstance(
-                             unit.record.memory_record.message,
-                             FunctionCallingMessage,
-                         )
-                         and unit.record.memory_record.message.args is not None
-                     ):
-                         request_unit = unit
-                     else:
-                         response_units.append(unit)
-
-                 # A group must have a request to be considered for inclusion.
-                 if request_unit is None:
-                     continue
-
-                 # Check if we can at least fit the request.
-                 if (
-                     current_tokens + request_unit.num_tokens
-                     <= self.token_limit
-                 ):
-                     units_to_add = [request_unit]
-                     tokens_to_add = request_unit.num_tokens
-
-                     # Sort responses by timestamp to add newest chunks first
-                     response_units.sort(
-                         key=lambda u: u.record.timestamp, reverse=True
-                     )
+             remaining_records.append(record)

-                     for resp_unit in response_units:
-                         if (
-                             current_tokens
-                             + tokens_to_add
-                             + resp_unit.num_tokens
-                             <= self.token_limit
-                         ):
-                             units_to_add.append(resp_unit)
-                             tokens_to_add += resp_unit.num_tokens
+         remaining_records.sort(key=lambda record: record.timestamp)

-                     # A request must be followed by at least one response
-                     if len(units_to_add) > 1:
-                         remaining_units.extend(units_to_add)
-                         current_tokens += tokens_to_add
+         messages: List[OpenAIMessage] = []
+         if system_record is not None:
+             messages.append(system_record.memory_record.to_openai_message())

-         return remaining_units
-
-     def _extract_system_message(
-         self, records: List[ContextRecord]
-     ) -> Tuple[Optional[_ContextUnit], List[_ContextUnit]]:
-         r"""Extracts the system message from records and validates it.
-
-         Args:
-             records (List[ContextRecord]): List of context records
-                 representing conversation history.
-
-         Returns:
-             Tuple[Optional[_ContextUnit], List[_ContextUnit]]: containing:
-                 - The system message as a `_ContextUnit`, if valid; otherwise,
-                   `None`.
-                 - An empty list, serving as the initial container for regular
-                   messages.
-         """
-         if not records:
-             return None, []
-
-         first_record = records[0]
-         if (
-             first_record.memory_record.role_at_backend
-             != OpenAIBackendRole.SYSTEM
-         ):
-             return None, []
-
-         message = first_record.memory_record.to_openai_message()
-         tokens = self.token_counter.count_tokens_from_messages([message])
-         system_message_unit = _ContextUnit(
-             idx=0,
-             record=first_record,
-             num_tokens=tokens,
+         messages.extend(
+             record.memory_record.to_openai_message()
+             for record in remaining_records
          )
-         return system_message_unit, []
-
-     def _conversation_sort_key(
-         self, unit: _ContextUnit
-     ) -> Tuple[float, float]:
-         r"""Defines the sorting key for assembling the final output.
-
-         Sorting priority:
-         - Primary: Sort by timestamp in ascending order (chronological order).
-         - Secondary: Sort by score in descending order (higher scores first
-           when timestamps are equal).
-
-         Args:
-             unit (_ContextUnit): A `_ContextUnit` representing a conversation
-                 record.
-
-         Returns:
-             Tuple[float, float]:
-                 - Timestamp for chronological sorting.
-                 - Negative score for descending order sorting.
-         """
-         return (unit.record.timestamp, -unit.record.score)
-
-     def _assemble_output(
-         self,
-         context_units: List[_ContextUnit],
-         system_unit: Optional[_ContextUnit],
-     ) -> Tuple[List[OpenAIMessage], int]:
-         r"""Assembles final message list with proper ordering and token count.
-
-         Args:
-             context_units (List[_ContextUnit]): Sorted list of regular message
-                 units.
-             system_unit (Optional[_ContextUnit]): System message unit (if
-                 present).
-
-         Returns:
-             Tuple[List[OpenAIMessage], int]: Tuple of (ordered messages, total
-                 tokens)
-         """
-         messages = []
-         total_tokens = 0
-
-         # Add system message first if present
-         if system_unit:
-             messages.append(
-                 system_unit.record.memory_record.to_openai_message()
-             )
-             total_tokens += system_unit.num_tokens

-         # Add sorted regular messages
-         for unit in context_units:
-             messages.append(unit.record.memory_record.to_openai_message())
-             total_tokens += unit.num_tokens
+         if not messages:
+             return [], 0

+         total_tokens = self.token_counter.count_tokens_from_messages(messages)
          return messages, total_tokens
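
Net effect of this rewrite: the score-based machinery (_ContextUnit, tool-call grouping, partial truncation, the RuntimeError paths) is removed entirely. create_context now keeps every record, pulls out the first system message, sorts the rest by timestamp, and counts tokens once over the final list, so enforcement of any token limit moves to the caller. The sketch below is a minimal, hypothetical usage of the new behavior; OpenAITokenCounter and the record constructors are assumed from CAMEL's public API and may require extra fields in practice.

from camel.memories.context_creators.score_based import (
    ScoreBasedContextCreator,
)
from camel.memories.records import ContextRecord, MemoryRecord
from camel.messages import BaseMessage
from camel.types import ModelType, OpenAIBackendRole
from camel.utils import OpenAITokenCounter

# token_limit is still accepted, but after this change it no longer
# drops any records; it is retained only for API compatibility.
creator = ScoreBasedContextCreator(
    token_counter=OpenAITokenCounter(ModelType.GPT_4O_MINI),
    token_limit=1024,
)

# Hypothetical records; MemoryRecord may require additional fields
# (e.g. uuid or agent_id) depending on the CAMEL version.
records = [
    ContextRecord(
        memory_record=MemoryRecord(
            message=BaseMessage.make_user_message("User", text),
            role_at_backend=OpenAIBackendRole.USER,
        ),
        score=0.1,  # scores no longer influence inclusion
        timestamp=float(ts),
    )
    for ts, text in enumerate(["first message", "second message"])
]

# Every record comes back, in timestamp order, with one combined
# token count computed over the final message list.
messages, total_tokens = creator.create_context(records)

Contexts that previously raised RuntimeError or were silently truncated at token_limit are now returned in full.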
camel/messages/base.py CHANGED
@@ -178,6 +178,32 @@ class BaseMessage:
              OpenAIVisionDetailType(video_detail).value,
          )

+     @classmethod
+     def make_system_message(
+         cls,
+         content: str,
+         role_name: str = "System",
+         meta_dict: Optional[Dict[str, str]] = None,
+     ) -> "BaseMessage":
+         r"""Create a new system message.
+
+         Args:
+             content (str): The content of the system message.
+             role_name (str): The name of the system role.
+                 (default: :obj:`"System"`)
+             meta_dict (Optional[Dict[str, str]]): Additional metadata
+                 dictionary for the message.
+
+         Returns:
+             BaseMessage: The new system message.
+         """
+         return cls(
+             role_name,
+             RoleType.SYSTEM,
+             meta_dict,
+             content,
+         )
+
      def create_new_instance(self, content: str) -> "BaseMessage":
          r"""Create a new instance of the :obj:`BaseMessage` with updated
          content.
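
The new classmethod mirrors CAMEL's existing make_user_message-style factory helpers, wrapping the BaseMessage constructor with RoleType.SYSTEM. A minimal usage sketch of the added helper:

from camel.messages import BaseMessage

# Build a system message without touching RoleType directly;
# role_name defaults to "System" per the new helper.
sys_msg = BaseMessage.make_system_message(
    content="You are a concise assistant.",
)

assert sys_msg.role_name == "System"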
camel/models/aws_bedrock_model.py CHANGED
@@ -13,17 +13,11 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Type, Union
-
- from openai import AsyncStream
- from pydantic import BaseModel
+ from typing import Any, Dict, Optional, Union

  from camel.configs import BedrockConfig
- from camel.messages import OpenAIMessage
  from camel.models.openai_compatible_model import OpenAICompatibleModel
  from camel.types import (
-     ChatCompletion,
-     ChatCompletionChunk,
      ModelType,
  )
  from camel.utils import BaseTokenCounter, api_keys_required
@@ -93,13 +87,3 @@ class AWSBedrockModel(OpenAICompatibleModel):
              max_retries=max_retries,
              **kwargs,
          )
-
-     async def _arun(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-         raise NotImplementedError(
-             "AWS Bedrock does not support async inference."
-         )
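
Dropping the _arun override means AWSBedrockModel no longer raises NotImplementedError for async inference and instead falls through to the implementation inherited from OpenAICompatibleModel. A sketch of what that would enable, assuming the usual ModelFactory entry point and the public arun wrapper around _arun; the Bedrock model ID shown is a placeholder:

import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType


async def main() -> None:
    # Previously this call path raised NotImplementedError; with the
    # override removed, it should use OpenAICompatibleModel's async path.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.AWS_BEDROCK,
        model_type="example.placeholder-model-id",  # placeholder Bedrock ID
    )
    response = await model.arun(
        [{"role": "user", "content": "Hello from Bedrock"}]
    )
    print(response.choices[0].message.content)


asyncio.run(main())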