camel-ai 0.2.78__py3-none-any.whl → 0.2.79a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of camel-ai might be problematic; see the details below.

Files changed (39):
  1. camel/__init__.py +1 -1
  2. camel/agents/_utils.py +38 -0
  3. camel/agents/chat_agent.py +1112 -287
  4. camel/datasets/base_generator.py +39 -10
  5. camel/environments/single_step.py +28 -3
  6. camel/memories/__init__.py +1 -2
  7. camel/memories/agent_memories.py +34 -0
  8. camel/memories/base.py +26 -0
  9. camel/memories/blocks/chat_history_block.py +117 -17
  10. camel/memories/context_creators/score_based.py +25 -384
  11. camel/messages/base.py +26 -0
  12. camel/models/aws_bedrock_model.py +1 -17
  13. camel/models/azure_openai_model.py +113 -67
  14. camel/models/model_factory.py +17 -1
  15. camel/models/moonshot_model.py +102 -5
  16. camel/models/openai_compatible_model.py +62 -32
  17. camel/models/openai_model.py +61 -35
  18. camel/models/samba_model.py +34 -15
  19. camel/models/sglang_model.py +41 -11
  20. camel/societies/workforce/__init__.py +2 -0
  21. camel/societies/workforce/events.py +122 -0
  22. camel/societies/workforce/role_playing_worker.py +15 -11
  23. camel/societies/workforce/single_agent_worker.py +143 -291
  24. camel/societies/workforce/utils.py +2 -1
  25. camel/societies/workforce/workflow_memory_manager.py +772 -0
  26. camel/societies/workforce/workforce.py +513 -188
  27. camel/societies/workforce/workforce_callback.py +74 -0
  28. camel/societies/workforce/workforce_logger.py +144 -140
  29. camel/societies/workforce/workforce_metrics.py +33 -0
  30. camel/storages/vectordb_storages/oceanbase.py +5 -4
  31. camel/toolkits/file_toolkit.py +166 -0
  32. camel/toolkits/message_integration.py +15 -13
  33. camel/toolkits/terminal_toolkit/terminal_toolkit.py +112 -79
  34. camel/types/enums.py +1 -0
  35. camel/utils/context_utils.py +201 -2
  36. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/METADATA +14 -13
  37. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/RECORD +39 -35
  38. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/WHEEL +0 -0
  39. {camel_ai-0.2.78.dist-info → camel_ai-0.2.79a1.dist-info}/licenses/LICENSE +0 -0
@@ -90,9 +90,21 @@ class OpenAIModel(BaseModelBackend):
90
90
  (default: :obj:`None`)
91
91
  max_retries (int, optional): Maximum number of retries for API calls.
92
92
  (default: :obj:`3`)
93
+ client (Optional[Any], optional): A custom synchronous OpenAI client
94
+ instance. If provided, this client will be used instead of
95
+ creating a new one. Useful for RL frameworks like AReaL or rLLM
96
+ that provide OpenAI-compatible clients. The client should
97
+ implement the OpenAI client interface with
98
+ `.chat.completions.create()` and `.beta.chat.completions.parse()`
99
+ methods. (default: :obj:`None`)
100
+ async_client (Optional[Any], optional): A custom asynchronous OpenAI
101
+ client instance. If provided, this client will be used instead of
102
+ creating a new one. The client should implement the AsyncOpenAI
103
+ client interface. (default: :obj:`None`)
93
104
  **kwargs (Any): Additional arguments to pass to the
94
105
  OpenAI client initialization. These can include parameters like
95
106
  'organization', 'default_headers', 'http_client', etc.
107
+ Ignored if custom clients are provided.
96
108
  """
97
109
 
98
110
  @api_keys_required(
@@ -109,6 +121,8 @@ class OpenAIModel(BaseModelBackend):
109
121
  token_counter: Optional[BaseTokenCounter] = None,
110
122
  timeout: Optional[float] = None,
111
123
  max_retries: int = 3,
124
+ client: Optional[Any] = None,
125
+ async_client: Optional[Any] = None,
112
126
  **kwargs: Any,
113
127
  ) -> None:
114
128
  if model_config_dict is None:
@@ -124,42 +138,54 @@ class OpenAIModel(BaseModelBackend):
124
138
  model_type, model_config_dict, api_key, url, token_counter, timeout
125
139
  )
126
140
 
127
- if is_langfuse_available():
128
- from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
129
- from langfuse.openai import OpenAI as LangfuseOpenAI
130
-
131
- # Create Langfuse client with base parameters and additional
132
- # arguments
133
- self._client = LangfuseOpenAI(
134
- timeout=self._timeout,
135
- max_retries=self._max_retries,
136
- base_url=self._url,
137
- api_key=self._api_key,
138
- **kwargs,
139
- )
140
- self._async_client = LangfuseAsyncOpenAI(
141
- timeout=self._timeout,
142
- max_retries=self._max_retries,
143
- base_url=self._url,
144
- api_key=self._api_key,
145
- **kwargs,
146
- )
141
+ # Use custom clients if provided, otherwise create new ones
142
+ if client is not None:
143
+ # Use the provided custom sync client
144
+ self._client = client
147
145
  else:
148
- # Create client with base parameters and additional arguments
149
- self._client = OpenAI(
150
- timeout=self._timeout,
151
- max_retries=self._max_retries,
152
- base_url=self._url,
153
- api_key=self._api_key,
154
- **kwargs,
155
- )
156
- self._async_client = AsyncOpenAI(
157
- timeout=self._timeout,
158
- max_retries=self._max_retries,
159
- base_url=self._url,
160
- api_key=self._api_key,
161
- **kwargs,
162
- )
146
+ # Create default sync client
147
+ if is_langfuse_available():
148
+ from langfuse.openai import OpenAI as LangfuseOpenAI
149
+
150
+ self._client = LangfuseOpenAI(
151
+ timeout=self._timeout,
152
+ max_retries=self._max_retries,
153
+ base_url=self._url,
154
+ api_key=self._api_key,
155
+ **kwargs,
156
+ )
157
+ else:
158
+ self._client = OpenAI(
159
+ timeout=self._timeout,
160
+ max_retries=self._max_retries,
161
+ base_url=self._url,
162
+ api_key=self._api_key,
163
+ **kwargs,
164
+ )
165
+
166
+ if async_client is not None:
167
+ # Use the provided custom async client
168
+ self._async_client = async_client
169
+ else:
170
+ # Create default async client
171
+ if is_langfuse_available():
172
+ from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
173
+
174
+ self._async_client = LangfuseAsyncOpenAI(
175
+ timeout=self._timeout,
176
+ max_retries=self._max_retries,
177
+ base_url=self._url,
178
+ api_key=self._api_key,
179
+ **kwargs,
180
+ )
181
+ else:
182
+ self._async_client = AsyncOpenAI(
183
+ timeout=self._timeout,
184
+ max_retries=self._max_retries,
185
+ base_url=self._url,
186
+ api_key=self._api_key,
187
+ **kwargs,
188
+ )
163
189
 
164
190
  def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
165
191
  r"""Sanitize the model configuration for O1 models."""
@@ -88,8 +88,16 @@ class SambaModel(BaseModelBackend):
88
88
  (default: :obj:`None`)
89
89
  max_retries (int, optional): Maximum number of retries for API calls.
90
90
  (default: :obj:`3`)
91
+ client (Optional[Any], optional): A custom synchronous
92
+ OpenAI-compatible client instance. If provided, this client will
93
+ be used instead of creating a new one. Only applicable when using
94
+ SambaNova Cloud API. (default: :obj:`None`)
95
+ async_client (Optional[Any], optional): A custom asynchronous
96
+ OpenAI-compatible client instance. If provided, this client will
97
+ be used instead of creating a new one. Only applicable when using
98
+ SambaNova Cloud API. (default: :obj:`None`)
91
99
  **kwargs (Any): Additional arguments to pass to the client
92
- initialization.
100
+ initialization. Ignored if custom clients are provided.
93
101
  """
94
102
 
95
103
  @api_keys_required(
@@ -106,6 +114,8 @@ class SambaModel(BaseModelBackend):
106
114
  token_counter: Optional[BaseTokenCounter] = None,
107
115
  timeout: Optional[float] = None,
108
116
  max_retries: int = 3,
117
+ client: Optional[Any] = None,
118
+ async_client: Optional[Any] = None,
109
119
  **kwargs: Any,
110
120
  ) -> None:
111
121
  if model_config_dict is None:
@@ -126,21 +136,30 @@ class SambaModel(BaseModelBackend):
126
136
  max_retries,
127
137
  )
128
138
 
139
+ # Only create clients for Cloud API mode
129
140
  if self._url == "https://api.sambanova.ai/v1":
130
- self._client = OpenAI(
131
- timeout=self._timeout,
132
- max_retries=self._max_retries,
133
- base_url=self._url,
134
- api_key=self._api_key,
135
- **kwargs,
136
- )
137
- self._async_client = AsyncOpenAI(
138
- timeout=self._timeout,
139
- max_retries=self._max_retries,
140
- base_url=self._url,
141
- api_key=self._api_key,
142
- **kwargs,
143
- )
141
+ # Use custom clients if provided, otherwise create new ones
142
+ if client is not None:
143
+ self._client = client
144
+ else:
145
+ self._client = OpenAI(
146
+ timeout=self._timeout,
147
+ max_retries=self._max_retries,
148
+ base_url=self._url,
149
+ api_key=self._api_key,
150
+ **kwargs,
151
+ )
152
+
153
+ if async_client is not None:
154
+ self._async_client = async_client
155
+ else:
156
+ self._async_client = AsyncOpenAI(
157
+ timeout=self._timeout,
158
+ max_retries=self._max_retries,
159
+ base_url=self._url,
160
+ api_key=self._api_key,
161
+ **kwargs,
162
+ )
144
163
 
145
164
  @property
146
165
  def token_counter(self) -> BaseTokenCounter:
@@ -72,8 +72,16 @@ class SGLangModel(BaseModelBackend):
72
72
  (default: :obj:`None`)
73
73
  max_retries (int, optional): Maximum number of retries for API calls.
74
74
  (default: :obj:`3`)
75
+ client (Optional[Any], optional): A custom synchronous
76
+ OpenAI-compatible client instance. If provided, this client will
77
+ be used instead of creating a new one. Note: When using custom
78
+ clients with SGLang, server auto-start features will be disabled.
79
+ (default: :obj:`None`)
80
+ async_client (Optional[Any], optional): A custom asynchronous
81
+ OpenAI-compatible client instance. If provided, this client will
82
+ be used instead of creating a new one. (default: :obj:`None`)
75
83
  **kwargs (Any): Additional arguments to pass to the client
76
- initialization.
84
+ initialization. Ignored if custom clients are provided.
77
85
 
78
86
  Reference: https://sgl-project.github.io/backend/openai_api_completions.
79
87
  html
@@ -88,6 +96,8 @@ class SGLangModel(BaseModelBackend):
88
96
  token_counter: Optional[BaseTokenCounter] = None,
89
97
  timeout: Optional[float] = None,
90
98
  max_retries: int = 3,
99
+ client: Optional[Any] = None,
100
+ async_client: Optional[Any] = None,
91
101
  **kwargs: Any,
92
102
  ) -> None:
93
103
  if model_config_dict is None:
@@ -111,9 +121,10 @@ class SGLangModel(BaseModelBackend):
111
121
  max_retries,
112
122
  )
113
123
 
114
- self._client = None
115
-
116
- if self._url:
124
+ # Use custom clients if provided, otherwise create new ones
125
+ if client is not None:
126
+ self._client = client
127
+ elif self._url:
117
128
  # Initialize the client if an existing URL is provided
118
129
  self._client = OpenAI(
119
130
  timeout=self._timeout,
@@ -122,6 +133,12 @@ class SGLangModel(BaseModelBackend):
122
133
  base_url=self._url,
123
134
  **kwargs,
124
135
  )
136
+ else:
137
+ self._client = None
138
+
139
+ if async_client is not None:
140
+ self._async_client = async_client
141
+ elif self._url:
125
142
  self._async_client = AsyncOpenAI(
126
143
  timeout=self._timeout,
127
144
  max_retries=self._max_retries,
@@ -129,6 +146,8 @@ class SGLangModel(BaseModelBackend):
129
146
  base_url=self._url,
130
147
  **kwargs,
131
148
  )
149
+ else:
150
+ self._async_client = None
132
151
 
133
152
  def _start_server(self) -> None:
134
153
  try:
@@ -159,13 +178,24 @@ class SGLangModel(BaseModelBackend):
159
178
  )
160
179
  self._inactivity_thread.start()
161
180
  self.last_run_time = time.time()
162
- # Initialize the client after the server starts
163
- self._client = OpenAI(
164
- timeout=self._timeout,
165
- max_retries=self._max_retries,
166
- api_key="Set-but-ignored", # required but ignored
167
- base_url=self._url,
168
- )
181
+ # Initialize client after server starts if not already set
182
+ if self._client is None:
183
+ self._client = OpenAI(
184
+ timeout=self._timeout,
185
+ max_retries=self._max_retries,
186
+ api_key="Set-but-ignored", # required but ignored
187
+ base_url=self._url,
188
+ )
189
+ if (
190
+ not hasattr(self, '_async_client')
191
+ or self._async_client is None
192
+ ):
193
+ self._async_client = AsyncOpenAI(
194
+ timeout=self._timeout,
195
+ max_retries=self._max_retries,
196
+ api_key="Set-but-ignored", # required but ignored
197
+ base_url=self._url,
198
+ )
169
199
  except Exception as e:
170
200
  raise RuntimeError(f"Failed to start SGLang server: {e}") from e
171
201
 
@@ -14,10 +14,12 @@
14
14
 
15
15
  from .role_playing_worker import RolePlayingWorker
16
16
  from .single_agent_worker import SingleAgentWorker
17
+ from .workflow_memory_manager import WorkflowSelectionMethod
17
18
  from .workforce import Workforce
18
19
 
19
20
  __all__ = [
20
21
  "Workforce",
21
22
  "SingleAgentWorker",
22
23
  "RolePlayingWorker",
24
+ "WorkflowSelectionMethod",
23
25
  ]
@@ -0,0 +1,122 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+ from __future__ import annotations
15
+
16
+ from datetime import datetime, timezone
17
+ from typing import Any, Dict, List, Literal, Optional, Union
18
+
19
+ from pydantic import BaseModel, ConfigDict, Field
20
+
21
+
22
class WorkforceEventBase(BaseModel):
    """Base class for all workforce lifecycle events.

    Instances are immutable (``frozen=True``) and reject unknown fields
    (``extra='forbid'``). Each concrete subclass narrows ``event_type``
    down to a single literal tag, which serves as the discriminator for
    the ``WorkforceEvent`` union.
    """

    # frozen => instances are immutable; extra='forbid' => strict schema
    model_config = ConfigDict(frozen=True, extra='forbid')
    # Discriminator tag; subclasses pin this to exactly one literal.
    event_type: Literal[
        "task_decomposed",
        "task_created",
        "task_assigned",
        "task_started",
        "task_completed",
        "task_failed",
        "worker_created",
        "worker_deleted",
        "queue_status",
        "all_tasks_completed",
    ]
    # Optional free-form context attached by the emitter.
    metadata: Optional[Dict[str, Any]] = None
    # Timezone-aware UTC creation time; the factory is evaluated per
    # instance, so every event gets its own "now".
    timestamp: datetime = Field(
        default_factory=lambda: datetime.now(timezone.utc)
    )
40
+
41
+
42
class WorkerCreatedEvent(WorkforceEventBase):
    """Emitted when a new worker joins the workforce."""

    event_type: Literal["worker_created"] = "worker_created"
    # Identifier of the newly created worker.
    worker_id: str
    # Kind of worker (e.g. its implementing class/category).
    worker_type: str
    # Role assigned to the worker.
    role: str
47
+
48
+
49
class WorkerDeletedEvent(WorkforceEventBase):
    """Emitted when a worker is removed from the workforce."""

    event_type: Literal["worker_deleted"] = "worker_deleted"
    # Identifier of the removed worker.
    worker_id: str
    # Optional human-readable reason for the removal.
    reason: Optional[str] = None
53
+
54
+
55
class TaskDecomposedEvent(WorkforceEventBase):
    """Emitted when a task is split into subtasks."""

    event_type: Literal["task_decomposed"] = "task_decomposed"
    # Identifier of the task that was decomposed.
    parent_task_id: str
    # Identifiers of the subtasks produced by the decomposition.
    subtask_ids: List[str]
59
+
60
+
61
class TaskCreatedEvent(WorkforceEventBase):
    """Emitted when a task is created (top-level or as a subtask)."""

    event_type: Literal["task_created"] = "task_created"
    # Identifier of the new task.
    task_id: str
    # Human-readable description of the task.
    description: str
    # Parent task identifier when this task is a subtask; None for
    # top-level tasks.
    parent_task_id: Optional[str] = None
    # Optional task category/type label.
    task_type: Optional[str] = None
67
+
68
+
69
class TaskAssignedEvent(WorkforceEventBase):
    """Emitted when a task is assigned to a worker."""

    event_type: Literal["task_assigned"] = "task_assigned"
    # Identifier of the assigned task.
    task_id: str
    # Identifier of the worker receiving the task.
    worker_id: str
    # Time the task spent queued before assignment, in seconds,
    # if measured.
    queue_time_seconds: Optional[float] = None
    # Identifiers of tasks this task depends on, if any.
    dependencies: Optional[List[str]] = None
75
+
76
+
77
class TaskStartedEvent(WorkforceEventBase):
    """Emitted when a worker begins processing a task."""

    event_type: Literal["task_started"] = "task_started"
    # Identifier of the task being processed.
    task_id: str
    # Identifier of the worker processing it.
    worker_id: str
81
+
82
+
83
class TaskCompletedEvent(WorkforceEventBase):
    """Emitted when a worker finishes a task successfully."""

    event_type: Literal["task_completed"] = "task_completed"
    # Identifier of the completed task.
    task_id: str
    # Identifier of the worker that completed it.
    worker_id: str
    # Optional short summary of the task's result.
    result_summary: Optional[str] = None
    # Wall-clock processing time in seconds, if measured.
    processing_time_seconds: Optional[float] = None
    # Token accounting for the work, if available (e.g. prompt/
    # completion counts keyed by name — exact keys set by the emitter).
    token_usage: Optional[Dict[str, int]] = None
90
+
91
+
92
class TaskFailedEvent(WorkforceEventBase):
    """Emitted when a task fails."""

    event_type: Literal["task_failed"] = "task_failed"
    # Identifier of the failed task.
    task_id: str
    # Description of the failure.
    error_message: str
    # Worker that was processing the task, if any was assigned.
    worker_id: Optional[str] = None
97
+
98
+
99
class AllTasksCompletedEvent(WorkforceEventBase):
    """Emitted once every task in the workforce has completed."""

    event_type: Literal["all_tasks_completed"] = "all_tasks_completed"
101
+
102
+
103
class QueueStatusEvent(WorkforceEventBase):
    """Emitted to report the state of a task queue."""

    event_type: Literal["queue_status"] = "queue_status"
    # Name of the queue being reported on.
    queue_name: str
    # Number of entries currently in the queue.
    length: int
    # Identifiers of tasks still pending in the queue, if tracked.
    pending_task_ids: Optional[List[str]] = None
    # NOTE(review): redeclares `metadata` already defined on the base
    # class with the same type/default — presumably intentional for
    # schema emphasis; confirm it is not a leftover duplicate.
    metadata: Optional[Dict[str, Any]] = None
109
+
110
+
111
# Closed union of every concrete workforce event type. Each member pins
# `event_type` to a distinct literal, so the tag can be used to
# discriminate which variant a given event is.
WorkforceEvent = Union[
    TaskDecomposedEvent,
    TaskCreatedEvent,
    TaskAssignedEvent,
    TaskStartedEvent,
    TaskCompletedEvent,
    TaskFailedEvent,
    WorkerCreatedEvent,
    WorkerDeletedEvent,
    AllTasksCompletedEvent,
    QueueStatusEvent,
]
@@ -119,11 +119,13 @@ class RolePlayingWorker(Worker):
119
119
  `TaskState.FAILED`.
120
120
  """
121
121
  dependency_tasks_info = self._get_dep_tasks_info(dependencies)
122
- prompt = ROLEPLAY_PROCESS_TASK_PROMPT.format(
123
- content=task.content,
124
- parent_task_content=task.parent.content if task.parent else "",
125
- dependency_tasks_info=dependency_tasks_info,
126
- additional_info=task.additional_info,
122
+ prompt = str(
123
+ ROLEPLAY_PROCESS_TASK_PROMPT.format(
124
+ content=task.content,
125
+ parent_task_content=task.parent.content if task.parent else "",
126
+ dependency_tasks_info=dependency_tasks_info,
127
+ additional_info=task.additional_info,
128
+ )
127
129
  )
128
130
  role_play_session = RolePlaying(
129
131
  assistant_role_name=self.assistant_role_name,
@@ -183,12 +185,14 @@ class RolePlayingWorker(Worker):
183
185
  input_msg = assistant_response.msg
184
186
 
185
187
  chat_history_str = "\n".join(chat_history)
186
- prompt = ROLEPLAY_SUMMARIZE_PROMPT.format(
187
- user_role=self.user_role_name,
188
- assistant_role=self.assistant_role_name,
189
- content=task.content,
190
- chat_history=chat_history_str,
191
- additional_info=task.additional_info,
188
+ prompt = str(
189
+ ROLEPLAY_SUMMARIZE_PROMPT.format(
190
+ user_role=self.user_role_name,
191
+ assistant_role=self.assistant_role_name,
192
+ content=task.content,
193
+ chat_history=chat_history_str,
194
+ additional_info=task.additional_info,
195
+ )
192
196
  )
193
197
  if self.use_structured_output_handler and self.structured_handler:
194
198
  # Use structured output handler for prompt-based extraction