camel-ai 0.2.62__py3-none-any.whl → 0.2.65__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.

Note: this version of camel-ai has been flagged as potentially problematic.

Files changed (59)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +95 -24
  3. camel/agents/mcp_agent.py +5 -1
  4. camel/benchmarks/mock_website/README.md +96 -0
  5. camel/benchmarks/mock_website/mock_web.py +299 -0
  6. camel/benchmarks/mock_website/requirements.txt +3 -0
  7. camel/benchmarks/mock_website/shopping_mall/app.py +465 -0
  8. camel/benchmarks/mock_website/task.json +104 -0
  9. camel/configs/__init__.py +3 -0
  10. camel/configs/crynux_config.py +94 -0
  11. camel/datasets/models.py +1 -1
  12. camel/datasets/static_dataset.py +6 -0
  13. camel/interpreters/base.py +14 -1
  14. camel/interpreters/docker/Dockerfile +63 -7
  15. camel/interpreters/docker_interpreter.py +65 -7
  16. camel/interpreters/e2b_interpreter.py +23 -8
  17. camel/interpreters/internal_python_interpreter.py +30 -2
  18. camel/interpreters/ipython_interpreter.py +21 -3
  19. camel/interpreters/subprocess_interpreter.py +34 -2
  20. camel/memories/records.py +5 -3
  21. camel/models/__init__.py +2 -0
  22. camel/models/azure_openai_model.py +101 -25
  23. camel/models/cohere_model.py +65 -0
  24. camel/models/crynux_model.py +94 -0
  25. camel/models/deepseek_model.py +43 -1
  26. camel/models/gemini_model.py +50 -4
  27. camel/models/litellm_model.py +38 -0
  28. camel/models/mistral_model.py +66 -0
  29. camel/models/model_factory.py +10 -1
  30. camel/models/openai_compatible_model.py +81 -17
  31. camel/models/openai_model.py +87 -16
  32. camel/models/reka_model.py +69 -0
  33. camel/models/samba_model.py +69 -2
  34. camel/models/sglang_model.py +74 -2
  35. camel/models/watsonx_model.py +62 -0
  36. camel/societies/workforce/role_playing_worker.py +11 -3
  37. camel/societies/workforce/single_agent_worker.py +31 -1
  38. camel/societies/workforce/utils.py +51 -0
  39. camel/societies/workforce/workforce.py +409 -7
  40. camel/storages/__init__.py +2 -0
  41. camel/storages/vectordb_storages/__init__.py +2 -0
  42. camel/storages/vectordb_storages/weaviate.py +714 -0
  43. camel/tasks/task.py +27 -10
  44. camel/toolkits/async_browser_toolkit.py +97 -54
  45. camel/toolkits/browser_toolkit.py +65 -18
  46. camel/toolkits/code_execution.py +37 -8
  47. camel/toolkits/function_tool.py +2 -2
  48. camel/toolkits/mcp_toolkit.py +13 -2
  49. camel/toolkits/playwright_mcp_toolkit.py +16 -3
  50. camel/toolkits/task_planning_toolkit.py +134 -0
  51. camel/types/enums.py +61 -2
  52. camel/types/unified_model_type.py +5 -0
  53. camel/utils/__init__.py +16 -0
  54. camel/utils/langfuse.py +258 -0
  55. camel/utils/mcp_client.py +84 -17
  56. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/METADATA +9 -12
  57. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/RECORD +59 -49
  58. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/WHEEL +0 -0
  59. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/licenses/LICENSE +0 -0
camel/models/mistral_model.py

@@ -35,6 +35,9 @@ from camel.utils import (
     OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )

 logger = get_logger(__name__)
@@ -47,6 +50,14 @@ try:
 except (ImportError, AttributeError):
     LLMEvent = None

+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+

 class MistralModel(BaseModelBackend):
     r"""Mistral API in a unified BaseModelBackend interface.
@@ -232,6 +243,7 @@ class MistralModel(BaseModelBackend):
         )
         return self._token_counter

+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -242,6 +254,29 @@
             "Mistral does not support async inference, using sync "
             "inference instead."
         )
+        update_current_observation(
+            input={
+                "messages": messages,
+                "response_format": response_format,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
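update_current_observation and update_langfuse_trace are new helpers exported from camel.utils (camel/utils/langfuse.py in the file list above). Assuming they delegate to the Langfuse v2 decorator context, which this diff does not show, they would reduce to roughly:

from typing import Any, Dict, List, Optional

from langfuse.decorators import langfuse_context


def update_langfuse_trace(
    session_id: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    tags: Optional[List[str]] = None,
) -> None:
    # Attach the agent session id, metadata and tags to the trace that
    # the enclosing @observe decorator opened.
    langfuse_context.update_current_trace(
        session_id=session_id, metadata=metadata, tags=tags
    )


def update_current_observation(**kwargs: Any) -> None:
    # Forward input/model/model_parameters/usage to the current span.
    langfuse_context.update_current_observation(**kwargs)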
@@ -255,6 +290,10 @@

         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]

+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -271,6 +310,7 @@

         return openai_response

+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -290,6 +330,28 @@
         Returns:
             ChatCompletion: The response from the model.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -303,6 +365,10 @@

         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]

+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
camel/models/model_factory.py

@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import json
+import os
 from typing import ClassVar, Dict, Optional, Type, Union

 from camel.models.aiml_model import AIMLModel
@@ -20,6 +21,7 @@ from camel.models.aws_bedrock_model import AWSBedrockModel
 from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
 from camel.models.cohere_model import CohereModel
+from camel.models.crynux_model import CrynuxModel
 from camel.models.deepseek_model import DeepSeekModel
 from camel.models.gemini_model import GeminiModel
 from camel.models.groq_model import GroqModel
@@ -50,7 +52,7 @@ from camel.models.watsonx_model import WatsonXModel
 from camel.models.yi_model import YiModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType, UnifiedModelType
-from camel.utils import BaseTokenCounter
+from camel.utils import BaseTokenCounter, configure_langfuse


 class ModelFactory:
@@ -96,6 +98,7 @@ class ModelFactory:
         ModelPlatformType.MODELSCOPE: ModelScopeModel,
         ModelPlatformType.NOVITA: NovitaModel,
         ModelPlatformType.WATSONX: WatsonXModel,
+        ModelPlatformType.CRYNUX: CrynuxModel,
     }

     @staticmethod
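With the new table entry, ModelFactory.create can dispatch ModelPlatformType.CRYNUX to the CrynuxModel backend added in camel/models/crynux_model.py. A hypothetical usage sketch; the model identifier is illustrative and not taken from this diff:

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# model_type may be a ModelType member or, for open-ended platforms,
# a plain string; "llama-3.1-8b-instruct" is a placeholder here.
model = ModelFactory.create(
    model_platform=ModelPlatformType.CRYNUX,
    model_type="llama-3.1-8b-instruct",
)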
@@ -142,6 +145,12 @@
         Raises:
             ValueError: If there is no backend for the model.
         """
+
+        # Auto-configure Langfuse only if explicitly enabled
+        env_enabled_str = os.environ.get("LANGFUSE_ENABLED")
+        if env_enabled_str and env_enabled_str.lower() == "true":
+            configure_langfuse()
+
         # Convert string to ModelPlatformType enum if needed
         if isinstance(model_platform, str):
             try:
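Tracing stays opt-in: configure_langfuse() runs only when LANGFUSE_ENABLED is literally "true" (case-insensitive). A sketch of how a caller would enable it; the LANGFUSE_* credential variables are the Langfuse SDK's standard settings rather than anything this diff defines:

import os

# Opt in before the first ModelFactory.create() call.
os.environ["LANGFUSE_ENABLED"] = "true"
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."  # placeholder
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."  # placeholder
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
)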
camel/models/openai_compatible_model.py

@@ -31,8 +31,20 @@ from camel.types import (
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
+    get_current_agent_session_id,
+    is_langfuse_available,
+    update_langfuse_trace,
 )

+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
 logger = get_logger(__name__)

@@ -72,20 +84,37 @@ class OpenAICompatibleModel(BaseModelBackend):
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
+        if is_langfuse_available():
+            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+            from langfuse.openai import OpenAI as LangfuseOpenAI
+
+            self._client = LangfuseOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = LangfuseAsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+        else:
+            self._client = OpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = AsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )

+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
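langfuse.openai ships OpenAI and AsyncOpenAI wrappers that are constructor-compatible with the openai package, which is why the two branches above differ only in which class gets instantiated. The pattern in isolation (a sketch with placeholder credentials):

# Drop-in substitution: identical constructor arguments either way.
try:
    from langfuse.openai import OpenAI  # traced client
except ImportError:
    from openai import OpenAI  # plain client

client = OpenAI(
    timeout=180,
    max_retries=3,
    base_url="https://api.openai.com/v1",
    api_key="sk-...",  # placeholder
)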
@@ -107,14 +136,32 @@ class OpenAICompatibleModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return self._request_parse(messages, response_format, tools)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format, tools)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result

+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -136,13 +183,30 @@
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format, tools)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result

     def _request_chat_completion(
         self,
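In both _run and _arun the per-branch returns are folded into a single annotated result local with one exit point. The diff does not state the motivation, but a plausible one is keeping the Union return type explicit now that @observe() wraps these methods; the shape of the refactor reduced to its essentials:

from typing import Union


def run(structured: bool) -> Union[int, str]:
    if structured:
        result: Union[int, str] = 42
    else:
        result = "fallback"
    return result  # single annotated exit instead of per-branch returns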
camel/models/openai_model.py

@@ -30,8 +30,20 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    is_langfuse_available,
+    update_langfuse_trace,
 )

+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
 UNSUPPORTED_PARAMS = {
     "temperature",
     "top_p",
@@ -90,18 +102,35 @@ class OpenAIModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )

-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-            api_key=self._api_key,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-            api_key=self._api_key,
-        )
+        if is_langfuse_available():
+            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+            from langfuse.openai import OpenAI as LangfuseOpenAI
+
+            self._client = LangfuseOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = LangfuseAsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+        else:
+            self._client = OpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )
+            self._async_client = AsyncOpenAI(
+                timeout=self._timeout,
+                max_retries=3,
+                base_url=self._url,
+                api_key=self._api_key,
+            )

     def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
         r"""Sanitize the model configuration for O1 models."""
@@ -113,6 +142,7 @@
             ModelType.O3_MINI,
             ModelType.O3,
             ModelType.O4_MINI,
+            ModelType.O3_PRO,
         ]:
             warnings.warn(
                 "Warning: You are using an reasoning model (O series), "
@@ -185,6 +215,7 @@
         self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter

+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -206,15 +237,35 @@
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         messages = self._adapt_messages_for_o1_models(messages)
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return self._request_parse(messages, response_format, tools)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format, tools)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result

+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -236,13 +287,33 @@
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
+        messages = self._adapt_messages_for_o1_models(messages)
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format, tools)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result

     def _request_chat_completion(
         self,
camel/models/reka_model.py

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union

 from pydantic import BaseModel
@@ -24,8 +25,19 @@ from camel.utils import (
     OpenAITokenCounter,
     api_keys_required,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )

+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 if TYPE_CHECKING:
     from reka.types import ChatMessage, ChatResponse

@@ -188,6 +200,7 @@ class RekaModel(BaseModelBackend):
         )
         return self._token_counter

+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -203,6 +216,29 @@
         Returns:
             ChatCompletion.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         reka_messages = self._convert_openai_to_reka_messages(messages)

         response = await self._async_client.chat.create(
@@ -213,6 +249,10 @@

         openai_response = self._convert_reka_to_openai_response(response)

+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -229,6 +269,7 @@

         return openai_response

+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -244,6 +285,30 @@
         Returns:
             ChatCompletion.
         """
+
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         reka_messages = self._convert_openai_to_reka_messages(messages)

         response = self._client.chat.create(
@@ -254,6 +319,10 @@

         openai_response = self._convert_reka_to_openai_response(response)

+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(