camel-ai 0.2.62 → 0.2.65 (py3-none-any.whl)

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of camel-ai has been flagged by the registry; consult the registry's advisory page for details.

Files changed (59)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +95 -24
  3. camel/agents/mcp_agent.py +5 -1
  4. camel/benchmarks/mock_website/README.md +96 -0
  5. camel/benchmarks/mock_website/mock_web.py +299 -0
  6. camel/benchmarks/mock_website/requirements.txt +3 -0
  7. camel/benchmarks/mock_website/shopping_mall/app.py +465 -0
  8. camel/benchmarks/mock_website/task.json +104 -0
  9. camel/configs/__init__.py +3 -0
  10. camel/configs/crynux_config.py +94 -0
  11. camel/datasets/models.py +1 -1
  12. camel/datasets/static_dataset.py +6 -0
  13. camel/interpreters/base.py +14 -1
  14. camel/interpreters/docker/Dockerfile +63 -7
  15. camel/interpreters/docker_interpreter.py +65 -7
  16. camel/interpreters/e2b_interpreter.py +23 -8
  17. camel/interpreters/internal_python_interpreter.py +30 -2
  18. camel/interpreters/ipython_interpreter.py +21 -3
  19. camel/interpreters/subprocess_interpreter.py +34 -2
  20. camel/memories/records.py +5 -3
  21. camel/models/__init__.py +2 -0
  22. camel/models/azure_openai_model.py +101 -25
  23. camel/models/cohere_model.py +65 -0
  24. camel/models/crynux_model.py +94 -0
  25. camel/models/deepseek_model.py +43 -1
  26. camel/models/gemini_model.py +50 -4
  27. camel/models/litellm_model.py +38 -0
  28. camel/models/mistral_model.py +66 -0
  29. camel/models/model_factory.py +10 -1
  30. camel/models/openai_compatible_model.py +81 -17
  31. camel/models/openai_model.py +87 -16
  32. camel/models/reka_model.py +69 -0
  33. camel/models/samba_model.py +69 -2
  34. camel/models/sglang_model.py +74 -2
  35. camel/models/watsonx_model.py +62 -0
  36. camel/societies/workforce/role_playing_worker.py +11 -3
  37. camel/societies/workforce/single_agent_worker.py +31 -1
  38. camel/societies/workforce/utils.py +51 -0
  39. camel/societies/workforce/workforce.py +409 -7
  40. camel/storages/__init__.py +2 -0
  41. camel/storages/vectordb_storages/__init__.py +2 -0
  42. camel/storages/vectordb_storages/weaviate.py +714 -0
  43. camel/tasks/task.py +27 -10
  44. camel/toolkits/async_browser_toolkit.py +97 -54
  45. camel/toolkits/browser_toolkit.py +65 -18
  46. camel/toolkits/code_execution.py +37 -8
  47. camel/toolkits/function_tool.py +2 -2
  48. camel/toolkits/mcp_toolkit.py +13 -2
  49. camel/toolkits/playwright_mcp_toolkit.py +16 -3
  50. camel/toolkits/task_planning_toolkit.py +134 -0
  51. camel/types/enums.py +61 -2
  52. camel/types/unified_model_type.py +5 -0
  53. camel/utils/__init__.py +16 -0
  54. camel/utils/langfuse.py +258 -0
  55. camel/utils/mcp_client.py +84 -17
  56. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/METADATA +9 -12
  57. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/RECORD +59 -49
  58. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/WHEEL +0 -0
  59. {camel_ai-0.2.62.dist-info → camel_ai-0.2.65.dist-info}/licenses/LICENSE +0 -0
--- a/camel/models/azure_openai_model.py
+++ b/camel/models/azure_openai_model.py
@@ -25,11 +25,26 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    get_current_agent_session_id,
+    is_langfuse_available,
+    update_langfuse_trace,
+)
 
 AzureADTokenProvider = Callable[[], str]
 
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
 class AzureOpenAIModel(BaseModelBackend):
     r"""Azure OpenAI API in a unified BaseModelBackend interface.
 
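This conditional import pattern repeats in every backend touched by the release: it prefers Langfuse's `observe` decorator when `LANGFUSE_ENABLED` is set and the SDK is importable, and otherwise falls back to `camel.utils.observe`. The fallback itself is not shown in this diff; for the `@observe()` and `@observe(as_type="generation")` call sites below to work without Langfuse, it would have to be a no-op decorator factory along these lines (a sketch of the assumed shape, not the actual `camel.utils` code):

```python
import functools
from typing import Any, Callable


def observe(*_args: Any, **_kwargs: Any) -> Callable:
    # No-op stand-in: accepts the same call shapes as langfuse's decorator
    # (e.g. @observe() or @observe(as_type="generation")) and leaves the
    # wrapped function's behavior unchanged.
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            return func(*args, **kwargs)

        return wrapper

    return decorator
```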
@@ -108,27 +123,52 @@ class AzureOpenAIModel(BaseModelBackend):
                 "or `AZURE_DEPLOYMENT_NAME` environment variable."
             )
 
-        self._client = AzureOpenAI(
-            azure_endpoint=str(self._url),
-            azure_deployment=self._azure_deployment_name,
-            api_version=self.api_version,
-            api_key=self._api_key,
-            azure_ad_token=self._azure_ad_token,
-            azure_ad_token_provider=self.azure_ad_token_provider,
-            timeout=self._timeout,
-            max_retries=3,
-        )
+        if is_langfuse_available():
+            from langfuse.openai import AsyncAzureOpenAI as LangfuseAsyncOpenAI
+            from langfuse.openai import AzureOpenAI as LangfuseOpenAI
+
+            self._client = LangfuseOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
+            self._async_client = LangfuseAsyncOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
+        else:
+            self._client = AzureOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
 
-        self._async_client = AsyncAzureOpenAI(
-            azure_endpoint=str(self._url),
-            azure_deployment=self._azure_deployment_name,
-            api_version=self.api_version,
-            api_key=self._api_key,
-            azure_ad_token=self._azure_ad_token,
-            azure_ad_token_provider=self.azure_ad_token_provider,
-            timeout=self._timeout,
-            max_retries=3,
-        )
+            self._async_client = AsyncAzureOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
 
     @property
     def token_counter(self) -> BaseTokenCounter:
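When Langfuse is available, the constructor swaps in `langfuse.openai`'s drop-in client wrappers rather than the plain SDK clients; the wrappers expose the same surface as `openai.AzureOpenAI`, so the rest of the backend is untouched while each completion call is additionally logged as a Langfuse generation. A minimal standalone illustration of the same pattern (the endpoint, deployment, key, and API-version values are placeholders, not taken from this diff):

```python
import os

from langfuse.openai import AzureOpenAI  # drop-in wrapper around openai.AzureOpenAI

client = AzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],    # placeholder
    azure_deployment=os.environ["AZURE_DEPLOYMENT_NAME"],  # placeholder
    api_version="2024-02-01",                              # placeholder version
    api_key=os.environ["AZURE_OPENAI_API_KEY"],
)

# Same call surface as the unwrapped client; the wrapper records the
# request/response pair to Langfuse as a generation.
response = client.chat.completions.create(
    model=os.environ["AZURE_DEPLOYMENT_NAME"],
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```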
@@ -142,6 +182,7 @@ class AzureOpenAIModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -163,14 +204,32 @@
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return self._request_parse(messages, response_format, tools)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format, tools)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -192,13 +251,30 @@
             `ChatCompletion` in the non-stream mode, or
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format, tools)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result
 
     def _request_chat_completion(
         self,
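Note the step that both `_run` and `_arun` now open with: fetch the ambient agent session id and, if one is set, stamp it onto the active Langfuse trace. The helpers come from the new `camel/utils/langfuse.py` (+258 lines, not displayed on this page). A plausible way to carry a session id across sync and async call boundaries is a `contextvars.ContextVar`; the sketch below is an assumption about that helper's shape, not the shipped code:

```python
import contextvars
from typing import Optional

# Hypothetical module-level state, mirroring what camel/utils/langfuse.py
# would need for get_current_agent_session_id() to work from model code.
_agent_session_id: contextvars.ContextVar[Optional[str]] = contextvars.ContextVar(
    "agent_session_id", default=None
)


def set_current_agent_session_id(session_id: str) -> None:
    """Called by the agent before it invokes a model backend."""
    _agent_session_id.set(session_id)


def get_current_agent_session_id() -> Optional[str]:
    """Read back the ambient session id; None when no agent is active."""
    return _agent_session_id.get()
```

A `ContextVar` survives `await` boundaries and task switches, which is why it fits the `_arun` paths here better than a plain module global would.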
--- a/camel/models/cohere_model.py
+++ b/camel/models/cohere_model.py
@@ -32,8 +32,19 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 try:
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import LLMEvent, record
@@ -271,6 +282,7 @@ class CohereModel(BaseModelBackend):
 
         return request_config
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -285,6 +297,28 @@
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         from cohere.core.api_error import ApiError
 
         request_config = self._prepare_request(
@@ -309,6 +343,10 @@
 
         openai_response = self._to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -325,6 +363,7 @@
 
         return openai_response
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -339,6 +378,28 @@
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         from cohere.core.api_error import ApiError
 
         request_config = self._prepare_request(
@@ -363,6 +424,10 @@
 
         openai_response = self._to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
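Unlike the decorator-only backends, `CohereModel` (and `LiteLLMModel` below) also brackets the provider call with `update_current_observation`: inputs and model parameters are recorded before the request, token usage after it. When Langfuse is unavailable, the `camel.utils` fallback presumably has to swallow both call shapes; a no-op sketch consistent with the call sites (an assumption, since the helper's source is not in this diff):

```python
from typing import Any


def update_current_observation(**fields: Any) -> None:
    # No-op fallback: accepts input=..., model=..., model_parameters=...
    # before the provider call and usage=... after it, discarding all of
    # them when no Langfuse observation is active.
    return None
```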
--- /dev/null
+++ b/camel/models/crynux_model.py
@@ -0,0 +1,94 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import CRYNUX_API_PARAMS, CrynuxConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class CrynuxModel(OpenAICompatibleModel):
+    r"""Constructor for Crynux backend with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`CrynuxConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Crynux service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Crynux service.
+            If not provided, "https://bridge.crynux.ai/v1/llm" will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'CRYNUX_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = CrynuxConfig().as_dict()
+        api_key = api_key or os.environ.get("CRYNUX_API_KEY")
+        url = url or os.environ.get(
+            "CRYNUX_API_BASE_URL", "https://bridge.crynux.ai/v1/llm"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+        )
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Crynux API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to Crynux API.
+        """
+        for param in self.model_config_dict:
+            if param not in CRYNUX_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into Crynux model backend."
+                )
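Because `CrynuxModel` only layers Crynux-specific defaults (config, key, base URL, timeout) over `OpenAICompatibleModel`, using it is a matter of constructing it with a model name. A hedged usage sketch; the model identifier and credential below are placeholders, and `CRYNUX_API_KEY` must be set for the `@api_keys_required` check to pass:

```python
import os

from camel.models.crynux_model import CrynuxModel

os.environ.setdefault("CRYNUX_API_KEY", "<your-key>")  # placeholder credential

model = CrynuxModel(model_type="crynux/example-llm")  # placeholder model name

# Raises ValueError if model_config_dict contains keys outside CRYNUX_API_PARAMS.
model.check_model_config()
```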
--- a/camel/models/deepseek_model.py
+++ b/camel/models/deepseek_model.py
@@ -28,7 +28,21 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, api_keys_required
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+    get_current_agent_session_id,
+    update_langfuse_trace,
+)
+
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 logger = get_logger(__name__)
 
@@ -176,6 +190,7 @@ class DeepSeekModel(OpenAICompatibleModel):
         )
         return response
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -193,6 +208,19 @@
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -205,6 +233,7 @@
 
         return self._post_handle_response(response)
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -222,6 +251,19 @@
             `ChatCompletion` in the non-stream mode, or
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
--- a/camel/models/gemini_model.py
+++ b/camel/models/gemini_model.py
@@ -28,8 +28,18 @@ from camel.types import (
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class GeminiModel(OpenAICompatibleModel):
     r"""Gemini API in a unified OpenAICompatibleModel interface.
@@ -99,6 +109,7 @@ class GeminiModel(OpenAICompatibleModel):
             processed_messages.append(msg_copy)
         return processed_messages
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -120,6 +131,19 @@
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
@@ -130,10 +154,15 @@
                     "Gemini does not support function calling with "
                     "response format."
                 )
-            return self._request_parse(messages, response_format)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -155,6 +184,19 @@
             `ChatCompletion` in the non-stream mode, or
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
@@ -165,9 +207,13 @@
                     "Gemini does not support function calling with "
                     "response format."
                 )
-            return await self._arequest_parse(messages, response_format)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result
 
     def _request_chat_completion(
         self,
--- a/camel/models/litellm_model.py
+++ b/camel/models/litellm_model.py
@@ -24,8 +24,19 @@ from camel.utils import (
     BaseTokenCounter,
     LiteLLMTokenCounter,
     dependencies_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 class LiteLLMModel(BaseModelBackend):
     r"""Constructor for LiteLLM backend with OpenAI compatibility.
@@ -117,6 +128,7 @@ class LiteLLMModel(BaseModelBackend):
     async def _arun(self) -> None:  # type: ignore[override]
         raise NotImplementedError
 
+    @observe(as_type='generation')
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -132,6 +144,28 @@
         Returns:
             ChatCompletion
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response = self.client(
             timeout=self._timeout,
             api_key=self._api_key,
@@ -141,6 +175,10 @@
             **self.model_config_dict,
         )
         response = self._convert_response_from_litellm_to_openai(response)
+
+        update_current_observation(
+            usage=response.usage,
+        )
         return response
 
     def check_model_config(self):
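Taken together, the release gates all of this instrumentation behind a single switch: `LANGFUSE_ENABLED` controls the module-level `observe` imports, so it must be set before any camel model module is imported, and the Langfuse SDK reads its own credentials from its standard environment variables. A sketch of enabling tracing end to end (credential values are placeholders; the `LANGFUSE_*` credential names are the langfuse SDK's documented ones, not defined in this diff):

```python
import os

# Must happen before importing camel.models.*, because the observe import
# switch in each backend runs at module import time.
os.environ["LANGFUSE_ENABLED"] = "true"
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."  # placeholder
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."  # placeholder
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

from camel.models.gemini_model import GeminiModel  # imports pick up the switch
```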