camel-ai 0.2.62__py3-none-any.whl → 0.2.64__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (46)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +76 -17
  3. camel/agents/mcp_agent.py +5 -1
  4. camel/configs/__init__.py +3 -0
  5. camel/configs/crynux_config.py +94 -0
  6. camel/interpreters/base.py +14 -1
  7. camel/interpreters/docker/Dockerfile +63 -7
  8. camel/interpreters/docker_interpreter.py +65 -7
  9. camel/interpreters/e2b_interpreter.py +23 -8
  10. camel/interpreters/internal_python_interpreter.py +30 -2
  11. camel/interpreters/ipython_interpreter.py +21 -3
  12. camel/interpreters/subprocess_interpreter.py +34 -2
  13. camel/memories/records.py +5 -3
  14. camel/models/__init__.py +2 -0
  15. camel/models/azure_openai_model.py +101 -25
  16. camel/models/cohere_model.py +65 -0
  17. camel/models/crynux_model.py +94 -0
  18. camel/models/deepseek_model.py +43 -1
  19. camel/models/gemini_model.py +50 -4
  20. camel/models/litellm_model.py +38 -0
  21. camel/models/mistral_model.py +66 -0
  22. camel/models/model_factory.py +10 -1
  23. camel/models/openai_compatible_model.py +81 -17
  24. camel/models/openai_model.py +86 -16
  25. camel/models/reka_model.py +69 -0
  26. camel/models/samba_model.py +69 -2
  27. camel/models/sglang_model.py +74 -2
  28. camel/models/watsonx_model.py +62 -0
  29. camel/societies/workforce/role_playing_worker.py +2 -2
  30. camel/societies/workforce/single_agent_worker.py +23 -0
  31. camel/societies/workforce/workforce.py +409 -7
  32. camel/storages/__init__.py +2 -0
  33. camel/storages/vectordb_storages/__init__.py +2 -0
  34. camel/storages/vectordb_storages/weaviate.py +714 -0
  35. camel/tasks/task.py +19 -10
  36. camel/toolkits/code_execution.py +37 -8
  37. camel/toolkits/mcp_toolkit.py +13 -2
  38. camel/types/enums.py +56 -1
  39. camel/types/unified_model_type.py +5 -0
  40. camel/utils/__init__.py +16 -0
  41. camel/utils/langfuse.py +258 -0
  42. camel/utils/mcp_client.py +84 -17
  43. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/METADATA +6 -1
  44. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/RECORD +46 -42
  45. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/WHEEL +0 -0
  46. {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/licenses/LICENSE +0 -0
camel/interpreters/internal_python_interpreter.py CHANGED
@@ -14,6 +14,8 @@
 import ast
 import difflib
 import importlib
+import os
+import subprocess
 import typing
 from typing import Any, ClassVar, Dict, List, Optional
 
@@ -89,13 +91,15 @@ class InternalPythonInterpreter(BaseInterpreter):
         raise_error: bool = False,
     ) -> None:
         self.action_space = action_space or dict()
+        # Add print to action space
+        self.action_space['print'] = print
         self.state = self.action_space.copy()
         self.fuzz_state: Dict[str, Any] = dict()
         self.import_white_list = import_white_list or list()
         self.raise_error = raise_error
         self.unsafe_mode = unsafe_mode
 
-    def run(self, code: str, code_type: str) -> str:
+    def run(self, code: str, code_type: str = "python") -> str:
         r"""Executes the given code with specified code type in the
         interpreter.
 
@@ -111,7 +115,7 @@ class InternalPythonInterpreter(BaseInterpreter):
             code (str): The python code to be executed.
             code_type (str): The type of the code, which should be one of the
                 supported code types (`python`, `py`, `python3`, `python2`).
-
+                (default: obj:`python`)
 
         Returns:
             str: The string representation of the output of the executed code.
@@ -531,3 +535,27 @@ class InternalPythonInterpreter(BaseInterpreter):
             return self.fuzz_state[close_matches[0]]
         else:
             raise InterpreterError(f"The variable `{key}` is not defined.")
+
+    def execute_command(self, command: str) -> tuple[str, str]:
+        r"""Execute a command in the internal python interpreter.
+
+        Args:
+            command (str): The command to execute.
+
+        Returns:
+            tuple: A tuple containing the stdout and stderr of the command.
+        """
+        try:
+            proc = subprocess.Popen(
+                command,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                env=os.environ,
+                shell=True,
+            )
+            stdout, stderr = proc.communicate()
+
+            return stdout, stderr
+        except Exception as e:
+            raise InterpreterError(f"Error executing command: {e}")
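
Taken together, these hunks make `code_type` default to `"python"` and add a shell escape hatch to the in-process interpreter. A minimal usage sketch, assuming the public `camel.interpreters` export and a POSIX shell (the sketch itself is not part of the diff):

    from camel.interpreters import InternalPythonInterpreter

    interpreter = InternalPythonInterpreter()

    # code_type now defaults to "python", so it can be omitted.
    print(interpreter.run("1 + 1"))  # "2"

    # execute_command() shells out via subprocess and returns (stdout, stderr).
    stdout, stderr = interpreter.execute_command("echo hello")
    print(stdout.strip())  # hello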
camel/interpreters/ipython_interpreter.py CHANGED
@@ -118,13 +118,13 @@ class JupyterKernelInterpreter(BaseInterpreter):
         exec_result = "\n".join(outputs)
         return self._clean_ipython_output(exec_result)
 
-    def run(self, code: str, code_type: str) -> str:
+    def run(self, code: str, code_type: str = "python") -> str:
         r"""Executes the given code in the Jupyter kernel.
 
         Args:
             code (str): The code string to execute.
             code_type (str): The type of code to execute (e.g., 'python',
-                'bash').
+                'bash'). (default: obj:`python`)
 
         Returns:
             str: A string containing the captured result of the
@@ -144,6 +144,24 @@ class JupyterKernelInterpreter(BaseInterpreter):
 
         return result
 
+    def execute_command(self, command: str) -> str:
+        r"""Executes a shell command in the Jupyter kernel.
+
+        Args:
+            command (str): The shell command to execute.
+
+        Returns:
+            str: A string containing the captured result of the
+                executed command.
+
+        """
+        try:
+            self._initialize_if_needed()
+            system_command = f"!{command}"
+            return self._execute(system_command, TIMEOUT)
+        except Exception as e:
+            raise InterpreterError(f"Error executing command: {e}")
+
     def supported_code_types(self) -> List[str]:
         r"""Provides supported code types by the interpreter.
 
@@ -164,5 +182,5 @@ class JupyterKernelInterpreter(BaseInterpreter):
         does not support updating the action space.
         """
         raise RuntimeError(
-            "SubprocessInterpreter doesn't support " "`action_space`."
+            "JupyterKernelInterpreter doesn't support " "`action_space`."
        )
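
Here `execute_command` reuses the running kernel by prepending IPython's `!` shell escape rather than spawning a separate subprocess, so the command sees the kernel's working directory and environment. A sketch of the call shape; the constructor argument is an assumption, and a locally startable Jupyter kernel is required:

    from camel.interpreters import JupyterKernelInterpreter

    # require_confirm=False is assumed to skip interactive confirmation.
    jupyter = JupyterKernelInterpreter(require_confirm=False)

    # The "!" prefix is added internally; pass a plain shell command.
    print(jupyter.execute_command("ls"))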
camel/interpreters/subprocess_interpreter.py CHANGED
@@ -87,14 +87,14 @@ class SubprocessInterpreter(BaseInterpreter):
     def run_file(
         self,
         file: Path,
-        code_type: str,
+        code_type: str = "python",
     ) -> str:
         r"""Executes a code file in a subprocess and captures its output.
 
         Args:
             file (Path): The path object of the file to run.
             code_type (str): The type of code to execute (e.g., 'python',
-                'bash').
+                'bash'). (default: obj:`python`)
 
         Returns:
             str: A string containing the captured stdout and stderr of the
@@ -425,3 +425,35 @@ class SubprocessInterpreter(BaseInterpreter):
             return True
         except subprocess.CalledProcessError:
             return False
+
+    def execute_command(self, command: str) -> tuple[str, str]:
+        r"""Executes a shell command in a subprocess and captures its output.
+
+        Args:
+            command (str): The shell command to execute.
+
+        Returns:
+            tuple: A tuple containing the captured stdout and stderr of the
+                executed command.
+
+        Raises:
+            InterpreterError: If the command execution fails.
+        """
+        try:
+            # Get current Python executable's environment
+            env = os.environ.copy()
+
+            proc = subprocess.Popen(
+                command,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                text=True,
+                env=env,
+                shell=True,  # Use shell=True for command execution
+            )
+            # Add timeout to prevent hanging processes
+            stdout, stderr = proc.communicate(timeout=self.execution_timeout)
+
+            return stdout, stderr
+        except Exception as e:
+            raise InterpreterError(f"Error executing command: {e}")
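
Unlike the internal interpreter's variant, this one passes `timeout=self.execution_timeout` to `communicate()`, so a hung shell raises instead of blocking forever. A sketch of the resulting behavior, assuming `execution_timeout` is set on the instance (e.g. via the constructor) and that `require_confirm=False` is a valid constructor argument:

    from camel.interpreters import SubprocessInterpreter

    interp = SubprocessInterpreter(require_confirm=False)
    # stdout and stderr come back separately, even when both streams are used.
    stdout, stderr = interp.execute_command("echo ok; missing-cmd")
    print("stdout:", stdout.strip())  # ok
    print("stderr:", stderr.strip())  # shell error for missing-cmd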
camel/memories/records.py CHANGED
@@ -15,8 +15,8 @@
 # Enables postponed evaluation of annotations (for string-based type hints)
 from __future__ import annotations
 
+import time
 from dataclasses import asdict
-from datetime import datetime, timezone
 from typing import Any, ClassVar, Dict
 from uuid import UUID, uuid4
 
@@ -53,7 +53,8 @@ class MemoryRecord(BaseModel):
     uuid: UUID = Field(default_factory=uuid4)
     extra_info: Dict[str, str] = Field(default_factory=dict)
     timestamp: float = Field(
-        default_factory=lambda: datetime.now(timezone.utc).timestamp()
+        default_factory=lambda: time.time_ns()
+        / 1_000_000_000  # Nanosecond precision
     )
     agent_id: str = Field(default="")
 
@@ -109,5 +110,6 @@ class ContextRecord(BaseModel):
     memory_record: MemoryRecord
     score: float
     timestamp: float = Field(
-        default_factory=lambda: datetime.now(timezone.utc).timestamp()
+        default_factory=lambda: time.time_ns()
+        / 1_000_000_000  # Nanosecond precision
     )
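
The timestamp field stays a float of Unix seconds but is now sourced from the nanosecond clock, which makes it far less likely that two records created back-to-back share an identical timestamp (relevant wherever records are ordered by time). Note that float64 still caps the effective resolution at roughly microseconds for current epoch values. A quick stdlib-only comparison:

    import time
    from datetime import datetime, timezone

    old_style = datetime.now(timezone.utc).timestamp()  # previous default_factory
    new_style = time.time_ns() / 1_000_000_000          # new default_factory
    print(f"{old_style:.9f}")
    print(f"{new_style:.9f}")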
camel/models/__init__.py CHANGED
@@ -18,6 +18,7 @@ from .azure_openai_model import AzureOpenAIModel
 from .base_audio_model import BaseAudioModel
 from .base_model import BaseModelBackend
 from .cohere_model import CohereModel
+from .crynux_model import CrynuxModel
 from .deepseek_model import DeepSeekModel
 from .fish_audio_model import FishAudioModel
 from .gemini_model import GeminiModel
@@ -96,4 +97,5 @@ __all__ = [
     'VolcanoModel',
     'LMStudioModel',
     'WatsonXModel',
+    'CrynuxModel',
 ]
camel/models/azure_openai_model.py CHANGED
@@ -25,11 +25,26 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    get_current_agent_session_id,
+    is_langfuse_available,
+    update_langfuse_trace,
+)
 
 AzureADTokenProvider = Callable[[], str]
 
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
+
 class AzureOpenAIModel(BaseModelBackend):
     r"""Azure OpenAI API in a unified BaseModelBackend interface.
 
@@ -108,27 +123,52 @@ class AzureOpenAIModel(BaseModelBackend):
                 "or `AZURE_DEPLOYMENT_NAME` environment variable."
             )
 
-        self._client = AzureOpenAI(
-            azure_endpoint=str(self._url),
-            azure_deployment=self._azure_deployment_name,
-            api_version=self.api_version,
-            api_key=self._api_key,
-            azure_ad_token=self._azure_ad_token,
-            azure_ad_token_provider=self.azure_ad_token_provider,
-            timeout=self._timeout,
-            max_retries=3,
-        )
+        if is_langfuse_available():
+            from langfuse.openai import AsyncAzureOpenAI as LangfuseAsyncOpenAI
+            from langfuse.openai import AzureOpenAI as LangfuseOpenAI
+
+            self._client = LangfuseOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
+            self._async_client = LangfuseAsyncOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
+        else:
+            self._client = AzureOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
 
-        self._async_client = AsyncAzureOpenAI(
-            azure_endpoint=str(self._url),
-            azure_deployment=self._azure_deployment_name,
-            api_version=self.api_version,
-            api_key=self._api_key,
-            azure_ad_token=self._azure_ad_token,
-            azure_ad_token_provider=self.azure_ad_token_provider,
-            timeout=self._timeout,
-            max_retries=3,
-        )
+            self._async_client = AsyncAzureOpenAI(
+                azure_endpoint=str(self._url),
+                azure_deployment=self._azure_deployment_name,
+                api_version=self.api_version,
+                api_key=self._api_key,
+                azure_ad_token=self._azure_ad_token,
+                azure_ad_token_provider=self.azure_ad_token_provider,
+                timeout=self._timeout,
+                max_retries=3,
+            )
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -142,6 +182,7 @@ class AzureOpenAIModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -163,14 +204,32 @@ class AzureOpenAIModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
         )
         if response_format:
-            return self._request_parse(messages, response_format, tools)
+            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
+                self._request_parse(messages, response_format, tools)
+            )
         else:
-            return self._request_chat_completion(messages, tools)
+            result = self._request_chat_completion(messages, tools)
+
+        return result
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -192,13 +251,30 @@ class AzureOpenAIModel(BaseModelBackend):
             `ChatCompletion` in the non-stream mode, or
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         response_format = response_format or self.model_config_dict.get(
             "response_format", None
        )
         if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
+            result: Union[
+                ChatCompletion, AsyncStream[ChatCompletionChunk]
+            ] = await self._arequest_parse(messages, response_format, tools)
         else:
-            return await self._arequest_chat_completion(messages, tools)
+            result = await self._arequest_chat_completion(messages, tools)
+
+        return result
 
     def _request_chat_completion(
         self,
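
The same gating pattern recurs in the model backends below: `observe` comes from Langfuse only when `LANGFUSE_ENABLED=true` and the SDK imports cleanly; otherwise a no-op fallback from `camel.utils` is used, so the decorators cost nothing when tracing is off. A self-contained sketch of the idea (the `_noop_observe` stand-in is hypothetical; in CAMEL the fallback is `camel.utils.observe`):

    import os

    def _noop_observe(*d_args, **d_kwargs):
        # Stand-in for the fallback: decorate the function and do nothing else.
        def wrapper(func):
            return func
        return wrapper

    if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
        try:
            from langfuse.decorators import observe
        except ImportError:
            observe = _noop_observe
    else:
        observe = _noop_observe

    @observe()
    def run(prompt: str) -> str:
        return f"echo: {prompt}"

    print(run("hi"))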
camel/models/cohere_model.py CHANGED
@@ -32,8 +32,19 @@ from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
     api_keys_required,
+    get_current_agent_session_id,
+    update_current_observation,
+    update_langfuse_trace,
 )
 
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 try:
     if os.getenv("AGENTOPS_API_KEY") is not None:
         from agentops import LLMEvent, record
@@ -271,6 +282,7 @@ class CohereModel(BaseModelBackend):
 
         return request_config
 
+    @observe(as_type="generation")
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -285,6 +297,28 @@ class CohereModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         from cohere.core.api_error import ApiError
 
         request_config = self._prepare_request(
@@ -309,6 +343,10 @@ class CohereModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -325,6 +363,7 @@ class CohereModel(BaseModelBackend):
 
         return openai_response
 
+    @observe(as_type="generation")
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -339,6 +378,28 @@ class CohereModel(BaseModelBackend):
         Returns:
             ChatCompletion.
         """
+        update_current_observation(
+            input={
+                "messages": messages,
+                "tools": tools,
+            },
+            model=str(self.model_type),
+            model_parameters=self.model_config_dict,
+        )
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "source": "camel",
+                    "agent_id": agent_session_id,
+                    "agent_type": "camel_chat_agent",
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         from cohere.core.api_error import ApiError
 
         request_config = self._prepare_request(
@@ -363,6 +424,10 @@ class CohereModel(BaseModelBackend):
 
         openai_response = self._to_openai_response(response)
 
+        update_current_observation(
+            usage=openai_response.usage,
+        )
+
         # Add AgentOps LLM Event tracking
         if LLMEvent:
             llm_event = LLMEvent(
@@ -0,0 +1,94 @@
1
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
2
+ # Licensed under the Apache License, Version 2.0 (the "License");
3
+ # you may not use this file except in compliance with the License.
4
+ # You may obtain a copy of the License at
5
+ #
6
+ # http://www.apache.org/licenses/LICENSE-2.0
7
+ #
8
+ # Unless required by applicable law or agreed to in writing, software
9
+ # distributed under the License is distributed on an "AS IS" BASIS,
10
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11
+ # See the License for the specific language governing permissions and
12
+ # limitations under the License.
13
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
14
+
15
+ import os
16
+ from typing import Any, Dict, Optional, Union
17
+
18
+ from camel.configs import CRYNUX_API_PARAMS, CrynuxConfig
19
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
20
+ from camel.types import ModelType
21
+ from camel.utils import (
22
+ BaseTokenCounter,
23
+ api_keys_required,
24
+ )
25
+
26
+
27
+ class CrynuxModel(OpenAICompatibleModel):
28
+ r"""Constructor for Crynux backend with OpenAI compatibility.
29
+
30
+ Args:
31
+ model_type (Union[ModelType, str]): Model for which a backend is
32
+ created
33
+ model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
34
+ that will be fed into:obj:`openai.ChatCompletion.create()`. If
35
+ :obj:`None`, :obj:`CrynuxConfig().as_dict()` will be used.
36
+ (default: :obj:`None`)
37
+ api_key (Optional[str], optional): The API key for authenticating with
38
+ the Crynux service. (default: :obj:`None`)
39
+ url (Optional[str], optional): The url to the Crynux service.
40
+ If not provided, "https://bridge.crynux.ai/v1/llm" will be used.
41
+ (default: :obj:`None`)
42
+ token_counter (Optional[BaseTokenCounter], optional): Token counter to
43
+ use for the model. If not provided, :obj:`OpenAITokenCounter(
44
+ ModelType.GPT_4O_MINI)` will be used.
45
+ timeout (Optional[float], optional): The timeout value in seconds for
46
+ API calls. If not provided, will fall back to the MODEL_TIMEOUT
47
+ environment variable or default to 180 seconds.
48
+ (default: :obj:`None`)
49
+ """
50
+
51
+ @api_keys_required(
52
+ [
53
+ ("api_key", 'CRYNUX_API_KEY'),
54
+ ]
55
+ )
56
+ def __init__(
57
+ self,
58
+ model_type: Union[ModelType, str],
59
+ model_config_dict: Optional[Dict[str, Any]] = None,
60
+ api_key: Optional[str] = None,
61
+ url: Optional[str] = None,
62
+ token_counter: Optional[BaseTokenCounter] = None,
63
+ timeout: Optional[float] = None,
64
+ ) -> None:
65
+ if model_config_dict is None:
66
+ model_config_dict = CrynuxConfig().as_dict()
67
+ api_key = api_key or os.environ.get("CRYNUX_API_KEY")
68
+ url = url or os.environ.get(
69
+ "CRYNUX_API_BASE_URL", "https://bridge.crynux.ai/v1/llm"
70
+ )
71
+ timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
72
+ super().__init__(
73
+ model_type=model_type,
74
+ model_config_dict=model_config_dict,
75
+ api_key=api_key,
76
+ url=url,
77
+ token_counter=token_counter,
78
+ timeout=timeout,
79
+ )
80
+
81
+ def check_model_config(self):
82
+ r"""Check whether the model configuration contains any
83
+ unexpected arguments to Crynux API.
84
+
85
+ Raises:
86
+ ValueError: If the model configuration dictionary contains any
87
+ unexpected arguments to Crynux API.
88
+ """
89
+ for param in self.model_config_dict:
90
+ if param not in CRYNUX_API_PARAMS:
91
+ raise ValueError(
92
+ f"Unexpected argument `{param}` is "
93
+ "input into Crynux model backend."
94
+ )
camel/models/deepseek_model.py CHANGED
@@ -28,7 +28,21 @@ from camel.types import (
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, api_keys_required
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+    get_current_agent_session_id,
+    update_langfuse_trace,
+)
+
+if os.environ.get("LANGFUSE_ENABLED", "False").lower() == "true":
+    try:
+        from langfuse.decorators import observe
+    except ImportError:
+        from camel.utils import observe
+else:
+    from camel.utils import observe
+
 
 logger = get_logger(__name__)
 
@@ -176,6 +190,7 @@ class DeepSeekModel(OpenAICompatibleModel):
         )
         return response
 
+    @observe()
     def _run(
         self,
         messages: List[OpenAIMessage],
@@ -193,6 +208,19 @@ class DeepSeekModel(OpenAICompatibleModel):
             `ChatCompletion` in the non-stream mode, or
             `Stream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )
@@ -205,6 +233,7 @@ class DeepSeekModel(OpenAICompatibleModel):
 
         return self._post_handle_response(response)
 
+    @observe()
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -222,6 +251,19 @@ class DeepSeekModel(OpenAICompatibleModel):
             `ChatCompletion` in the non-stream mode, or
             `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
+
+        # Update Langfuse trace with current agent session and metadata
+        agent_session_id = get_current_agent_session_id()
+        if agent_session_id:
+            update_langfuse_trace(
+                session_id=agent_session_id,
+                metadata={
+                    "agent_id": agent_session_id,
+                    "model_type": str(self.model_type),
+                },
+                tags=["CAMEL-AI", str(self.model_type)],
+            )
+
         request_config = self._prepare_request(
             messages, response_format, tools
         )