camel-ai 0.2.62__py3-none-any.whl → 0.2.64__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +76 -17
- camel/agents/mcp_agent.py +5 -1
- camel/configs/__init__.py +3 -0
- camel/configs/crynux_config.py +94 -0
- camel/interpreters/base.py +14 -1
- camel/interpreters/docker/Dockerfile +63 -7
- camel/interpreters/docker_interpreter.py +65 -7
- camel/interpreters/e2b_interpreter.py +23 -8
- camel/interpreters/internal_python_interpreter.py +30 -2
- camel/interpreters/ipython_interpreter.py +21 -3
- camel/interpreters/subprocess_interpreter.py +34 -2
- camel/memories/records.py +5 -3
- camel/models/__init__.py +2 -0
- camel/models/azure_openai_model.py +101 -25
- camel/models/cohere_model.py +65 -0
- camel/models/crynux_model.py +94 -0
- camel/models/deepseek_model.py +43 -1
- camel/models/gemini_model.py +50 -4
- camel/models/litellm_model.py +38 -0
- camel/models/mistral_model.py +66 -0
- camel/models/model_factory.py +10 -1
- camel/models/openai_compatible_model.py +81 -17
- camel/models/openai_model.py +86 -16
- camel/models/reka_model.py +69 -0
- camel/models/samba_model.py +69 -2
- camel/models/sglang_model.py +74 -2
- camel/models/watsonx_model.py +62 -0
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +23 -0
- camel/societies/workforce/workforce.py +409 -7
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/weaviate.py +714 -0
- camel/tasks/task.py +19 -10
- camel/toolkits/code_execution.py +37 -8
- camel/toolkits/mcp_toolkit.py +13 -2
- camel/types/enums.py +56 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +16 -0
- camel/utils/langfuse.py +258 -0
- camel/utils/mcp_client.py +84 -17
- {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/METADATA +6 -1
- {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/RECORD +46 -42
- {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.62.dist-info → camel_ai-0.2.64.dist-info}/licenses/LICENSE +0 -0
camel/tasks/task.py
CHANGED
@@ -14,7 +14,7 @@
 
 import re
 from enum import Enum
-from typing import Callable, Dict, List, Literal, Optional, Union
+from typing import Any, Callable, Dict, List, Literal, Optional, Union
 
 from pydantic import BaseModel
 
@@ -69,14 +69,23 @@ class Task(BaseModel):
     r"""Task is specific assignment that can be passed to a agent.
 
     Attributes:
-        content: string content for task.
-        id: An unique string identifier for the task. This should
-
-
-
-
-
-
+        content (str): string content for task.
+        id (str): An unique string identifier for the task. This should
+            ideally be provided by the provider/model which created the task.
+            (default: :obj: `""`)
+        state (TaskState): The state which should be OPEN, RUNNING, DONE or
+            DELETED. (default: :obj: `TaskState.OPEN`)
+        type (Optional[str]): task type. (default: :obj: `None`)
+        parent (Optional[Task]): The parent task, None for root task.
+            (default: :obj: `None`)
+        subtasks (List[Task]): The childrent sub-tasks for the task.
+            (default: :obj: `[]`)
+        result (Optional[str]): The answer for the task.
+            (default: :obj: `""`)
+        failure_count (int): The failure count for the task.
+            (default: :obj: `0`)
+        additional_info (Optional[Dict[str, Any]]): Additional information for
+            the task. (default: :obj: `None`)
     """
 
     content: str
@@ -95,7 +104,7 @@ class Task(BaseModel):
 
     failure_count: int = 0
 
-    additional_info: Optional[str] = None
+    additional_info: Optional[Dict[str, Any]] = None
 
     @classmethod
     def from_message(cls, message: BaseMessage) -> "Task":
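The `additional_info` field on `Task` changes from `Optional[str]` to `Optional[Dict[str, Any]]`, so callers that previously stored a plain string now pass a dictionary. A minimal sketch of the new shape (the keys and values below are illustrative, not part of the release):

```python
from camel.tasks import Task

# additional_info is now a dict rather than a string (illustrative keys/values).
task = Task(
    content="Summarize the quarterly report",
    id="task-001",
    additional_info={"priority": "high", "source": "workforce"},
)
print(task.additional_info["priority"])  # -> "high"
```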
camel/toolkits/code_execution.py
CHANGED
@@ -20,10 +20,13 @@ from camel.interpreters import (
     JupyterKernelInterpreter,
     SubprocessInterpreter,
 )
+from camel.logger import get_logger
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
 from camel.utils import MCPServer
 
+logger = get_logger(__name__)
+
 
 @MCPServer()
 class CodeExecutionToolkit(BaseToolkit):
@@ -36,10 +39,10 @@ class CodeExecutionToolkit(BaseToolkit):
            (default: :obj:`False`)
        unsafe_mode (bool): If `True`, the interpreter runs the code
            by `eval()` without any security check. (default: :obj:`False`)
-        import_white_list (
+        import_white_list (Optional[List[str]]): A list of allowed imports.
            (default: :obj:`None`)
-        require_confirm (bool): Whether to require confirmation before
-            (default: :obj:`False`)
+        require_confirm (bool): Whether to require confirmation before
+            executing code. (default: :obj:`False`)
    """
 
    def __init__(
@@ -97,18 +100,41 @@ class CodeExecutionToolkit(BaseToolkit):
                f"The sandbox type `{sandbox}` is not supported."
            )
 
-    def execute_code(self, code: str) -> str:
+    def execute_code(self, code: str, code_type: str = "python") -> str:
        r"""Execute a given code snippet.
 
        Args:
            code (str): The input code to the Code Interpreter tool call.
+            code_type (str): The type of the code to be executed
+                (e.g. node.js, python, etc). (default: obj:`python`)
 
        Returns:
            str: The text output from the Code Interpreter tool call.
        """
-        output = self.interpreter.run(code,
-
-
+        output = self.interpreter.run(code, code_type)
+        content = (
+            f"Executed the code below:\n```{code_type}\n{code}\n```\n"
+            f"> Executed Results:\n{output}"
+        )
+        if self.verbose:
+            print(content)
+        return content
+
+    def execute_command(self, command: str) -> Union[str, tuple[str, str]]:
+        r"""Execute a command can be used to resolve the dependency of the
+        code.
+
+        Args:
+            command (str): The command to execute.
+
+        Returns:
+            Union[str, tuple[str, str]]: The output of the command.
+        """
+        output = self.interpreter.execute_command(command)
+        content = (
+            f"Executed the command below:\n```sh\n{command}\n```\n"
+            f"> Executed Results:\n{output}"
+        )
        if self.verbose:
            print(content)
        return content
@@ -121,4 +147,7 @@ class CodeExecutionToolkit(BaseToolkit):
            List[FunctionTool]: A list of FunctionTool objects
                representing the functions in the toolkit.
        """
-        return [
+        return [
+            FunctionTool(self.execute_code),
+            FunctionTool(self.execute_command),
+        ]
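`execute_code` now accepts a `code_type` argument and the toolkit gains an `execute_command` method, both exposed through `get_tools()`. A rough usage sketch (the sandbox choice, shell command, and snippet are illustrative):

```python
from camel.toolkits import CodeExecutionToolkit

# Illustrative setup; "subprocess" is one of the supported sandbox types.
toolkit = CodeExecutionToolkit(sandbox="subprocess", verbose=True)

# Resolve a dependency first, then run a snippet tagged with its code type.
print(toolkit.execute_command("pip install requests"))
print(toolkit.execute_code("print(1 + 1)", code_type="python"))

# Both methods are now returned as FunctionTool objects.
tools = toolkit.get_tools()
print(len(tools))  # execute_code and execute_command
```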
camel/toolkits/mcp_toolkit.py
CHANGED
@@ -98,6 +98,8 @@ class MCPToolkit(BaseToolkit):
        timeout (Optional[float], optional): Timeout for connection attempts
            in seconds. This timeout applies to individual client connections.
            (default: :obj:`None`)
+        strict (Optional[bool], optional): Flag to indicate strict mode.
+            (default: :obj:`False`)
 
    Note:
        At least one of :obj:`clients`, :obj:`config_path`, or
@@ -146,6 +148,7 @@
        config_path: Optional[str] = None,
        config_dict: Optional[Dict[str, Any]] = None,
        timeout: Optional[float] = None,
+        strict: Optional[bool] = False,
    ):
        # Call parent constructor first
        super().__init__(timeout=timeout)
@@ -162,6 +165,7 @@
            raise ValueError(error_msg)
 
        self.clients: List[MCPClient] = clients or []
+        self.strict = strict  # Store strict parameter
        self._is_connected = False
        self._exit_stack: Optional[AsyncExitStack] = None
 
@@ -309,6 +313,7 @@
        config_path: Optional[str] = None,
        config_dict: Optional[Dict[str, Any]] = None,
        timeout: Optional[float] = None,
+        strict: Optional[bool] = False,
    ) -> "MCPToolkit":
        r"""Factory method that creates and connects to all MCP servers.
 
@@ -326,6 +331,8 @@
                config file. (default: :obj:`None`)
            timeout (Optional[float], optional): Timeout for connection
                attempts in seconds. (default: :obj:`None`)
+            strict (Optional[bool], optional): Flag to indicate strict mode.
+                (default: :obj:`False`)
 
        Returns:
            MCPToolkit: A fully initialized and connected :obj:`MCPToolkit`
@@ -354,6 +361,7 @@
            config_path=config_path,
            config_dict=config_dict,
            timeout=timeout,
+            strict=strict,
        )
        try:
            await toolkit.connect()
@@ -373,10 +381,11 @@
        config_path: Optional[str] = None,
        config_dict: Optional[Dict[str, Any]] = None,
        timeout: Optional[float] = None,
+        strict: Optional[bool] = False,
    ) -> "MCPToolkit":
        r"""Synchronously create and connect to all MCP servers."""
        return run_async(cls.create)(
-            clients, config_path, config_dict, timeout
+            clients, config_path, config_dict, timeout, strict
        )
 
    def _load_clients_from_config(self, config_path: str) -> List[MCPClient]:
@@ -433,10 +442,12 @@
 
        try:
            # Use the new mcp_client factory function
-            # Pass timeout from toolkit if available
+            # Pass timeout and strict from toolkit if available
            kwargs = {}
            if hasattr(self, "timeout") and self.timeout is not None:
                kwargs["timeout"] = self.timeout
+            if hasattr(self, "strict") and self.strict is not None:
+                kwargs["strict"] = self.strict
 
            client = create_mcp_client(cfg, **kwargs)
            return client
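The new `strict` flag is accepted by `MCPToolkit.__init__`, `create`, and the synchronous variant, and is forwarded to `create_mcp_client` for every configured server. A sketch of how a caller might pass it (the server definition below is a placeholder, not taken from this diff):

```python
import asyncio

from camel.toolkits import MCPToolkit

# Placeholder MCP server definition; substitute a real one.
config = {
    "mcpServers": {
        "filesystem": {
            "command": "npx",
            "args": ["-y", "@modelcontextprotocol/server-filesystem", "."],
        }
    }
}

async def main() -> None:
    # strict is stored on the toolkit and passed through to each MCP client.
    toolkit = await MCPToolkit.create(
        config_dict=config, timeout=30.0, strict=True
    )
    print(len(toolkit.get_tools()))

asyncio.run(main())
```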
camel/types/enums.py
CHANGED
@@ -185,7 +185,7 @@ class ModelType(UnifiedModelType, Enum):
 
    # Gemini models
    GEMINI_2_5_FLASH_PREVIEW = "gemini-2.5-flash-preview-04-17"
-    GEMINI_2_5_PRO_PREVIEW = "gemini-2.5-pro-preview-05
+    GEMINI_2_5_PRO_PREVIEW = "gemini-2.5-pro-preview-06-05"
    GEMINI_2_0_FLASH = "gemini-2.0-flash"
    GEMINI_2_0_FLASH_EXP = "gemini-2.0-flash-exp"
    GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"
@@ -207,6 +207,7 @@ class ModelType(UnifiedModelType, Enum):
    MISTRAL_NEMO = "open-mistral-nemo"
    MISTRAL_PIXTRAL_12B = "pixtral-12b-2409"
    MISTRAL_MEDIUM_3 = "mistral-medium-latest"
+    MAGISTRAL_MEDIUM = "magistral-medium-2506"
 
    # Reka models
    REKA_CORE = "reka-core"
@@ -384,6 +385,25 @@ class ModelType(UnifiedModelType, Enum):
    )
    WATSONX_MISTRAL_LARGE = "mistralai/mistral-large"
 
+    # Crynux models
+    CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_1_5B = (
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
+    )
+    CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_7B = (
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
+    )
+    CRYNUX_DEEPSEEK_R1_DISTILL_LLAMA_8B = (
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
+    )
+
+    CRYNUX_QWEN_3_4_B = "Qwen/Qwen3-4B"
+    CRYNUX_QWEN_3_8_B = "Qwen/Qwen3-8B"
+    CRYNUX_QWEN_2_5_7B = "Qwen/Qwen2.5-7B"
+    CRYNUX_QWEN_2_5_7B_INSTRUCT = "Qwen/Qwen2.5-7B-Instruct"
+
+    CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B = "NousResearch/Hermes-3-Llama-3.1-8B"
+    CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B = "NousResearch/Hermes-3-Llama-3.2-3B"
+
    def __str__(self):
        return self.value
 
@@ -612,6 +632,7 @@ class ModelType(UnifiedModelType, Enum):
            ModelType.MISTRAL_8B,
            ModelType.MISTRAL_3B,
            ModelType.MISTRAL_MEDIUM_3,
+            ModelType.MAGISTRAL_MEDIUM,
        }
 
    @property
@@ -891,6 +912,20 @@ class ModelType(UnifiedModelType, Enum):
            ModelType.NOVITA_L31_70B_EURYALE_V2_2,
        }
 
+    @property
+    def is_crynux(self) -> bool:
+        return self in {
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_1_5B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_7B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_LLAMA_8B,
+            ModelType.CRYNUX_QWEN_3_4_B,
+            ModelType.CRYNUX_QWEN_3_8_B,
+            ModelType.CRYNUX_QWEN_2_5_7B,
+            ModelType.CRYNUX_QWEN_2_5_7B_INSTRUCT,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B,
+        }
+
    @property
    def is_aiml(self) -> bool:
        return self in {
@@ -991,6 +1026,15 @@ class ModelType(UnifiedModelType, Enum):
            ModelType.NOVITA_GLM_4_32B_0414,
            ModelType.NOVITA_GLM_Z1_RUMINATION_32B_0414,
            ModelType.NOVITA_QWEN_2_5_7B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_1_5B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_7B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_LLAMA_8B,
+            ModelType.CRYNUX_QWEN_3_4_B,
+            ModelType.CRYNUX_QWEN_3_8_B,
+            ModelType.CRYNUX_QWEN_2_5_7B,
+            ModelType.CRYNUX_QWEN_2_5_7B_INSTRUCT,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B,
        }:
            return 32_000
        elif self in {
@@ -1223,6 +1267,11 @@ class ModelType(UnifiedModelType, Enum):
            ModelType.TOGETHER_LLAMA_4_SCOUT,
        }:
            return 10_000_000
+        elif self in {
+            ModelType.MAGISTRAL_MEDIUM,
+        }:
+            return 40_000
+
        else:
            logger.warning(
                f"Unknown model type {self}, set maximum token limit "
@@ -1449,6 +1498,7 @@ class ModelPlatformType(Enum):
    NETMIND = "netmind"
    NOVITA = "novita"
    WATSONX = "watsonx"
+    CRYNUX = "crynux"
 
    @classmethod
    def from_name(cls, name):
@@ -1624,6 +1674,11 @@
        r"""Returns whether this platform is WatsonX."""
        return self is ModelPlatformType.WATSONX
 
+    @property
+    def is_crynux(self) -> bool:
+        r"""Returns whether this platform is Crynux."""
+        return self is ModelPlatformType.CRYNUX
+
 
 class AudioModelType(Enum):
    TTS_1 = "tts-1"
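The new enum members can be exercised directly; a small sketch, assuming the added token-limit branches live in the usual `ModelType.token_limit` property as in earlier releases:

```python
from camel.types import ModelPlatformType, ModelType

# New members introduced in this release.
model = ModelType.CRYNUX_QWEN_2_5_7B_INSTRUCT
print(model.is_crynux)       # True
print(model.token_limit)     # 32_000, per the branch added above

print(ModelType.MAGISTRAL_MEDIUM.token_limit)  # 40_000
print(ModelPlatformType.CRYNUX.is_crynux)      # True
```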
camel/types/unified_model_type.py
CHANGED

@@ -163,6 +163,11 @@ class UnifiedModelType(str):
        r"""Returns whether the model is a WatsonX served model."""
        return True
 
+    @property
+    def is_crynux(self) -> bool:
+        r"""Returns whether the model is a Crynux served model."""
+        return True
+
    @property
    def support_native_structured_output(self) -> bool:
        r"""Returns whether the model supports native structured output."""
camel/utils/__init__.py
CHANGED
@@ -44,6 +44,15 @@ from .commons import (
 from .constants import Constants
 from .deduplication import DeduplicationResult, deduplicate_internally
 from .filename import sanitize_filename
+from .langfuse import (
+    configure_langfuse,
+    get_current_agent_session_id,
+    get_langfuse_status,
+    is_langfuse_available,
+    observe,
+    update_current_observation,
+    update_langfuse_trace,
+)
 from .mcp import MCPServer
 from .response_format import get_pydantic_model, model_from_json_schema
 from .token_counting import (
@@ -97,4 +106,11 @@ __all__ = [
     "sanitize_filename",
     "browser_toolkit_save_auth_cookie",
     "run_async",
+    "configure_langfuse",
+    "is_langfuse_available",
+    "get_current_agent_session_id",
+    "update_langfuse_trace",
+    "observe",
+    "update_current_observation",
+    "get_langfuse_status",
 ]
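These helpers are re-exported at the package level, so the new Langfuse API is importable straight from `camel.utils`:

```python
# Names added to camel.utils.__all__ in this release.
from camel.utils import (
    configure_langfuse,
    get_langfuse_status,
    is_langfuse_available,
    observe,
    update_langfuse_trace,
)
```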
camel/utils/langfuse.py
ADDED
@@ -0,0 +1,258 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+import threading
+from typing import Any, Dict, List, Optional
+
+from camel.logger import get_logger
+from camel.utils import dependencies_required
+
+logger = get_logger(__name__)
+# Thread-local storage for agent session IDs
+_local = threading.local()
+
+# Global flag to track if Langfuse has been configured
+_langfuse_configured = False
+
+try:
+    from langfuse.decorators import langfuse_context
+
+    LANGFUSE_AVAILABLE = True
+except ImportError:
+    LANGFUSE_AVAILABLE = False
+
+
+@dependencies_required('langfuse')
+def configure_langfuse(
+    public_key: Optional[str] = None,
+    secret_key: Optional[str] = None,
+    host: Optional[str] = None,
+    debug: Optional[bool] = None,
+    enabled: Optional[bool] = None,
+):
+    r"""Configure Langfuse for CAMEL models.
+
+    Args:
+        public_key(Optional[str]): Langfuse public key. Can be set via LANGFUSE_PUBLIC_KEY.
+            (default: :obj:`None`)
+        secret_key(Optional[str]): Langfuse secret key. Can be set via LANGFUSE_SECRET_KEY.
+            (default: :obj:`None`)
+        host(Optional[str]): Langfuse host URL. Can be set via LANGFUSE_HOST.
+            (default: :obj:`https://cloud.langfuse.com`)
+        debug(Optional[bool]): Enable debug mode. Can be set via LANGFUSE_DEBUG.
+            (default: :obj:`None`)
+        enabled(Optional[bool]): Enable/disable tracing. Can be set via LANGFUSE_ENABLED.
+            (default: :obj:`None`)
+
+    Note:
+        This function configures the native langfuse_context which works with
+        @observe() decorators. Set enabled=False to disable all tracing.
+    """  # noqa: E501
+    global _langfuse_configured
+
+    # Get configuration from environment or parameters
+    public_key = public_key or os.environ.get("LANGFUSE_PUBLIC_KEY")
+    secret_key = secret_key or os.environ.get("LANGFUSE_SECRET_KEY")
+    host = host or os.environ.get(
+        "LANGFUSE_HOST", "https://cloud.langfuse.com"
+    )
+    debug = (
+        debug
+        if debug is not None
+        else os.environ.get("LANGFUSE_DEBUG", "False").lower() == "true"
+    )
+
+    # Handle enabled parameter
+    if enabled is None:
+        env_enabled_str = os.environ.get("LANGFUSE_ENABLED")
+        if env_enabled_str is not None:
+            enabled = env_enabled_str.lower() == "true"
+        else:
+            enabled = False  # Default to disabled
+
+    # If not enabled, don't configure anything and don't call langfuse function
+    if not enabled:
+        _langfuse_configured = False
+        logger.info("Langfuse tracing disabled for CAMEL models")
+
+    logger.debug(
+        f"Configuring Langfuse - enabled: {enabled}, "
+        f"public_key: {'***' + public_key[-4:] if public_key else None}, "
+        f"host: {host}, debug: {debug}"
+    )
+    if enabled and public_key and secret_key and LANGFUSE_AVAILABLE:
+        _langfuse_configured = True
+    else:
+        _langfuse_configured = False
+
+    try:
+        # Configure langfuse_context with native method
+        langfuse_context.configure(
+            public_key=public_key,
+            secret_key=secret_key,
+            host=host,
+            debug=debug,
+            enabled=True,  # Always True here since we checked enabled above
+        )
+
+        logger.info("Langfuse tracing enabled for CAMEL models")
+
+    except Exception as e:
+        logger.error(f"Failed to configure Langfuse: {e}")
+
+
+def is_langfuse_available() -> bool:
+    r"""Check if Langfuse is configured."""
+    return _langfuse_configured
+
+
+def set_current_agent_session_id(session_id: str) -> None:
+    r"""Set the session ID for the current agent in thread-local storage.
+
+    Args:
+        session_id(str): The session ID to set for the current agent.
+    """
+
+    _local.agent_session_id = session_id
+
+
+def get_current_agent_session_id() -> Optional[str]:
+    r"""Get the session ID for the current agent from thread-local storage.
+
+    Returns:
+        Optional[str]: The session ID for the current agent.
+    """
+    if is_langfuse_available():
+        return getattr(_local, 'agent_session_id', None)
+    return None
+
+
+def update_langfuse_trace(
+    session_id: Optional[str] = None,
+    user_id: Optional[str] = None,
+    metadata: Optional[Dict[str, Any]] = None,
+    tags: Optional[List[str]] = None,
+) -> bool:
+    r"""Update the current Langfuse trace with session ID and metadata.
+
+    Args:
+        session_id(Optional[str]): Optional session ID to use. If :obj:`None`
+            uses the current agent's session ID. (default: :obj:`None`)
+        user_id(Optional[str]): Optional user ID for the trace.
+            (default: :obj:`None`)
+        metadata(Optional[Dict[str, Any]]): Optional metadata dictionary.
+            (default: :obj:`None`)
+        tags(Optional[List[str]]): Optional list of tags.
+            (default: :obj:`None`)
+
+    Returns:
+        bool: True if update was successful, False otherwise.
+    """
+    if not is_langfuse_available():
+        return False
+
+    # Use provided session_id or get from thread-local storage
+    final_session_id = session_id or get_current_agent_session_id()
+
+    update_data: Dict[str, Any] = {}
+    if final_session_id:
+        update_data["session_id"] = final_session_id
+    if user_id:
+        update_data["user_id"] = user_id
+    if metadata:
+        update_data["metadata"] = metadata
+    if tags:
+        update_data["tags"] = tags
+
+    if update_data:
+        langfuse_context.update_current_trace(**update_data)
+        return True
+
+    return False
+
+
+def update_current_observation(
+    input: Optional[Dict[str, Any]] = None,
+    output: Optional[Dict[str, Any]] = None,
+    model: Optional[str] = None,
+    model_parameters: Optional[Dict[str, Any]] = None,
+    usage_details: Optional[Dict[str, Any]] = None,
+    **kwargs,
+) -> None:
+    r"""Update the current Langfuse observation with input, output,
+    model, model_parameters, and usage_details.
+
+    Args:
+        input(Optional[Dict[str, Any]]): Optional input dictionary.
+            (default: :obj:`None`)
+        output(Optional[Dict[str, Any]]): Optional output dictionary.
+            (default: :obj:`None`)
+        model(Optional[str]): Optional model name. (default: :obj:`None`)
+        model_parameters(Optional[Dict[str, Any]]): Optional model parameters
+            dictionary. (default: :obj:`None`)
+        usage_details(Optional[Dict[str, Any]]): Optional usage details
+            dictionary. (default: :obj:`None`)
+
+    Returns:
+        None
+    """
+    if not is_langfuse_available():
+        return
+
+    langfuse_context.update_current_observation(
+        input=input,
+        output=output,
+        model=model,
+        model_parameters=model_parameters,
+        usage_details=usage_details,
+        **kwargs,
+    )
+
+
+def get_langfuse_status() -> Dict[str, Any]:
+    r"""Get detailed Langfuse configuration status for debugging.
+
+    Returns:
+        Dict[str, Any]: Status information including configuration state.
+    """
+    env_enabled_str = os.environ.get("LANGFUSE_ENABLED")
+    env_enabled = (
+        env_enabled_str.lower() == "true" if env_enabled_str else None
+    )
+
+    status = {
+        "configured": _langfuse_configured,
+        "has_public_key": bool(os.environ.get("LANGFUSE_PUBLIC_KEY")),
+        "has_secret_key": bool(os.environ.get("LANGFUSE_SECRET_KEY")),
+        "env_enabled": env_enabled,
+        "host": os.environ.get("LANGFUSE_HOST", "https://cloud.langfuse.com"),
+        "debug": os.environ.get("LANGFUSE_DEBUG", "false").lower() == "true",
+        "current_session_id": get_current_agent_session_id(),
+    }
+
+    if _langfuse_configured:
+        try:
+            # Try to get some context information
+            status["langfuse_context_available"] = True
+        except Exception as e:
+            status["langfuse_context_error"] = str(e)
+
+    return status
+
+
+def observe(*args, **kwargs):
+    def decorator(func):
+        return func
+
+    return decorator