camel-ai 0.2.61__py3-none-any.whl → 0.2.64__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in the public registry, and is provided for informational purposes only.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +75 -16
- camel/agents/mcp_agent.py +10 -6
- camel/configs/__init__.py +3 -0
- camel/configs/crynux_config.py +94 -0
- camel/{data_collector → data_collectors}/alpaca_collector.py +1 -1
- camel/{data_collector → data_collectors}/sharegpt_collector.py +1 -1
- camel/interpreters/base.py +14 -1
- camel/interpreters/docker/Dockerfile +63 -7
- camel/interpreters/docker_interpreter.py +65 -7
- camel/interpreters/e2b_interpreter.py +23 -8
- camel/interpreters/internal_python_interpreter.py +30 -2
- camel/interpreters/ipython_interpreter.py +21 -3
- camel/interpreters/subprocess_interpreter.py +34 -2
- camel/memories/records.py +5 -3
- camel/models/__init__.py +2 -0
- camel/models/azure_openai_model.py +101 -25
- camel/models/cohere_model.py +65 -0
- camel/models/crynux_model.py +94 -0
- camel/models/deepseek_model.py +43 -1
- camel/models/gemini_model.py +50 -4
- camel/models/litellm_model.py +38 -0
- camel/models/mistral_model.py +66 -0
- camel/models/model_factory.py +10 -1
- camel/models/openai_compatible_model.py +81 -17
- camel/models/openai_model.py +86 -16
- camel/models/reka_model.py +69 -0
- camel/models/samba_model.py +69 -2
- camel/models/sglang_model.py +74 -2
- camel/models/watsonx_model.py +62 -0
- camel/retrievers/auto_retriever.py +20 -1
- camel/{runtime → runtimes}/daytona_runtime.py +1 -1
- camel/{runtime → runtimes}/docker_runtime.py +1 -1
- camel/{runtime → runtimes}/llm_guard_runtime.py +2 -2
- camel/{runtime → runtimes}/remote_http_runtime.py +1 -1
- camel/{runtime → runtimes}/ubuntu_docker_runtime.py +1 -1
- camel/societies/workforce/base.py +7 -3
- camel/societies/workforce/role_playing_worker.py +2 -2
- camel/societies/workforce/single_agent_worker.py +25 -1
- camel/societies/workforce/worker.py +5 -3
- camel/societies/workforce/workforce.py +409 -7
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/weaviate.py +714 -0
- camel/tasks/task.py +19 -10
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/code_execution.py +37 -8
- camel/toolkits/file_write_toolkit.py +4 -2
- camel/toolkits/mcp_toolkit.py +480 -733
- camel/toolkits/pptx_toolkit.py +777 -0
- camel/types/enums.py +56 -1
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +16 -0
- camel/utils/langfuse.py +258 -0
- camel/utils/mcp_client.py +1046 -0
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/METADATA +9 -1
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/RECORD +68 -62
- /camel/{data_collector → data_collectors}/__init__.py +0 -0
- /camel/{data_collector → data_collectors}/base.py +0 -0
- /camel/{runtime → runtimes}/__init__.py +0 -0
- /camel/{runtime → runtimes}/api.py +0 -0
- /camel/{runtime → runtimes}/base.py +0 -0
- /camel/{runtime → runtimes}/configs.py +0 -0
- /camel/{runtime → runtimes}/utils/__init__.py +0 -0
- /camel/{runtime → runtimes}/utils/function_risk_toolkit.py +0 -0
- /camel/{runtime → runtimes}/utils/ignore_risk_toolkit.py +0 -0
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.61.dist-info → camel_ai-0.2.64.dist-info}/licenses/LICENSE +0 -0
camel/types/enums.py
CHANGED
@@ -185,7 +185,7 @@ class ModelType(UnifiedModelType, Enum):
 
     # Gemini models
     GEMINI_2_5_FLASH_PREVIEW = "gemini-2.5-flash-preview-04-17"
-    GEMINI_2_5_PRO_PREVIEW = "gemini-2.5-pro-preview-05
+    GEMINI_2_5_PRO_PREVIEW = "gemini-2.5-pro-preview-06-05"
     GEMINI_2_0_FLASH = "gemini-2.0-flash"
     GEMINI_2_0_FLASH_EXP = "gemini-2.0-flash-exp"
     GEMINI_2_0_FLASH_THINKING = "gemini-2.0-flash-thinking-exp"

@@ -207,6 +207,7 @@ class ModelType(UnifiedModelType, Enum):
     MISTRAL_NEMO = "open-mistral-nemo"
     MISTRAL_PIXTRAL_12B = "pixtral-12b-2409"
     MISTRAL_MEDIUM_3 = "mistral-medium-latest"
+    MAGISTRAL_MEDIUM = "magistral-medium-2506"
 
     # Reka models
     REKA_CORE = "reka-core"

@@ -384,6 +385,25 @@ class ModelType(UnifiedModelType, Enum):
     )
     WATSONX_MISTRAL_LARGE = "mistralai/mistral-large"
 
+    # Crynux models
+    CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_1_5B = (
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B"
+    )
+    CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_7B = (
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B"
+    )
+    CRYNUX_DEEPSEEK_R1_DISTILL_LLAMA_8B = (
+        "deepseek-ai/DeepSeek-R1-Distill-Llama-8B"
+    )
+
+    CRYNUX_QWEN_3_4_B = "Qwen/Qwen3-4B"
+    CRYNUX_QWEN_3_8_B = "Qwen/Qwen3-8B"
+    CRYNUX_QWEN_2_5_7B = "Qwen/Qwen2.5-7B"
+    CRYNUX_QWEN_2_5_7B_INSTRUCT = "Qwen/Qwen2.5-7B-Instruct"
+
+    CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B = "NousResearch/Hermes-3-Llama-3.1-8B"
+    CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B = "NousResearch/Hermes-3-Llama-3.2-3B"
+
     def __str__(self):
         return self.value
 

@@ -612,6 +632,7 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.MISTRAL_8B,
             ModelType.MISTRAL_3B,
             ModelType.MISTRAL_MEDIUM_3,
+            ModelType.MAGISTRAL_MEDIUM,
         }
 
     @property

@@ -891,6 +912,20 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NOVITA_L31_70B_EURYALE_V2_2,
         }
 
+    @property
+    def is_crynux(self) -> bool:
+        return self in {
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_1_5B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_7B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_LLAMA_8B,
+            ModelType.CRYNUX_QWEN_3_4_B,
+            ModelType.CRYNUX_QWEN_3_8_B,
+            ModelType.CRYNUX_QWEN_2_5_7B,
+            ModelType.CRYNUX_QWEN_2_5_7B_INSTRUCT,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B,
+        }
+
     @property
     def is_aiml(self) -> bool:
         return self in {

@@ -991,6 +1026,15 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.NOVITA_GLM_4_32B_0414,
             ModelType.NOVITA_GLM_Z1_RUMINATION_32B_0414,
             ModelType.NOVITA_QWEN_2_5_7B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_1_5B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_QWEN_7B,
+            ModelType.CRYNUX_DEEPSEEK_R1_DISTILL_LLAMA_8B,
+            ModelType.CRYNUX_QWEN_3_4_B,
+            ModelType.CRYNUX_QWEN_3_8_B,
+            ModelType.CRYNUX_QWEN_2_5_7B,
+            ModelType.CRYNUX_QWEN_2_5_7B_INSTRUCT,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_1_8B,
+            ModelType.CRYNUX_NOUS_HERMES_3_LLAMA_3_2_3B,
         }:
             return 32_000
         elif self in {

@@ -1223,6 +1267,11 @@
             ModelType.TOGETHER_LLAMA_4_SCOUT,
         }:
             return 10_000_000
+        elif self in {
+            ModelType.MAGISTRAL_MEDIUM,
+        }:
+            return 40_000
+
         else:
             logger.warning(
                 f"Unknown model type {self}, set maximum token limit "

@@ -1449,6 +1498,7 @@ class ModelPlatformType(Enum):
     NETMIND = "netmind"
     NOVITA = "novita"
     WATSONX = "watsonx"
+    CRYNUX = "crynux"
 
     @classmethod
     def from_name(cls, name):

@@ -1624,6 +1674,11 @@
         r"""Returns whether this platform is WatsonX."""
         return self is ModelPlatformType.WATSONX
 
+    @property
+    def is_crynux(self) -> bool:
+        r"""Returns whether this platform is Crynux."""
+        return self is ModelPlatformType.CRYNUX
+
 
 class AudioModelType(Enum):
     TTS_1 = "tts-1"
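
The Crynux additions above are meant to flow through the existing model-creation path (note camel/models/crynux_model.py, camel/configs/crynux_config.py, and the camel/models/model_factory.py change in the file list). A minimal sketch, assuming the factory wires ModelPlatformType.CRYNUX to the new backend and that any Crynux credentials are supplied via environment variables (this diff does not show the variable name):

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType

    # Sketch only: model_platform/model_type follow the long-standing
    # ModelFactory.create() keyword arguments; the Crynux backend itself is
    # the new camel/models/crynux_model.py added in this release.
    crynux_model = ModelFactory.create(
        model_platform=ModelPlatformType.CRYNUX,
        model_type=ModelType.CRYNUX_QWEN_2_5_7B_INSTRUCT,
    )
    print(crynux_model.model_type.is_crynux)  # True, per the new property above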
camel/types/unified_model_type.py
CHANGED

@@ -163,6 +163,11 @@ class UnifiedModelType(str):
         r"""Returns whether the model is a WatsonX served model."""
         return True
 
+    @property
+    def is_crynux(self) -> bool:
+        r"""Returns whether the model is a Crynux served model."""
+        return True
+
     @property
     def support_native_structured_output(self) -> bool:
         r"""Returns whether the model supports native structured output."""
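
Taken together, the enum and unified-type changes keep the usual capability checks working for the new entries. A small illustration, assuming the 32_000 and 40_000 branches shown above sit inside ModelType.token_limit as in earlier releases:

    from camel.types import ModelType

    # Values follow directly from the hunks above.
    assert ModelType.CRYNUX_QWEN_2_5_7B.is_crynux
    assert ModelType.CRYNUX_QWEN_2_5_7B.token_limit == 32_000
    assert ModelType.MAGISTRAL_MEDIUM.token_limit == 40_000
    assert not ModelType.GEMINI_2_0_FLASH.is_crynux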
camel/utils/__init__.py
CHANGED
@@ -44,6 +44,15 @@ from .commons import (
 from .constants import Constants
 from .deduplication import DeduplicationResult, deduplicate_internally
 from .filename import sanitize_filename
+from .langfuse import (
+    configure_langfuse,
+    get_current_agent_session_id,
+    get_langfuse_status,
+    is_langfuse_available,
+    observe,
+    update_current_observation,
+    update_langfuse_trace,
+)
 from .mcp import MCPServer
 from .response_format import get_pydantic_model, model_from_json_schema
 from .token_counting import (

@@ -97,4 +106,11 @@ __all__ = [
     "sanitize_filename",
     "browser_toolkit_save_auth_cookie",
     "run_async",
+    "configure_langfuse",
+    "is_langfuse_available",
+    "get_current_agent_session_id",
+    "update_langfuse_trace",
+    "observe",
+    "update_current_observation",
+    "get_langfuse_status",
 ]
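
With these re-exports, callers can reach the tracing helpers straight from camel.utils. A short sketch, assuming the optional langfuse dependency is installed; the key values below are placeholders:

    import os

    from camel.utils import (
        configure_langfuse,
        get_langfuse_status,
        is_langfuse_available,
    )

    # Placeholder credentials for illustration only.
    os.environ["LANGFUSE_PUBLIC_KEY"] = "<your-public-key>"
    os.environ["LANGFUSE_SECRET_KEY"] = "<your-secret-key>"
    os.environ["LANGFUSE_ENABLED"] = "true"

    configure_langfuse()            # reads the LANGFUSE_* variables set above
    print(is_langfuse_available())  # True once keys are present and tracing is enabled
    print(get_langfuse_status())    # configuration snapshot for debugging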
camel/utils/langfuse.py
ADDED
@@ -0,0 +1,258 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+import threading
+from typing import Any, Dict, List, Optional
+
+from camel.logger import get_logger
+from camel.utils import dependencies_required
+
+logger = get_logger(__name__)
+# Thread-local storage for agent session IDs
+_local = threading.local()
+
+# Global flag to track if Langfuse has been configured
+_langfuse_configured = False
+
+try:
+    from langfuse.decorators import langfuse_context
+
+    LANGFUSE_AVAILABLE = True
+except ImportError:
+    LANGFUSE_AVAILABLE = False
+
+
+@dependencies_required('langfuse')
+def configure_langfuse(
+    public_key: Optional[str] = None,
+    secret_key: Optional[str] = None,
+    host: Optional[str] = None,
+    debug: Optional[bool] = None,
+    enabled: Optional[bool] = None,
+):
+    r"""Configure Langfuse for CAMEL models.
+
+    Args:
+        public_key(Optional[str]): Langfuse public key. Can be set via LANGFUSE_PUBLIC_KEY.
+            (default: :obj:`None`)
+        secret_key(Optional[str]): Langfuse secret key. Can be set via LANGFUSE_SECRET_KEY.
+            (default: :obj:`None`)
+        host(Optional[str]): Langfuse host URL. Can be set via LANGFUSE_HOST.
+            (default: :obj:`https://cloud.langfuse.com`)
+        debug(Optional[bool]): Enable debug mode. Can be set via LANGFUSE_DEBUG.
+            (default: :obj:`None`)
+        enabled(Optional[bool]): Enable/disable tracing. Can be set via LANGFUSE_ENABLED.
+            (default: :obj:`None`)
+
+    Note:
+        This function configures the native langfuse_context which works with
+        @observe() decorators. Set enabled=False to disable all tracing.
+    """  # noqa: E501
+    global _langfuse_configured
+
+    # Get configuration from environment or parameters
+    public_key = public_key or os.environ.get("LANGFUSE_PUBLIC_KEY")
+    secret_key = secret_key or os.environ.get("LANGFUSE_SECRET_KEY")
+    host = host or os.environ.get(
+        "LANGFUSE_HOST", "https://cloud.langfuse.com"
+    )
+    debug = (
+        debug
+        if debug is not None
+        else os.environ.get("LANGFUSE_DEBUG", "False").lower() == "true"
+    )
+
+    # Handle enabled parameter
+    if enabled is None:
+        env_enabled_str = os.environ.get("LANGFUSE_ENABLED")
+        if env_enabled_str is not None:
+            enabled = env_enabled_str.lower() == "true"
+        else:
+            enabled = False  # Default to disabled
+
+    # If not enabled, don't configure anything and don't call langfuse function
+    if not enabled:
+        _langfuse_configured = False
+        logger.info("Langfuse tracing disabled for CAMEL models")
+
+    logger.debug(
+        f"Configuring Langfuse - enabled: {enabled}, "
+        f"public_key: {'***' + public_key[-4:] if public_key else None}, "
+        f"host: {host}, debug: {debug}"
+    )
+    if enabled and public_key and secret_key and LANGFUSE_AVAILABLE:
+        _langfuse_configured = True
+    else:
+        _langfuse_configured = False
+
+    try:
+        # Configure langfuse_context with native method
+        langfuse_context.configure(
+            public_key=public_key,
+            secret_key=secret_key,
+            host=host,
+            debug=debug,
+            enabled=True,  # Always True here since we checked enabled above
+        )
+
+        logger.info("Langfuse tracing enabled for CAMEL models")
+
+    except Exception as e:
+        logger.error(f"Failed to configure Langfuse: {e}")
+
+
+def is_langfuse_available() -> bool:
+    r"""Check if Langfuse is configured."""
+    return _langfuse_configured
+
+
+def set_current_agent_session_id(session_id: str) -> None:
+    r"""Set the session ID for the current agent in thread-local storage.
+
+    Args:
+        session_id(str): The session ID to set for the current agent.
+    """
+
+    _local.agent_session_id = session_id
+
+
+def get_current_agent_session_id() -> Optional[str]:
+    r"""Get the session ID for the current agent from thread-local storage.
+
+    Returns:
+        Optional[str]: The session ID for the current agent.
+    """
+    if is_langfuse_available():
+        return getattr(_local, 'agent_session_id', None)
+    return None
+
+
+def update_langfuse_trace(
+    session_id: Optional[str] = None,
+    user_id: Optional[str] = None,
+    metadata: Optional[Dict[str, Any]] = None,
+    tags: Optional[List[str]] = None,
+) -> bool:
+    r"""Update the current Langfuse trace with session ID and metadata.
+
+    Args:
+        session_id(Optional[str]): Optional session ID to use. If :obj:`None`
+            uses the current agent's session ID. (default: :obj:`None`)
+        user_id(Optional[str]): Optional user ID for the trace.
+            (default: :obj:`None`)
+        metadata(Optional[Dict[str, Any]]): Optional metadata dictionary.
+            (default: :obj:`None`)
+        tags(Optional[List[str]]): Optional list of tags.
+            (default: :obj:`None`)
+
+    Returns:
+        bool: True if update was successful, False otherwise.
+    """
+    if not is_langfuse_available():
+        return False
+
+    # Use provided session_id or get from thread-local storage
+    final_session_id = session_id or get_current_agent_session_id()
+
+    update_data: Dict[str, Any] = {}
+    if final_session_id:
+        update_data["session_id"] = final_session_id
+    if user_id:
+        update_data["user_id"] = user_id
+    if metadata:
+        update_data["metadata"] = metadata
+    if tags:
+        update_data["tags"] = tags
+
+    if update_data:
+        langfuse_context.update_current_trace(**update_data)
+        return True
+
+    return False
+
+
+def update_current_observation(
+    input: Optional[Dict[str, Any]] = None,
+    output: Optional[Dict[str, Any]] = None,
+    model: Optional[str] = None,
+    model_parameters: Optional[Dict[str, Any]] = None,
+    usage_details: Optional[Dict[str, Any]] = None,
+    **kwargs,
+) -> None:
+    r"""Update the current Langfuse observation with input, output,
+    model, model_parameters, and usage_details.
+
+    Args:
+        input(Optional[Dict[str, Any]]): Optional input dictionary.
+            (default: :obj:`None`)
+        output(Optional[Dict[str, Any]]): Optional output dictionary.
+            (default: :obj:`None`)
+        model(Optional[str]): Optional model name. (default: :obj:`None`)
+        model_parameters(Optional[Dict[str, Any]]): Optional model parameters
+            dictionary. (default: :obj:`None`)
+        usage_details(Optional[Dict[str, Any]]): Optional usage details
+            dictionary. (default: :obj:`None`)
+
+    Returns:
+        None
+    """
+    if not is_langfuse_available():
+        return
+
+    langfuse_context.update_current_observation(
+        input=input,
+        output=output,
+        model=model,
+        model_parameters=model_parameters,
+        usage_details=usage_details,
+        **kwargs,
+    )
+
+
+def get_langfuse_status() -> Dict[str, Any]:
+    r"""Get detailed Langfuse configuration status for debugging.
+
+    Returns:
+        Dict[str, Any]: Status information including configuration state.
+    """
+    env_enabled_str = os.environ.get("LANGFUSE_ENABLED")
+    env_enabled = (
+        env_enabled_str.lower() == "true" if env_enabled_str else None
+    )
+
+    status = {
+        "configured": _langfuse_configured,
+        "has_public_key": bool(os.environ.get("LANGFUSE_PUBLIC_KEY")),
+        "has_secret_key": bool(os.environ.get("LANGFUSE_SECRET_KEY")),
+        "env_enabled": env_enabled,
+        "host": os.environ.get("LANGFUSE_HOST", "https://cloud.langfuse.com"),
+        "debug": os.environ.get("LANGFUSE_DEBUG", "false").lower() == "true",
+        "current_session_id": get_current_agent_session_id(),
+    }
+
+    if _langfuse_configured:
+        try:
+            # Try to get some context information
+            status["langfuse_context_available"] = True
+        except Exception as e:
+            status["langfuse_context_error"] = str(e)
+
+    return status
+
+
+def observe(*args, **kwargs):
+    def decorator(func):
+        return func
+
+    return decorator