lucidicai 3.0.0__tar.gz → 3.3.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lucidicai-3.0.0 → lucidicai-3.3.1}/PKG-INFO +1 -1
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/__init__.py +6 -1
- lucidicai-3.3.1/lucidicai/api/resources/evals.py +209 -0
- lucidicai-3.3.1/lucidicai/api/resources/prompt.py +140 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/client.py +25 -1
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/core/config.py +34 -7
- lucidicai-3.3.1/lucidicai/integrations/__init__.py +9 -0
- lucidicai-3.3.1/lucidicai/integrations/livekit.py +409 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/decorators.py +2 -2
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai.egg-info/PKG-INFO +1 -1
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai.egg-info/SOURCES.txt +3 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/setup.py +1 -1
- lucidicai-3.0.0/lucidicai/api/resources/prompt.py +0 -84
- {lucidicai-3.0.0 → lucidicai-3.3.1}/README.md +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/client.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/resources/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/resources/dataset.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/resources/event.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/resources/experiment.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/resources/feature_flag.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/api/resources/session.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/core/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/core/errors.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/core/types.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/context.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/error_boundary.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/event.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/event_builder.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/features/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/features/dataset.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/features/feature_flag.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/init.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/session.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/shutdown_manager.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/session_obj.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/context_bridge.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/context_capture_processor.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/extract.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/litellm_bridge.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/lucidic_exporter.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/openai_agents_instrumentor.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/openai_patch.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/openai_uninstrument.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/telemetry_init.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/telemetry_manager.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/utils/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/utils/model_pricing.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/telemetry/utils/provider.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/utils/__init__.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/utils/logger.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/utils/serialization.py +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai.egg-info/dependency_links.txt +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai.egg-info/requires.txt +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai.egg-info/top_level.txt +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/setup.cfg +0 -0
- {lucidicai-3.0.0 → lucidicai-3.3.1}/tests/test_event_creation.py +0 -0

{lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/__init__.py

@@ -34,8 +34,11 @@ from .core.errors import (
     FeatureFlagError,
 )

+# Integrations
+from .integrations.livekit import setup_livekit
+
 # Version
-__version__ = "3.0.0"
+__version__ = "3.3.1"

 # All exports
 __all__ = [
@@ -50,6 +53,8 @@ __all__ = [
     "InvalidOperationError",
     "PromptError",
     "FeatureFlagError",
+    # Integrations
+    "setup_livekit",
     # Version
     "__version__",
 ]

lucidicai-3.3.1/lucidicai/api/resources/evals.py (new file)

@@ -0,0 +1,209 @@
+"""Evals resource API operations."""
+import logging
+import threading
+from typing import Any, Dict, Optional, Union
+
+from ..client import HttpClient
+
+logger = logging.getLogger("Lucidic")
+
+
+def _truncate_id(id_str: Optional[str]) -> str:
+    """Truncate ID for logging."""
+    if not id_str:
+        return "None"
+    return f"{id_str[:8]}..." if len(id_str) > 8 else id_str
+
+
+def _infer_result_type(result: Any) -> str:
+    """Infer result type from Python value.
+
+    Note: bool must be checked first because bool is a subclass of int in Python.
+
+    Args:
+        result: The evaluation result value.
+
+    Returns:
+        The result type string: "boolean", "number", or "string".
+
+    Raises:
+        ValueError: If result is not a supported type.
+    """
+    if isinstance(result, bool):
+        return "boolean"
+    elif isinstance(result, (int, float)):
+        return "number"
+    elif isinstance(result, str):
+        return "string"
+    else:
+        raise ValueError(
+            f"Unsupported result type: {type(result).__name__}. "
+            "Must be bool, int, float, or str."
+        )
+
+
+def _validate_result_type(result: Any, result_type: str) -> bool:
+    """Validate that result matches the specified result_type.
+
+    Args:
+        result: The evaluation result value.
+        result_type: The expected type ("boolean", "number", "string").
+
+    Returns:
+        True if the result matches the type, False otherwise.
+    """
+    if result_type == "boolean":
+        return isinstance(result, bool)
+    elif result_type == "number":
+        # Check for bool first since bool is subclass of int
+        return isinstance(result, (int, float)) and not isinstance(result, bool)
+    elif result_type == "string":
+        return isinstance(result, str)
+    return False
+
+
+class EvalsResource:
+    """Handle evaluation-related API operations."""
+
+    def __init__(self, http: HttpClient, production: bool = False):
+        """Initialize evals resource.
+
+        Args:
+            http: HTTP client instance
+            production: Whether to suppress errors in production mode
+        """
+        self.http = http
+        self._production = production
+
+    def emit(
+        self,
+        result: Union[bool, int, float, str],
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        result_type: Optional[str] = None,
+        session_id: Optional[str] = None,
+    ) -> None:
+        """Fire-and-forget evaluation submission that returns instantly.
+
+        This function returns immediately while the actual evaluation
+        submission happens in a background thread. Perfect for non-blocking
+        evaluation logging.
+
+        Args:
+            result: The evaluation result. Can be bool, int, float, or str.
+            name: Optional name for the evaluation. If not provided, the backend
+                will generate a default name based on the result type.
+            description: Optional description of the evaluation.
+            result_type: Optional explicit result type ("boolean", "number", "string").
+                If not provided, it will be inferred from the result value.
+            session_id: Optional session ID. If not provided, uses the current
+                session from context.
+
+        Example:
+            # Basic usage - type inferred
+            client.evals.emit(result=True)
+            client.evals.emit(result=0.95)
+            client.evals.emit(result="excellent")
+
+            # With name and description
+            client.evals.emit(
+                result=True,
+                name="task_completed",
+                description="User task was successful"
+            )
+
+            # Explicit session_id
+            client.evals.emit(result=0.87, name="accuracy_score", session_id="abc-123")
+        """
+        from ...sdk.context import current_session_id
+
+        # Capture session from context if not provided
+        captured_session_id = session_id
+        if not captured_session_id:
+            captured_session_id = current_session_id.get(None)
+
+        if not captured_session_id:
+            logger.debug("[EvalsResource] No active session for emit()")
+            return
+
+        # Infer or validate result_type
+        try:
+            if result_type is None:
+                inferred_type = _infer_result_type(result)
+            else:
+                # Validate that result matches the explicit type
+                if not _validate_result_type(result, result_type):
+                    error_msg = (
+                        f"Result type mismatch: result is {type(result).__name__} "
+                        f"but result_type is '{result_type}'"
+                    )
+                    if self._production:
+                        logger.error(f"[EvalsResource] {error_msg}")
+                        return
+                    else:
+                        raise ValueError(error_msg)
+                inferred_type = result_type
+        except ValueError as e:
+            if self._production:
+                logger.error(f"[EvalsResource] {e}")
+                return
+            else:
+                raise
+
+        # Capture all data for background thread
+        captured_result = result
+        captured_name = name
+        captured_description = description
+        captured_type = inferred_type
+
+        def _background_emit():
+            try:
+                params: Dict[str, Any] = {
+                    "session_id": captured_session_id,
+                    "result": captured_result,
+                    "result_type": captured_type,
+                }
+                if captured_name is not None:
+                    params["name"] = captured_name
+                if captured_description is not None:
+                    params["description"] = captured_description
+
+                self._create_eval(params)
+            except Exception as e:
+                logger.debug(f"[EvalsResource] Background emit() failed: {e}")
+
+        # Start background thread
+        thread = threading.Thread(target=_background_emit, daemon=True)
+        thread.start()
+
+    def _create_eval(self, params: Dict[str, Any]) -> Dict[str, Any]:
+        """Send evaluation to backend API.
+
+        Args:
+            params: Evaluation parameters including:
+                - session_id: Session ID
+                - result: Evaluation result value
+                - result_type: Type of result ("boolean", "number", "string")
+                - name: Optional evaluation name
+                - description: Optional description
+
+        Returns:
+            API response (typically empty for 201 Created)
+        """
+        session_id = params.get("session_id")
+        name = params.get("name")
+        result_type = params.get("result_type")
+        logger.debug(
+            f"[Evals] _create_eval() called - "
+            f"session_id={_truncate_id(session_id)}, name={name!r}, "
+            f"result_type={result_type!r}"
+        )
+
+        response = self.http.post("sdk/evals", params)
+
+        logger.debug(
+            f"[Evals] _create_eval() response - "
+            f"session_id={_truncate_id(session_id)}, "
+            f"response_keys={list(response.keys()) if response else 'None'}"
+        )
+        return response
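
For orientation, here is a minimal usage sketch of the new evals resource, assembled from the docstring examples in the diff above; the credentials and session ID are placeholders, and type inference follows `_infer_result_type` (bool → "boolean", int/float → "number", str → "string").

```python
from lucidicai import LucidicAI

# Placeholder credentials and session ID - substitute real values.
client = LucidicAI(api_key="...", agent_id="...")

# Fire-and-forget: emit() returns immediately and posts to "sdk/evals" in a daemon thread.
client.evals.emit(result=True, name="task_completed", session_id="abc-123")
client.evals.emit(result=0.87, name="accuracy_score", session_id="abc-123")

# An explicit result_type must match the value; a mismatch raises ValueError
# (or is logged and swallowed when the client runs in production mode).
client.evals.emit(result="excellent", result_type="string", name="quality", session_id="abc-123")
```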

lucidicai-3.3.1/lucidicai/api/resources/prompt.py (new file)

@@ -0,0 +1,140 @@
+"""Prompt resource API operations."""
+import logging
+import time
+from typing import Any, Dict, Optional, Tuple, TYPE_CHECKING
+
+from ..client import HttpClient
+
+if TYPE_CHECKING:
+    from ...core.config import SDKConfig
+
+logger = logging.getLogger("Lucidic")
+
+
+class PromptResource:
+    """Handle prompt-related API operations."""
+
+    def __init__(self, http: HttpClient, config: "SDKConfig", production: bool = False):
+        """Initialize prompt resource.
+
+        Args:
+            http: HTTP client instance
+            config: SDK configuration
+            production: Whether to suppress errors in production mode
+        """
+        self.http = http
+        self._config = config
+        self._production = production
+        self._cache: Dict[Tuple[str, str], Dict[str, Any]] = {}
+
+    def _is_cache_valid(self, cache_key: Tuple[str, str], cache_ttl: int) -> bool:
+        """Check if a cached prompt is still valid.
+
+        Args:
+            cache_key: The (prompt_name, label) tuple
+            cache_ttl: Cache TTL in seconds (-1 = indefinite, 0 = no cache)
+
+        Returns:
+            True if cache is valid, False otherwise
+        """
+        if cache_ttl == 0:
+            return False
+        if cache_key not in self._cache:
+            return False
+        if cache_ttl == -1:
+            return True
+        cached = self._cache[cache_key]
+        return (time.time() - cached["timestamp"]) < cache_ttl
+
+    def get(
+        self,
+        prompt_name: str,
+        variables: Optional[Dict[str, Any]] = None,
+        label: str = "production",
+        cache_ttl: int = 0,
+    ) -> str:
+        """Get a prompt from the prompt database.
+
+        Args:
+            prompt_name: Name of the prompt.
+            variables: Variables to interpolate into the prompt.
+            label: Prompt version label (default: "production").
+            cache_ttl: Cache TTL in seconds. 0 = no cache, -1 = cache indefinitely,
+                positive value = seconds before refetching.
+
+        Returns:
+            The prompt content with variables interpolated.
+        """
+        try:
+            cache_key = (prompt_name, label)
+
+            # Check cache
+            if self._is_cache_valid(cache_key, cache_ttl):
+                prompt = self._cache[cache_key]["content"]
+            else:
+                response = self.http.get(
+                    "getprompt",
+                    {"prompt_name": prompt_name, "label": label, "agent_id": self._config.agent_id},
+                )
+                prompt = response.get("prompt_content", "")
+
+                # Store in cache if caching is enabled
+                if cache_ttl != 0:
+                    self._cache[cache_key] = {
+                        "content": prompt,
+                        "timestamp": time.time(),
+                    }
+
+            # Replace variables
+            if variables:
+                for key, value in variables.items():
+                    prompt = prompt.replace(f"{{{{{key}}}}}", str(value))
+
+            return prompt
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to get prompt: {e}")
+                return ""
+            raise
+
+    async def aget(
+        self,
+        prompt_name: str,
+        variables: Optional[Dict[str, Any]] = None,
+        label: str = "production",
+        cache_ttl: int = 0,
+    ) -> str:
+        """Get a prompt from the prompt database (asynchronous).
+
+        See get() for full documentation.
+        """
+        try:
+            cache_key = (prompt_name, label)
+
+            # Check cache
+            if self._is_cache_valid(cache_key, cache_ttl):
+                prompt = self._cache[cache_key]["content"]
+            else:
+                response = await self.http.aget(
+                    "getprompt",
+                    {"prompt_name": prompt_name, "label": label, "agent_id": self._config.agent_id},
+                )
+                prompt = response.get("prompt_content", "")
+
+                # Store in cache if caching is enabled
+                if cache_ttl != 0:
+                    self._cache[cache_key] = {
+                        "content": prompt,
+                        "timestamp": time.time(),
+                    }
+
+            if variables:
+                for key, value in variables.items():
+                    prompt = prompt.replace(f"{{{{{key}}}}}", str(value))
+
+            return prompt
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to get prompt: {e}")
+                return ""
+            raise
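
The rewritten PromptResource adds per-(name, label) caching and now sends the agent_id with each request. Below is a minimal usage sketch, assuming the client exposes the resource as `client.prompts` (mirroring the `evals` property added in client.py below); the prompt name and variables are hypothetical. Note the placeholder syntax: this version interpolates double-brace `{{name}}` placeholders, whereas the removed 3.0.0 implementation (end of this diff) replaced single-brace `{name}` placeholders.

```python
# Hypothetical prompt name and variables, shown for illustration only.
prompt = client.prompts.get(
    "welcome_message",
    variables={"user": "Ada"},  # fills a {{user}} placeholder in the stored template
    label="production",
    cache_ttl=300,              # 0 = no cache (default), -1 = cache indefinitely, >0 = seconds
)
```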

{lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/client.py

@@ -30,6 +30,7 @@ from .api.resources.dataset import DatasetResource
 from .api.resources.experiment import ExperimentResource
 from .api.resources.prompt import PromptResource
 from .api.resources.feature_flag import FeatureFlagResource
+from .api.resources.evals import EvalsResource
 from .core.config import SDKConfig
 from .core.errors import LucidicError
 from .session_obj import Session
@@ -63,6 +64,8 @@ class LucidicAI:
         auto_end: Whether sessions auto-end on context exit or process shutdown.
         production: If True, suppress SDK errors. If None, checks LUCIDIC_PRODUCTION env var.
         region: Deployment region ("us", "india"). Falls back to LUCIDIC_REGION env var.
+        base_url: Custom base URL for API requests. Takes precedence over region.
+            Falls back to LUCIDIC_BASE_URL env var.
         **kwargs: Additional configuration options passed to SDKConfig.

     Raises:
@@ -92,6 +95,13 @@ class LucidicAI:
             agent_id="...",
             region="india"
         )
+
+        # Custom base URL (e.g., self-hosted deployment)
+        client = LucidicAI(
+            api_key="...",
+            agent_id="...",
+            base_url="https://custom.example.com/api"
+        )
     """

     def __init__(
@@ -102,6 +112,7 @@ class LucidicAI:
         auto_end: bool = True,
         production: Optional[bool] = None,
         region: Optional[str] = None,
+        base_url: Optional[str] = None,
         **kwargs,
     ):
         # Generate unique client ID for telemetry routing
@@ -118,6 +129,7 @@ class LucidicAI:
             agent_id=agent_id,
             auto_end=auto_end,
             region=region,
+            base_url=base_url,
             **kwargs,
         )

@@ -143,8 +155,9 @@ class LucidicAI:
             "events": EventResource(self._http, self._production),
             "datasets": DatasetResource(self._http, self._config.agent_id, self._production),
             "experiments": ExperimentResource(self._http, self._config.agent_id, self._production),
-            "prompts": PromptResource(self._http, self._production),
+            "prompts": PromptResource(self._http, self._config, self._production),
             "feature_flags": FeatureFlagResource(self._http, self._config.agent_id, self._production),
+            "evals": EvalsResource(self._http, self._production),
         }

         # Active sessions for this client
@@ -271,6 +284,17 @@ class LucidicAI:
         """
         return self._resources["datasets"]

+    @property
+    def evals(self) -> EvalsResource:
+        """Access evals resource for submitting evaluation results.
+
+        Example:
+            client.evals.emit(result=True, name="task_success")
+            client.evals.emit(result=0.95, name="accuracy")
+            client.evals.emit(result="excellent", name="quality")
+        """
+        return self._resources["evals"]
+
     # ==================== Decorators ====================

     def event(

{lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/core/config.py

@@ -53,16 +53,20 @@ class NetworkConfig:
     connection_pool_maxsize: int = 100

     @classmethod
-    def from_env(cls, region: Optional[str] = None, debug: bool = False) -> 'NetworkConfig':
+    def from_env(cls, region: Optional[str] = None, base_url: Optional[str] = None, debug: bool = False) -> 'NetworkConfig':
         """Load network configuration from environment variables.

-        Priority: debug >
+        Priority: debug > base_url argument > LUCIDIC_BASE_URL > region argument > LUCIDIC_REGION > default

         Args:
             region: Region string override (e.g., "us", "india")
-
+            base_url: Custom base URL override (takes precedence over region)
+            debug: If True, use localhost URL regardless of other settings
         """
-
+        import logging
+        logger = logging.getLogger("Lucidic")
+
+        # If debug mode, use localhost (highest priority)
         if debug:
             return cls(
                 base_url=DEBUG_URL,
@@ -74,7 +78,28 @@ class NetworkConfig:
                 connection_pool_maxsize=int(os.getenv("LUCIDIC_CONNECTION_POOL_MAXSIZE", "100"))
             )

-        # Resolve
+        # Resolve base_url: argument > env var
+        resolved_base_url = base_url or os.getenv("LUCIDIC_BASE_URL")
+
+        if resolved_base_url:
+            # base_url takes precedence over region
+            region_str = region or os.getenv("LUCIDIC_REGION")
+            if region_str:
+                logger.warning(
+                    f"[LucidicAI] Both base_url and region specified. "
+                    f"Using base_url '{resolved_base_url}', ignoring region '{region_str}'."
+                )
+            return cls(
+                base_url=resolved_base_url,
+                region=None,  # Custom deployment, no region
+                timeout=int(os.getenv("LUCIDIC_TIMEOUT", "30")),
+                max_retries=int(os.getenv("LUCIDIC_MAX_RETRIES", "3")),
+                backoff_factor=float(os.getenv("LUCIDIC_BACKOFF_FACTOR", "0.5")),
+                connection_pool_size=int(os.getenv("LUCIDIC_CONNECTION_POOL_SIZE", "20")),
+                connection_pool_maxsize=int(os.getenv("LUCIDIC_CONNECTION_POOL_MAXSIZE", "100"))
+            )
+
+        # Fall back to region-based URL resolution
         region_str = region or os.getenv("LUCIDIC_REGION")
         resolved_region = Region.from_string(region_str) if region_str else DEFAULT_REGION

@@ -147,11 +172,13 @@ class SDKConfig:
     debug: bool = False

     @classmethod
-    def from_env(cls, region: Optional[str] = None, **overrides) -> 'SDKConfig':
+    def from_env(cls, region: Optional[str] = None, base_url: Optional[str] = None, **overrides) -> 'SDKConfig':
         """Create configuration from environment variables with optional overrides.

         Args:
             region: Region string (e.g., "us", "india"). Priority: arg > env var > default
+            base_url: Custom base URL override. Takes precedence over region.
+                Falls back to LUCIDIC_BASE_URL env var.
             **overrides: Additional configuration overrides
         """
         from dotenv import load_dotenv
@@ -165,7 +192,7 @@ class SDKConfig:
             auto_end=os.getenv("LUCIDIC_AUTO_END", "true").lower() == "true",
             production_monitoring=False,
             blob_threshold=int(os.getenv("LUCIDIC_BLOB_THRESHOLD", "65536")),
-            network=NetworkConfig.from_env(region=region, debug=debug),
+            network=NetworkConfig.from_env(region=region, base_url=base_url, debug=debug),
             error_handling=ErrorHandlingConfig.from_env(),
             telemetry=TelemetryConfig.from_env(),
             environment=Environment.DEBUG if debug else Environment.PRODUCTION,
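
In short, NetworkConfig.from_env now resolves the endpoint as debug > base_url argument > LUCIDIC_BASE_URL > region argument > LUCIDIC_REGION > default. A hedged sketch of the environment-variable path follows, reusing the placeholder URL from the client.py docstring above.

```python
import os

# Placeholder URL; when both a base URL and a region are set, the base URL wins
# and the SDK logs a warning that the region is being ignored.
os.environ["LUCIDIC_BASE_URL"] = "https://custom.example.com/api"
os.environ["LUCIDIC_REGION"] = "india"

from lucidicai import LucidicAI

client = LucidicAI(api_key="...", agent_id="...")  # resolves to LUCIDIC_BASE_URL
```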

lucidicai-3.3.1/lucidicai/integrations/__init__.py (new file)

@@ -0,0 +1,9 @@
+"""Third-party integrations for Lucidic AI SDK.
+
+This module provides integrations with external platforms and frameworks
+that have their own OpenTelemetry instrumentation.
+"""
+
+from .livekit import setup_livekit, LucidicLiveKitExporter
+
+__all__ = ["setup_livekit", "LucidicLiveKitExporter"]

lucidicai-3.3.1/lucidicai/integrations/livekit.py (new file)

@@ -0,0 +1,409 @@
+"""LiveKit voice agent integration for Lucidic AI SDK.
+
+This module provides OpenTelemetry span export for LiveKit voice agents,
+converting LiveKit's internal spans into Lucidic events with full metadata
+support including latency diagnostics, EOU detection data, and tool context.
+
+Example:
+    from lucidicai import LucidicAI
+    from lucidicai.integrations.livekit import setup_livekit
+    from livekit.agents import AgentServer, JobContext, AgentSession, cli
+    from livekit.agents.telemetry import set_tracer_provider
+
+    client = LucidicAI(api_key="...", agent_id="...")
+    server = AgentServer()
+
+    @server.rtc_session()
+    async def entrypoint(ctx: JobContext):
+        trace_provider = setup_livekit(
+            client=client,
+            session_id=ctx.room.name,
+        )
+        set_tracer_provider(trace_provider)
+        # ... rest of agent setup
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
+
+from opentelemetry import context as otel_context
+from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
+from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+from opentelemetry.trace import Span
+from opentelemetry.util.types import AttributeValue
+
+if TYPE_CHECKING:
+    from opentelemetry.sdk.trace import TracerProvider
+    from ..client import LucidicAI
+
+logger = logging.getLogger("lucidicai.integrations.livekit")
+
+
+class LucidicLiveKitExporter(SpanExporter):
+    """Custom OpenTelemetry exporter for LiveKit voice agent spans.
+
+    Converts LiveKit spans (llm_node, function_tool) into Lucidic events
+    with full metadata including latency diagnostics, EOU detection,
+    and tool context.
+    """
+
+    # livekit span names we care about
+    LIVEKIT_LLM_SPANS = {"llm_node", "function_tool"}
+
+    def __init__(self, client: "LucidicAI", session_id: str):
+        """Initialize the exporter.
+
+        Args:
+            client: Initialized LucidicAI client instance
+            session_id: Session ID for all events created by this exporter
+        """
+        self._client = client
+        self._session_id = session_id
+        self._shutdown = False
+
+    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
+        """Export spans to Lucidic as events.
+
+        Args:
+            spans: Sequence of completed OpenTelemetry spans
+
+        Returns:
+            SpanExportResult indicating success or failure
+        """
+        if self._shutdown:
+            return SpanExportResult.SUCCESS
+
+        try:
+            for span in spans:
+                if self._is_livekit_llm_span(span):
+                    self._process_span(span)
+            return SpanExportResult.SUCCESS
+        except Exception as e:
+            logger.error(f"[LiveKit] Failed to export spans: {e}")
+            return SpanExportResult.FAILURE
+
+    def _is_livekit_llm_span(self, span: ReadableSpan) -> bool:
+        """Check if span is a LiveKit LLM-related span we should process."""
+        return span.name in self.LIVEKIT_LLM_SPANS
+
+    def _process_span(self, span: ReadableSpan) -> None:
+        """Process a single LiveKit span and create corresponding Lucidic event."""
+        try:
+            if span.name == "llm_node":
+                event_data = self._convert_llm_span(span)
+                self._client.events.create(**event_data)
+                logger.debug(f"[LiveKit] Created llm_generation event for span {span.name}")
+            elif span.name == "function_tool":
+                event_data = self._convert_function_span(span)
+                self._client.events.create(**event_data)
+                logger.debug(f"[LiveKit] Created function_call event for span {span.name}")
+        except Exception as e:
+            logger.error(f"[LiveKit] Failed to process span {span.name}: {e}")
+
+    def _convert_llm_span(self, span: ReadableSpan) -> Dict[str, Any]:
+        """Convert an llm_node span to llm_generation event data."""
+        attrs = dict(span.attributes or {})
+
+        # extract messages from chat context
+        messages = self._parse_chat_context(attrs.get("lk.chat_ctx"))
+
+        # extract output text
+        output = attrs.get("lk.response.text", "")
+
+        # build metadata with diagnostics
+        metadata = self._build_metadata(attrs)
+
+        # calculate duration
+        duration = None
+        if span.start_time and span.end_time:
+            duration = (span.end_time - span.start_time) / 1e9
+
+        # extract timing for occurred_at
+        occurred_at = None
+        if span.start_time:
+            occurred_at = datetime.fromtimestamp(
+                span.start_time / 1e9, tz=timezone.utc
+            ).isoformat()
+
+        return {
+            "type": "llm_generation",
+            "session_id": self._session_id,
+            "model": attrs.get("gen_ai.request.model", "unknown"),
+            "messages": messages,
+            "output": output,
+            "input_tokens": attrs.get("gen_ai.usage.input_tokens"),
+            "output_tokens": attrs.get("gen_ai.usage.output_tokens"),
+            "duration": duration,
+            "occurred_at": occurred_at,
+            "metadata": metadata,
+        }
+
+    def _convert_function_span(self, span: ReadableSpan) -> Dict[str, Any]:
+        """Convert a function_tool span to function_call event data."""
+        attrs = dict(span.attributes or {})
+
+        # calculate duration
+        duration = None
+        if span.start_time and span.end_time:
+            duration = (span.end_time - span.start_time) / 1e9
+
+        # extract timing for occurred_at
+        occurred_at = None
+        if span.start_time:
+            occurred_at = datetime.fromtimestamp(
+                span.start_time / 1e9, tz=timezone.utc
+            ).isoformat()
+
+        # build metadata (subset for function calls)
+        metadata = {
+            "job_id": attrs.get("lk.job_id"),
+            "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
+            "agent_name": attrs.get("lk.agent_name"),
+            "generation_id": attrs.get("lk.generation_id"),
+            "tool_call_id": attrs.get("lk.function_tool.id"),
+        }
+        metadata = self._clean_none_values(metadata)
+
+        return {
+            "type": "function_call",
+            "session_id": self._session_id,
+            "function_name": attrs.get("lk.function_tool.name", "unknown"),
+            "arguments": attrs.get("lk.function_tool.arguments"),
+            "return_value": attrs.get("lk.function_tool.output"),
+            "duration": duration,
+            "occurred_at": occurred_at,
+            "metadata": metadata,
+        }
+
+    def _parse_chat_context(self, chat_ctx_json: Optional[str]) -> List[Dict[str, str]]:
+        """Parse LiveKit's lk.chat_ctx JSON into Lucidic messages format.
+
+        Args:
+            chat_ctx_json: JSON string of LiveKit chat context
+
+        Returns:
+            List of message dicts with role and content keys
+        """
+        if not chat_ctx_json:
+            return []
+
+        try:
+            chat_ctx = json.loads(chat_ctx_json)
+            messages = []
+
+            # livekit chat context has 'items' list
+            items = chat_ctx.get("items", [])
+            for item in items:
+                if item.get("type") == "message":
+                    role = item.get("role", "user")
+                    # livekit stores content in various ways
+                    content = item.get("text_content", "")
+                    if not content:
+                        # try content array
+                        content_list = item.get("content", [])
+                        if isinstance(content_list, list):
+                            text_parts = []
+                            for c in content_list:
+                                if isinstance(c, str):
+                                    text_parts.append(c)
+                                elif isinstance(c, dict) and c.get("type") == "text":
+                                    text_parts.append(c.get("text", ""))
+                            content = " ".join(text_parts)
+                        elif isinstance(content_list, str):
+                            content = content_list
+
+                    messages.append({"role": role, "content": content})
+
+            return messages
+        except (json.JSONDecodeError, TypeError) as e:
+            logger.debug(f"[LiveKit] Failed to parse chat context: {e}")
+            return []
+
+    def _build_metadata(self, attrs: Dict[str, Any]) -> Dict[str, Any]:
+        """Build metadata dict with diagnostics from span attributes.
+
+        Args:
+            attrs: Span attributes dictionary
+
+        Returns:
+            Cleaned metadata dict with nested diagnostics
+        """
+        metadata = {
+            # identity & tracking
+            "job_id": attrs.get("lk.job_id"),
+            "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
+            "agent_name": attrs.get("lk.agent_name"),
+            "participant_id": attrs.get("lk.participant_id"),
+            "generation_id": attrs.get("lk.generation_id"),
+            "parent_generation_id": attrs.get("lk.parent_generation_id"),
+            "speech_id": attrs.get("lk.speech_id"),
+            "interrupted": attrs.get("lk.interrupted"),
+            # diagnostics (nested)
+            "diagnostics": {
+                "latency": {
+                    "llm_ttft": attrs.get("llm_node_ttft"),
+                    "tts_ttfb": attrs.get("tts_node_ttfb"),
+                    "e2e_latency": attrs.get("e2e_latency"),
+                    "transcription_delay": attrs.get("lk.transcription_delay"),
+                    "end_of_turn_delay": attrs.get("lk.end_of_turn_delay"),
+                },
+                "eou": {
+                    "probability": attrs.get("lk.eou.probability"),
+                    "threshold": attrs.get("lk.eou.unlikely_threshold"),
+                    "delay": attrs.get("lk.eou.endpointing_delay"),
+                    "language": attrs.get("lk.eou.language"),
+                },
+                "tools": {
+                    "function_tools": attrs.get("lk.function_tools"),
+                    "provider_tools": attrs.get("lk.provider_tools"),
+                    "tool_sets": attrs.get("lk.tool_sets"),
+                },
+                "session_options": attrs.get("lk.session_options"),
+            },
+        }
+        return self._clean_none_values(metadata)
+
+    def _clean_none_values(self, d: Dict[str, Any]) -> Dict[str, Any]:
+        """Recursively remove None values and empty dicts.
+
+        Args:
+            d: Dictionary to clean
+
+        Returns:
+            Cleaned dictionary with no None values or empty nested dicts
+        """
+        cleaned = {}
+        for k, v in d.items():
+            if isinstance(v, dict):
+                nested = self._clean_none_values(v)
+                if nested:  # only include non-empty dicts
+                    cleaned[k] = nested
+            elif v is not None:
+                cleaned[k] = v
+        return cleaned
+
+    def shutdown(self) -> None:
+        """Shutdown the exporter."""
+        self._shutdown = True
+        logger.debug("[LiveKit] Exporter shutdown")
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush pending exports.
+
+        Returns:
+            True (events are created synchronously)
+        """
+        return True
+
+
+class _MetadataSpanProcessor(SpanProcessor):
+    """Span processor that adds metadata to all spans.
+
+    This allows users to attach custom metadata (e.g., customer_id, environment)
+    that will be included on every span exported.
+    """
+
+    def __init__(self, metadata: Dict[str, AttributeValue]):
+        """Initialize with metadata to attach.
+
+        Args:
+            metadata: Dictionary of metadata key-value pairs
+        """
+        self._metadata = metadata
+
+    def on_start(
+        self, span: Span, parent_context: Optional[otel_context.Context] = None
+    ) -> None:
+        """Called when a span is started - attach metadata."""
+        span.set_attributes(self._metadata)
+
+    def on_end(self, span: ReadableSpan) -> None:
+        """Called when a span ends - no action needed."""
+        pass
+
+    def shutdown(self) -> None:
+        """Shutdown the processor."""
+        pass
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush - no buffering in this processor."""
+        return True
+
+
+def setup_livekit(
+    client: "LucidicAI",
+    session_id: str,
+    session_name: Optional[str] = None,
+    metadata: Optional[Dict[str, AttributeValue]] = None,
+) -> "TracerProvider":
+    """Set up Lucidic tracing for LiveKit voice agents.
+
+    Automatically creates a Lucidic session and configures OpenTelemetry
+    to export LiveKit spans as Lucidic events.
+
+    Args:
+        client: Initialized LucidicAI client instance
+        session_id: Session ID for all events (typically ctx.room.name)
+        session_name: Optional human-readable session name
+        metadata: Optional metadata to attach to all spans (e.g., customer_id)
+
+    Returns:
+        TracerProvider to pass to livekit's set_tracer_provider()
+
+    Example:
+        from lucidicai import LucidicAI
+        from lucidicai.integrations.livekit import setup_livekit
+        from livekit.agents import AgentServer, JobContext, AgentSession, cli
+        from livekit.agents.telemetry import set_tracer_provider
+
+        client = LucidicAI(api_key="...", agent_id="...")
+        server = AgentServer()
+
+        @server.rtc_session()
+        async def entrypoint(ctx: JobContext):
+            trace_provider = setup_livekit(
+                client=client,
+                session_id=ctx.room.name,
+                session_name=f"Voice Call - {ctx.room.name}",
+            )
+            set_tracer_provider(trace_provider)
+
+            async def cleanup():
+                trace_provider.force_flush()
+            ctx.add_shutdown_callback(cleanup)
+
+            session = AgentSession(...)
+            await session.start(agent=MyAgent(), room=ctx.room)
+
+        if __name__ == "__main__":
+            cli.run_app(server)
+    """
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+    # auto-create Lucidic session
+    client.sessions.create(
+        session_id=session_id,
+        session_name=session_name or f"LiveKit Voice Session - {session_id}",
+    )
+    logger.info(f"[LiveKit] Created Lucidic session: {session_id}")
+
+    # create exporter
+    exporter = LucidicLiveKitExporter(client, session_id)
+
+    # create tracer provider
+    trace_provider = TracerProvider()
+
+    # add metadata processor if metadata provided
+    if metadata:
+        trace_provider.add_span_processor(_MetadataSpanProcessor(metadata))
+
+    # add exporter via batch processor
+    trace_provider.add_span_processor(BatchSpanProcessor(exporter))
+
+    logger.info("[LiveKit] Lucidic tracing configured")
+    return trace_provider
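
The module docstring above already shows the basic wiring; the fragment below only continues the `entrypoint` from that example to demonstrate the optional `metadata` argument, which `_MetadataSpanProcessor` attaches to every span. The customer ID and environment values are placeholders.

```python
# Continues the entrypoint() from the docstring example above; ctx, client,
# and set_tracer_provider are as defined there. Metadata values are placeholders.
trace_provider = setup_livekit(
    client=client,
    session_id=ctx.room.name,
    session_name=f"Voice Call - {ctx.room.name}",
    metadata={"customer_id": "cust-123", "environment": "staging"},
)
set_tracer_provider(trace_provider)
```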

{lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai/sdk/decorators.py

@@ -51,7 +51,7 @@ def _emit_event_to_client(
             "session_id": session_id,
             **event_data,
         }
-        response = client._resources["events"].
+        response = client._resources["events"].create(**event_payload)
         return response.get("event_id") if response else None
     except Exception as e:
         debug(f"[Decorator] Failed to emit event: {e}")
@@ -81,7 +81,7 @@ async def _aemit_event_to_client(
             "session_id": session_id,
             **event_data,
         }
-        response = await client._resources["events"].
+        response = await client._resources["events"].acreate(**event_payload)
         return response.get("event_id") if response else None
     except Exception as e:
         debug(f"[Decorator] Failed to emit async event: {e}")

{lucidicai-3.0.0 → lucidicai-3.3.1}/lucidicai.egg-info/SOURCES.txt

@@ -12,6 +12,7 @@ lucidicai/api/__init__.py
 lucidicai/api/client.py
 lucidicai/api/resources/__init__.py
 lucidicai/api/resources/dataset.py
+lucidicai/api/resources/evals.py
 lucidicai/api/resources/event.py
 lucidicai/api/resources/experiment.py
 lucidicai/api/resources/feature_flag.py
@@ -21,6 +22,8 @@ lucidicai/core/__init__.py
 lucidicai/core/config.py
 lucidicai/core/errors.py
 lucidicai/core/types.py
+lucidicai/integrations/__init__.py
+lucidicai/integrations/livekit.py
 lucidicai/sdk/__init__.py
 lucidicai/sdk/context.py
 lucidicai/sdk/decorators.py

lucidicai-3.0.0/lucidicai/api/resources/prompt.py (removed)

@@ -1,84 +0,0 @@
-"""Prompt resource API operations."""
-import logging
-from typing import Any, Dict, Optional
-
-from ..client import HttpClient
-
-logger = logging.getLogger("Lucidic")
-
-
-class PromptResource:
-    """Handle prompt-related API operations."""
-
-    def __init__(self, http: HttpClient, production: bool = False):
-        """Initialize prompt resource.
-
-        Args:
-            http: HTTP client instance
-            production: Whether to suppress errors in production mode
-        """
-        self.http = http
-        self._production = production
-
-    def get(
-        self,
-        prompt_name: str,
-        variables: Optional[Dict[str, Any]] = None,
-        label: str = "production",
-    ) -> str:
-        """Get a prompt from the prompt database.
-
-        Args:
-            prompt_name: Name of the prompt.
-            variables: Variables to interpolate into the prompt.
-            label: Prompt version label (default: "production").
-
-        Returns:
-            The prompt content with variables interpolated.
-        """
-        try:
-            response = self.http.get(
-                "getprompt",
-                {"prompt_name": prompt_name, "label": label},
-            )
-            prompt = response.get("prompt_content", "")
-
-            # Replace variables
-            if variables:
-                for key, value in variables.items():
-                    prompt = prompt.replace(f"{{{key}}}", str(value))
-
-            return prompt
-        except Exception as e:
-            if self._production:
-                logger.error(f"[PromptResource] Failed to get prompt: {e}")
-                return ""
-            raise
-
-    async def aget(
-        self,
-        prompt_name: str,
-        variables: Optional[Dict[str, Any]] = None,
-        label: str = "production",
-    ) -> str:
-        """Get a prompt from the prompt database (asynchronous).
-
-        See get() for full documentation.
-        """
-        try:
-            response = await self.http.aget(
-                "getprompt",
-                {"prompt_name": prompt_name, "label": label},
-            )
-            prompt = response.get("prompt_content", "")
-
-            if variables:
-                for key, value in variables.items():
-                    prompt = prompt.replace(f"{{{key}}}", str(value))
-
-            return prompt
-        except Exception as e:
-            if self._production:
-                logger.error(f"[PromptResource] Failed to get prompt: {e}")
-                return ""
-            raise