lucidicai-3.4.1-py3-none-any.whl → lucidicai-3.4.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +1 -1
- lucidicai/api/client.py +26 -0
- lucidicai/api/resources/prompt.py +170 -3
- lucidicai/integrations/livekit.py +278 -66
- lucidicai/telemetry/utils/model_pricing.py +2 -2
- {lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/METADATA +1 -1
- {lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/RECORD +9 -9
- {lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/WHEEL +0 -0
- {lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/top_level.txt +0 -0
lucidicai/__init__.py
CHANGED
lucidicai/api/client.py
CHANGED
@@ -207,6 +207,19 @@ class HttpClient:
         data = self._add_timestamp(data)
         return self.request("PUT", endpoint, json=data)

+    def patch(self, endpoint: str, data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+        """Make a synchronous PATCH request.
+
+        Args:
+            endpoint: API endpoint (without base URL)
+            data: Request body data
+
+        Returns:
+            Response data as dictionary
+        """
+        data = self._add_timestamp(data)
+        return self.request("PATCH", endpoint, json=data)
+
     def delete(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
         """Make a synchronous DELETE request.
@@ -301,6 +314,19 @@ class HttpClient:
         data = self._add_timestamp(data)
         return await self.arequest("PUT", endpoint, json=data)

+    async def apatch(self, endpoint: str, data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+        """Make an asynchronous PATCH request.
+
+        Args:
+            endpoint: API endpoint (without base URL)
+            data: Request body data
+
+        Returns:
+            Response data as dictionary
+        """
+        data = self._add_timestamp(data)
+        return await self.arequest("PATCH", endpoint, json=data)
+
     async def adelete(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
         """Make an asynchronous DELETE request.
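For orientation, a minimal usage sketch of the new PATCH helpers. This is not part of the diff; how an HttpClient is constructed is elided here, and the request body is illustrative:

# given an HttpClient instance `http` (construction/config not shown in this diff)
resp = http.patch(
    "sdk/prompts",
    data={"prompt_name": "greeting", "label": "prod", "metadata": {"owner": "ml-team"}},
)

# the async twin mirrors it through arequest()
import asyncio

async def main() -> dict:
    return await http.apatch(
        "sdk/prompts",
        data={"prompt_name": "greeting", "label": "prod", "metadata": {"owner": "ml-team"}},
    )

asyncio.run(main())

Both helpers stamp the body via _add_timestamp() before delegating to the shared request path, matching get/post/put.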
lucidicai/api/resources/prompt.py
CHANGED

@@ -2,7 +2,7 @@
 import logging
 import time
 from dataclasses import dataclass
-from typing import Any, Dict, Optional, Tuple, TYPE_CHECKING
+from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING

 from ..client import HttpClient
@@ -58,6 +58,21 @@ class PromptResource:
         self._production = production
         self._cache: Dict[Tuple[str, str], Dict[str, Any]] = {}

+    def _invalidate_cache(self, prompt_name: str, label: Optional[str] = None) -> None:
+        """Invalidate cached prompt entries.
+
+        Args:
+            prompt_name: Name of the prompt to invalidate.
+            label: If provided, only invalidate the specific (prompt_name, label) entry.
+                If None, invalidate all entries matching prompt_name.
+        """
+        if label is not None:
+            self._cache.pop((prompt_name, label), None)
+        else:
+            keys_to_remove = [k for k in self._cache if k[0] == prompt_name]
+            for k in keys_to_remove:
+                del self._cache[k]
+
     def _is_cache_valid(self, cache_key: Tuple[str, str], cache_ttl: int) -> bool:
         """Check if a cached prompt is still valid.
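A standalone sketch of the invalidation semantics, using the same (prompt_name, label) tuple keys the resource's cache uses:

# sketch of the (prompt_name, label)-keyed cache behavior above
cache = {
    ("greeting", "prod"): {"content": "v3", "metadata": {}},
    ("greeting", "staging"): {"content": "v4", "metadata": {}},
    ("farewell", "prod"): {"content": "v1", "metadata": {}},
}

def invalidate(cache, prompt_name, label=None):
    if label is not None:
        cache.pop((prompt_name, label), None)       # drop one entry
    else:
        for k in [k for k in cache if k[0] == prompt_name]:
            del cache[k]                            # drop every label of that prompt

invalidate(cache, "greeting")           # removes both greeting entries
assert set(cache) == {("farewell", "prod")}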
@@ -106,7 +121,7 @@ class PromptResource:
                 metadata = self._cache[cache_key]["metadata"]
             else:
                 response = self.http.get(
-                    "…
+                    "sdk/prompts",
                     {"prompt_name": prompt_name, "label": label, "agent_id": self._config.agent_id},
                 )
                 raw_content = response.get("prompt_content", "")
@@ -150,7 +165,7 @@ class PromptResource:
                 metadata = self._cache[cache_key]["metadata"]
             else:
                 response = await self.http.aget(
-                    "…
+                    "sdk/prompts",
                     {"prompt_name": prompt_name, "label": label, "agent_id": self._config.agent_id},
                 )
                 raw_content = response.get("prompt_content", "")
@@ -173,3 +188,155 @@ class PromptResource:
                 logger.error(f"[PromptResource] Failed to get prompt: {e}")
                 return Prompt(raw_content="", content="", metadata={})
             raise
+
+    def update(
+        self,
+        prompt_name: str,
+        prompt_content: str,
+        description: Optional[str] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        labels: Optional[List[str]] = None,
+    ) -> Prompt:
+        """Update a prompt, creating a new immutable version.
+
+        Args:
+            prompt_name: Name of the prompt to update.
+            prompt_content: New content for the prompt.
+            description: Optional description for the prompt version.
+            metadata: Optional metadata dict to attach to the prompt version.
+            labels: Optional list of labels to assign to the new version.
+
+        Returns:
+            A Prompt object with the new content and metadata from the response.
+        """
+        try:
+            body: Dict[str, Any] = {
+                "agent_id": self._config.agent_id,
+                "prompt_name": prompt_name,
+                "prompt_content": prompt_content,
+            }
+            if description is not None:
+                body["description"] = description
+            if metadata is not None:
+                body["metadata"] = metadata
+            if labels is not None:
+                body["labels"] = labels
+
+            response = self.http.put("sdk/prompts", data=body)
+            response_metadata = response.get("metadata", {})
+
+            self._invalidate_cache(prompt_name)
+
+            return Prompt(raw_content=prompt_content, content=prompt_content, metadata=response_metadata)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to update prompt: {e}")
+                return Prompt(raw_content="", content="", metadata={})
+            raise
+
+    async def aupdate(
+        self,
+        prompt_name: str,
+        prompt_content: str,
+        description: Optional[str] = None,
+        metadata: Optional[Dict[str, Any]] = None,
+        labels: Optional[List[str]] = None,
+    ) -> Prompt:
+        """Update a prompt, creating a new immutable version (asynchronous).
+
+        See update() for full documentation.
+        """
+        try:
+            body: Dict[str, Any] = {
+                "agent_id": self._config.agent_id,
+                "prompt_name": prompt_name,
+                "prompt_content": prompt_content,
+            }
+            if description is not None:
+                body["description"] = description
+            if metadata is not None:
+                body["metadata"] = metadata
+            if labels is not None:
+                body["labels"] = labels
+
+            response = await self.http.aput("sdk/prompts", data=body)
+            response_metadata = response.get("metadata", {})
+
+            self._invalidate_cache(prompt_name)
+
+            return Prompt(raw_content=prompt_content, content=prompt_content, metadata=response_metadata)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to update prompt: {e}")
+                return Prompt(raw_content="", content="", metadata={})
+            raise
+
+    def update_metadata(
+        self,
+        prompt_name: str,
+        label: str,
+        metadata: Dict[str, Any],
+    ) -> Prompt:
+        """Update metadata on an existing prompt version.
+
+        Sends a PATCH request to update only the metadata for the prompt version
+        identified by (prompt_name, label). The prompt content is not returned
+        by this endpoint, so the returned Prompt will have empty content fields.
+
+        Args:
+            prompt_name: Name of the prompt.
+            label: Label identifying the prompt version to update.
+            metadata: Metadata dict to set on the prompt version.
+
+        Returns:
+            A Prompt object with empty content and the updated metadata.
+        """
+        try:
+            body: Dict[str, Any] = {
+                "agent_id": self._config.agent_id,
+                "prompt_name": prompt_name,
+                "label": label,
+                "metadata": metadata,
+            }
+
+            response = self.http.patch("sdk/prompts", data=body)
+            response_metadata = response.get("metadata", {})
+
+            self._invalidate_cache(prompt_name, label)
+
+            return Prompt(raw_content="", content="", metadata=response_metadata)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to update prompt metadata: {e}")
+                return Prompt(raw_content="", content="", metadata={})
+            raise
+
+    async def aupdate_metadata(
+        self,
+        prompt_name: str,
+        label: str,
+        metadata: Dict[str, Any],
+    ) -> Prompt:
+        """Update metadata on an existing prompt version (asynchronous).
+
+        See update_metadata() for full documentation.
+        """
+        try:
+            body: Dict[str, Any] = {
+                "agent_id": self._config.agent_id,
+                "prompt_name": prompt_name,
+                "label": label,
+                "metadata": metadata,
+            }
+
+            response = await self.http.apatch("sdk/prompts", data=body)
+            response_metadata = response.get("metadata", {})
+
+            self._invalidate_cache(prompt_name, label)
+
+            return Prompt(raw_content="", content="", metadata=response_metadata)
+        except Exception as e:
+            if self._production:
+                logger.error(f"[PromptResource] Failed to update prompt metadata: {e}")
+                return Prompt(raw_content="", content="", metadata={})
+            raise
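A hedged usage sketch of the new methods. Only the signatures come from this diff; the assumption that PromptResource is exposed as client.prompts (and the client itself) is illustrative:

# assumption: the resource is reachable as client.prompts on a LucidicAI client
new_version = client.prompts.update(
    prompt_name="greeting",
    prompt_content="Hello {{user_name}}, how can I help?",
    description="friendlier opener",
    labels=["staging"],
)

# PATCH only the metadata of the version currently labeled "prod";
# the returned Prompt has empty content fields by design
patched = client.prompts.update_metadata(
    prompt_name="greeting",
    label="prod",
    metadata={"owner": "ml-team", "reviewed": True},
)

Both calls invalidate the in-process cache, so the next get() for that prompt refetches from the server.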
lucidicai/integrations/livekit.py
CHANGED

@@ -40,19 +40,37 @@ if TYPE_CHECKING:
     from opentelemetry.sdk.trace import TracerProvider
     from ..client import LucidicAI

+from ..telemetry.utils.model_pricing import calculate_cost
+from ..telemetry.utils.provider import detect_provider
+
 logger = logging.getLogger("lucidicai.integrations.livekit")


 class LucidicLiveKitExporter(SpanExporter):
     """Custom OpenTelemetry exporter for LiveKit voice agent spans.

-    Converts LiveKit spans …
-
-
+    Converts LiveKit spans into Lucidic events:
+    - llm_request spans -> llm_generation events (with model, tokens, messages, output)
+    - function_tool spans -> function_call events (with name, arguments, return value)
+
+    The llm_request span contains:
+    - gen_ai.request.model: Model name
+    - gen_ai.usage.input_tokens / gen_ai.usage.output_tokens: Token counts
+    - lk.llm_metrics: JSON with ttft, duration, tokens_per_second, metadata
+    - GenAI events: gen_ai.system.message, gen_ai.user.message, gen_ai.assistant.message,
+      gen_ai.tool.message (inputs), gen_ai.choice (output with optional tool_calls)
+
+    The function_tool span contains:
+    - lk.function_tool.id: Tool call ID
+    - lk.function_tool.name: Function name
+    - lk.function_tool.arguments: JSON arguments
+    - lk.function_tool.output: Return value
+    - lk.function_tool.is_error: Error flag
     """

     # livekit span names we care about
-    …
+    # note: llm_request has model/provider/tokens, llm_node is parent without these
+    LIVEKIT_LLM_SPANS = {"llm_request", "function_tool"}

     def __init__(self, client: "LucidicAI", session_id: str):
         """Initialize the exporter.
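A minimal wiring sketch, assuming a standard OpenTelemetry SDK setup; how the Lucidic SDK normally installs this exporter is not shown in this diff, and client/session_id creation is assumed:

from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor

from lucidicai.integrations.livekit import LucidicLiveKitExporter

# client and session_id come from the LucidicAI SDK (creation assumed here)
exporter = LucidicLiveKitExporter(client, session_id)

provider = TracerProvider()
# every finished LiveKit span flows through export(); only llm_request and
# function_tool spans (LIVEKIT_LLM_SPANS) become Lucidic events
provider.add_span_processor(SimpleSpanProcessor(exporter))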
@@ -90,10 +108,150 @@ class LucidicLiveKitExporter(SpanExporter):
         """Check if span is a LiveKit LLM-related span we should process."""
         return span.name in self.LIVEKIT_LLM_SPANS

+    def _parse_llm_metrics(self, attrs: Dict[str, Any]) -> Dict[str, Any]:
+        """Parse lk.llm_metrics JSON to extract provider, model, and timing info.
+
+        Args:
+            attrs: Span attributes dictionary
+
+        Returns:
+            Dict with 'provider', 'model', 'ttft', 'tokens_per_second' keys if found
+        """
+        llm_metrics_json = attrs.get("lk.llm_metrics")
+        if not llm_metrics_json:
+            return {}
+
+        try:
+            if isinstance(llm_metrics_json, str):
+                metrics = json.loads(llm_metrics_json)
+            else:
+                metrics = llm_metrics_json
+
+            result = {}
+            metadata = metrics.get("metadata", {})
+
+            if metadata.get("model_provider"):
+                result["provider"] = metadata["model_provider"]
+            if metadata.get("model_name"):
+                result["model"] = metadata["model_name"]
+
+            # extract timing and performance metrics
+            if metrics.get("ttft") is not None:
+                result["ttft"] = metrics["ttft"]
+            if metrics.get("duration") is not None:
+                result["duration"] = metrics["duration"]
+            if metrics.get("tokens_per_second") is not None:
+                result["tokens_per_second"] = metrics["tokens_per_second"]
+            if metrics.get("cancelled") is not None:
+                result["cancelled"] = metrics["cancelled"]
+
+            return result
+        except (json.JSONDecodeError, TypeError) as e:
+            logger.debug(f"[LiveKit] Failed to parse llm_metrics: {e}")
+            return {}
+
+    def _parse_span_events(self, span: ReadableSpan) -> tuple[List[Dict[str, Any]], str, List[Dict[str, Any]]]:
+        """Parse span events to extract messages, output, and tool calls.
+
+        llm_request spans have GenAI events:
+        - gen_ai.system.message, gen_ai.user.message, gen_ai.assistant.message (input)
+        - gen_ai.tool.message (tool output)
+        - gen_ai.choice (output/completion with optional tool_calls)
+
+        Args:
+            span: The OpenTelemetry span
+
+        Returns:
+            Tuple of (messages list, output string, tool_calls list)
+        """
+        messages: List[Dict[str, Any]] = []
+        output = ""
+        tool_calls: List[Dict[str, Any]] = []
+
+        # map event names to roles
+        event_to_role = {
+            "gen_ai.system.message": "system",
+            "gen_ai.user.message": "user",
+            "gen_ai.assistant.message": "assistant",
+            "gen_ai.tool.message": "tool",
+        }
+
+        if not span.events:
+            return messages, output, tool_calls
+
+        for event in span.events:
+            event_name = event.name
+            event_attrs = dict(event.attributes or {})
+
+            if event_name in event_to_role:
+                # message event
+                role = event_to_role[event_name]
+                msg: Dict[str, Any] = {"role": role}
+
+                content = event_attrs.get("content", "")
+                if content:
+                    msg["content"] = content
+
+                # handle tool_calls in assistant messages (input tool calls)
+                if event_name == "gen_ai.assistant.message" and "tool_calls" in event_attrs:
+                    msg["tool_calls"] = self._parse_tool_calls(event_attrs["tool_calls"])
+
+                # handle tool message metadata
+                if event_name == "gen_ai.tool.message":
+                    if "name" in event_attrs:
+                        msg["name"] = event_attrs["name"]
+                    if "id" in event_attrs:
+                        msg["tool_call_id"] = event_attrs["id"]
+
+                messages.append(msg)
+
+            elif event_name == "gen_ai.choice":
+                # completion/output event
+                content = event_attrs.get("content", "")
+                if content:
+                    output = content
+
+                # extract tool_calls from completion if present
+                if "tool_calls" in event_attrs:
+                    tool_calls = self._parse_tool_calls(event_attrs["tool_calls"])
+
+        return messages, output, tool_calls
+
+    def _parse_tool_calls(self, tool_calls_attr: Any) -> List[Dict[str, Any]]:
+        """Parse tool_calls attribute from GenAI events.
+
+        Tool calls are stored as a list of JSON strings.
+
+        Args:
+            tool_calls_attr: The tool_calls attribute value (list of JSON strings)
+
+        Returns:
+            List of parsed tool call dicts
+        """
+        if not tool_calls_attr:
+            return []
+
+        parsed = []
+        try:
+            # tool_calls is a list of JSON strings
+            if isinstance(tool_calls_attr, (list, tuple)):
+                for tc in tool_calls_attr:
+                    if isinstance(tc, str):
+                        parsed.append(json.loads(tc))
+                    elif isinstance(tc, dict):
+                        parsed.append(tc)
+            elif isinstance(tool_calls_attr, str):
+                # single JSON string
+                parsed.append(json.loads(tool_calls_attr))
+        except (json.JSONDecodeError, TypeError) as e:
+            logger.debug(f"[LiveKit] Failed to parse tool_calls: {e}")
+
+        return parsed
+
     def _process_span(self, span: ReadableSpan) -> None:
         """Process a single LiveKit span and create corresponding Lucidic event."""
         try:
-            if span.name == "…
+            if span.name == "llm_request":
                 event_data = self._convert_llm_span(span)
                 self._client.events.create(**event_data)
                 logger.debug(f"[LiveKit] Created llm_generation event for span {span.name}")
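For reference, a sketch of the lk.llm_metrics payload shape the parser above expects; the field names are taken from this diff, the concrete values are illustrative:

import json

# illustrative payload; only the keys are taken from _parse_llm_metrics above
lk_llm_metrics = json.dumps({
    "ttft": 0.42,                  # seconds to first token
    "duration": 1.87,              # total generation time
    "tokens_per_second": 55.3,
    "cancelled": False,
    "metadata": {"model_provider": "openai", "model_name": "gpt-4o-mini"},
})

metrics = json.loads(lk_llm_metrics)
info = {
    "provider": metrics["metadata"]["model_provider"],
    "model": metrics["metadata"]["model_name"],
    "ttft": metrics["ttft"],
}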
@@ -105,21 +263,48 @@ class LucidicLiveKitExporter(SpanExporter):
             logger.error(f"[LiveKit] Failed to process span {span.name}: {e}")

     def _convert_llm_span(self, span: ReadableSpan) -> Dict[str, Any]:
-        """Convert an …
+        """Convert an llm_request span to llm_generation event data."""
         attrs = dict(span.attributes or {})

-        # …
-
+        # parse lk.llm_metrics for provider/model/timing
+        llm_info = self._parse_llm_metrics(attrs)

-        # extract …
-
+        # extract model (gen_ai attribute takes precedence, fallback to metrics)
+        model = attrs.get("gen_ai.request.model") or llm_info.get("model") or "unknown"

-        # …
-
+        # extract provider (from metrics first, then detect from model)
+        provider = llm_info.get("provider") or detect_provider(model=model, attributes=attrs)

-        # …
-
-
+        # extract messages, output, and tool_calls from span events (llm_request uses GenAI events)
+        messages, output, tool_calls = self._parse_span_events(span)
+
+        # fallback to lk.chat_ctx if no events (backwards compatibility)
+        if not messages:
+            messages = self._parse_chat_context(attrs.get("lk.chat_ctx"))
+
+        # fallback to lk.response.text if no output from events
+        if not output:
+            output = attrs.get("lk.response.text", "")
+
+        # extract token counts
+        input_tokens = attrs.get("gen_ai.usage.input_tokens")
+        output_tokens = attrs.get("gen_ai.usage.output_tokens")
+
+        # calculate cost using existing pricing utility
+        cost = None
+        if input_tokens is not None and output_tokens is not None:
+            token_usage = {
+                "input_tokens": input_tokens,
+                "output_tokens": output_tokens,
+            }
+            cost = calculate_cost(model, token_usage)
+
+        # build metadata with diagnostics from llm_metrics
+        metadata = self._build_llm_metadata(attrs, llm_info)
+
+        # calculate duration (prefer from metrics, fallback to span timing)
+        duration = llm_info.get("duration")
+        if duration is None and span.start_time and span.end_time:
             duration = (span.end_time - span.start_time) / 1e9

         # extract timing for occurred_at
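The cost path reuses calculate_cost from model_pricing with the same usage-dict shape the method builds; a quick sketch (token counts illustrative):

from lucidicai.telemetry.utils.model_pricing import calculate_cost

token_usage = {"input_tokens": 1200, "output_tokens": 350}
cost = calculate_cost("gpt-4o-mini", token_usage)
# behavior for models missing from MODEL_PRICING is not shown in this diff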
@@ -129,19 +314,27 @@ class LucidicLiveKitExporter(SpanExporter):
             span.start_time / 1e9, tz=timezone.utc
         ).isoformat()

-        …
+        result: Dict[str, Any] = {
             "type": "llm_generation",
             "session_id": self._session_id,
-            "…
+            "provider": provider,
+            "model": model,
             "messages": messages,
             "output": output,
-            "input_tokens": …
-            "output_tokens": …
+            "input_tokens": input_tokens,
+            "output_tokens": output_tokens,
+            "cost": cost,
             "duration": duration,
             "occurred_at": occurred_at,
             "metadata": metadata,
         }

+        # include tool_calls if present (LLM requested function calls)
+        if tool_calls:
+            result["tool_calls"] = tool_calls
+
+        return result
+
     def _convert_function_span(self, span: ReadableSpan) -> Dict[str, Any]:
         """Convert a function_tool span to function_call event data."""
         attrs = dict(span.attributes or {})
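Put together, the exporter ends up calling events.create with a payload like this; keys come from the dict above, values are illustrative:

# illustrative payload assembled by _convert_llm_span
event_data = {
    "type": "llm_generation",
    "session_id": "sess-123",
    "provider": "openai",
    "model": "gpt-4o-mini",
    "messages": [{"role": "user", "content": "What's the weather?"}],
    "output": "Let me check that for you.",
    "input_tokens": 1200,
    "output_tokens": 350,
    "cost": 0.00039,
    "duration": 1.87,
    "occurred_at": "2025-01-01T12:00:00+00:00",
    "metadata": {"ttft": 0.42, "tokens_per_second": 55.3},
}
client.events.create(**event_data)  # client creation assumed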
@@ -158,17 +351,16 @@ class LucidicLiveKitExporter(SpanExporter):
             span.start_time / 1e9, tz=timezone.utc
         ).isoformat()

-        # …
-
-
-            "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
-            "agent_name": attrs.get("lk.agent_name"),
-            "generation_id": attrs.get("lk.generation_id"),
-            "tool_call_id": attrs.get("lk.function_tool.id"),
-        }
-        metadata = self._clean_none_values(metadata)
+        # extract function call details (these are the attributes actually on function_tool span)
+        tool_call_id = attrs.get("lk.function_tool.id")
+        is_error = attrs.get("lk.function_tool.is_error", False)

-
+        # build metadata with tool call id
+        metadata: Dict[str, Any] = {}
+        if tool_call_id:
+            metadata["tool_call_id"] = tool_call_id
+
+        result: Dict[str, Any] = {
             "type": "function_call",
             "session_id": self._session_id,
             "function_name": attrs.get("lk.function_tool.name", "unknown"),
@@ -176,9 +368,17 @@ class LucidicLiveKitExporter(SpanExporter):
             "return_value": attrs.get("lk.function_tool.output"),
             "duration": duration,
             "occurred_at": occurred_at,
-            "metadata": metadata,
         }

+        # include is_error flag if the tool execution failed
+        if is_error:
+            result["is_error"] = True
+
+        if metadata:
+            result["metadata"] = metadata
+
+        return result
+
     def _parse_chat_context(self, chat_ctx_json: Optional[str]) -> List[Dict[str, str]]:
         """Parse LiveKit's lk.chat_ctx JSON into Lucidic messages format.
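A sketch of the resulting function_call event; values are illustrative, and the "arguments" key name is an assumption (its context line is not visible in this hunk):

# illustrative payload assembled by _convert_function_span
event_data = {
    "type": "function_call",
    "session_id": "sess-123",
    "function_name": "get_weather",
    "arguments": '{"city": "Berlin"}',           # from lk.function_tool.arguments (assumed key)
    "return_value": '{"temp_c": 21}',            # from lk.function_tool.output
    "duration": 0.31,
    "occurred_at": "2025-01-01T12:00:02+00:00",
    "metadata": {"tool_call_id": "call_abc123"},
    # "is_error": True appears only when lk.function_tool.is_error was set
}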
@@ -223,49 +423,61 @@ class LucidicLiveKitExporter(SpanExporter):
             logger.debug(f"[LiveKit] Failed to parse chat context: {e}")
             return []

-    def …
-        """Build metadata dict with diagnostics from span attributes.
+    def _build_llm_metadata(self, attrs: Dict[str, Any], llm_info: Dict[str, Any]) -> Dict[str, Any]:
+        """Build metadata dict with diagnostics from llm_request span attributes.
+
+        Note: llm_request spans have limited attributes. Most metadata (job_id, room_name,
+        generation_id, etc.) are on parent spans (agent_session, agent_turn) and are only
+        available if a MetadataSpanProcessor propagates them.

         Args:
             attrs: Span attributes dictionary
+            llm_info: Parsed lk.llm_metrics data

         Returns:
-            Cleaned metadata dict with …
+            Cleaned metadata dict with available diagnostics
         """
-        metadata = {
-            …
-            "provider_tools": attrs.get("lk.provider_tools"),
-            "tool_sets": attrs.get("lk.tool_sets"),
-            },
-            "session_options": attrs.get("lk.session_options"),
-        },
+        metadata: Dict[str, Any] = {}
+
+        # timing metrics from lk.llm_metrics (actually available on llm_request)
+        if llm_info.get("ttft") is not None:
+            metadata["ttft"] = llm_info["ttft"]
+        if llm_info.get("tokens_per_second") is not None:
+            metadata["tokens_per_second"] = llm_info["tokens_per_second"]
+        if llm_info.get("cancelled"):
+            metadata["cancelled"] = llm_info["cancelled"]
+
+        # retry count if available (set on llm_request_run, may be propagated)
+        retry_count = attrs.get("lk.retry_count")
+        if retry_count is not None and retry_count > 0:
+            metadata["retry_count"] = retry_count
+
+        # attributes that may be available via MetadataSpanProcessor
+        # (set by user or propagated from parent spans)
+        optional_attrs = {
+            "job_id": "lk.job_id",
+            "room_name": "lk.room_name",
+            "room_id": "room_id",
+            "agent_name": "lk.agent_name",
+            "participant_id": "lk.participant_id",
+            "generation_id": "lk.generation_id",
+            "parent_generation_id": "lk.parent_generation_id",
+            "speech_id": "lk.speech_id",
+            "interrupted": "lk.interrupted",
         }
-        …
+
+        for key, attr_name in optional_attrs.items():
+            value = attrs.get(attr_name)
+            if value is not None:
+                metadata[key] = value
+
+        # prefer room_name over room_id
+        if "room_id" in metadata and "room_name" not in metadata:
+            metadata["room_name"] = metadata.pop("room_id")
+        elif "room_id" in metadata:
+            del metadata["room_id"]
+
+        return metadata

     def _clean_none_values(self, d: Dict[str, Any]) -> Dict[str, Any]:
         """Recursively remove None values and empty dicts.
lucidicai/telemetry/utils/model_pricing.py
CHANGED

@@ -92,8 +92,8 @@ MODEL_PRICING = {
     # Google Gemini 2.5 Series (2025) - Verified
     "gemini-2.5-pro": {"input": 1.25, "output": 10.0},  # Up to 200k tokens
     "gemini-2.5-pro-preview": {"input": 1.25, "output": 10.0},
-    "gemini-2.5-flash": {"input": 0.…
-    "gemini-2.5-flash-preview": {"input": 0.…
+    "gemini-2.5-flash": {"input": 0.30, "output": 2.5},  # Non-thinking
+    "gemini-2.5-flash-preview": {"input": 0.30, "output": 2.5},

     # Google Gemini 2.0 Series - Verified
     "gemini-2.0-flash": {"input": 0.1, "output": 0.4},
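Assuming the table is USD per million tokens (consistent with the values above), the updated gemini-2.5-flash entry prices a call like so:

# worked example with the new gemini-2.5-flash rates (USD per 1M tokens assumed)
rates = {"input": 0.30, "output": 2.5}
input_tokens, output_tokens = 1000, 500

cost = (input_tokens / 1_000_000) * rates["input"] \
     + (output_tokens / 1_000_000) * rates["output"]
# 0.0003 + 0.00125 = 0.00155 USD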
{lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-lucidicai/__init__.py,sha256=…
+lucidicai/__init__.py,sha256=Qc3hC7-4pxHthUdGnIKdlrJ8Sxrou8yvCo17FUFPb7M,1376
 lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
 lucidicai/client.py,sha256=BGKP91_Oj5kHQU0osYf1T_BWakL8KIhs0AgUc5X99sU,15104
 lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
@@ -19,21 +19,21 @@ lucidicai/state.py,sha256=4Tb1X6l2or6w_e62FYSuEeghAv3xXm5gquKwzCpvdok,235
 lucidicai/step.py,sha256=_oBIyTBZBvNkUkYHIrwWd75KMSlMtR9Ws2Lo71Lyff8,2522
 lucidicai/streaming.py,sha256=QOLAzhwxetvx711J8VcphY5kXWPJz9XEBJrmHveRKMc,9796
 lucidicai/api/__init__.py,sha256=UOYuFZupG0TgzMAxbLNgpodDXhDRXBgMva8ZblgBN9Y,31
-lucidicai/api/client.py,sha256=…
+lucidicai/api/client.py,sha256=T3EIDReCsHQ4vqgwKdsVxZ0gz_XrFQLIoJNhS7lhRuo,14054
 lucidicai/api/resources/__init__.py,sha256=DDgviDW3Su-G1ofkZGlaJMc2pzYJqrbBnEruNg1whCM,416
 lucidicai/api/resources/dataset.py,sha256=I6g9ah4vaqEH1jyeouBn7xvC0oAuDNPeyl-bmtNj-T0,17400
 lucidicai/api/resources/evals.py,sha256=_3nLE6dMLht844mWw7kl_hctjv5JIuC6MP06YWUgLnI,7235
 lucidicai/api/resources/event.py,sha256=GTIU5sIbLNTWAHk4rB120xWTRkhnraz9JNfamEygyNo,14267
 lucidicai/api/resources/experiment.py,sha256=fOIKJ5d89bHJBVZ3wjbhY_6XF3kLHz9TE3BVPA5pNpA,3563
 lucidicai/api/resources/feature_flag.py,sha256=ii412DIkZCEAhrXdGydcpQKveqGlFq4NlgdmWQnU83c,2259
-lucidicai/api/resources/prompt.py,sha256=…
+lucidicai/api/resources/prompt.py,sha256=kddb9X05om9xbWBLjUGoCFEsao-J-vyqjn_RzZIyht0,12278
 lucidicai/api/resources/session.py,sha256=jW_bftHdunhLHl_3-k0nqB5FrtLhlFeCF0tMFE82nNw,20761
 lucidicai/core/__init__.py,sha256=b0YQkd8190Y_GgwUcmf0tOiSLARd7L4kq4jwfhhGAyI,39
 lucidicai/core/config.py,sha256=q4h-yR35Ay_3znL7vavri6ScfeM69RjHShNNzjoQthc,10194
 lucidicai/core/errors.py,sha256=bYSRPqadXUCPadVLb-2fj63CB6jlAnfDeu2azHB2z8M,2137
 lucidicai/core/types.py,sha256=KabcTBQe7SemigccKfJSDiJmjSJDJJvvtefSd8pfrJI,702
 lucidicai/integrations/__init__.py,sha256=9eJxdcw9C_zLXLQGdKK-uwCYhjdnEelrXbYYNo48ewk,292
-lucidicai/integrations/livekit.py,sha256=…
+lucidicai/integrations/livekit.py,sha256=DduLEDvc2rjHUBNhmvSovCthXSZuBOLCDSD1dZ6BxjA,23140
 lucidicai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lucidicai/providers/anthropic_handler.py,sha256=GZEa4QOrjZ9ftu_qTwY3L410HwKzkXgN7omYRsEQ4LU,10174
 lucidicai/providers/base_providers.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
@@ -84,7 +84,7 @@ lucidicai/telemetry/telemetry_init.py,sha256=YpjcYDcqlWpUDDz76-x2v4K0yz_ToEpuaDz
 lucidicai/telemetry/telemetry_manager.py,sha256=XiNv-etC6ZRMyYav0v8HqURD2PGaXbwlY0O86lxfWIk,6587
 lucidicai/telemetry/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lucidicai/telemetry/utils/image_storage.py,sha256=4Z59ZpVexr7-lcExfr8GsqXe0y2VZmr8Yjwa-3DeOxU,1457
-lucidicai/telemetry/utils/model_pricing.py,sha256=…
+lucidicai/telemetry/utils/model_pricing.py,sha256=kqQh8-rO_XW3c8WvPrGgUmYdhGIm1M0psUaAYt5BYYo,11897
 lucidicai/telemetry/utils/provider.py,sha256=Ca6C7n4TGTLfnRgC1f7zr_-aZfg87vCjEjRJkMJyFwE,2682
 lucidicai/telemetry/utils/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
 lucidicai/telemetry/utils/universal_image_interceptor.py,sha256=vARgMk1hVSF--zfi5b8qBpJJOESuD17YlH9xqxmB9Uw,15954
@@ -93,7 +93,7 @@ lucidicai/utils/images.py,sha256=z8mlIKgFfrIbuk-l4L2rB62uw_uPO79sHPXPY7eLu2A,128
 lucidicai/utils/logger.py,sha256=R3B3gSee64F6UVHUrShihBq_O7W7bgfrBiVDXTO3Isg,4777
 lucidicai/utils/queue.py,sha256=8DQwnGw7pINEJ0dNSkB0PhdPW-iBQQ-YZg23poe4umE,17323
 lucidicai/utils/serialization.py,sha256=KdOREZd7XBxFBAZ86DePMfYPzSVyKr4RcgUa82aFxrs,820
-lucidicai-3.4.1.dist-info/METADATA,sha256=…
-lucidicai-3.4.1.dist-info/WHEEL,sha256=…
-lucidicai-3.4.1.dist-info/top_level.txt,sha256=…
-lucidicai-3.4.1.dist-info/RECORD,,
+lucidicai-3.4.3.dist-info/METADATA,sha256=9HOARc8RzOxYwhidKz27iaG3i3RTD92XQAbI4vTe72A,902
+lucidicai-3.4.3.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+lucidicai-3.4.3.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
+lucidicai-3.4.3.dist-info/RECORD,,
{lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/WHEEL
File without changes

{lucidicai-3.4.1.dist-info → lucidicai-3.4.3.dist-info}/top_level.txt
File without changes