lucidicai 3.4.2__tar.gz → 3.4.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58):
  1. {lucidicai-3.4.2 → lucidicai-3.4.3}/PKG-INFO +1 -1
  2. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/__init__.py +1 -1
  3. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/integrations/livekit.py +278 -66
  4. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/utils/model_pricing.py +2 -2
  5. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai.egg-info/PKG-INFO +1 -1
  6. {lucidicai-3.4.2 → lucidicai-3.4.3}/setup.py +1 -1
  7. {lucidicai-3.4.2 → lucidicai-3.4.3}/README.md +0 -0
  8. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/__init__.py +0 -0
  9. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/client.py +0 -0
  10. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/__init__.py +0 -0
  11. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/dataset.py +0 -0
  12. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/evals.py +0 -0
  13. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/event.py +0 -0
  14. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/experiment.py +0 -0
  15. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/feature_flag.py +0 -0
  16. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/prompt.py +0 -0
  17. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/api/resources/session.py +0 -0
  18. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/client.py +0 -0
  19. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/core/__init__.py +0 -0
  20. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/core/config.py +0 -0
  21. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/core/errors.py +0 -0
  22. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/core/types.py +0 -0
  23. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/integrations/__init__.py +0 -0
  24. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/__init__.py +0 -0
  25. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/context.py +0 -0
  26. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/decorators.py +0 -0
  27. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/error_boundary.py +0 -0
  28. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/event.py +0 -0
  29. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/event_builder.py +0 -0
  30. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/features/__init__.py +0 -0
  31. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/features/dataset.py +0 -0
  32. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/features/feature_flag.py +0 -0
  33. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/init.py +0 -0
  34. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/session.py +0 -0
  35. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/sdk/shutdown_manager.py +0 -0
  36. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/session_obj.py +0 -0
  37. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/__init__.py +0 -0
  38. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/context_bridge.py +0 -0
  39. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/context_capture_processor.py +0 -0
  40. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/extract.py +0 -0
  41. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/litellm_bridge.py +0 -0
  42. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/lucidic_exporter.py +0 -0
  43. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/openai_agents_instrumentor.py +0 -0
  44. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/openai_patch.py +0 -0
  45. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/openai_uninstrument.py +0 -0
  46. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/telemetry_init.py +0 -0
  47. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/telemetry_manager.py +0 -0
  48. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/utils/__init__.py +0 -0
  49. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/telemetry/utils/provider.py +0 -0
  50. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/utils/__init__.py +0 -0
  51. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/utils/logger.py +0 -0
  52. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai/utils/serialization.py +0 -0
  53. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai.egg-info/SOURCES.txt +0 -0
  54. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai.egg-info/dependency_links.txt +0 -0
  55. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai.egg-info/requires.txt +0 -0
  56. {lucidicai-3.4.2 → lucidicai-3.4.3}/lucidicai.egg-info/top_level.txt +0 -0
  57. {lucidicai-3.4.2 → lucidicai-3.4.3}/setup.cfg +0 -0
  58. {lucidicai-3.4.2 → lucidicai-3.4.3}/tests/test_event_creation.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lucidicai
3
- Version: 3.4.2
3
+ Version: 3.4.3
4
4
  Summary: Lucidic AI Python SDK
5
5
  Author: Andy Liang
6
6
  Author-email: andy@lucidic.ai
@@ -41,7 +41,7 @@ from .api.resources.prompt import Prompt
41
41
  from .integrations.livekit import setup_livekit
42
42
 
43
43
  # Version
44
- __version__ = "3.4.2"
44
+ __version__ = "3.4.3"
45
45
 
46
46
  # All exports
47
47
  __all__ = [
@@ -40,19 +40,37 @@ if TYPE_CHECKING:
40
40
  from opentelemetry.sdk.trace import TracerProvider
41
41
  from ..client import LucidicAI
42
42
 
43
+ from ..telemetry.utils.model_pricing import calculate_cost
44
+ from ..telemetry.utils.provider import detect_provider
45
+
43
46
  logger = logging.getLogger("lucidicai.integrations.livekit")
44
47
 
45
48
 
46
49
  class LucidicLiveKitExporter(SpanExporter):
47
50
  """Custom OpenTelemetry exporter for LiveKit voice agent spans.
48
51
 
49
- Converts LiveKit spans (llm_node, function_tool) into Lucidic events
50
- with full metadata including latency diagnostics, EOU detection,
51
- and tool context.
52
+ Converts LiveKit spans into Lucidic events:
53
+ - llm_request spans -> llm_generation events (with model, tokens, messages, output)
54
+ - function_tool spans -> function_call events (with name, arguments, return value)
55
+
56
+ The llm_request span contains:
57
+ - gen_ai.request.model: Model name
58
+ - gen_ai.usage.input_tokens / gen_ai.usage.output_tokens: Token counts
59
+ - lk.llm_metrics: JSON with ttft, duration, tokens_per_second, metadata
60
+ - GenAI events: gen_ai.system.message, gen_ai.user.message, gen_ai.assistant.message,
61
+ gen_ai.tool.message (inputs), gen_ai.choice (output with optional tool_calls)
62
+
63
+ The function_tool span contains:
64
+ - lk.function_tool.id: Tool call ID
65
+ - lk.function_tool.name: Function name
66
+ - lk.function_tool.arguments: JSON arguments
67
+ - lk.function_tool.output: Return value
68
+ - lk.function_tool.is_error: Error flag
52
69
  """
53
70
 
54
71
  # livekit span names we care about
55
- LIVEKIT_LLM_SPANS = {"llm_node", "function_tool"}
72
+ # note: llm_request has model/provider/tokens, llm_node is parent without these
73
+ LIVEKIT_LLM_SPANS = {"llm_request", "function_tool"}
56
74
 
57
75
  def __init__(self, client: "LucidicAI", session_id: str):
58
76
  """Initialize the exporter.
@@ -90,10 +108,150 @@ class LucidicLiveKitExporter(SpanExporter):
90
108
  """Check if span is a LiveKit LLM-related span we should process."""
91
109
  return span.name in self.LIVEKIT_LLM_SPANS
92
110
 
111
+ def _parse_llm_metrics(self, attrs: Dict[str, Any]) -> Dict[str, Any]:
112
+ """Parse lk.llm_metrics JSON to extract provider, model, and timing info.
113
+
114
+ Args:
115
+ attrs: Span attributes dictionary
116
+
117
+ Returns:
118
+ Dict with 'provider', 'model', 'ttft', 'tokens_per_second' keys if found
119
+ """
120
+ llm_metrics_json = attrs.get("lk.llm_metrics")
121
+ if not llm_metrics_json:
122
+ return {}
123
+
124
+ try:
125
+ if isinstance(llm_metrics_json, str):
126
+ metrics = json.loads(llm_metrics_json)
127
+ else:
128
+ metrics = llm_metrics_json
129
+
130
+ result = {}
131
+ metadata = metrics.get("metadata", {})
132
+
133
+ if metadata.get("model_provider"):
134
+ result["provider"] = metadata["model_provider"]
135
+ if metadata.get("model_name"):
136
+ result["model"] = metadata["model_name"]
137
+
138
+ # extract timing and performance metrics
139
+ if metrics.get("ttft") is not None:
140
+ result["ttft"] = metrics["ttft"]
141
+ if metrics.get("duration") is not None:
142
+ result["duration"] = metrics["duration"]
143
+ if metrics.get("tokens_per_second") is not None:
144
+ result["tokens_per_second"] = metrics["tokens_per_second"]
145
+ if metrics.get("cancelled") is not None:
146
+ result["cancelled"] = metrics["cancelled"]
147
+
148
+ return result
149
+ except (json.JSONDecodeError, TypeError) as e:
150
+ logger.debug(f"[LiveKit] Failed to parse llm_metrics: {e}")
151
+ return {}
152
+
153
+ def _parse_span_events(self, span: ReadableSpan) -> tuple[List[Dict[str, Any]], str, List[Dict[str, Any]]]:
154
+ """Parse span events to extract messages, output, and tool calls.
155
+
156
+ llm_request spans have GenAI events:
157
+ - gen_ai.system.message, gen_ai.user.message, gen_ai.assistant.message (input)
158
+ - gen_ai.tool.message (tool output)
159
+ - gen_ai.choice (output/completion with optional tool_calls)
160
+
161
+ Args:
162
+ span: The OpenTelemetry span
163
+
164
+ Returns:
165
+ Tuple of (messages list, output string, tool_calls list)
166
+ """
167
+ messages: List[Dict[str, Any]] = []
168
+ output = ""
169
+ tool_calls: List[Dict[str, Any]] = []
170
+
171
+ # map event names to roles
172
+ event_to_role = {
173
+ "gen_ai.system.message": "system",
174
+ "gen_ai.user.message": "user",
175
+ "gen_ai.assistant.message": "assistant",
176
+ "gen_ai.tool.message": "tool",
177
+ }
178
+
179
+ if not span.events:
180
+ return messages, output, tool_calls
181
+
182
+ for event in span.events:
183
+ event_name = event.name
184
+ event_attrs = dict(event.attributes or {})
185
+
186
+ if event_name in event_to_role:
187
+ # message event
188
+ role = event_to_role[event_name]
189
+ msg: Dict[str, Any] = {"role": role}
190
+
191
+ content = event_attrs.get("content", "")
192
+ if content:
193
+ msg["content"] = content
194
+
195
+ # handle tool_calls in assistant messages (input tool calls)
196
+ if event_name == "gen_ai.assistant.message" and "tool_calls" in event_attrs:
197
+ msg["tool_calls"] = self._parse_tool_calls(event_attrs["tool_calls"])
198
+
199
+ # handle tool message metadata
200
+ if event_name == "gen_ai.tool.message":
201
+ if "name" in event_attrs:
202
+ msg["name"] = event_attrs["name"]
203
+ if "id" in event_attrs:
204
+ msg["tool_call_id"] = event_attrs["id"]
205
+
206
+ messages.append(msg)
207
+
208
+ elif event_name == "gen_ai.choice":
209
+ # completion/output event
210
+ content = event_attrs.get("content", "")
211
+ if content:
212
+ output = content
213
+
214
+ # extract tool_calls from completion if present
215
+ if "tool_calls" in event_attrs:
216
+ tool_calls = self._parse_tool_calls(event_attrs["tool_calls"])
217
+
218
+ return messages, output, tool_calls
219
+
220
+ def _parse_tool_calls(self, tool_calls_attr: Any) -> List[Dict[str, Any]]:
221
+ """Parse tool_calls attribute from GenAI events.
222
+
223
+ Tool calls are stored as a list of JSON strings.
224
+
225
+ Args:
226
+ tool_calls_attr: The tool_calls attribute value (list of JSON strings)
227
+
228
+ Returns:
229
+ List of parsed tool call dicts
230
+ """
231
+ if not tool_calls_attr:
232
+ return []
233
+
234
+ parsed = []
235
+ try:
236
+ # tool_calls is a list of JSON strings
237
+ if isinstance(tool_calls_attr, (list, tuple)):
238
+ for tc in tool_calls_attr:
239
+ if isinstance(tc, str):
240
+ parsed.append(json.loads(tc))
241
+ elif isinstance(tc, dict):
242
+ parsed.append(tc)
243
+ elif isinstance(tool_calls_attr, str):
244
+ # single JSON string
245
+ parsed.append(json.loads(tool_calls_attr))
246
+ except (json.JSONDecodeError, TypeError) as e:
247
+ logger.debug(f"[LiveKit] Failed to parse tool_calls: {e}")
248
+
249
+ return parsed
250
+
93
251
  def _process_span(self, span: ReadableSpan) -> None:
94
252
  """Process a single LiveKit span and create corresponding Lucidic event."""
95
253
  try:
96
- if span.name == "llm_node":
254
+ if span.name == "llm_request":
97
255
  event_data = self._convert_llm_span(span)
98
256
  self._client.events.create(**event_data)
99
257
  logger.debug(f"[LiveKit] Created llm_generation event for span {span.name}")
@@ -105,21 +263,48 @@ class LucidicLiveKitExporter(SpanExporter):
105
263
  logger.error(f"[LiveKit] Failed to process span {span.name}: {e}")
106
264
 
107
265
  def _convert_llm_span(self, span: ReadableSpan) -> Dict[str, Any]:
108
- """Convert an llm_node span to llm_generation event data."""
266
+ """Convert an llm_request span to llm_generation event data."""
109
267
  attrs = dict(span.attributes or {})
110
268
 
111
- # extract messages from chat context
112
- messages = self._parse_chat_context(attrs.get("lk.chat_ctx"))
269
+ # parse lk.llm_metrics for provider/model/timing
270
+ llm_info = self._parse_llm_metrics(attrs)
113
271
 
114
- # extract output text
115
- output = attrs.get("lk.response.text", "")
272
+ # extract model (gen_ai attribute takes precedence, fallback to metrics)
273
+ model = attrs.get("gen_ai.request.model") or llm_info.get("model") or "unknown"
116
274
 
117
- # build metadata with diagnostics
118
- metadata = self._build_metadata(attrs)
275
+ # extract provider (from metrics first, then detect from model)
276
+ provider = llm_info.get("provider") or detect_provider(model=model, attributes=attrs)
119
277
 
120
- # calculate duration
121
- duration = None
122
- if span.start_time and span.end_time:
278
+ # extract messages, output, and tool_calls from span events (llm_request uses GenAI events)
279
+ messages, output, tool_calls = self._parse_span_events(span)
280
+
281
+ # fallback to lk.chat_ctx if no events (backwards compatibility)
282
+ if not messages:
283
+ messages = self._parse_chat_context(attrs.get("lk.chat_ctx"))
284
+
285
+ # fallback to lk.response.text if no output from events
286
+ if not output:
287
+ output = attrs.get("lk.response.text", "")
288
+
289
+ # extract token counts
290
+ input_tokens = attrs.get("gen_ai.usage.input_tokens")
291
+ output_tokens = attrs.get("gen_ai.usage.output_tokens")
292
+
293
+ # calculate cost using existing pricing utility
294
+ cost = None
295
+ if input_tokens is not None and output_tokens is not None:
296
+ token_usage = {
297
+ "input_tokens": input_tokens,
298
+ "output_tokens": output_tokens,
299
+ }
300
+ cost = calculate_cost(model, token_usage)
301
+
302
+ # build metadata with diagnostics from llm_metrics
303
+ metadata = self._build_llm_metadata(attrs, llm_info)
304
+
305
+ # calculate duration (prefer from metrics, fallback to span timing)
306
+ duration = llm_info.get("duration")
307
+ if duration is None and span.start_time and span.end_time:
123
308
  duration = (span.end_time - span.start_time) / 1e9
124
309
 
125
310
  # extract timing for occurred_at
@@ -129,19 +314,27 @@ class LucidicLiveKitExporter(SpanExporter):
129
314
  span.start_time / 1e9, tz=timezone.utc
130
315
  ).isoformat()
131
316
 
132
- return {
317
+ result: Dict[str, Any] = {
133
318
  "type": "llm_generation",
134
319
  "session_id": self._session_id,
135
- "model": attrs.get("gen_ai.request.model", "unknown"),
320
+ "provider": provider,
321
+ "model": model,
136
322
  "messages": messages,
137
323
  "output": output,
138
- "input_tokens": attrs.get("gen_ai.usage.input_tokens"),
139
- "output_tokens": attrs.get("gen_ai.usage.output_tokens"),
324
+ "input_tokens": input_tokens,
325
+ "output_tokens": output_tokens,
326
+ "cost": cost,
140
327
  "duration": duration,
141
328
  "occurred_at": occurred_at,
142
329
  "metadata": metadata,
143
330
  }
144
331
 
332
+ # include tool_calls if present (LLM requested function calls)
333
+ if tool_calls:
334
+ result["tool_calls"] = tool_calls
335
+
336
+ return result
337
+
145
338
  def _convert_function_span(self, span: ReadableSpan) -> Dict[str, Any]:
146
339
  """Convert a function_tool span to function_call event data."""
147
340
  attrs = dict(span.attributes or {})
@@ -158,17 +351,16 @@ class LucidicLiveKitExporter(SpanExporter):
158
351
  span.start_time / 1e9, tz=timezone.utc
159
352
  ).isoformat()
160
353
 
161
- # build metadata (subset for function calls)
162
- metadata = {
163
- "job_id": attrs.get("lk.job_id"),
164
- "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
165
- "agent_name": attrs.get("lk.agent_name"),
166
- "generation_id": attrs.get("lk.generation_id"),
167
- "tool_call_id": attrs.get("lk.function_tool.id"),
168
- }
169
- metadata = self._clean_none_values(metadata)
354
+ # extract function call details (these are the attributes actually on function_tool span)
355
+ tool_call_id = attrs.get("lk.function_tool.id")
356
+ is_error = attrs.get("lk.function_tool.is_error", False)
170
357
 
171
- return {
358
+ # build metadata with tool call id
359
+ metadata: Dict[str, Any] = {}
360
+ if tool_call_id:
361
+ metadata["tool_call_id"] = tool_call_id
362
+
363
+ result: Dict[str, Any] = {
172
364
  "type": "function_call",
173
365
  "session_id": self._session_id,
174
366
  "function_name": attrs.get("lk.function_tool.name", "unknown"),
@@ -176,9 +368,17 @@ class LucidicLiveKitExporter(SpanExporter):
176
368
  "return_value": attrs.get("lk.function_tool.output"),
177
369
  "duration": duration,
178
370
  "occurred_at": occurred_at,
179
- "metadata": metadata,
180
371
  }
181
372
 
373
+ # include is_error flag if the tool execution failed
374
+ if is_error:
375
+ result["is_error"] = True
376
+
377
+ if metadata:
378
+ result["metadata"] = metadata
379
+
380
+ return result
381
+
182
382
  def _parse_chat_context(self, chat_ctx_json: Optional[str]) -> List[Dict[str, str]]:
183
383
  """Parse LiveKit's lk.chat_ctx JSON into Lucidic messages format.
184
384
 
@@ -223,49 +423,61 @@ class LucidicLiveKitExporter(SpanExporter):
223
423
  logger.debug(f"[LiveKit] Failed to parse chat context: {e}")
224
424
  return []
225
425
 
226
- def _build_metadata(self, attrs: Dict[str, Any]) -> Dict[str, Any]:
227
- """Build metadata dict with diagnostics from span attributes.
426
+ def _build_llm_metadata(self, attrs: Dict[str, Any], llm_info: Dict[str, Any]) -> Dict[str, Any]:
427
+ """Build metadata dict with diagnostics from llm_request span attributes.
428
+
429
+ Note: llm_request spans have limited attributes. Most metadata (job_id, room_name,
430
+ generation_id, etc.) are on parent spans (agent_session, agent_turn) and are only
431
+ available if a MetadataSpanProcessor propagates them.
228
432
 
229
433
  Args:
230
434
  attrs: Span attributes dictionary
435
+ llm_info: Parsed lk.llm_metrics data
231
436
 
232
437
  Returns:
233
- Cleaned metadata dict with nested diagnostics
438
+ Cleaned metadata dict with available diagnostics
234
439
  """
235
- metadata = {
236
- # identity & tracking
237
- "job_id": attrs.get("lk.job_id"),
238
- "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
239
- "agent_name": attrs.get("lk.agent_name"),
240
- "participant_id": attrs.get("lk.participant_id"),
241
- "generation_id": attrs.get("lk.generation_id"),
242
- "parent_generation_id": attrs.get("lk.parent_generation_id"),
243
- "speech_id": attrs.get("lk.speech_id"),
244
- "interrupted": attrs.get("lk.interrupted"),
245
- # diagnostics (nested)
246
- "diagnostics": {
247
- "latency": {
248
- "llm_ttft": attrs.get("llm_node_ttft"),
249
- "tts_ttfb": attrs.get("tts_node_ttfb"),
250
- "e2e_latency": attrs.get("e2e_latency"),
251
- "transcription_delay": attrs.get("lk.transcription_delay"),
252
- "end_of_turn_delay": attrs.get("lk.end_of_turn_delay"),
253
- },
254
- "eou": {
255
- "probability": attrs.get("lk.eou.probability"),
256
- "threshold": attrs.get("lk.eou.unlikely_threshold"),
257
- "delay": attrs.get("lk.eou.endpointing_delay"),
258
- "language": attrs.get("lk.eou.language"),
259
- },
260
- "tools": {
261
- "function_tools": attrs.get("lk.function_tools"),
262
- "provider_tools": attrs.get("lk.provider_tools"),
263
- "tool_sets": attrs.get("lk.tool_sets"),
264
- },
265
- "session_options": attrs.get("lk.session_options"),
266
- },
440
+ metadata: Dict[str, Any] = {}
441
+
442
+ # timing metrics from lk.llm_metrics (actually available on llm_request)
443
+ if llm_info.get("ttft") is not None:
444
+ metadata["ttft"] = llm_info["ttft"]
445
+ if llm_info.get("tokens_per_second") is not None:
446
+ metadata["tokens_per_second"] = llm_info["tokens_per_second"]
447
+ if llm_info.get("cancelled"):
448
+ metadata["cancelled"] = llm_info["cancelled"]
449
+
450
+ # retry count if available (set on llm_request_run, may be propagated)
451
+ retry_count = attrs.get("lk.retry_count")
452
+ if retry_count is not None and retry_count > 0:
453
+ metadata["retry_count"] = retry_count
454
+
455
+ # attributes that may be available via MetadataSpanProcessor
456
+ # (set by user or propagated from parent spans)
457
+ optional_attrs = {
458
+ "job_id": "lk.job_id",
459
+ "room_name": "lk.room_name",
460
+ "room_id": "room_id",
461
+ "agent_name": "lk.agent_name",
462
+ "participant_id": "lk.participant_id",
463
+ "generation_id": "lk.generation_id",
464
+ "parent_generation_id": "lk.parent_generation_id",
465
+ "speech_id": "lk.speech_id",
466
+ "interrupted": "lk.interrupted",
267
467
  }
268
- return self._clean_none_values(metadata)
468
+
469
+ for key, attr_name in optional_attrs.items():
470
+ value = attrs.get(attr_name)
471
+ if value is not None:
472
+ metadata[key] = value
473
+
474
+ # prefer room_name over room_id
475
+ if "room_id" in metadata and "room_name" not in metadata:
476
+ metadata["room_name"] = metadata.pop("room_id")
477
+ elif "room_id" in metadata:
478
+ del metadata["room_id"]
479
+
480
+ return metadata
269
481
 
270
482
  def _clean_none_values(self, d: Dict[str, Any]) -> Dict[str, Any]:
271
483
  """Recursively remove None values and empty dicts.
@@ -92,8 +92,8 @@ MODEL_PRICING = {
92
92
  # Google Gemini 2.5 Series (2025) - Verified
93
93
  "gemini-2.5-pro": {"input": 1.25, "output": 10.0}, # Up to 200k tokens
94
94
  "gemini-2.5-pro-preview": {"input": 1.25, "output": 10.0},
95
- "gemini-2.5-flash": {"input": 0.15, "output": 0.6}, # Non-thinking
96
- "gemini-2.5-flash-preview": {"input": 0.15, "output": 0.6},
95
+ "gemini-2.5-flash": {"input": 0.30, "output": 2.5}, # Non-thinking
96
+ "gemini-2.5-flash-preview": {"input": 0.30, "output": 2.5},
97
97
 
98
98
  # Google Gemini 2.0 Series - Verified
99
99
  "gemini-2.0-flash": {"input": 0.1, "output": 0.4},
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lucidicai
3
- Version: 3.4.2
3
+ Version: 3.4.3
4
4
  Summary: Lucidic AI Python SDK
5
5
  Author: Andy Liang
6
6
  Author-email: andy@lucidic.ai
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name="lucidicai",
5
- version="3.4.2",
5
+ version="3.4.3",
6
6
  packages=find_packages(),
7
7
  install_requires=[
8
8
  "requests>=2.25.1",
(Remaining files listed above are unchanged between versions.)