lucidicai 3.1.0__py3-none-any.whl → 3.2.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- lucidicai/__init__.py +6 -1
- lucidicai/integrations/__init__.py +9 -0
- lucidicai/integrations/livekit.py +409 -0
- lucidicai/sdk/decorators.py +2 -2
- {lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/METADATA +1 -1
- {lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/RECORD +8 -6
- {lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/WHEEL +0 -0
- {lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/top_level.txt +0 -0
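The headline change in 3.2.0 is the new LiveKit voice-agent integration. A minimal wiring sketch, condensed from the docstring of the new lucidicai/integrations/livekit.py module shown below (the metadata value and the omitted agent setup are illustrative, not part of the diff):

    from lucidicai import LucidicAI
    from lucidicai.integrations.livekit import setup_livekit
    from livekit.agents import AgentServer, JobContext
    from livekit.agents.telemetry import set_tracer_provider

    client = LucidicAI(api_key="...", agent_id="...")
    server = AgentServer()

    @server.rtc_session()
    async def entrypoint(ctx: JobContext):
        # Creates a Lucidic session and returns a TracerProvider wired to the new exporter.
        trace_provider = setup_livekit(
            client=client,
            session_id=ctx.room.name,
            metadata={"customer_id": "acme"},  # optional; attached to every exported span
        )
        set_tracer_provider(trace_provider)
        # ... rest of agent setup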
lucidicai/__init__.py
CHANGED

@@ -34,8 +34,11 @@ from .core.errors import (
     FeatureFlagError,
 )
 
+# Integrations
+from .integrations.livekit import setup_livekit
+
 # Version
-__version__ = "3.1.0"
+__version__ = "3.2.0"
 
 # All exports
 __all__ = [
@@ -50,6 +53,8 @@ __all__ = [
     "InvalidOperationError",
     "PromptError",
     "FeatureFlagError",
+    # Integrations
+    "setup_livekit",
     # Version
     "__version__",
 ]
lucidicai/integrations/__init__.py
ADDED

@@ -0,0 +1,9 @@
+"""Third-party integrations for Lucidic AI SDK.
+
+This module provides integrations with external platforms and frameworks
+that have their own OpenTelemetry instrumentation.
+"""
+
+from .livekit import setup_livekit, LucidicLiveKitExporter
+
+__all__ = ["setup_livekit", "LucidicLiveKitExporter"]
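Once 3.2.0 is installed, either import path from the two __init__.py diffs above works; a short sketch:

    from lucidicai import setup_livekit                         # re-exported at the package root
    from lucidicai.integrations import LucidicLiveKitExporter   # exporter class, exported by the new subpackage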
lucidicai/integrations/livekit.py
ADDED

@@ -0,0 +1,409 @@
+"""LiveKit voice agent integration for Lucidic AI SDK.
+
+This module provides OpenTelemetry span export for LiveKit voice agents,
+converting LiveKit's internal spans into Lucidic events with full metadata
+support including latency diagnostics, EOU detection data, and tool context.
+
+Example:
+    from lucidicai import LucidicAI
+    from lucidicai.integrations.livekit import setup_livekit
+    from livekit.agents import AgentServer, JobContext, AgentSession, cli
+    from livekit.agents.telemetry import set_tracer_provider
+
+    client = LucidicAI(api_key="...", agent_id="...")
+    server = AgentServer()
+
+    @server.rtc_session()
+    async def entrypoint(ctx: JobContext):
+        trace_provider = setup_livekit(
+            client=client,
+            session_id=ctx.room.name,
+        )
+        set_tracer_provider(trace_provider)
+        # ... rest of agent setup
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
+
+from opentelemetry import context as otel_context
+from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
+from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+from opentelemetry.trace import Span
+from opentelemetry.util.types import AttributeValue
+
+if TYPE_CHECKING:
+    from opentelemetry.sdk.trace import TracerProvider
+    from ..client import LucidicAI
+
+logger = logging.getLogger("lucidicai.integrations.livekit")
+
+
+class LucidicLiveKitExporter(SpanExporter):
+    """Custom OpenTelemetry exporter for LiveKit voice agent spans.
+
+    Converts LiveKit spans (llm_node, function_tool) into Lucidic events
+    with full metadata including latency diagnostics, EOU detection,
+    and tool context.
+    """
+
+    # livekit span names we care about
+    LIVEKIT_LLM_SPANS = {"llm_node", "function_tool"}
+
+    def __init__(self, client: "LucidicAI", session_id: str):
+        """Initialize the exporter.
+
+        Args:
+            client: Initialized LucidicAI client instance
+            session_id: Session ID for all events created by this exporter
+        """
+        self._client = client
+        self._session_id = session_id
+        self._shutdown = False
+
+    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
+        """Export spans to Lucidic as events.
+
+        Args:
+            spans: Sequence of completed OpenTelemetry spans
+
+        Returns:
+            SpanExportResult indicating success or failure
+        """
+        if self._shutdown:
+            return SpanExportResult.SUCCESS
+
+        try:
+            for span in spans:
+                if self._is_livekit_llm_span(span):
+                    self._process_span(span)
+            return SpanExportResult.SUCCESS
+        except Exception as e:
+            logger.error(f"[LiveKit] Failed to export spans: {e}")
+            return SpanExportResult.FAILURE
+
+    def _is_livekit_llm_span(self, span: ReadableSpan) -> bool:
+        """Check if span is a LiveKit LLM-related span we should process."""
+        return span.name in self.LIVEKIT_LLM_SPANS
+
+    def _process_span(self, span: ReadableSpan) -> None:
+        """Process a single LiveKit span and create corresponding Lucidic event."""
+        try:
+            if span.name == "llm_node":
+                event_data = self._convert_llm_span(span)
+                self._client.events.create(**event_data)
+                logger.debug(f"[LiveKit] Created llm_generation event for span {span.name}")
+            elif span.name == "function_tool":
+                event_data = self._convert_function_span(span)
+                self._client.events.create(**event_data)
+                logger.debug(f"[LiveKit] Created function_call event for span {span.name}")
+        except Exception as e:
+            logger.error(f"[LiveKit] Failed to process span {span.name}: {e}")
+
+    def _convert_llm_span(self, span: ReadableSpan) -> Dict[str, Any]:
+        """Convert an llm_node span to llm_generation event data."""
+        attrs = dict(span.attributes or {})
+
+        # extract messages from chat context
+        messages = self._parse_chat_context(attrs.get("lk.chat_ctx"))
+
+        # extract output text
+        output = attrs.get("lk.response.text", "")
+
+        # build metadata with diagnostics
+        metadata = self._build_metadata(attrs)
+
+        # calculate duration
+        duration = None
+        if span.start_time and span.end_time:
+            duration = (span.end_time - span.start_time) / 1e9
+
+        # extract timing for occurred_at
+        occurred_at = None
+        if span.start_time:
+            occurred_at = datetime.fromtimestamp(
+                span.start_time / 1e9, tz=timezone.utc
+            ).isoformat()
+
+        return {
+            "type": "llm_generation",
+            "session_id": self._session_id,
+            "model": attrs.get("gen_ai.request.model", "unknown"),
+            "messages": messages,
+            "output": output,
+            "input_tokens": attrs.get("gen_ai.usage.input_tokens"),
+            "output_tokens": attrs.get("gen_ai.usage.output_tokens"),
+            "duration": duration,
+            "occurred_at": occurred_at,
+            "metadata": metadata,
+        }
+
+    def _convert_function_span(self, span: ReadableSpan) -> Dict[str, Any]:
+        """Convert a function_tool span to function_call event data."""
+        attrs = dict(span.attributes or {})
+
+        # calculate duration
+        duration = None
+        if span.start_time and span.end_time:
+            duration = (span.end_time - span.start_time) / 1e9
+
+        # extract timing for occurred_at
+        occurred_at = None
+        if span.start_time:
+            occurred_at = datetime.fromtimestamp(
+                span.start_time / 1e9, tz=timezone.utc
+            ).isoformat()
+
+        # build metadata (subset for function calls)
+        metadata = {
+            "job_id": attrs.get("lk.job_id"),
+            "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
+            "agent_name": attrs.get("lk.agent_name"),
+            "generation_id": attrs.get("lk.generation_id"),
+            "tool_call_id": attrs.get("lk.function_tool.id"),
+        }
+        metadata = self._clean_none_values(metadata)
+
+        return {
+            "type": "function_call",
+            "session_id": self._session_id,
+            "function_name": attrs.get("lk.function_tool.name", "unknown"),
+            "arguments": attrs.get("lk.function_tool.arguments"),
+            "return_value": attrs.get("lk.function_tool.output"),
+            "duration": duration,
+            "occurred_at": occurred_at,
+            "metadata": metadata,
+        }
+
+    def _parse_chat_context(self, chat_ctx_json: Optional[str]) -> List[Dict[str, str]]:
+        """Parse LiveKit's lk.chat_ctx JSON into Lucidic messages format.
+
+        Args:
+            chat_ctx_json: JSON string of LiveKit chat context
+
+        Returns:
+            List of message dicts with role and content keys
+        """
+        if not chat_ctx_json:
+            return []
+
+        try:
+            chat_ctx = json.loads(chat_ctx_json)
+            messages = []
+
+            # livekit chat context has 'items' list
+            items = chat_ctx.get("items", [])
+            for item in items:
+                if item.get("type") == "message":
+                    role = item.get("role", "user")
+                    # livekit stores content in various ways
+                    content = item.get("text_content", "")
+                    if not content:
+                        # try content array
+                        content_list = item.get("content", [])
+                        if isinstance(content_list, list):
+                            text_parts = []
+                            for c in content_list:
+                                if isinstance(c, str):
+                                    text_parts.append(c)
+                                elif isinstance(c, dict) and c.get("type") == "text":
+                                    text_parts.append(c.get("text", ""))
+                            content = " ".join(text_parts)
+                        elif isinstance(content_list, str):
+                            content = content_list
+
+                    messages.append({"role": role, "content": content})
+
+            return messages
+        except (json.JSONDecodeError, TypeError) as e:
+            logger.debug(f"[LiveKit] Failed to parse chat context: {e}")
+            return []
+
+    def _build_metadata(self, attrs: Dict[str, Any]) -> Dict[str, Any]:
+        """Build metadata dict with diagnostics from span attributes.
+
+        Args:
+            attrs: Span attributes dictionary
+
+        Returns:
+            Cleaned metadata dict with nested diagnostics
+        """
+        metadata = {
+            # identity & tracking
+            "job_id": attrs.get("lk.job_id"),
+            "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
+            "agent_name": attrs.get("lk.agent_name"),
+            "participant_id": attrs.get("lk.participant_id"),
+            "generation_id": attrs.get("lk.generation_id"),
+            "parent_generation_id": attrs.get("lk.parent_generation_id"),
+            "speech_id": attrs.get("lk.speech_id"),
+            "interrupted": attrs.get("lk.interrupted"),
+            # diagnostics (nested)
+            "diagnostics": {
+                "latency": {
+                    "llm_ttft": attrs.get("llm_node_ttft"),
+                    "tts_ttfb": attrs.get("tts_node_ttfb"),
+                    "e2e_latency": attrs.get("e2e_latency"),
+                    "transcription_delay": attrs.get("lk.transcription_delay"),
+                    "end_of_turn_delay": attrs.get("lk.end_of_turn_delay"),
+                },
+                "eou": {
+                    "probability": attrs.get("lk.eou.probability"),
+                    "threshold": attrs.get("lk.eou.unlikely_threshold"),
+                    "delay": attrs.get("lk.eou.endpointing_delay"),
+                    "language": attrs.get("lk.eou.language"),
+                },
+                "tools": {
+                    "function_tools": attrs.get("lk.function_tools"),
+                    "provider_tools": attrs.get("lk.provider_tools"),
+                    "tool_sets": attrs.get("lk.tool_sets"),
+                },
+                "session_options": attrs.get("lk.session_options"),
+            },
+        }
+        return self._clean_none_values(metadata)
+
+    def _clean_none_values(self, d: Dict[str, Any]) -> Dict[str, Any]:
+        """Recursively remove None values and empty dicts.
+
+        Args:
+            d: Dictionary to clean
+
+        Returns:
+            Cleaned dictionary with no None values or empty nested dicts
+        """
+        cleaned = {}
+        for k, v in d.items():
+            if isinstance(v, dict):
+                nested = self._clean_none_values(v)
+                if nested: # only include non-empty dicts
+                    cleaned[k] = nested
+            elif v is not None:
+                cleaned[k] = v
+        return cleaned
+
+    def shutdown(self) -> None:
+        """Shutdown the exporter."""
+        self._shutdown = True
+        logger.debug("[LiveKit] Exporter shutdown")
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush pending exports.
+
+        Returns:
+            True (events are created synchronously)
+        """
+        return True
+
+
+class _MetadataSpanProcessor(SpanProcessor):
+    """Span processor that adds metadata to all spans.
+
+    This allows users to attach custom metadata (e.g., customer_id, environment)
+    that will be included on every span exported.
+    """
+
+    def __init__(self, metadata: Dict[str, AttributeValue]):
+        """Initialize with metadata to attach.
+
+        Args:
+            metadata: Dictionary of metadata key-value pairs
+        """
+        self._metadata = metadata
+
+    def on_start(
+        self, span: Span, parent_context: Optional[otel_context.Context] = None
+    ) -> None:
+        """Called when a span is started - attach metadata."""
+        span.set_attributes(self._metadata)
+
+    def on_end(self, span: ReadableSpan) -> None:
+        """Called when a span ends - no action needed."""
+        pass
+
+    def shutdown(self) -> None:
+        """Shutdown the processor."""
+        pass
+
+    def force_flush(self, timeout_millis: int = 30000) -> bool:
+        """Force flush - no buffering in this processor."""
+        return True
+
+
+def setup_livekit(
+    client: "LucidicAI",
+    session_id: str,
+    session_name: Optional[str] = None,
+    metadata: Optional[Dict[str, AttributeValue]] = None,
+) -> "TracerProvider":
+    """Set up Lucidic tracing for LiveKit voice agents.
+
+    Automatically creates a Lucidic session and configures OpenTelemetry
+    to export LiveKit spans as Lucidic events.
+
+    Args:
+        client: Initialized LucidicAI client instance
+        session_id: Session ID for all events (typically ctx.room.name)
+        session_name: Optional human-readable session name
+        metadata: Optional metadata to attach to all spans (e.g., customer_id)
+
+    Returns:
+        TracerProvider to pass to livekit's set_tracer_provider()
+
+    Example:
+        from lucidicai import LucidicAI
+        from lucidicai.integrations.livekit import setup_livekit
+        from livekit.agents import AgentServer, JobContext, AgentSession, cli
+        from livekit.agents.telemetry import set_tracer_provider
+
+        client = LucidicAI(api_key="...", agent_id="...")
+        server = AgentServer()
+
+        @server.rtc_session()
+        async def entrypoint(ctx: JobContext):
+            trace_provider = setup_livekit(
+                client=client,
+                session_id=ctx.room.name,
+                session_name=f"Voice Call - {ctx.room.name}",
+            )
+            set_tracer_provider(trace_provider)
+
+            async def cleanup():
+                trace_provider.force_flush()
+            ctx.add_shutdown_callback(cleanup)
+
+            session = AgentSession(...)
+            await session.start(agent=MyAgent(), room=ctx.room)
+
+        if __name__ == "__main__":
+            cli.run_app(server)
+    """
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+    # auto-create Lucidic session
+    client.sessions.create(
+        session_id=session_id,
+        session_name=session_name or f"LiveKit Voice Session - {session_id}",
+    )
+    logger.info(f"[LiveKit] Created Lucidic session: {session_id}")
+
+    # create exporter
+    exporter = LucidicLiveKitExporter(client, session_id)
+
+    # create tracer provider
+    trace_provider = TracerProvider()
+
+    # add metadata processor if metadata provided
+    if metadata:
+        trace_provider.add_span_processor(_MetadataSpanProcessor(metadata))
+
+    # add exporter via batch processor
+    trace_provider.add_span_processor(BatchSpanProcessor(exporter))
+
+    logger.info("[LiveKit] Lucidic tracing configured")
+    return trace_provider
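For reference, _parse_chat_context above consumes the JSON that LiveKit records in the lk.chat_ctx span attribute. A sketch with an assumed, illustrative payload (real chat contexts carry more fields; only the keys the parser reads are shown):

    import json

    # Illustrative lk.chat_ctx value; field names mirror what _parse_chat_context looks for.
    chat_ctx_json = json.dumps({
        "items": [
            {"type": "message", "role": "system", "text_content": "You are a voice agent."},
            {"type": "message", "role": "user",
             "content": [{"type": "text", "text": "What's the weather?"}]},
            {"type": "function_call"},  # non-message items are skipped by the parser
        ]
    })

    # Passed through the exporter, this yields:
    # [{"role": "system", "content": "You are a voice agent."},
    #  {"role": "user", "content": "What's the weather?"}]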
lucidicai/sdk/decorators.py
CHANGED

@@ -51,7 +51,7 @@ def _emit_event_to_client(
             "session_id": session_id,
             **event_data,
         }
-        response = client._resources["events"].
+        response = client._resources["events"].create(**event_payload)
         return response.get("event_id") if response else None
     except Exception as e:
         debug(f"[Decorator] Failed to emit event: {e}")
@@ -81,7 +81,7 @@ async def _aemit_event_to_client(
             "session_id": session_id,
             **event_data,
         }
-        response = await client._resources["events"].
+        response = await client._resources["events"].acreate(**event_payload)
         return response.get("event_id") if response else None
     except Exception as e:
         debug(f"[Decorator] Failed to emit async event: {e}")
{lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/RECORD
CHANGED

@@ -1,4 +1,4 @@
-lucidicai/__init__.py,sha256=
+lucidicai/__init__.py,sha256=L5Zy-XJVTIqJAi3nrh8lAp8J4ii4yaEudaQxB94tXQM,1284
 lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
 lucidicai/client.py,sha256=WnkUeo_Z0uP4xh66gNC6MJhYdyhRpjC61OBjHEJLHq4,14674
 lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
@@ -32,6 +32,8 @@ lucidicai/core/__init__.py,sha256=b0YQkd8190Y_GgwUcmf0tOiSLARd7L4kq4jwfhhGAyI,39
 lucidicai/core/config.py,sha256=06XZPOCpB8YY9nzqt7deR3CP6MAQIKCTZYdSzscAPDY,8730
 lucidicai/core/errors.py,sha256=bYSRPqadXUCPadVLb-2fj63CB6jlAnfDeu2azHB2z8M,2137
 lucidicai/core/types.py,sha256=KabcTBQe7SemigccKfJSDiJmjSJDJJvvtefSd8pfrJI,702
+lucidicai/integrations/__init__.py,sha256=9eJxdcw9C_zLXLQGdKK-uwCYhjdnEelrXbYYNo48ewk,292
+lucidicai/integrations/livekit.py,sha256=vcP55JFTmBJtpSP6MCnQ94WQdbR221VZFwvXxs3oOq8,15036
 lucidicai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lucidicai/providers/anthropic_handler.py,sha256=GZEa4QOrjZ9ftu_qTwY3L410HwKzkXgN7omYRsEQ4LU,10174
 lucidicai/providers/base_providers.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
@@ -52,7 +54,7 @@ lucidicai/providers/universal_image_interceptor.py,sha256=7d-hw4xihRwvvA1AP8-vqY
 lucidicai/sdk/__init__.py,sha256=UrkV9FYbZkBxaX9qwxGbCJdXp-JqMpn0_u-huO9Y-ec,32
 lucidicai/sdk/bound_decorators.py,sha256=SzmNZwORhArXL9D8T8BpPltT-jQ-tVpy71t8bJWOIU0,12151
 lucidicai/sdk/context.py,sha256=y58_C9JlBML_xFPUbmAn6WuxsnM03bECJ2pKBWz0TuQ,10386
-lucidicai/sdk/decorators.py,sha256
+lucidicai/sdk/decorators.py,sha256=FV0259ki6x26d2-YKZJwkUNzHain-QGghq94OzAk2Os,15652
 lucidicai/sdk/error_boundary.py,sha256=IPr5wS9rS7ZQNgEaBwK53UaixAm6L2rijKKFfxcxjUI,9190
 lucidicai/sdk/event.py,sha256=hpBJfqKteOuQKoZfhxQfbeVOrdmR8wCcQc8P6658VRo,22658
 lucidicai/sdk/event_builder.py,sha256=Z376RKlStM7IBcAm5LKgTDh3x_fjmcvkWltUrjZ6RAc,10304
@@ -91,7 +93,7 @@ lucidicai/utils/images.py,sha256=z8mlIKgFfrIbuk-l4L2rB62uw_uPO79sHPXPY7eLu2A,128
 lucidicai/utils/logger.py,sha256=R3B3gSee64F6UVHUrShihBq_O7W7bgfrBiVDXTO3Isg,4777
 lucidicai/utils/queue.py,sha256=8DQwnGw7pINEJ0dNSkB0PhdPW-iBQQ-YZg23poe4umE,17323
 lucidicai/utils/serialization.py,sha256=KdOREZd7XBxFBAZ86DePMfYPzSVyKr4RcgUa82aFxrs,820
-lucidicai-3.
-lucidicai-3.
-lucidicai-3.
-lucidicai-3.
+lucidicai-3.2.0.dist-info/METADATA,sha256=4H-66e1GD2248FpoRJBLUb1oW3ICnJhIYXcngIirq6Q,902
+lucidicai-3.2.0.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+lucidicai-3.2.0.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
+lucidicai-3.2.0.dist-info/RECORD,,

{lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/WHEEL
File without changes

{lucidicai-3.1.0.dist-info → lucidicai-3.2.0.dist-info}/top_level.txt
File without changes