lucidicai 3.1.1-py3-none-any.whl → 3.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lucidicai/__init__.py CHANGED
@@ -34,8 +34,11 @@ from .core.errors import (
      FeatureFlagError,
  )
  
+ # Integrations
+ from .integrations.livekit import setup_livekit
+
  # Version
- __version__ = "3.1.1"
+ __version__ = "3.3.0"
  
  # All exports
  __all__ = [
@@ -50,6 +53,8 @@ __all__ = [
      "InvalidOperationError",
      "PromptError",
      "FeatureFlagError",
+     # Integrations
+     "setup_livekit",
      # Version
      "__version__",
  ]
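
With this change, setup_livekit is re-exported at the package root (it is imported in __init__.py and listed in __all__), so from 3.3.0 onward both import paths below should resolve to the same function. A minimal sketch:

    # both imports refer to the same function as of 3.3.0
    from lucidicai import setup_livekit
    from lucidicai.integrations.livekit import setup_livekit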
lucidicai/client.py CHANGED
@@ -64,6 +64,8 @@ class LucidicAI:
          auto_end: Whether sessions auto-end on context exit or process shutdown.
          production: If True, suppress SDK errors. If None, checks LUCIDIC_PRODUCTION env var.
          region: Deployment region ("us", "india"). Falls back to LUCIDIC_REGION env var.
+         base_url: Custom base URL for API requests. Takes precedence over region.
+             Falls back to LUCIDIC_BASE_URL env var.
          **kwargs: Additional configuration options passed to SDKConfig.
  
      Raises:
@@ -93,6 +95,13 @@ class LucidicAI:
              agent_id="...",
              region="india"
          )
+
+         # Custom base URL (e.g., self-hosted deployment)
+         client = LucidicAI(
+             api_key="...",
+             agent_id="...",
+             base_url="https://custom.example.com/api"
+         )
      """
  
      def __init__(
@@ -103,6 +112,7 @@ class LucidicAI:
          auto_end: bool = True,
          production: Optional[bool] = None,
          region: Optional[str] = None,
+         base_url: Optional[str] = None,
          **kwargs,
      ):
          # Generate unique client ID for telemetry routing
@@ -119,6 +129,7 @@ class LucidicAI:
              agent_id=agent_id,
              auto_end=auto_end,
              region=region,
+             base_url=base_url,
              **kwargs,
          )
  
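
Per the updated docstring, base_url can also come from the LUCIDIC_BASE_URL environment variable when the constructor argument is omitted, and an explicit base_url always wins over region. A minimal sketch of the env-var route (the URL is a placeholder):

    import os
    from lucidicai import LucidicAI

    # equivalent to passing base_url=... to the constructor; an explicit
    # base_url argument would still take precedence over region
    os.environ["LUCIDIC_BASE_URL"] = "https://custom.example.com/api"
    client = LucidicAI(api_key="...", agent_id="...")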
lucidicai/core/config.py CHANGED
@@ -53,16 +53,20 @@ class NetworkConfig:
      connection_pool_maxsize: int = 100
  
      @classmethod
-     def from_env(cls, region: Optional[str] = None, debug: bool = False) -> 'NetworkConfig':
+     def from_env(cls, region: Optional[str] = None, base_url: Optional[str] = None, debug: bool = False) -> 'NetworkConfig':
          """Load network configuration from environment variables.
  
-         Priority: debug > region argument > LUCIDIC_REGION env var > default
+         Priority: debug > base_url argument > LUCIDIC_BASE_URL > region argument > LUCIDIC_REGION > default
  
          Args:
              region: Region string override (e.g., "us", "india")
-             debug: If True, use localhost URL regardless of region
+             base_url: Custom base URL override (takes precedence over region)
+             debug: If True, use localhost URL regardless of other settings
          """
-         # If debug mode, use localhost (ignores region)
+         import logging
+         logger = logging.getLogger("Lucidic")
+
+         # If debug mode, use localhost (highest priority)
          if debug:
              return cls(
                  base_url=DEBUG_URL,
@@ -74,7 +78,28 @@ class NetworkConfig:
                  connection_pool_maxsize=int(os.getenv("LUCIDIC_CONNECTION_POOL_MAXSIZE", "100"))
              )
  
-         # Resolve region: argument > env var > default
+         # Resolve base_url: argument > env var
+         resolved_base_url = base_url or os.getenv("LUCIDIC_BASE_URL")
+
+         if resolved_base_url:
+             # base_url takes precedence over region
+             region_str = region or os.getenv("LUCIDIC_REGION")
+             if region_str:
+                 logger.warning(
+                     f"[LucidicAI] Both base_url and region specified. "
+                     f"Using base_url '{resolved_base_url}', ignoring region '{region_str}'."
+                 )
+             return cls(
+                 base_url=resolved_base_url,
+                 region=None,  # Custom deployment, no region
+                 timeout=int(os.getenv("LUCIDIC_TIMEOUT", "30")),
+                 max_retries=int(os.getenv("LUCIDIC_MAX_RETRIES", "3")),
+                 backoff_factor=float(os.getenv("LUCIDIC_BACKOFF_FACTOR", "0.5")),
+                 connection_pool_size=int(os.getenv("LUCIDIC_CONNECTION_POOL_SIZE", "20")),
+                 connection_pool_maxsize=int(os.getenv("LUCIDIC_CONNECTION_POOL_MAXSIZE", "100"))
+             )
+
+         # Fall back to region-based URL resolution
          region_str = region or os.getenv("LUCIDIC_REGION")
          resolved_region = Region.from_string(region_str) if region_str else DEFAULT_REGION
  
@@ -147,11 +172,13 @@ class SDKConfig:
      debug: bool = False
  
      @classmethod
-     def from_env(cls, region: Optional[str] = None, **overrides) -> 'SDKConfig':
+     def from_env(cls, region: Optional[str] = None, base_url: Optional[str] = None, **overrides) -> 'SDKConfig':
          """Create configuration from environment variables with optional overrides.
  
          Args:
              region: Region string (e.g., "us", "india"). Priority: arg > env var > default
+             base_url: Custom base URL override. Takes precedence over region.
+                 Falls back to LUCIDIC_BASE_URL env var.
              **overrides: Additional configuration overrides
          """
          from dotenv import load_dotenv
@@ -165,7 +192,7 @@ class SDKConfig:
              auto_end=os.getenv("LUCIDIC_AUTO_END", "true").lower() == "true",
              production_monitoring=False,
              blob_threshold=int(os.getenv("LUCIDIC_BLOB_THRESHOLD", "65536")),
-             network=NetworkConfig.from_env(region=region, debug=debug),
+             network=NetworkConfig.from_env(region=region, base_url=base_url, debug=debug),
              error_handling=ErrorHandlingConfig.from_env(),
              telemetry=TelemetryConfig.from_env(),
              environment=Environment.DEBUG if debug else Environment.PRODUCTION,
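
The resolution order documented above (debug > base_url argument > LUCIDIC_BASE_URL > region argument > LUCIDIC_REGION > default) is easiest to see as a small standalone sketch; resolve_base_url, debug_url, and region_urls below are illustrative stand-ins, not SDK names:

    import os

    def resolve_base_url(region=None, base_url=None, debug=False,
                         debug_url="http://localhost:8000", region_urls=None):
        # illustrative restatement of NetworkConfig.from_env URL priority
        region_urls = region_urls or {"us": "https://example.invalid/api"}  # placeholder map
        if debug:                                            # 1. debug wins outright
            return debug_url
        resolved = base_url or os.getenv("LUCIDIC_BASE_URL")
        if resolved:                                         # 2. explicit base_url, then env var
            return resolved
        region_str = region or os.getenv("LUCIDIC_REGION")   # 3. region argument, then env var
        return region_urls.get(region_str or "us", region_urls["us"])  # 4. default region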
lucidicai/integrations/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """Third-party integrations for Lucidic AI SDK.
+
+ This module provides integrations with external platforms and frameworks
+ that have their own OpenTelemetry instrumentation.
+ """
+
+ from .livekit import setup_livekit, LucidicLiveKitExporter
+
+ __all__ = ["setup_livekit", "LucidicLiveKitExporter"]
lucidicai/integrations/livekit.py ADDED
@@ -0,0 +1,409 @@
+ """LiveKit voice agent integration for Lucidic AI SDK.
+
+ This module provides OpenTelemetry span export for LiveKit voice agents,
+ converting LiveKit's internal spans into Lucidic events with full metadata
+ support including latency diagnostics, EOU detection data, and tool context.
+
+ Example:
+     from lucidicai import LucidicAI
+     from lucidicai.integrations.livekit import setup_livekit
+     from livekit.agents import AgentServer, JobContext, AgentSession, cli
+     from livekit.agents.telemetry import set_tracer_provider
+
+     client = LucidicAI(api_key="...", agent_id="...")
+     server = AgentServer()
+
+     @server.rtc_session()
+     async def entrypoint(ctx: JobContext):
+         trace_provider = setup_livekit(
+             client=client,
+             session_id=ctx.room.name,
+         )
+         set_tracer_provider(trace_provider)
+         # ... rest of agent setup
+ """
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ from datetime import datetime, timezone
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence
+
+ from opentelemetry import context as otel_context
+ from opentelemetry.sdk.trace import ReadableSpan, SpanProcessor
+ from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult
+ from opentelemetry.trace import Span
+ from opentelemetry.util.types import AttributeValue
+
+ if TYPE_CHECKING:
+     from opentelemetry.sdk.trace import TracerProvider
+     from ..client import LucidicAI
+
+ logger = logging.getLogger("lucidicai.integrations.livekit")
+
+
+ class LucidicLiveKitExporter(SpanExporter):
+     """Custom OpenTelemetry exporter for LiveKit voice agent spans.
+
+     Converts LiveKit spans (llm_node, function_tool) into Lucidic events
+     with full metadata including latency diagnostics, EOU detection,
+     and tool context.
+     """
+
+     # livekit span names we care about
+     LIVEKIT_LLM_SPANS = {"llm_node", "function_tool"}
+
+     def __init__(self, client: "LucidicAI", session_id: str):
+         """Initialize the exporter.
+
+         Args:
+             client: Initialized LucidicAI client instance
+             session_id: Session ID for all events created by this exporter
+         """
+         self._client = client
+         self._session_id = session_id
+         self._shutdown = False
+
+     def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
+         """Export spans to Lucidic as events.
+
+         Args:
+             spans: Sequence of completed OpenTelemetry spans
+
+         Returns:
+             SpanExportResult indicating success or failure
+         """
+         if self._shutdown:
+             return SpanExportResult.SUCCESS
+
+         try:
+             for span in spans:
+                 if self._is_livekit_llm_span(span):
+                     self._process_span(span)
+             return SpanExportResult.SUCCESS
+         except Exception as e:
+             logger.error(f"[LiveKit] Failed to export spans: {e}")
+             return SpanExportResult.FAILURE
+
+     def _is_livekit_llm_span(self, span: ReadableSpan) -> bool:
+         """Check if span is a LiveKit LLM-related span we should process."""
+         return span.name in self.LIVEKIT_LLM_SPANS
+
+     def _process_span(self, span: ReadableSpan) -> None:
+         """Process a single LiveKit span and create corresponding Lucidic event."""
+         try:
+             if span.name == "llm_node":
+                 event_data = self._convert_llm_span(span)
+                 self._client.events.create(**event_data)
+                 logger.debug(f"[LiveKit] Created llm_generation event for span {span.name}")
+             elif span.name == "function_tool":
+                 event_data = self._convert_function_span(span)
+                 self._client.events.create(**event_data)
+                 logger.debug(f"[LiveKit] Created function_call event for span {span.name}")
+         except Exception as e:
+             logger.error(f"[LiveKit] Failed to process span {span.name}: {e}")
+
+     def _convert_llm_span(self, span: ReadableSpan) -> Dict[str, Any]:
+         """Convert an llm_node span to llm_generation event data."""
+         attrs = dict(span.attributes or {})
+
+         # extract messages from chat context
+         messages = self._parse_chat_context(attrs.get("lk.chat_ctx"))
+
+         # extract output text
+         output = attrs.get("lk.response.text", "")
+
+         # build metadata with diagnostics
+         metadata = self._build_metadata(attrs)
+
+         # calculate duration
+         duration = None
+         if span.start_time and span.end_time:
+             duration = (span.end_time - span.start_time) / 1e9
+
+         # extract timing for occurred_at
+         occurred_at = None
+         if span.start_time:
+             occurred_at = datetime.fromtimestamp(
+                 span.start_time / 1e9, tz=timezone.utc
+             ).isoformat()
+
+         return {
+             "type": "llm_generation",
+             "session_id": self._session_id,
+             "model": attrs.get("gen_ai.request.model", "unknown"),
+             "messages": messages,
+             "output": output,
+             "input_tokens": attrs.get("gen_ai.usage.input_tokens"),
+             "output_tokens": attrs.get("gen_ai.usage.output_tokens"),
+             "duration": duration,
+             "occurred_at": occurred_at,
+             "metadata": metadata,
+         }
+
+     def _convert_function_span(self, span: ReadableSpan) -> Dict[str, Any]:
+         """Convert a function_tool span to function_call event data."""
+         attrs = dict(span.attributes or {})
+
+         # calculate duration
+         duration = None
+         if span.start_time and span.end_time:
+             duration = (span.end_time - span.start_time) / 1e9
+
+         # extract timing for occurred_at
+         occurred_at = None
+         if span.start_time:
+             occurred_at = datetime.fromtimestamp(
+                 span.start_time / 1e9, tz=timezone.utc
+             ).isoformat()
+
+         # build metadata (subset for function calls)
+         metadata = {
+             "job_id": attrs.get("lk.job_id"),
+             "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
+             "agent_name": attrs.get("lk.agent_name"),
+             "generation_id": attrs.get("lk.generation_id"),
+             "tool_call_id": attrs.get("lk.function_tool.id"),
+         }
+         metadata = self._clean_none_values(metadata)
+
+         return {
+             "type": "function_call",
+             "session_id": self._session_id,
+             "function_name": attrs.get("lk.function_tool.name", "unknown"),
+             "arguments": attrs.get("lk.function_tool.arguments"),
+             "return_value": attrs.get("lk.function_tool.output"),
+             "duration": duration,
+             "occurred_at": occurred_at,
+             "metadata": metadata,
+         }
+
+     def _parse_chat_context(self, chat_ctx_json: Optional[str]) -> List[Dict[str, str]]:
+         """Parse LiveKit's lk.chat_ctx JSON into Lucidic messages format.
+
+         Args:
+             chat_ctx_json: JSON string of LiveKit chat context
+
+         Returns:
+             List of message dicts with role and content keys
+         """
+         if not chat_ctx_json:
+             return []
+
+         try:
+             chat_ctx = json.loads(chat_ctx_json)
+             messages = []
+
+             # livekit chat context has 'items' list
+             items = chat_ctx.get("items", [])
+             for item in items:
+                 if item.get("type") == "message":
+                     role = item.get("role", "user")
+                     # livekit stores content in various ways
+                     content = item.get("text_content", "")
+                     if not content:
+                         # try content array
+                         content_list = item.get("content", [])
+                         if isinstance(content_list, list):
+                             text_parts = []
+                             for c in content_list:
+                                 if isinstance(c, str):
+                                     text_parts.append(c)
+                                 elif isinstance(c, dict) and c.get("type") == "text":
+                                     text_parts.append(c.get("text", ""))
+                             content = " ".join(text_parts)
+                         elif isinstance(content_list, str):
+                             content = content_list
+
+                     messages.append({"role": role, "content": content})
+
+             return messages
+         except (json.JSONDecodeError, TypeError) as e:
+             logger.debug(f"[LiveKit] Failed to parse chat context: {e}")
+             return []
+
+     def _build_metadata(self, attrs: Dict[str, Any]) -> Dict[str, Any]:
+         """Build metadata dict with diagnostics from span attributes.
+
+         Args:
+             attrs: Span attributes dictionary
+
+         Returns:
+             Cleaned metadata dict with nested diagnostics
+         """
+         metadata = {
+             # identity & tracking
+             "job_id": attrs.get("lk.job_id"),
+             "room_name": attrs.get("lk.room_name") or attrs.get("room_id"),
+             "agent_name": attrs.get("lk.agent_name"),
+             "participant_id": attrs.get("lk.participant_id"),
+             "generation_id": attrs.get("lk.generation_id"),
+             "parent_generation_id": attrs.get("lk.parent_generation_id"),
+             "speech_id": attrs.get("lk.speech_id"),
+             "interrupted": attrs.get("lk.interrupted"),
+             # diagnostics (nested)
+             "diagnostics": {
+                 "latency": {
+                     "llm_ttft": attrs.get("llm_node_ttft"),
+                     "tts_ttfb": attrs.get("tts_node_ttfb"),
+                     "e2e_latency": attrs.get("e2e_latency"),
+                     "transcription_delay": attrs.get("lk.transcription_delay"),
+                     "end_of_turn_delay": attrs.get("lk.end_of_turn_delay"),
+                 },
+                 "eou": {
+                     "probability": attrs.get("lk.eou.probability"),
+                     "threshold": attrs.get("lk.eou.unlikely_threshold"),
+                     "delay": attrs.get("lk.eou.endpointing_delay"),
+                     "language": attrs.get("lk.eou.language"),
+                 },
+                 "tools": {
+                     "function_tools": attrs.get("lk.function_tools"),
+                     "provider_tools": attrs.get("lk.provider_tools"),
+                     "tool_sets": attrs.get("lk.tool_sets"),
+                 },
+                 "session_options": attrs.get("lk.session_options"),
+             },
+         }
+         return self._clean_none_values(metadata)
+
+     def _clean_none_values(self, d: Dict[str, Any]) -> Dict[str, Any]:
+         """Recursively remove None values and empty dicts.
+
+         Args:
+             d: Dictionary to clean
+
+         Returns:
+             Cleaned dictionary with no None values or empty nested dicts
+         """
+         cleaned = {}
+         for k, v in d.items():
+             if isinstance(v, dict):
+                 nested = self._clean_none_values(v)
+                 if nested:  # only include non-empty dicts
+                     cleaned[k] = nested
+             elif v is not None:
+                 cleaned[k] = v
+         return cleaned
+
+     def shutdown(self) -> None:
+         """Shutdown the exporter."""
+         self._shutdown = True
+         logger.debug("[LiveKit] Exporter shutdown")
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         """Force flush pending exports.
+
+         Returns:
+             True (events are created synchronously)
+         """
+         return True
+
+
+ class _MetadataSpanProcessor(SpanProcessor):
+     """Span processor that adds metadata to all spans.
+
+     This allows users to attach custom metadata (e.g., customer_id, environment)
+     that will be included on every span exported.
+     """
+
+     def __init__(self, metadata: Dict[str, AttributeValue]):
+         """Initialize with metadata to attach.
+
+         Args:
+             metadata: Dictionary of metadata key-value pairs
+         """
+         self._metadata = metadata
+
+     def on_start(
+         self, span: Span, parent_context: Optional[otel_context.Context] = None
+     ) -> None:
+         """Called when a span is started - attach metadata."""
+         span.set_attributes(self._metadata)
+
+     def on_end(self, span: ReadableSpan) -> None:
+         """Called when a span ends - no action needed."""
+         pass
+
+     def shutdown(self) -> None:
+         """Shutdown the processor."""
+         pass
+
+     def force_flush(self, timeout_millis: int = 30000) -> bool:
+         """Force flush - no buffering in this processor."""
+         return True
+
+
+ def setup_livekit(
+     client: "LucidicAI",
+     session_id: str,
+     session_name: Optional[str] = None,
+     metadata: Optional[Dict[str, AttributeValue]] = None,
+ ) -> "TracerProvider":
+     """Set up Lucidic tracing for LiveKit voice agents.
+
+     Automatically creates a Lucidic session and configures OpenTelemetry
+     to export LiveKit spans as Lucidic events.
+
+     Args:
+         client: Initialized LucidicAI client instance
+         session_id: Session ID for all events (typically ctx.room.name)
+         session_name: Optional human-readable session name
+         metadata: Optional metadata to attach to all spans (e.g., customer_id)
+
+     Returns:
+         TracerProvider to pass to livekit's set_tracer_provider()
+
+     Example:
+         from lucidicai import LucidicAI
+         from lucidicai.integrations.livekit import setup_livekit
+         from livekit.agents import AgentServer, JobContext, AgentSession, cli
+         from livekit.agents.telemetry import set_tracer_provider
+
+         client = LucidicAI(api_key="...", agent_id="...")
+         server = AgentServer()
+
+         @server.rtc_session()
+         async def entrypoint(ctx: JobContext):
+             trace_provider = setup_livekit(
+                 client=client,
+                 session_id=ctx.room.name,
+                 session_name=f"Voice Call - {ctx.room.name}",
+             )
+             set_tracer_provider(trace_provider)
+
+             async def cleanup():
+                 trace_provider.force_flush()
+             ctx.add_shutdown_callback(cleanup)
+
+             session = AgentSession(...)
+             await session.start(agent=MyAgent(), room=ctx.room)
+
+         if __name__ == "__main__":
+             cli.run_app(server)
+     """
+     from opentelemetry.sdk.trace import TracerProvider
+     from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+     # auto-create Lucidic session
+     client.sessions.create(
+         session_id=session_id,
+         session_name=session_name or f"LiveKit Voice Session - {session_id}",
+     )
+     logger.info(f"[LiveKit] Created Lucidic session: {session_id}")
+
+     # create exporter
+     exporter = LucidicLiveKitExporter(client, session_id)
+
+     # create tracer provider
+     trace_provider = TracerProvider()
+
+     # add metadata processor if metadata provided
+     if metadata:
+         trace_provider.add_span_processor(_MetadataSpanProcessor(metadata))
+
+     # add exporter via batch processor
+     trace_provider.add_span_processor(BatchSpanProcessor(exporter))
+
+     logger.info("[LiveKit] Lucidic tracing configured")
+     return trace_provider
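
Both span converters above treat OpenTelemetry start_time/end_time as nanosecond integers: durations are divided by 1e9 to get seconds, and occurred_at is built with datetime.fromtimestamp on the same scaled value. A quick check with made-up timestamps:

    from datetime import datetime, timezone

    start_ns = 1_700_000_000_000_000_000
    end_ns = 1_700_000_001_500_000_000
    duration_s = (end_ns - start_ns) / 1e9  # 1.5 seconds
    occurred_at = datetime.fromtimestamp(start_ns / 1e9, tz=timezone.utc).isoformat()
    print(duration_s, occurred_at)  # 1.5 2023-11-14T22:13:20+00:00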
lucidicai-3.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lucidicai
- Version: 3.1.1
+ Version: 3.3.0
  Summary: Lucidic AI Python SDK
  Author: Andy Liang
  Author-email: andy@lucidic.ai
lucidicai-3.3.0.dist-info/RECORD CHANGED
@@ -1,6 +1,6 @@
- lucidicai/__init__.py,sha256=83k3iIyPlLhhZ_fRyy54x4Ry9WbRhNTK1bczhvHSj_A,1180
+ lucidicai/__init__.py,sha256=gdk3Y-cq-RaTbhBcBE77saBwHSbIvqaScU9gNoTi7_g,1284
  lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
- lucidicai/client.py,sha256=WnkUeo_Z0uP4xh66gNC6MJhYdyhRpjC61OBjHEJLHq4,14674
+ lucidicai/client.py,sha256=LC9KPhZgTnt5klntHoyCQWWpRUE-X58lM80uAaWBNiU,15090
  lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
  lucidicai/context.py,sha256=ruEXAndSv0gQ-YEXLlC4Fx6NNbaylfp_dZxbpwmLZSA,4622
  lucidicai/dataset.py,sha256=wu25X02JyWkht_yQabgQpGZFfzbNTxG6tf5k9ol8Amo,4005
@@ -29,9 +29,11 @@ lucidicai/api/resources/feature_flag.py,sha256=ii412DIkZCEAhrXdGydcpQKveqGlFq4Nl
  lucidicai/api/resources/prompt.py,sha256=tdMVTaLc3DDRbd_R8Xd5mkvpdwQONfr8OwkJRTE0atE,2495
  lucidicai/api/resources/session.py,sha256=jW_bftHdunhLHl_3-k0nqB5FrtLhlFeCF0tMFE82nNw,20761
  lucidicai/core/__init__.py,sha256=b0YQkd8190Y_GgwUcmf0tOiSLARd7L4kq4jwfhhGAyI,39
- lucidicai/core/config.py,sha256=06XZPOCpB8YY9nzqt7deR3CP6MAQIKCTZYdSzscAPDY,8730
+ lucidicai/core/config.py,sha256=q4h-yR35Ay_3znL7vavri6ScfeM69RjHShNNzjoQthc,10194
  lucidicai/core/errors.py,sha256=bYSRPqadXUCPadVLb-2fj63CB6jlAnfDeu2azHB2z8M,2137
  lucidicai/core/types.py,sha256=KabcTBQe7SemigccKfJSDiJmjSJDJJvvtefSd8pfrJI,702
+ lucidicai/integrations/__init__.py,sha256=9eJxdcw9C_zLXLQGdKK-uwCYhjdnEelrXbYYNo48ewk,292
+ lucidicai/integrations/livekit.py,sha256=vcP55JFTmBJtpSP6MCnQ94WQdbR221VZFwvXxs3oOq8,15036
  lucidicai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lucidicai/providers/anthropic_handler.py,sha256=GZEa4QOrjZ9ftu_qTwY3L410HwKzkXgN7omYRsEQ4LU,10174
  lucidicai/providers/base_providers.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
@@ -91,7 +93,7 @@ lucidicai/utils/images.py,sha256=z8mlIKgFfrIbuk-l4L2rB62uw_uPO79sHPXPY7eLu2A,128
  lucidicai/utils/logger.py,sha256=R3B3gSee64F6UVHUrShihBq_O7W7bgfrBiVDXTO3Isg,4777
  lucidicai/utils/queue.py,sha256=8DQwnGw7pINEJ0dNSkB0PhdPW-iBQQ-YZg23poe4umE,17323
  lucidicai/utils/serialization.py,sha256=KdOREZd7XBxFBAZ86DePMfYPzSVyKr4RcgUa82aFxrs,820
- lucidicai-3.1.1.dist-info/METADATA,sha256=OCfPUqhEyRmBCQWTOoKUwbw3hR7T6JNZY9XtxyUbXxE,902
- lucidicai-3.1.1.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
- lucidicai-3.1.1.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
- lucidicai-3.1.1.dist-info/RECORD,,
+ lucidicai-3.3.0.dist-info/METADATA,sha256=p8D3cXfEzfEC4HnYYz68keBQwzNDOkSqifh8H13m1t4,902
+ lucidicai-3.3.0.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
+ lucidicai-3.3.0.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
+ lucidicai-3.3.0.dist-info/RECORD,,