lucidicai 2.0.2__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38) hide show
  1. lucidicai/__init__.py +367 -899
  2. lucidicai/api/__init__.py +1 -0
  3. lucidicai/api/client.py +218 -0
  4. lucidicai/api/resources/__init__.py +1 -0
  5. lucidicai/api/resources/dataset.py +192 -0
  6. lucidicai/api/resources/event.py +88 -0
  7. lucidicai/api/resources/session.py +126 -0
  8. lucidicai/core/__init__.py +1 -0
  9. lucidicai/core/config.py +223 -0
  10. lucidicai/core/errors.py +60 -0
  11. lucidicai/core/types.py +35 -0
  12. lucidicai/sdk/__init__.py +1 -0
  13. lucidicai/sdk/context.py +231 -0
  14. lucidicai/sdk/decorators.py +187 -0
  15. lucidicai/sdk/error_boundary.py +299 -0
  16. lucidicai/sdk/event.py +126 -0
  17. lucidicai/sdk/event_builder.py +304 -0
  18. lucidicai/sdk/features/__init__.py +1 -0
  19. lucidicai/sdk/features/dataset.py +605 -0
  20. lucidicai/sdk/features/feature_flag.py +383 -0
  21. lucidicai/sdk/init.py +361 -0
  22. lucidicai/sdk/shutdown_manager.py +302 -0
  23. lucidicai/telemetry/context_bridge.py +82 -0
  24. lucidicai/telemetry/context_capture_processor.py +25 -9
  25. lucidicai/telemetry/litellm_bridge.py +20 -24
  26. lucidicai/telemetry/lucidic_exporter.py +99 -60
  27. lucidicai/telemetry/openai_patch.py +295 -0
  28. lucidicai/telemetry/openai_uninstrument.py +87 -0
  29. lucidicai/telemetry/telemetry_init.py +16 -1
  30. lucidicai/telemetry/utils/model_pricing.py +278 -0
  31. lucidicai/utils/__init__.py +1 -0
  32. lucidicai/utils/images.py +337 -0
  33. lucidicai/utils/logger.py +168 -0
  34. lucidicai/utils/queue.py +393 -0
  35. {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/METADATA +1 -1
  36. {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/RECORD +38 -9
  37. {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/WHEEL +0 -0
  38. {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/top_level.txt +0 -0
lucidicai/__init__.py CHANGED
@@ -1,941 +1,409 @@
1
- import atexit
2
- import logging
3
- import os
4
- import signal
5
- import sys
6
- import traceback
7
- import threading
8
- from typing import List, Literal, Optional
9
-
10
- from dotenv import load_dotenv
11
-
12
- from .client import Client
13
- from .errors import APIKeyVerificationError, InvalidOperationError, LucidicNotInitializedError, PromptError
14
- from .event import Event
15
- from .session import Session
16
- from .singleton import clear_singletons
17
-
18
- # Import decorators
19
- from .decorators import event
20
- from .context import (
1
+ """Lucidic AI SDK - Clean Export-Only Entry Point
2
+
3
+ This file only contains exports, with all logic moved to appropriate modules.
4
+ """
5
+
6
+ # Import core modules
7
+ from .sdk import init as init_module
8
+ from .sdk import event as event_module
9
+ from .sdk import error_boundary
10
+ from .core.config import get_config
11
+
12
+ # Import raw functions
13
+ from .sdk.init import (
14
+ init as _init,
15
+ get_session_id as _get_session_id,
16
+ clear_state as _clear_state,
17
+ # Thread-local session management (advanced users)
18
+ set_thread_session,
19
+ clear_thread_session,
20
+ get_thread_session,
21
+ )
22
+
23
+ from .sdk.event import (
24
+ create_event as _create_event,
25
+ create_error_event as _create_error_event,
26
+ flush as _flush,
27
+ )
28
+
29
+ # Context management exports
30
+ from .sdk.context import (
21
31
  set_active_session,
32
+ clear_active_session,
22
33
  bind_session,
23
34
  bind_session_async,
24
- clear_active_session,
25
- current_session_id,
26
35
  session,
27
36
  session_async,
28
37
  run_session,
29
38
  run_in_session,
39
+ thread_worker_with_session, # Thread isolation helper
40
+ current_session_id,
41
+ current_parent_event_id,
30
42
  )
31
- from .dataset import get_dataset, get_dataset_items
32
- from .feature_flag import (
33
- get_feature_flag,
34
- get_bool_flag,
35
- get_int_flag,
36
- get_float_flag,
37
- get_string_flag,
38
- get_json_flag,
39
- clear_feature_flag_cache,
40
- FeatureFlagError
41
- )
42
-
43
- ProviderType = Literal[
44
- "openai",
45
- "anthropic",
46
- "langchain",
47
- "pydantic_ai",
48
- "openai_agents",
49
- "litellm",
50
- "bedrock",
51
- "aws_bedrock",
52
- "amazon_bedrock",
53
- "google",
54
- "google_generativeai",
55
- "vertexai",
56
- "vertex_ai",
57
- "cohere",
58
- "groq",
59
- ]
60
-
61
- # Configure logging
62
- logger = logging.getLogger("Lucidic")
63
- if not logger.handlers:
64
- handler = logging.StreamHandler()
65
- formatter = logging.Formatter('[Lucidic] %(message)s')
66
- handler.setFormatter(formatter)
67
- logger.addHandler(handler)
68
- logger.setLevel(logging.INFO)
69
-
70
-
71
- # Crash/exit capture configuration
72
- MAX_ERROR_DESCRIPTION_LENGTH = 16384
73
- _crash_handlers_installed = False
74
- _original_sys_excepthook = None
75
- _original_threading_excepthook = None
76
- _shutdown_lock = threading.Lock()
77
- _is_shutting_down = False
78
-
79
-
80
- def _mask_and_truncate(text: Optional[str]) -> Optional[str]:
81
- """Apply masking and truncate to a safe length. Best effort; never raises."""
82
- if text is None:
83
- return text
84
- try:
85
- masked = Client().mask(text)
86
- except Exception:
87
- masked = text
88
- if masked is None:
89
- return masked
90
- return masked[:MAX_ERROR_DESCRIPTION_LENGTH]
91
-
92
-
93
- def _post_fatal_event(exit_code: int, description: str, extra: Optional[dict] = None) -> None:
94
- """Best-effort creation of a final Lucidic event on fatal paths.
95
-
96
- - Idempotent using a process-wide shutdown flag to avoid duplicates when
97
- multiple hooks fire (signal + excepthook).
98
- - Swallows all exceptions to avoid interfering with shutdown.
99
- """
100
- global _is_shutting_down
101
- with _shutdown_lock:
102
- if _is_shutting_down:
103
- return
104
- _is_shutting_down = True
105
- try:
106
- client = Client()
107
- session = getattr(client, 'session', None)
108
- if not session or getattr(session, 'is_finished', False):
109
- return
110
- arguments = {"exit_code": exit_code}
111
- if extra:
112
- try:
113
- arguments.update(extra)
114
- except Exception:
115
- pass
116
-
117
- # Create a single immutable event describing the crash
118
- session.create_event(
119
- type="error_traceback",
120
- error=_mask_and_truncate(description),
121
- traceback="",
122
- metadata={"exit_code": exit_code, **({} if not extra else extra)},
123
- )
124
- except Exception:
125
- # Never raise during shutdown
126
- pass
127
43
 
44
+ # Decorators
45
+ from .sdk.decorators import event, event as step # step is deprecated alias
46
+
47
+ # Error types
48
+ from .core.errors import (
49
+ LucidicError,
50
+ LucidicNotInitializedError,
51
+ APIKeyVerificationError,
52
+ InvalidOperationError,
53
+ PromptError,
54
+ FeatureFlagError,
55
+ )
128
56
 
129
- def _install_crash_handlers() -> None:
130
- """Install global uncaught exception handlers (idempotent)."""
131
- global _crash_handlers_installed, _original_sys_excepthook, _original_threading_excepthook
132
- if _crash_handlers_installed:
57
+ # Import functions that need to be implemented
58
+ def _update_session(
59
+ task=None,
60
+ session_eval=None,
61
+ session_eval_reason=None,
62
+ is_successful=None,
63
+ is_successful_reason=None,
64
+ session_id=None # Accept explicit session_id
65
+ ):
66
+ """Update the current session."""
67
+ from .sdk.init import get_resources, get_session_id
68
+
69
+ # Use provided session_id or fall back to context
70
+ if not session_id:
71
+ session_id = get_session_id()
72
+ if not session_id:
133
73
  return
134
-
135
- _original_sys_excepthook = sys.excepthook
136
-
137
- def _sys_hook(exc_type, exc, tb):
138
- try:
139
- trace_str = ''.join(traceback.format_exception(exc_type, exc, tb))
140
- except Exception:
141
- trace_str = f"Uncaught exception: {getattr(exc_type, '__name__', str(exc_type))}: {exc}"
142
-
143
- # Emit final event and end the session as unsuccessful
144
- _post_fatal_event(1, trace_str, {
145
- "exception_type": getattr(exc_type, "__name__", str(exc_type)),
146
- "exception_message": str(exc),
147
- "thread_name": threading.current_thread().name,
148
- })
149
-
150
- # Follow proper shutdown sequence to prevent broken pipes
151
- try:
152
- client = Client()
153
-
154
- # 1. Flush OpenTelemetry spans first
155
- if hasattr(client, '_tracer_provider'):
156
- try:
157
- client._tracer_provider.force_flush(timeout_millis=5000)
158
- except Exception:
159
- pass
160
-
161
- # 2. Flush and shutdown EventQueue (with active sessions cleared)
162
- if hasattr(client, "_event_queue"):
163
- try:
164
- # Clear active sessions to allow shutdown
165
- client._event_queue._active_sessions.clear()
166
- client._event_queue.force_flush()
167
- client._event_queue.shutdown(timeout=5.0)
168
- except Exception:
169
- pass
170
-
171
- # 3. Shutdown TracerProvider after EventQueue
172
- if hasattr(client, '_tracer_provider'):
173
- try:
174
- client._tracer_provider.shutdown()
175
- except Exception:
176
- pass
177
-
178
- # 4. Mark client as shutting down to prevent new requests
179
- client._shutdown = True
180
-
181
- # 5. Prevent auto_end double work
182
- try:
183
- client.auto_end = False
184
- except Exception:
185
- pass
186
-
187
- # 6. End session explicitly as unsuccessful
188
- end_session()
74
+
75
+ resources = get_resources()
76
+ if resources and 'sessions' in resources:
77
+ updates = {}
78
+ if task is not None:
79
+ updates['task'] = task
80
+ if session_eval is not None:
81
+ updates['session_eval'] = session_eval
82
+ if session_eval_reason is not None:
83
+ updates['session_eval_reason'] = session_eval_reason
84
+ if is_successful is not None:
85
+ updates['is_successful'] = is_successful
86
+ if is_successful_reason is not None:
87
+ updates['is_successful_reason'] = is_successful_reason
189
88
 
190
- except Exception:
191
- pass
192
-
193
- # Chain to original to preserve default printing/behavior
194
- try:
195
- _original_sys_excepthook(exc_type, exc, tb)
196
- except Exception:
197
- # Avoid recursion/errors in fatal path
198
- pass
199
-
200
- sys.excepthook = _sys_hook
201
-
202
- # For Python 3.8+, only treat main-thread exceptions as fatal (process-exiting)
203
- if hasattr(threading, 'excepthook'):
204
- _original_threading_excepthook = threading.excepthook
205
-
206
- def _thread_hook(args):
207
- try:
208
- if args.thread is threading.main_thread():
209
- # For main thread exceptions, use full shutdown sequence
210
- _sys_hook(args.exc_type, args.exc_value, args.exc_traceback)
211
- else:
212
- # For non-main threads, just flush spans without full shutdown
213
- try:
214
- client = Client()
215
- # Flush any pending spans from this thread
216
- if hasattr(client, '_tracer_provider'):
217
- client._tracer_provider.force_flush(timeout_millis=1000)
218
- # Force flush events but don't shutdown
219
- if hasattr(client, "_event_queue"):
220
- client._event_queue.force_flush()
221
- except Exception:
222
- pass
223
- except Exception:
224
- pass
225
- try:
226
- _original_threading_excepthook(args)
227
- except Exception:
228
- pass
229
-
230
- threading.excepthook = _thread_hook
231
-
232
- _crash_handlers_installed = True
233
-
234
- __all__ = [
235
- 'Session',
236
- 'Event',
237
- 'init',
238
- 'create_experiment',
239
- 'create_event',
240
- 'end_session',
241
- 'get_prompt',
242
- 'get_session',
243
- 'get_dataset',
244
- 'get_dataset_items',
245
- 'get_feature_flag',
246
- 'get_bool_flag',
247
- 'get_int_flag',
248
- 'get_float_flag',
249
- 'get_string_flag',
250
- 'get_json_flag',
251
- 'clear_feature_flag_cache',
252
- 'FeatureFlagError',
253
- 'ProviderType',
254
- 'APIKeyVerificationError',
255
- 'LucidicNotInitializedError',
256
- 'PromptError',
257
- 'InvalidOperationError',
258
- 'event',
259
- 'set_active_session',
260
- 'bind_session',
261
- 'bind_session_async',
262
- 'clear_active_session',
263
- 'session',
264
- 'session_async',
265
- 'run_session',
266
- 'run_in_session',
267
- ]
268
-
269
-
270
- def init(
271
- session_name: Optional[str] = None,
272
- session_id: Optional[str] = None,
273
- api_key: Optional[str] = None,
274
- agent_id: Optional[str] = None,
275
- task: Optional[str] = None,
276
- providers: Optional[List[ProviderType]] = [],
277
- production_monitoring: Optional[bool] = False,
278
- experiment_id: Optional[str] = None,
279
- rubrics: Optional[list] = None,
280
- tags: Optional[list] = None,
281
- dataset_item_id: Optional[str] = None,
282
- masking_function = None,
283
- auto_end: Optional[bool] = True,
284
- capture_uncaught: Optional[bool] = True,
285
- ) -> str:
286
- """
287
- Initialize the Lucidic client.
89
+ if updates:
90
+ resources['sessions'].update_session(session_id, updates)
91
+
92
+
93
+ def _end_session(
94
+ session_eval=None,
95
+ session_eval_reason=None,
96
+ is_successful=None,
97
+ is_successful_reason=None,
98
+ wait_for_flush=True,
99
+ session_id=None # Accept explicit session_id
100
+ ):
101
+ """End the current session."""
102
+ from .sdk.init import get_resources, get_session_id, get_event_queue
103
+
104
+ # Use provided session_id or fall back to context
105
+ if not session_id:
106
+ session_id = get_session_id()
107
+ if not session_id:
108
+ return
288
109
 
289
- Args:
290
- session_name: The display name of the session.
291
- session_id: Custom ID of the session. If not provided, a random ID will be generated.
292
- api_key: API key for authentication. If not provided, will use the LUCIDIC_API_KEY environment variable.
293
- agent_id: Agent ID. If not provided, will use the LUCIDIC_AGENT_ID environment variable.
294
- task: Task description.
295
- providers: List of provider types ("openai", "anthropic", "langchain", "pydantic_ai").
296
- experiment_id: Optional experiment ID, if session is to be part of an experiment.
297
- rubrics: Optional rubrics for evaluation, list of strings.
298
- tags: Optional tags for the session, list of strings.
299
- dataset_item_id: Optional dataset item ID to link session to a dataset item.
300
- masking_function: Optional function to mask sensitive data.
301
- auto_end: If True, automatically end the session on process exit. Defaults to True.
110
+ # Flush events if requested
111
+ if wait_for_flush:
112
+ flush(timeout_seconds=5.0)
302
113
 
303
- Raises:
304
- InvalidOperationError: If the client is already initialized.
305
- APIKeyVerificationError: If the API key is invalid.
306
- """
307
-
308
- load_dotenv()
309
-
310
- if os.getenv("LUCIDIC_DEBUG", "False").lower() == "true":
311
- logger.setLevel(logging.DEBUG)
114
+ # End session via API
115
+ resources = get_resources()
116
+ if resources and 'sessions' in resources:
117
+ resources['sessions'].end_session(
118
+ session_id,
119
+ is_successful=is_successful,
120
+ session_eval=session_eval,
121
+ is_successful_reason=is_successful_reason,
122
+ session_eval_reason=session_eval_reason
123
+ )
312
124
 
313
- # get current client which will be NullClient if lai is never initialized
314
- client = Client()
315
- # if not yet initialized or still the NullClient -> creaet a real client when init is called
316
- if not getattr(client, 'initialized', False):
317
- if api_key is None:
318
- api_key = os.getenv("LUCIDIC_API_KEY", None)
319
- if api_key is None:
320
- raise APIKeyVerificationError("Make sure to either pass your API key into lai.init() or set the LUCIDIC_API_KEY environment variable.")
321
- if agent_id is None:
322
- agent_id = os.getenv("LUCIDIC_AGENT_ID", None)
323
- if agent_id is None:
324
- raise APIKeyVerificationError("Lucidic agent ID not specified. Make sure to either pass your agent ID into lai.init() or set the LUCIDIC_AGENT_ID environment variable.")
325
- client = Client(api_key=api_key, agent_id=agent_id)
326
- else:
327
- # Already initialized, this is a re-init
328
- api_key = api_key or os.getenv("LUCIDIC_API_KEY", None)
329
- agent_id = agent_id or os.getenv("LUCIDIC_AGENT_ID", None)
330
- client.agent_id = agent_id
331
- if api_key is not None and agent_id is not None and (api_key != client.api_key or agent_id != client.agent_id):
332
- client.set_api_key(api_key)
333
- client.agent_id = agent_id
334
-
125
+ # Clear session context
126
+ clear_active_session()
127
+
128
+
129
+ def _get_session():
130
+ """Get the current session object."""
131
+ from .sdk.init import get_session_id
132
+ return get_session_id()
133
+
134
+
135
+ def _create_experiment(
136
+ experiment_name,
137
+ LLM_boolean_evaluators=None,
138
+ LLM_numeric_evaluators=None,
139
+ description=None,
140
+ tags=None,
141
+ api_key=None,
142
+ agent_id=None,
143
+ ):
144
+ """Create a new experiment."""
145
+ from .sdk.init import get_http
146
+ from .core.config import SDKConfig, get_config
335
147
 
336
- # Handle auto_end with environment variable support
337
- if auto_end is None:
338
- auto_end = os.getenv("LUCIDIC_AUTO_END", "True").lower() == "true"
148
+ # Get or create HTTP client
149
+ http = get_http()
150
+ config = get_config()
339
151
 
340
- # Set up providers
341
- # Use the client's singleton telemetry initialization
342
- if providers:
343
- success = client.initialize_telemetry(providers)
344
- if not success:
345
- logger.warning("[Telemetry] Failed to initialize telemetry for some providers")
346
- real_session_id = client.init_session(
347
- session_name=session_name,
348
- task=task,
349
- rubrics=rubrics,
350
- tags=tags,
351
- production_monitoring=production_monitoring,
352
- session_id=session_id,
353
- experiment_id=experiment_id,
354
- dataset_item_id=dataset_item_id,
355
- )
356
- if masking_function:
357
- client.masking_function = masking_function
152
+ if not http:
153
+ config = SDKConfig.from_env(api_key=api_key, agent_id=agent_id)
154
+ from .api.client import HttpClient
155
+ http = HttpClient(config)
358
156
 
359
- # Set the auto_end flag on the client
360
- client.auto_end = auto_end
361
- # Bind this session id to the current execution context for async-safety
362
- try:
363
- set_active_session(real_session_id)
364
- except Exception:
365
- pass
366
- # Install crash handlers unless explicitly disabled
367
- try:
368
- if capture_uncaught:
369
- _install_crash_handlers()
370
- # Also install error event handler for uncaught exceptions
371
- try:
372
- from .errors import install_error_handler
373
- install_error_handler()
374
- except Exception:
375
- pass
376
- except Exception:
377
- pass
157
+ # Use provided agent_id or fall back to config
158
+ final_agent_id = agent_id or config.agent_id
159
+ if not final_agent_id:
160
+ raise ValueError("Agent ID is required for creating experiments")
378
161
 
379
- logger.info("Session initialized successfully")
380
- return real_session_id
381
-
382
-
383
- def update_session(
384
- task: Optional[str] = None,
385
- session_eval: Optional[float] = None,
386
- session_eval_reason: Optional[str] = None,
387
- is_successful: Optional[bool] = None,
388
- is_successful_reason: Optional[str] = None
389
- ) -> None:
390
- """
391
- Update the current session.
162
+ evaluator_names = []
163
+ if LLM_boolean_evaluators:
164
+ evaluator_names.extend(LLM_boolean_evaluators)
165
+ if LLM_numeric_evaluators:
166
+ evaluator_names.extend(LLM_numeric_evaluators)
392
167
 
393
- Args:
394
- task: Task description.
395
- session_eval: Session evaluation.
396
- session_eval_reason: Session evaluation reason.
397
- is_successful: Whether the session was successful.
398
- is_successful_reason: Session success reason.
399
- """
400
- # Prefer context-bound session over global active session
401
- client = Client()
402
- target_sid = None
403
- try:
404
- target_sid = current_session_id.get(None)
405
- except Exception:
406
- target_sid = None
407
- if not target_sid and client.session:
408
- target_sid = client.session.session_id
409
- if not target_sid:
410
- return
411
- # Use ephemeral session facade to avoid mutating global state
412
- session = client.session if (client.session and client.session.session_id == target_sid) else Session(agent_id=client.agent_id, session_id=target_sid)
413
- session.update_session(**locals())
414
-
415
-
416
- def end_session(
417
- session_eval: Optional[float] = None,
418
- session_eval_reason: Optional[str] = None,
419
- is_successful: Optional[bool] = None,
420
- is_successful_reason: Optional[str] = None,
421
- wait_for_flush: bool = True
422
- ) -> None:
423
- """
424
- End the current session.
168
+ # Create experiment via API (matching TypeScript exactly)
169
+ response = http.post('createexperiment', {
170
+ 'agent_id': final_agent_id,
171
+ 'experiment_name': experiment_name,
172
+ 'description': description or '',
173
+ 'tags': tags or [],
174
+ 'evaluator_names': evaluator_names
175
+ })
425
176
 
426
- Args:
427
- session_eval: Session evaluation.
428
- session_eval_reason: Session evaluation reason.
429
- is_successful: Whether the session was successful.
430
- is_successful_reason: Session success reason.
431
- wait_for_flush: Whether to block until event queue is empty (default True).
432
- Set to False during signal handling to prevent hangs.
433
- """
434
- client = Client()
435
- # Prefer context-bound session id
436
- target_sid = None
437
- try:
438
- target_sid = current_session_id.get(None)
439
- except Exception:
440
- target_sid = None
441
- if not target_sid and client.session:
442
- target_sid = client.session.session_id
443
- if not target_sid:
444
- return
177
+ return response.get('experiment_id')
445
178
 
446
- # If ending the globally active session, perform cleanup
447
- if client.session and client.session.session_id == target_sid:
448
- # Best-effort: wait for LiteLLM callbacks to flush before ending
449
- try:
450
- import litellm
451
- cbs = getattr(litellm, 'callbacks', None)
452
- if cbs:
453
- for cb in cbs:
454
- try:
455
- if hasattr(cb, 'wait_for_pending_callbacks'):
456
- cb.wait_for_pending_callbacks(timeout=1)
457
- except Exception:
458
- pass
459
- except Exception:
460
- pass
461
- # CRITICAL: Flush OpenTelemetry spans FIRST (blocking)
462
- # This ensures all spans are converted to events before we flush the event queue
463
- try:
464
- if hasattr(client, '_tracer_provider') and client._tracer_provider:
465
- logger.debug("[Session] Flushing OpenTelemetry spans before session end...")
466
- # Force flush with generous timeout to ensure all spans are exported
467
- # The BatchSpanProcessor now exports every 100ms, so this should be quick
468
- success = client._tracer_provider.force_flush(timeout_millis=10000) # 10 second timeout
469
- if not success:
470
- logger.warning("[Session] OpenTelemetry flush timed out - some spans may be lost")
471
- else:
472
- logger.debug("[Session] OpenTelemetry spans flushed successfully")
473
- except Exception as e:
474
- logger.debug(f"[Session] Failed to flush telemetry spans: {e}")
475
-
476
- # THEN flush event queue (which now contains events from flushed spans)
477
- try:
478
- if hasattr(client, '_event_queue'):
479
- logger.debug("[Session] Flushing event queue...")
480
- client._event_queue.force_flush(timeout_seconds=10.0)
481
-
482
- # Wait for queue to be completely empty (only if blocking)
483
- if wait_for_flush:
484
- import time
485
- wait_start = time.time()
486
- max_wait = 10.0 # seconds - timeout for blob uploads
487
- while not client._event_queue.is_empty():
488
- if time.time() - wait_start > max_wait:
489
- logger.warning(f"[Session] EventQueue not empty after {max_wait}s timeout")
490
- break
491
- time.sleep(0.1)
492
-
493
- if client._event_queue.is_empty():
494
- logger.debug("[Session] EventQueue confirmed empty")
495
- else:
496
- logger.debug("[Session] Non-blocking mode - skipping wait for empty queue")
497
- except Exception as e:
498
- logger.debug(f"[Session] Failed to flush event queue: {e}")
499
-
500
- # Mark session as inactive FIRST (prevents race conditions)
501
- client.mark_session_inactive(target_sid)
502
-
503
- # Send only expected fields to update endpoint
504
- update_kwargs = {
505
- "is_finished": True,
506
- "session_eval": session_eval,
507
- "session_eval_reason": session_eval_reason,
508
- "is_successful": is_successful,
509
- "is_successful_reason": is_successful_reason,
510
- }
511
- try:
512
- client.session.update_session(**update_kwargs)
513
- except Exception as e:
514
- logger.warning(f"[Session] Failed to update session: {e}")
515
-
516
- # Clear only the global session reference, not the singleton
517
- # This preserves the client and event queue for other threads
518
- client.session = None
519
- logger.debug(f"[Session] Ended global session {target_sid}")
520
- # DO NOT shutdown event queue - other threads may be using it
521
- # DO NOT call client.clear() - preserve singleton for other threads
522
- return
523
179
 
524
- # Otherwise, end the specified session id without clearing global state
525
- # First flush telemetry and event queue for non-global sessions too
526
- try:
527
- if hasattr(client, '_tracer_provider') and client._tracer_provider:
528
- logger.debug(f"[Session] Flushing OpenTelemetry spans for session {target_sid[:8]}...")
529
- success = client._tracer_provider.force_flush(timeout_millis=10000)
530
- if not success:
531
- logger.warning("[Session] OpenTelemetry flush timed out")
532
- except Exception as e:
533
- logger.debug(f"[Session] Failed to flush telemetry spans: {e}")
180
+ def _get_prompt(
181
+ prompt_name,
182
+ variables=None,
183
+ cache_ttl=300,
184
+ label='production'
185
+ ):
186
+ """Get a prompt from the prompt database."""
187
+ from .sdk.init import get_http
534
188
 
535
- # Flush and wait for event queue to empty
536
- try:
537
- if hasattr(client, '_event_queue'):
538
- logger.debug(f"[Session] Flushing event queue for session {target_sid[:8]}...")
539
- client._event_queue.force_flush(timeout_seconds=10.0)
540
-
541
- # Wait for queue to be completely empty (only if blocking)
542
- if wait_for_flush:
543
- import time
544
- wait_start = time.time()
545
- max_wait = 10.0 # seconds - timeout for blob uploads
546
- while not client._event_queue.is_empty():
547
- if time.time() - wait_start > max_wait:
548
- logger.warning(f"[Session] EventQueue not empty after {max_wait}s timeout")
549
- break
550
- time.sleep(0.1)
551
-
552
- if client._event_queue.is_empty():
553
- logger.debug(f"[Session] EventQueue confirmed empty for session {target_sid[:8]}")
554
- else:
555
- logger.debug(f"[Session] Non-blocking mode - skipping wait for session {target_sid[:8]}")
556
- except Exception as e:
557
- logger.debug(f"[Session] Failed to flush event queue: {e}")
558
-
559
- # CRITICAL: Mark session as inactive FIRST for ALL sessions
560
- client.mark_session_inactive(target_sid)
189
+ http = get_http()
190
+ if not http:
191
+ return ""
561
192
 
562
- temp = Session(agent_id=client.agent_id, session_id=target_sid)
563
- update_kwargs = {
564
- "is_finished": True,
565
- "session_eval": session_eval,
566
- "session_eval_reason": session_eval_reason,
567
- "is_successful": is_successful,
568
- "is_successful_reason": is_successful_reason,
569
- }
193
+ # Get prompt from API
570
194
  try:
571
- temp.update_session(**update_kwargs)
572
- except Exception as e:
573
- logger.warning(f"[Session] Failed to update session: {e}")
574
-
575
-
576
- def flush(timeout_seconds: float = 2.0) -> bool:
577
- """
578
- Manually flush all pending telemetry data.
579
-
580
- Flushes both OpenTelemetry spans and queued events to ensure
581
- all telemetry data is sent to the backend. This is called
582
- automatically on process exit but can be called manually
583
- for explicit control.
584
-
585
- Args:
586
- timeout_seconds: Maximum time to wait for flush
587
-
588
- Returns:
589
- True if all flushes succeeded, False otherwise
590
-
591
- Example:
592
- ```python
593
- import lucidicai as lai
594
-
595
- # ... your code using Lucidic ...
195
+ response = http.get('getprompt', {
196
+ 'prompt_name': prompt_name,
197
+ 'label': label
198
+ })
596
199
 
597
- # Manually flush before critical operation
598
- lai.flush()
599
- ```
600
- """
601
- try:
602
- client = Client()
603
- success = True
200
+ # TypeScript SDK expects 'prompt_content' field
201
+ prompt = response.get('prompt_content', '')
604
202
 
605
- # Flush OpenTelemetry spans first
606
- if hasattr(client, 'flush_telemetry'):
607
- span_success = client.flush_telemetry(timeout_seconds)
608
- success = success and span_success
203
+ # Replace variables if provided
204
+ if variables:
205
+ for key, value in variables.items():
206
+ prompt = prompt.replace(f"{{{key}}}", str(value))
609
207
 
610
- # Then flush event queue
611
- if hasattr(client, '_event_queue'):
612
- client._event_queue.force_flush(timeout_seconds)
613
-
614
- logger.debug(f"[Flush] Manual flush completed (success={success})")
615
- return success
616
- except Exception as e:
617
- logger.error(f"Failed to flush telemetry: {e}")
618
- return False
208
+ return prompt
209
+ except Exception:
210
+ return ""
619
211
 
620
212
 
621
- def _auto_end_session():
622
- """Automatically end session on exit if auto_end is enabled"""
623
- try:
624
- client = Client()
625
- if hasattr(client, 'auto_end') and client.auto_end and client.session and not client.session.is_finished:
626
- logger.info("Auto-ending active session on exit")
627
- client.auto_end = False # To avoid repeating auto-end on exit
628
-
629
- # Flush telemetry
630
- if hasattr(client, '_tracer_provider'):
631
- client._tracer_provider.force_flush(timeout_millis=5000)
632
-
633
- # Force flush event queue before ending session
634
- if hasattr(client, '_event_queue'):
635
- if logger.isEnabledFor(logging.DEBUG):
636
- logger.debug("[Shutdown] Flushing event queue before session end")
637
- client._event_queue.force_flush(timeout_seconds=5.0)
638
-
639
- # Use non-blocking mode during shutdown to prevent hangs
640
- # The actual wait for queue empty happens in _cleanup_singleton_on_exit
641
- end_session(wait_for_flush=False)
642
-
643
- except Exception as e:
644
- logger.debug(f"Error during auto-end session: {e}")
213
+ def _get_dataset(dataset_id, api_key=None, agent_id=None):
214
+ """Get a dataset by ID."""
215
+ from .sdk.features.dataset import get_dataset as __get_dataset
216
+ return __get_dataset(dataset_id, api_key, agent_id)
645
217
 
646
218
 
647
- def _cleanup_singleton_on_exit():
648
- """
649
- Clean up singleton resources only on process exit.
650
-
651
- CRITICAL ORDER:
652
- 1. Flush OpenTelemetry spans (blocking) - ensures spans become events
653
- 2. Flush EventQueue - sends all events including those from spans
654
- 3. Close HTTP session - graceful TCP FIN prevents broken pipes
655
- 4. Clear singletons - final cleanup
656
-
657
- This order is essential to prevent lost events and broken connections.
658
- """
659
- try:
660
- client = Client()
661
-
662
- # 1. FIRST: Flush OpenTelemetry spans (blocking until exported)
663
- # This is the critical fix - we must flush spans before events
664
- if hasattr(client, '_tracer_provider') and client._tracer_provider:
665
- try:
666
- # Small delay to ensure spans have reached the processor
667
- import time
668
- time.sleep(0.1) # 100ms to let spans reach BatchSpanProcessor
669
-
670
- logger.debug("[Exit] Flushing OpenTelemetry spans...")
671
- # force_flush() blocks until all spans are exported or timeout
672
- success = client._tracer_provider.force_flush(timeout_millis=3000)
673
- if success:
674
- logger.debug("[Exit] OpenTelemetry spans flushed successfully")
675
- else:
676
- logger.warning("[Exit] OpenTelemetry flush timed out - some spans may be lost")
677
-
678
- # DON'T shutdown TracerProvider yet - wait until after EventQueue
679
- # This prevents losing spans that are still being processed
680
- except Exception as e:
681
- logger.debug(f"[Exit] Telemetry cleanup error: {e}")
682
-
683
- # 2. SECOND: Flush and shutdown EventQueue
684
- # Now it contains all events from the flushed spans
685
- if hasattr(client, '_event_queue'):
686
- try:
687
- logger.debug("[Exit] Flushing event queue...")
688
- client._event_queue.force_flush(timeout_seconds=2.0)
689
-
690
- # Wait for queue to be completely empty before proceeding
691
- import time
692
- max_wait = 5.0 # seconds
693
- start_time = time.time()
694
- while not client._event_queue.is_empty():
695
- if time.time() - start_time > max_wait:
696
- logger.warning("[Exit] EventQueue not empty after timeout")
697
- break
698
- time.sleep(0.01) # Small sleep to avoid busy waiting
699
-
700
- if client._event_queue.is_empty():
701
- logger.debug("[Exit] EventQueue is empty, proceeding with shutdown")
702
-
703
- # Clear any stale active sessions (threads may have died without cleanup)
704
- if hasattr(client, '_active_sessions'):
705
- with client._active_sessions_lock:
706
- if client._active_sessions:
707
- logger.debug(f"[Exit] Clearing {len(client._active_sessions)} remaining active sessions")
708
- client._active_sessions.clear()
709
-
710
- # Now shutdown EventQueue
711
- client._event_queue.shutdown()
712
- logger.debug("[Exit] Event queue shutdown complete")
713
- except Exception as e:
714
- logger.debug(f"[Exit] Event queue cleanup error: {e}")
715
-
716
- # 3. THIRD: Shutdown TracerProvider after EventQueue is done
717
- # This ensures all spans can be exported before shutdown
718
- if hasattr(client, '_tracer_provider') and client._tracer_provider:
719
- try:
720
- logger.debug("[Exit] Shutting down TracerProvider...")
721
- client._tracer_provider.shutdown()
722
- logger.debug("[Exit] TracerProvider shutdown complete")
723
- except Exception as e:
724
- logger.debug(f"[Exit] TracerProvider shutdown error: {e}")
725
-
726
- # 4. FOURTH: Close HTTP session ONLY after everything else
727
- # This prevents broken pipes by ensuring all events are sent first
728
- if hasattr(client, 'request_session'):
729
- try:
730
- # Mark client as shutting down to prevent new requests
731
- client._shutdown = True
732
- logger.debug("[Exit] Closing HTTP session (queue empty, worker stopped)")
733
- client.request_session.close()
734
- logger.debug("[Exit] HTTP session closed gracefully")
735
- except Exception as e:
736
- logger.debug(f"[Exit] HTTP session cleanup error: {e}")
737
-
738
- # 5. FINALLY: Clear singletons
739
- # Safe to destroy now that all data is flushed
740
- clear_singletons()
741
- logger.debug("[Exit] Singleton cleanup complete")
742
-
743
- except Exception as e:
744
- # Silent fail on exit to avoid disrupting process termination
745
- if logger.isEnabledFor(logging.DEBUG):
746
- logger.debug(f"[Exit] Cleanup error: {e}")
219
+ def _get_dataset_items(dataset_id, api_key=None, agent_id=None):
220
+ """Get dataset items."""
221
+ from .sdk.features.dataset import get_dataset_items as __get_dataset_items
222
+ return __get_dataset_items(dataset_id, api_key, agent_id)
747
223
 
748
224
 
749
- def _signal_handler(signum, frame):
750
- """Handle interruption signals with better queue flushing."""
751
- # Best-effort final event for signal exits
752
- try:
753
- try:
754
- name = signal.Signals(signum).name
755
- except Exception:
756
- name = str(signum)
757
- try:
758
- stack_str = ''.join(traceback.format_stack(frame)) if frame else ''
759
- except Exception:
760
- stack_str = ''
761
- desc = _mask_and_truncate(f"Received signal {name}\n{stack_str}")
762
- _post_fatal_event(128 + signum, desc, {"signal": name, "signum": signum})
763
- except Exception:
764
- pass
225
+ def _list_datasets(api_key=None, agent_id=None):
226
+ """List all datasets."""
227
+ from .sdk.features.dataset import list_datasets as __list_datasets
228
+ return __list_datasets(api_key, agent_id)
229
+
230
+
231
+ def _create_dataset(name, description=None, tags=None, suggested_flag_config=None, api_key=None, agent_id=None):
232
+ """Create a new dataset."""
233
+ from .sdk.features.dataset import create_dataset as __create_dataset
234
+ return __create_dataset(name, description, tags, suggested_flag_config, api_key, agent_id)
235
+
236
+
237
+ def _update_dataset(dataset_id, name=None, description=None, tags=None, suggested_flag_config=None, api_key=None, agent_id=None):
238
+ """Update dataset metadata."""
239
+ from .sdk.features.dataset import update_dataset as __update_dataset
240
+ return __update_dataset(dataset_id, name, description, tags, suggested_flag_config, api_key, agent_id)
241
+
242
+
243
+ def _delete_dataset(dataset_id, api_key=None, agent_id=None):
244
+ """Delete a dataset."""
245
+ from .sdk.features.dataset import delete_dataset as __delete_dataset
246
+ return __delete_dataset(dataset_id, api_key, agent_id)
247
+
248
+
249
+ def _create_dataset_item(dataset_id, name, input_data, expected_output=None, description=None, tags=None, metadata=None, flag_overrides=None, api_key=None, agent_id=None):
250
+ """Create a dataset item."""
251
+ from .sdk.features.dataset import create_dataset_item as __create_dataset_item
252
+ return __create_dataset_item(dataset_id, name, input_data, expected_output, description, tags, metadata, flag_overrides, api_key, agent_id)
253
+
254
+
255
+ def _get_dataset_item(dataset_id, item_id, api_key=None, agent_id=None):
256
+ """Get a specific dataset item."""
257
+ from .sdk.features.dataset import get_dataset_item as __get_dataset_item
258
+ return __get_dataset_item(dataset_id, item_id, api_key, agent_id)
259
+
260
+
261
+ def _update_dataset_item(dataset_id, item_id, name=None, input_data=None, expected_output=None, description=None, tags=None, metadata=None, flag_overrides=None, api_key=None, agent_id=None):
262
+ """Update a dataset item."""
263
+ from .sdk.features.dataset import update_dataset_item as __update_dataset_item
264
+ return __update_dataset_item(dataset_id, item_id, name, input_data, expected_output, description, tags, metadata, flag_overrides, api_key, agent_id)
265
+
266
+
267
+ def _delete_dataset_item(dataset_id, item_id, api_key=None, agent_id=None):
268
+ """Delete a dataset item."""
269
+ from .sdk.features.dataset import delete_dataset_item as __delete_dataset_item
270
+ return __delete_dataset_item(dataset_id, item_id, api_key, agent_id)
271
+
272
+
273
+ def _list_dataset_item_sessions(dataset_id, item_id, api_key=None, agent_id=None):
274
+ """List all sessions for a dataset item."""
275
+ from .sdk.features.dataset import list_dataset_item_sessions as __list_dataset_item_sessions
276
+ return __list_dataset_item_sessions(dataset_id, item_id, api_key, agent_id)
277
+
278
+
279
+ # Feature flags
280
+ from .sdk.features.feature_flag import (
281
+ get_feature_flag,
282
+ get_bool_flag,
283
+ get_int_flag,
284
+ get_float_flag,
285
+ get_string_flag,
286
+ get_json_flag,
287
+ clear_feature_flag_cache,
288
+ )
289
+
290
+ # Error boundary utilities
291
+ is_silent_mode = error_boundary.is_silent_mode
292
+ get_error_history = error_boundary.get_error_history
293
+ clear_error_history = error_boundary.clear_error_history
294
+
295
+ # Version
296
+ __version__ = "2.1.1"
297
+
298
+ # Apply error boundary wrapping to all SDK functions
299
+ from .sdk.error_boundary import wrap_sdk_function
300
+
301
+ # Wrap main SDK functions
302
+ init = wrap_sdk_function(_init, "init")
303
+ get_session_id = wrap_sdk_function(_get_session_id, "init")
304
+ clear_state = wrap_sdk_function(_clear_state, "init")
305
+ create_event = wrap_sdk_function(_create_event, "event")
306
+ create_error_event = wrap_sdk_function(_create_error_event, "event")
307
+ flush = wrap_sdk_function(_flush, "event")
308
+
309
+ # Wrap session functions
310
+ update_session = wrap_sdk_function(_update_session, "session")
311
+ end_session = wrap_sdk_function(_end_session, "session")
312
+ get_session = wrap_sdk_function(_get_session, "session")
313
+
314
+ # Wrap feature functions
315
+ create_experiment = wrap_sdk_function(_create_experiment, "experiment")
316
+ get_prompt = wrap_sdk_function(_get_prompt, "prompt")
317
+
318
+ # Dataset management - complete CRUD
319
+ list_datasets = wrap_sdk_function(_list_datasets, "dataset")
320
+ create_dataset = wrap_sdk_function(_create_dataset, "dataset")
321
+ get_dataset = wrap_sdk_function(_get_dataset, "dataset")
322
+ update_dataset = wrap_sdk_function(_update_dataset, "dataset")
323
+ delete_dataset = wrap_sdk_function(_delete_dataset, "dataset")
324
+
325
+ # Dataset item management
326
+ create_dataset_item = wrap_sdk_function(_create_dataset_item, "dataset")
327
+ get_dataset_item = wrap_sdk_function(_get_dataset_item, "dataset")
328
+ update_dataset_item = wrap_sdk_function(_update_dataset_item, "dataset")
329
+ delete_dataset_item = wrap_sdk_function(_delete_dataset_item, "dataset")
330
+ get_dataset_items = wrap_sdk_function(_get_dataset_items, "dataset")
331
+ list_dataset_item_sessions = wrap_sdk_function(_list_dataset_item_sessions, "dataset")
332
+
333
+ # All exports
334
+ __all__ = [
335
+ # Main functions
336
+ 'init',
337
+ 'get_session_id',
338
+ 'clear_state',
339
+ 'update_session',
340
+ 'end_session',
341
+ 'get_session',
342
+ 'create_event',
343
+ 'create_error_event',
344
+ 'flush',
765
345
 
766
- # Proper shutdown sequence matching atexit handler
767
- try:
768
- client = Client()
769
-
770
- # 1. FIRST: Flush OpenTelemetry spans
771
- if hasattr(client, '_tracer_provider') and client._tracer_provider:
772
- try:
773
- logger.debug(f"[Signal] Flushing OpenTelemetry spans on signal {signum}")
774
- client._tracer_provider.force_flush(timeout_millis=2000) # Shorter timeout for signals
775
- except Exception:
776
- pass
777
-
778
- # 2. SECOND: Flush and shutdown EventQueue
779
- if hasattr(client, "_event_queue"):
780
- logger.debug(f"[Signal] Flushing event queue on signal {signum}")
781
- client._event_queue.force_flush(timeout_seconds=2.0)
782
-
783
- # Clear active sessions to allow shutdown
784
- if hasattr(client, '_active_sessions'):
785
- with client._active_sessions_lock:
786
- client._active_sessions.clear()
787
-
788
- client._event_queue.shutdown()
789
-
790
- # 3. THIRD: Shutdown TracerProvider after EventQueue
791
- if hasattr(client, '_tracer_provider') and client._tracer_provider:
792
- logger.debug(f"[Signal] Shutting down TracerProvider on signal {signum}")
793
- try:
794
- client._tracer_provider.shutdown()
795
- except Exception:
796
- pass
797
-
798
- # 4. Mark client as shutting down
799
- client._shutdown = True
800
-
801
- except Exception:
802
- pass
346
+ # Decorators
347
+ 'event',
348
+ 'step',
803
349
 
804
- logger.debug(f"[Signal] Auto-ending session on signal {signum}")
805
- _auto_end_session()
806
- # Re-raise the signal for default handling
807
- signal.signal(signum, signal.SIG_DFL)
808
- os.kill(os.getpid(), signum)
809
-
810
-
811
- # Register cleanup functions
812
- atexit.register(_cleanup_singleton_on_exit) # Clean up singleton resources on exit
813
- atexit.register(_auto_end_session) # Auto-end session if enabled
814
-
815
- # Register signal handlers for graceful shutdown
816
- signal.signal(signal.SIGINT, _signal_handler)
817
- signal.signal(signal.SIGTERM, _signal_handler)
818
-
819
-
820
- def create_experiment(
821
- experiment_name: str,
822
- pass_fail_rubrics: Optional[list] = None,
823
- score_rubrics: Optional[list] = None,
824
- description: Optional[str] = None,
825
- tags: Optional[list] = None,
826
- api_key: Optional[str] = None,
827
- agent_id: Optional[str] = None,
828
- ) -> str:
829
- """
830
- Create a new experiment for grouping and analyzing sessions.
831
-
832
- Args:
833
- experiment_name: Name of the experiment (required)
834
- pass_fail_rubrics: List of pass/fail rubric names to associate
835
- description: Description of the experiment
836
- task: Task description.
837
- tags: List of tags for categorization
838
- score_rubrics: List of score rubric names to associate
839
- api_key: API key (uses env if not provided)
840
- agent_id: Agent ID (uses env if not provided)
841
-
842
- Returns:
843
- experiment_id: UUID of the created experiment
844
-
845
- Raises:
846
- APIKeyVerificationError: If API key is invalid or missing
847
- InvalidOperationError: If experiment creation fails
848
- ValueError: If name is empty
849
- """
850
-
851
- # validation
852
- if not experiment_name:
853
- raise ValueError("Experiment name is required")
854
-
855
- if api_key is None:
856
- api_key = os.getenv("LUCIDIC_API_KEY", None)
857
- if api_key is None:
858
- raise APIKeyVerificationError("Make sure to either pass your API key into create_experiment() or set the LUCIDIC_API_KEY environment variable.")
859
- if agent_id is None:
860
- agent_id = os.getenv("LUCIDIC_AGENT_ID", None)
861
- if agent_id is None:
862
- raise APIKeyVerificationError("Lucidic agent ID not specified. Make sure to either pass your agent ID into create_experiment() or set the LUCIDIC_AGENT_ID environment variable.")
863
-
864
- # combine rubrics into single list
865
- rubric_names = (pass_fail_rubrics or []) + (score_rubrics or [])
866
-
867
- # get current client which will be NullClient if never lai.init() is never called
868
- client = Client()
869
- # if not yet initialized or still the NullClient -> create a real client when init is called
870
- if not getattr(client, 'initialized', False):
871
- client = Client(api_key=api_key, agent_id=agent_id)
872
- else:
873
- # Already initialized, this is a re-init
874
- if api_key is not None and agent_id is not None and (api_key != client.api_key or agent_id != client.agent_id):
875
- client.set_api_key(api_key)
876
- client.agent_id = agent_id
877
-
878
- # create experiment
879
- experiment_id = client.create_experiment(experiment_name=experiment_name, rubric_names=rubric_names, description=description, tags=tags)
880
- logger.info(f"Created experiment with ID: {experiment_id}")
881
-
882
- return experiment_id
883
-
884
-
885
- def create_event(
886
- type: str = "generic",
887
- **kwargs
888
- ) -> str:
889
- client = Client()
890
- if not client.session:
891
- return
892
- return client.session.create_event(type=type, **kwargs)
350
+ # Features
351
+ 'create_experiment',
352
+ 'get_prompt',
893
353
 
354
+ # Dataset management
355
+ 'list_datasets',
356
+ 'create_dataset',
357
+ 'get_dataset',
358
+ 'update_dataset',
359
+ 'delete_dataset',
360
+ 'create_dataset_item',
361
+ 'get_dataset_item',
362
+ 'update_dataset_item',
363
+ 'delete_dataset_item',
364
+ 'get_dataset_items',
365
+ 'list_dataset_item_sessions',
894
366
 
895
- def get_prompt(
896
- prompt_name: str,
897
- variables: Optional[dict] = None,
898
- cache_ttl: Optional[int] = 300,
899
- label: Optional[str] = 'production'
900
- ) -> str:
901
- """
902
- Get a prompt from the prompt database.
367
+ # Feature flags
368
+ 'get_feature_flag',
369
+ 'get_bool_flag',
370
+ 'get_int_flag',
371
+ 'get_float_flag',
372
+ 'get_string_flag',
373
+ 'get_json_flag',
374
+ 'clear_feature_flag_cache',
903
375
 
904
- Args:
905
- prompt_name: Name of the prompt.
906
- variables: {{Variables}} to replace in the prompt, supplied as a dictionary.
907
- cache_ttl: Time-to-live for the prompt in the cache in seconds (default: 300). Set to -1 to cache forever. Set to 0 to disable caching.
908
- label: Optional label for the prompt.
376
+ # Context management
377
+ 'set_active_session',
378
+ 'clear_active_session',
379
+ 'bind_session',
380
+ 'bind_session_async',
381
+ 'session',
382
+ 'session_async',
383
+ 'run_session',
384
+ 'run_in_session',
385
+ 'thread_worker_with_session',
386
+ 'current_session_id',
387
+ 'current_parent_event_id',
388
+
389
+ # Thread-local session management (advanced)
390
+ 'set_thread_session',
391
+ 'clear_thread_session',
392
+ 'get_thread_session',
909
393
 
910
- Returns:
911
- str: The prompt.
912
- """
913
- client = Client()
914
- if not client.session:
915
- return ""
916
- prompt = client.get_prompt(prompt_name, cache_ttl, label)
917
- if variables:
918
- for key, val in variables.items():
919
- index = prompt.find("{{" + key +"}}")
920
- if index == -1:
921
- raise PromptError("Supplied variable not found in prompt")
922
- prompt = prompt.replace("{{" + key +"}}", str(val))
923
- if "{{" in prompt and "}}" in prompt and prompt.find("{{") < prompt.find("}}"):
924
- logger.warning("Unreplaced variable(s) left in prompt. Please check your prompt.")
925
- return prompt
926
-
927
-
928
- def get_session():
929
- """Get the current session object
394
+ # Error types
395
+ 'LucidicError',
396
+ 'LucidicNotInitializedError',
397
+ 'APIKeyVerificationError',
398
+ 'InvalidOperationError',
399
+ 'PromptError',
400
+ 'FeatureFlagError',
930
401
 
931
- Returns:
932
- Session: The current session object, or None if no session exists
933
- """
934
- try:
935
- client = Client()
936
- return client.session
937
- except (LucidicNotInitializedError, AttributeError) as e:
938
- logger.debug(f"No active session: {str(e)}")
939
- return None
940
-
941
-
402
+ # Error boundary
403
+ 'is_silent_mode',
404
+ 'get_error_history',
405
+ 'clear_error_history',
406
+
407
+ # Version
408
+ '__version__',
409
+ ]