lucidicai 2.1.3__py3-none-any.whl → 3.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. lucidicai/__init__.py +32 -390
  2. lucidicai/api/client.py +31 -2
  3. lucidicai/api/resources/__init__.py +16 -1
  4. lucidicai/api/resources/dataset.py +422 -82
  5. lucidicai/api/resources/event.py +399 -27
  6. lucidicai/api/resources/experiment.py +108 -0
  7. lucidicai/api/resources/feature_flag.py +78 -0
  8. lucidicai/api/resources/prompt.py +84 -0
  9. lucidicai/api/resources/session.py +545 -38
  10. lucidicai/client.py +395 -480
  11. lucidicai/core/config.py +73 -48
  12. lucidicai/core/errors.py +3 -3
  13. lucidicai/sdk/bound_decorators.py +321 -0
  14. lucidicai/sdk/context.py +20 -2
  15. lucidicai/sdk/decorators.py +283 -74
  16. lucidicai/sdk/event.py +538 -36
  17. lucidicai/sdk/event_builder.py +2 -4
  18. lucidicai/sdk/features/dataset.py +391 -1
  19. lucidicai/sdk/features/feature_flag.py +344 -3
  20. lucidicai/sdk/init.py +49 -347
  21. lucidicai/sdk/session.py +502 -0
  22. lucidicai/sdk/shutdown_manager.py +103 -46
  23. lucidicai/session_obj.py +321 -0
  24. lucidicai/telemetry/context_capture_processor.py +13 -6
  25. lucidicai/telemetry/extract.py +60 -63
  26. lucidicai/telemetry/litellm_bridge.py +3 -44
  27. lucidicai/telemetry/lucidic_exporter.py +143 -131
  28. lucidicai/telemetry/openai_agents_instrumentor.py +2 -2
  29. lucidicai/telemetry/openai_patch.py +7 -6
  30. lucidicai/telemetry/telemetry_manager.py +183 -0
  31. lucidicai/telemetry/utils/model_pricing.py +21 -30
  32. lucidicai/telemetry/utils/provider.py +77 -0
  33. lucidicai/utils/images.py +27 -11
  34. lucidicai/utils/serialization.py +27 -0
  35. {lucidicai-2.1.3.dist-info → lucidicai-3.0.0.dist-info}/METADATA +1 -1
  36. {lucidicai-2.1.3.dist-info → lucidicai-3.0.0.dist-info}/RECORD +38 -29
  37. {lucidicai-2.1.3.dist-info → lucidicai-3.0.0.dist-info}/WHEEL +0 -0
  38. {lucidicai-2.1.3.dist-info → lucidicai-3.0.0.dist-info}/top_level.txt +0 -0
lucidicai/sdk/event.py CHANGED
@@ -1,42 +1,140 @@
1
1
  """SDK event creation and management."""
2
+ import asyncio
3
+ import gzip
4
+ import io
5
+ import json
6
+ import sys
7
+ import threading
2
8
  import uuid
3
9
  from datetime import datetime, timezone
4
- from typing import Any, Dict, Optional, Union
10
+ from typing import Any, Dict, Optional, Union, Set
11
+ from weakref import WeakSet
12
+ import traceback
13
+ import httpx
5
14
 
6
15
  from .context import current_parent_event_id
7
16
  from ..core.config import get_config
8
17
  from .event_builder import EventBuilder
9
- from ..utils.logger import debug, truncate_id
18
+ from ..utils.logger import debug, warning, error, truncate_id
10
19
 
11
20
 
12
- def create_event(
13
- type: str = "generic",
14
- event_id: Optional[str] = None,
15
- session_id: Optional[str] = None, # accept explicit session_id
16
- **kwargs
17
- ) -> str:
18
- """Create a new event.
21
+ # Default blob threshold (64KB)
22
+ DEFAULT_BLOB_THRESHOLD = 65536
19
23
 
20
- Args:
21
- type: Event type (llm_generation, function_call, error_traceback, generic)
22
- event_id: Optional client event ID (will generate if not provided)
23
- session_id: Optional session ID (will use context if not provided)
24
- **kwargs: Event-specific fields
24
+ # Track background threads and tasks for flush()
25
+ _background_threads: Set[threading.Thread] = WeakSet()
26
+ _background_tasks: Set[asyncio.Task] = WeakSet()
27
+
28
+
29
+ def _compress_json(payload: Dict[str, Any]) -> bytes:
30
+ """Compress JSON payload using gzip."""
31
+ raw = json.dumps(payload, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
32
+ buf = io.BytesIO()
33
+ with gzip.GzipFile(fileobj=buf, mode="wb") as gz:
34
+ gz.write(raw)
35
+ return buf.getvalue()
36
+
37
+
38
+ def _upload_blob_sync(blob_url: str, data: bytes) -> None:
39
+ """Upload compressed blob to presigned URL (synchronous)."""
40
+ headers = {"Content-Type": "application/json", "Content-Encoding": "gzip"}
41
+ resp = httpx.put(blob_url, content=data, headers=headers)
42
+ resp.raise_for_status()
43
+
44
+
45
+ async def _upload_blob_async(blob_url: str, data: bytes) -> None:
46
+ """Upload compressed blob to presigned URL (asynchronous)."""
47
+ headers = {"Content-Type": "application/json", "Content-Encoding": "gzip"}
48
+ async with httpx.AsyncClient() as client:
49
+ resp = await client.put(blob_url, content=data, headers=headers)
50
+ resp.raise_for_status()
51
+
52
+
53
+ def _track_background_task(task: asyncio.Task) -> None:
54
+ """Track a background task for flush()."""
55
+ _background_tasks.add(task)
56
+
57
+
58
+ def _create_preview(event_type: Optional[str], payload: Dict[str, Any]) -> Dict[str, Any]:
59
+ """Create preview of large payload for logging."""
60
+ try:
61
+ t = (event_type or "generic").lower()
62
+
63
+ if t == "llm_generation":
64
+ req = payload.get("request", {})
65
+ usage = payload.get("usage", {})
66
+ messages = req.get("messages", [])[:5]
67
+ output = payload.get("response", {}).get("output", {})
68
+ compressed_messages = []
69
+ for i, m in enumerate(messages):
70
+ compressed_message_item = {}
71
+ for k, v in messages[i].items():
72
+ compressed_message_item[k] = str(v)[:200] if v else None
73
+ compressed_messages.append(compressed_message_item)
74
+ return {
75
+ "request": {
76
+ "model": req.get("model")[:200] if req.get("model") else None,
77
+ "provider": req.get("provider")[:200] if req.get("provider") else None,
78
+ "messages": compressed_messages,
79
+ },
80
+ "usage": {
81
+ k: usage.get(k) for k in ("input_tokens", "output_tokens", "cost") if k in usage
82
+ },
83
+ "response": {
84
+ "output": str(output)[:200] if output else None,
85
+ }
86
+ }
87
+
88
+ elif t == "function_call":
89
+ args = payload.get("arguments")
90
+ truncated_args = (
91
+ {k: (str(v)[:200] if v is not None else None) for k, v in args.items()}
92
+ if isinstance(args, dict)
93
+ else (str(args)[:200] if args is not None else None)
94
+ )
95
+ return {
96
+ "function_name": payload.get("function_name")[:200] if payload.get("function_name") else None,
97
+ "arguments": truncated_args,
98
+ }
99
+
100
+ elif t == "error_traceback":
101
+ return {
102
+ "error": payload.get("error")[:200] if payload.get("error") else None,
103
+ }
104
+
105
+ elif t == "generic":
106
+ return {
107
+ "details": payload.get("details")[:200] if payload.get("details") else None,
108
+ }
109
+ else:
110
+ return {"details": "preview_unavailable"}
111
+
112
+ except Exception:
113
+ return {"details": "preview_error"}
25
114
 
115
+
116
+ def _prepare_event_request(
117
+ type: str,
118
+ event_id: Optional[str],
119
+ session_id: Optional[str],
120
+ blob_threshold: int,
121
+ **kwargs
122
+ ) -> tuple[Dict[str, Any], bool, Optional[Dict[str, Any]]]:
123
+ """Prepare event request, determining if blob offload is needed.
124
+
26
125
  Returns:
27
- Event ID (client-generated or provided UUID)
126
+ Tuple of (send_body, needs_blob, original_payload)
28
127
  """
29
- # Import here to avoid circular dependency
30
- from ..sdk.init import get_session_id, get_event_queue
128
+ from ..sdk.init import get_session_id
31
129
 
32
130
  # Use provided session_id or fall back to context
33
131
  if not session_id:
34
132
  session_id = get_session_id()
35
133
 
36
134
  if not session_id:
37
- # No active session, return dummy ID
135
+ # No active session
38
136
  debug("[Event] No active session, returning dummy event ID")
39
- return str(uuid.uuid4())
137
+ return None, False, None
40
138
 
41
139
  # Get parent event ID from context
42
140
  parent_event_id = None
@@ -55,7 +153,7 @@ def create_event(
55
153
  'parent_event_id': parent_event_id,
56
154
  'session_id': session_id,
57
155
  'occurred_at': kwargs.get('occurred_at') or datetime.now(timezone.utc).isoformat(),
58
- **kwargs # Include all other kwargs
156
+ **kwargs
59
157
  }
60
158
 
61
159
  # Use EventBuilder to create normalized event request
@@ -63,21 +161,187 @@ def create_event(
63
161
 
64
162
  debug(f"[Event] Creating {type} event {truncate_id(client_event_id)} (parent: {truncate_id(parent_event_id)}, session: {truncate_id(session_id)})")
65
163
 
66
- # Queue event for async sending
67
- event_queue = get_event_queue()
68
- if event_queue:
69
- event_queue.queue_event(event_request)
164
+ # Check for blob offloading
165
+ payload = event_request.get("payload", {})
166
+ raw_bytes = json.dumps(payload, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
167
+ needs_blob = len(raw_bytes) > blob_threshold
168
+
169
+ if needs_blob:
170
+ debug(f"[Event] Event {truncate_id(client_event_id)} needs blob storage ({len(raw_bytes)} bytes > {blob_threshold} threshold)")
70
171
 
172
+ send_body: Dict[str, Any] = dict(event_request)
173
+ if needs_blob:
174
+ send_body["needs_blob"] = True
175
+ send_body["payload"] = _create_preview(send_body.get("type"), payload)
176
+ else:
177
+ send_body["needs_blob"] = False
178
+
179
+ return send_body, needs_blob, payload if needs_blob else None
180
+
181
+
182
+ def _get_event_resource():
183
+ """Get an event resource from a registered client.
184
+
185
+ Returns:
186
+ Event resource or None if no client is available.
187
+ """
188
+ try:
189
+ from .shutdown_manager import get_shutdown_manager
190
+ manager = get_shutdown_manager()
191
+ with manager._client_lock:
192
+ # Return first available client's event resource
193
+ for client in manager._clients.values():
194
+ if hasattr(client, '_resources') and 'events' in client._resources:
195
+ return client._resources['events']
196
+ except Exception as e:
197
+ debug(f"[Event] Failed to get event resource from client registry: {e}")
198
+ return None
199
+
200
+
201
+ def create_event(
202
+ type: str = "generic",
203
+ event_id: Optional[str] = None,
204
+ session_id: Optional[str] = None,
205
+ **kwargs
206
+ ) -> str:
207
+ """Create a new event (synchronous).
208
+
209
+ Args:
210
+ type: Event type (llm_generation, function_call, error_traceback, generic)
211
+ event_id: Optional client event ID (will generate if not provided)
212
+ session_id: Optional session ID (will use context if not provided)
213
+ **kwargs: Event-specific fields
214
+
215
+ Returns:
216
+ Event ID (client-generated or provided UUID)
217
+ """
218
+ config = get_config()
219
+ blob_threshold = getattr(config, 'blob_threshold', DEFAULT_BLOB_THRESHOLD)
220
+
221
+ send_body, needs_blob, original_payload = _prepare_event_request(
222
+ type, event_id, session_id, blob_threshold, **kwargs
223
+ )
224
+
225
+ if send_body is None:
226
+ # No active session
227
+ return str(uuid.uuid4())
228
+
229
+ client_event_id = send_body.get('client_event_id', str(uuid.uuid4()))
230
+
231
+ # Get event resource from client registry
232
+ event_resource = _get_event_resource()
233
+ if not event_resource:
234
+ warning("[Event] No event resource available (no client registered), event not sent")
235
+ return client_event_id
236
+
237
+ try:
238
+ response = event_resource.create_event(send_body)
239
+
240
+ # Handle blob upload if needed (blocking)
241
+ if needs_blob and original_payload:
242
+ blob_url = response.get("blob_url")
243
+ if blob_url:
244
+ compressed = _compress_json(original_payload)
245
+ _upload_blob_sync(blob_url, compressed)
246
+ debug(f"[Event] Blob uploaded for event {truncate_id(client_event_id)}")
247
+ else:
248
+ error("[Event] No blob_url received for large payload")
249
+
250
+ debug(f"[Event] Event {truncate_id(client_event_id)} sent successfully")
251
+
252
+ except Exception as e:
253
+ error(f"[Event] Failed to send event {truncate_id(client_event_id)}: {e}")
254
+
71
255
  return client_event_id
72
256
 
73
257
 
258
+ async def acreate_event(
259
+ type: str = "generic",
260
+ event_id: Optional[str] = None,
261
+ session_id: Optional[str] = None,
262
+ **kwargs
263
+ ) -> str:
264
+ """Create a new event (asynchronous).
265
+
266
+ Args:
267
+ type: Event type (llm_generation, function_call, error_traceback, generic)
268
+ event_id: Optional client event ID (will generate if not provided)
269
+ session_id: Optional session ID (will use context if not provided)
270
+ **kwargs: Event-specific fields
271
+
272
+ Returns:
273
+ Event ID (client-generated or provided UUID)
274
+ """
275
+ # Check if we're in shutdown - fall back to sync if we are
276
+ if sys.is_finalizing():
277
+ debug(f"[Event] Python is finalizing in acreate_event, falling back to sync")
278
+ return create_event(type, event_id, session_id, **kwargs)
279
+
280
+ config = get_config()
281
+ blob_threshold = getattr(config, 'blob_threshold', DEFAULT_BLOB_THRESHOLD)
282
+
283
+ send_body, needs_blob, original_payload = _prepare_event_request(
284
+ type, event_id, session_id, blob_threshold, **kwargs
285
+ )
286
+
287
+ if send_body is None:
288
+ # No active session
289
+ return str(uuid.uuid4())
290
+
291
+ client_event_id = send_body.get('client_event_id', str(uuid.uuid4()))
292
+
293
+ # Get event resource from client registry
294
+ event_resource = _get_event_resource()
295
+ if not event_resource:
296
+ warning("[Event] No event resource available (no client registered), event not sent")
297
+ return client_event_id
298
+
299
+ try:
300
+ # Try async first, fall back to sync if we get shutdown errors
301
+ try:
302
+ response = await event_resource.acreate_event(send_body)
303
+ except RuntimeError as e:
304
+ if "cannot schedule new futures after interpreter shutdown" in str(e).lower():
305
+ debug(f"[Event] Detected shutdown in acreate_event, falling back to sync")
306
+ response = event_resource.create_event(send_body)
307
+ else:
308
+ raise
309
+
310
+ # Handle blob upload if needed (background task)
311
+ if needs_blob and original_payload:
312
+ blob_url = response.get("blob_url")
313
+ if blob_url:
314
+ compressed = _compress_json(original_payload)
315
+ try:
316
+ # Try to create background task
317
+ task = asyncio.create_task(_upload_blob_async(blob_url, compressed))
318
+ _track_background_task(task)
319
+ debug(f"[Event] Blob upload started in background for event {truncate_id(client_event_id)}")
320
+ except RuntimeError as e:
321
+ if "cannot schedule new futures" in str(e).lower() or sys.is_finalizing():
322
+ # Can't create tasks, do it synchronously
323
+ debug(f"[Event] Cannot create background task, uploading blob synchronously")
324
+ _upload_blob_sync(blob_url, compressed)
325
+ debug(f"[Event] Blob uploaded synchronously for event {truncate_id(client_event_id)}")
326
+ else:
327
+ raise
328
+ else:
329
+ error("[Event] No blob_url received for large payload")
330
+
331
+ debug(f"[Event] Event {truncate_id(client_event_id)} sent successfully")
332
+
333
+ except Exception as e:
334
+ error(f"[Event] Failed to send event {truncate_id(client_event_id)}: {e}")
335
+
336
+ return client_event_id
337
+
74
338
 
75
339
  def create_error_event(
76
340
  error: Union[str, Exception],
77
341
  parent_event_id: Optional[str] = None,
78
342
  **kwargs
79
343
  ) -> str:
80
- """Create an error traceback event.
344
+ """Create an error traceback event (synchronous).
81
345
 
82
346
  This is a convenience function for creating error events with proper
83
347
  traceback information.
@@ -108,19 +372,257 @@ def create_error_event(
108
372
  )
109
373
 
110
374
 
111
- def flush(timeout_seconds: float = 2.0) -> bool:
112
- """Flush pending events.
375
+ async def acreate_error_event(
376
+ error: Union[str, Exception],
377
+ parent_event_id: Optional[str] = None,
378
+ **kwargs
379
+ ) -> str:
380
+ """Create an error traceback event (asynchronous).
381
+
382
+ This is a convenience function for creating error events with proper
383
+ traceback information.
384
+
385
+ Args:
386
+ error: The error message or exception object
387
+ parent_event_id: Optional parent event ID for nesting
388
+ **kwargs: Additional event parameters
389
+
390
+ Returns:
391
+ Event ID of the created error event
392
+ """
393
+ import traceback
394
+
395
+ if isinstance(error, Exception):
396
+ error_str = str(error)
397
+ traceback_str = traceback.format_exc()
398
+ else:
399
+ error_str = str(error)
400
+ traceback_str = kwargs.pop('traceback', '')
401
+
402
+ return await acreate_event(
403
+ type="error_traceback",
404
+ error=error_str,
405
+ traceback=traceback_str,
406
+ parent_event_id=parent_event_id,
407
+ **kwargs
408
+ )
409
+
410
+
411
+ def emit_event(
412
+ type: str = "generic",
413
+ event_id: Optional[str] = None,
414
+ session_id: Optional[str] = None,
415
+ **kwargs
416
+ ) -> str:
417
+ """Fire-and-forget event creation that returns instantly.
418
+
419
+ This function returns immediately with an event ID, while the actual
420
+ event creation and any blob uploads happen in a background thread.
421
+ Perfect for hot path telemetry where latency is critical.
422
+
423
+ During shutdown, falls back to synchronous event creation to avoid
424
+ "cannot schedule new futures after interpreter shutdown" errors.
425
+
426
+ Args:
427
+ type: Event type (llm_generation, function_call, error_traceback, generic)
428
+ event_id: Optional client event ID (will generate if not provided)
429
+ session_id: Optional session ID (will use context if not provided)
430
+ **kwargs: Event-specific fields
431
+
432
+ Returns:
433
+ Event ID (client-generated or provided UUID) - returned immediately
434
+ """
435
+ from ..sdk.init import get_session_id
436
+ from .context import current_session_id
437
+ from .shutdown_manager import get_shutdown_manager
438
+
439
+ # Pre-generate event ID for instant return
440
+ client_event_id = event_id or str(uuid.uuid4())
441
+
442
+ # Capture context variables BEFORE creating the thread
443
+ # This preserves the context chain across thread boundaries
444
+ captured_parent_id = kwargs.get('parent_event_id')
445
+ if captured_parent_id is None:
446
+ try:
447
+ captured_parent_id = current_parent_event_id.get()
448
+ except Exception:
449
+ captured_parent_id = None
450
+
451
+ # Capture session from context if not provided
452
+ if not session_id:
453
+ try:
454
+ # Try context variable first (most specific)
455
+ session_id = current_session_id.get()
456
+ except Exception:
457
+ pass
458
+
459
+ # Fall back to get_session_id if still None
460
+ if not session_id:
461
+ session_id = get_session_id()
462
+
463
+ if not session_id:
464
+ debug("[Event] No active session for emit_event, returning dummy event ID")
465
+ return client_event_id
466
+
467
+ # Update kwargs with captured context
468
+ if captured_parent_id is not None:
469
+ kwargs['parent_event_id'] = captured_parent_id
470
+
471
+ # Check if Python interpreter is shutting down
472
+ if sys.is_finalizing():
473
+ debug(f"[Event] Python is finalizing, using synchronous event creation for {truncate_id(client_event_id)}")
474
+ try:
475
+ return create_event(type, client_event_id, session_id, **kwargs)
476
+ except Exception as e:
477
+ error(f"[Event] Failed to create event during finalization: {e}")
478
+ return client_event_id
479
+
480
+ # Check if shutdown manager thinks we're shutting down
481
+ try:
482
+ from .shutdown_manager import get_shutdown_manager
483
+ shutdown_manager = get_shutdown_manager()
484
+ if shutdown_manager.is_shutting_down:
485
+ debug(f"[Event] ShutdownManager indicates shutdown, using synchronous event creation for {truncate_id(client_event_id)}")
486
+ try:
487
+ return create_event(type, client_event_id, session_id, **kwargs)
488
+ except Exception as e:
489
+ error(f"[Event] Failed to create event during shutdown: {e}")
490
+ return client_event_id
491
+ except Exception:
492
+ pass # ShutdownManager not available
493
+
494
+ # Try to create and start thread - fall back to sync if it fails
495
+ try:
496
+ # Normal path: Run async function in background thread
497
+ def _run():
498
+ try:
499
+ # Create new event loop for this thread
500
+ loop = asyncio.new_event_loop()
501
+ asyncio.set_event_loop(loop)
502
+ try:
503
+ loop.run_until_complete(
504
+ acreate_event(type, client_event_id, session_id, **kwargs)
505
+ )
506
+ finally:
507
+ loop.close()
508
+ except RuntimeError as e:
509
+ if "cannot schedule new futures after interpreter shutdown" in str(e).lower():
510
+ # Interpreter is shutting down, can't use async
511
+ debug(f"[Event] Detected interpreter shutdown in thread, falling back to sync")
512
+ create_event(type, client_event_id, session_id, **kwargs)
513
+ else:
514
+ error(f"[Event] Background emit failed for {truncate_id(client_event_id)}: {e}")
515
+ except Exception as e:
516
+ error(f"[Event] Background emit failed for {truncate_id(client_event_id)}: {e}")
517
+
518
+ thread = threading.Thread(target=_run, daemon=True, name=f"emit-{truncate_id(client_event_id)}")
519
+ _background_threads.add(thread)
520
+ thread.start()
521
+ except (RuntimeError, SystemError) as e:
522
+ # Can't create threads during shutdown
523
+ debug(f"[Event] Cannot create thread (likely shutdown): {e}. Using synchronous fallback.")
524
+ try:
525
+ return create_event(type, client_event_id, session_id, **kwargs)
526
+ except Exception as e2:
527
+ error(f"[Event] Synchronous fallback also failed: {e2}")
528
+ return client_event_id
529
+
530
+ debug(f"[Event] Emitted {type} event {truncate_id(client_event_id)} (fire-and-forget)")
531
+ return client_event_id
532
+
533
+
534
+ def emit_error_event(
535
+ error: Union[str, Exception],
536
+ parent_event_id: Optional[str] = None,
537
+ **kwargs
538
+ ) -> str:
539
+ """Fire-and-forget error event creation that returns instantly.
540
+
541
+ This is a convenience function for creating error events with proper
542
+ traceback information, returning immediately while processing happens
543
+ in the background.
113
544
 
114
545
  Args:
115
- timeout_seconds: Maximum time to wait for flush
546
+ error: The error message or exception object
547
+ parent_event_id: Optional parent event ID for nesting
548
+ **kwargs: Additional event parameters
116
549
 
117
550
  Returns:
118
- True if flush completed, False if timeout
551
+ Event ID of the created error event - returned immediately
119
552
  """
120
- from ..sdk.init import get_event_queue
121
- event_queue = get_event_queue()
122
- if event_queue:
123
- debug(f"[Event] Forcing flush with {timeout_seconds}s timeout")
124
- event_queue.force_flush(timeout_seconds)
125
- return True
126
- return False
553
+ import traceback
554
+
555
+ if isinstance(error, Exception):
556
+ error_str = str(error)
557
+ traceback_str = traceback.format_exc()
558
+ else:
559
+ error_str = str(error)
560
+ traceback_str = kwargs.pop('traceback', '')
561
+
562
+ # Note: emit_event already handles context capture for both
563
+ # parent_event_id and session_id, so we just pass through
564
+ return emit_event(
565
+ type="error_traceback",
566
+ error=error_str,
567
+ traceback=traceback_str,
568
+ parent_event_id=parent_event_id,
569
+ **kwargs
570
+ )
571
+
572
+
573
+ def flush(timeout: float = 5.0) -> None:
574
+ """Wait for all background operations to complete.
575
+
576
+ This includes:
577
+ - Event creation HTTP requests
578
+ - S3 blob uploads for large payloads
579
+ - Session creation requests
580
+ - Any other background telemetry operations
581
+
582
+ Useful before program exit or when you need to ensure all telemetry
583
+ has been sent.
584
+
585
+ Args:
586
+ timeout: Maximum time to wait in seconds (default: 5.0)
587
+ """
588
+ import time
589
+
590
+ start_time = time.time()
591
+
592
+ # Flush sessions first
593
+ from .session import flush_sessions as _flush_sessions
594
+ remaining = timeout - (time.time() - start_time)
595
+ if remaining > 0:
596
+ _flush_sessions(timeout=remaining)
597
+
598
+ # Wait for event background threads
599
+ threads = list(_background_threads)
600
+ for thread in threads:
601
+ if thread.is_alive():
602
+ remaining = timeout - (time.time() - start_time)
603
+ if remaining > 0:
604
+ thread.join(timeout=remaining)
605
+ if thread.is_alive():
606
+ warning(f"[SDK] Thread {thread.name} did not complete within timeout")
607
+
608
+ # Wait for async tasks if in async context
609
+ try:
610
+ loop = asyncio.get_running_loop()
611
+ tasks = [t for t in _background_tasks if not t.done()]
612
+ if tasks:
613
+ remaining = timeout - (time.time() - start_time)
614
+ if remaining > 0:
615
+ try:
616
+ loop.run_until_complete(
617
+ asyncio.wait_for(
618
+ asyncio.gather(*tasks, return_exceptions=True),
619
+ timeout=remaining
620
+ )
621
+ )
622
+ except asyncio.TimeoutError:
623
+ warning(f"[SDK] {len(tasks)} async tasks did not complete within timeout")
624
+ except RuntimeError:
625
+ # Not in async context, skip async task flushing
626
+ pass
627
+
628
+ debug(f"[SDK] Flush completed in {time.time() - start_time:.2f}s")
@@ -30,7 +30,7 @@ class EventBuilder:
30
30
  # field sets for different event types
31
31
  BASE_FIELDS = {
32
32
  'type', 'event_id', 'parent_event_id', 'occurred_at',
33
- 'duration', 'tags', 'metadata', 'screenshots'
33
+ 'duration', 'tags', 'metadata'
34
34
  }
35
35
 
36
36
  LLM_FIELDS = {
@@ -136,9 +136,7 @@ class EventBuilder:
136
136
  base['tags'] = params['tags']
137
137
  if 'metadata' in params:
138
138
  base['metadata'] = params['metadata']
139
- if 'screenshots' in params:
140
- base['screenshots'] = params['screenshots']
141
-
139
+
142
140
  return base
143
141
 
144
142
  @classmethod