lucidicai 2.0.2__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lucidicai/__init__.py +367 -899
- lucidicai/api/__init__.py +1 -0
- lucidicai/api/client.py +218 -0
- lucidicai/api/resources/__init__.py +1 -0
- lucidicai/api/resources/dataset.py +192 -0
- lucidicai/api/resources/event.py +88 -0
- lucidicai/api/resources/session.py +126 -0
- lucidicai/core/__init__.py +1 -0
- lucidicai/core/config.py +223 -0
- lucidicai/core/errors.py +60 -0
- lucidicai/core/types.py +35 -0
- lucidicai/sdk/__init__.py +1 -0
- lucidicai/sdk/context.py +231 -0
- lucidicai/sdk/decorators.py +187 -0
- lucidicai/sdk/error_boundary.py +299 -0
- lucidicai/sdk/event.py +126 -0
- lucidicai/sdk/event_builder.py +304 -0
- lucidicai/sdk/features/__init__.py +1 -0
- lucidicai/sdk/features/dataset.py +605 -0
- lucidicai/sdk/features/feature_flag.py +383 -0
- lucidicai/sdk/init.py +361 -0
- lucidicai/sdk/shutdown_manager.py +302 -0
- lucidicai/telemetry/context_bridge.py +82 -0
- lucidicai/telemetry/context_capture_processor.py +25 -9
- lucidicai/telemetry/litellm_bridge.py +20 -24
- lucidicai/telemetry/lucidic_exporter.py +99 -60
- lucidicai/telemetry/openai_patch.py +295 -0
- lucidicai/telemetry/openai_uninstrument.py +87 -0
- lucidicai/telemetry/telemetry_init.py +16 -1
- lucidicai/telemetry/utils/model_pricing.py +278 -0
- lucidicai/utils/__init__.py +1 -0
- lucidicai/utils/images.py +337 -0
- lucidicai/utils/logger.py +168 -0
- lucidicai/utils/queue.py +393 -0
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/METADATA +1 -1
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/RECORD +38 -9
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/WHEEL +0 -0
- {lucidicai-2.0.2.dist-info → lucidicai-2.1.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""Centralized logging utilities for Lucidic SDK.
|
|
2
|
+
|
|
3
|
+
This module provides consistent logging functions that respect
|
|
4
|
+
LUCIDIC_DEBUG and LUCIDIC_VERBOSE environment variables.
|
|
5
|
+
"""
|
|
6
|
+
import os
|
|
7
|
+
import logging
|
|
8
|
+
from typing import Any, Optional
|
|
9
|
+
from dotenv import load_dotenv
|
|
10
|
+
|
|
11
|
+
# Load environment variables from .env file
|
|
12
|
+
load_dotenv()
|
|
13
|
+
|
|
14
|
+
# Configure base logger
|
|
15
|
+
logging.basicConfig(
|
|
16
|
+
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
|
17
|
+
level=logging.WARNING # Default to WARNING, will be overridden by env vars
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
# Get the logger
|
|
21
|
+
logger = logging.getLogger("Lucidic")
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _env_true(value: Optional[str]) -> bool:
|
|
25
|
+
"""Check if environment variable is truthy."""
|
|
26
|
+
if value is None:
|
|
27
|
+
return False
|
|
28
|
+
return value.lower() in ('true', '1', 'yes')
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
def is_debug() -> bool:
    """Return True when the LUCIDIC_DEBUG environment flag is enabled."""
    flag = os.getenv('LUCIDIC_DEBUG')
    return _env_true(flag)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def is_verbose() -> bool:
    """Return True when the LUCIDIC_VERBOSE environment flag is enabled."""
    flag = os.getenv('LUCIDIC_VERBOSE')
    return _env_true(flag)
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def debug(message: str, *args: Any, **kwargs: Any) -> None:
    """Emit a debug-level message, but only when LUCIDIC_DEBUG is enabled.

    Args:
        message: Log message with optional formatting.
        *args: Positional arguments forwarded to the logger.
        **kwargs: Keyword arguments forwarded to the logger.
    """
    if not is_debug():
        return
    logger.debug(f"[DEBUG] {message}", *args, **kwargs)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def info(message: str, *args: Any, **kwargs: Any) -> None:
    """Emit an info-level message unconditionally (always visible).

    Args:
        message: Log message with optional formatting.
        *args: Positional arguments forwarded to the logger.
        **kwargs: Keyword arguments forwarded to the logger.
    """
    logger.info(message, *args, **kwargs)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def warning(message: str, *args: Any, **kwargs: Any) -> None:
    """Emit a warning-level message unconditionally (always visible).

    Args:
        message: Log message with optional formatting.
        *args: Positional arguments forwarded to the logger.
        **kwargs: Keyword arguments forwarded to the logger.
    """
    logger.warning(message, *args, **kwargs)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def error(message: str, *args: Any, **kwargs: Any) -> None:
    """Emit an error-level message unconditionally (always visible).

    Args:
        message: Log message with optional formatting.
        *args: Positional arguments forwarded to the logger.
        **kwargs: Keyword arguments forwarded to the logger.
    """
    logger.error(message, *args, **kwargs)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
def verbose(message: str, *args: Any, **kwargs: Any) -> None:
    """Emit a verbose message when LUCIDIC_VERBOSE or LUCIDIC_DEBUG is set.

    Verbose messages are logged at INFO level with a "[VERBOSE]" prefix.

    Args:
        message: Log message with optional formatting.
        *args: Positional arguments forwarded to the logger.
        **kwargs: Keyword arguments forwarded to the logger.
    """
    if not (is_debug() or is_verbose()):
        return
    logger.info(f"[VERBOSE] {message}", *args, **kwargs)
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def truncate_id(id_str: Optional[str], length: int = 8) -> str:
    """Shorten a UUID-like string for compact log output.

    Args:
        id_str: UUID string to truncate (may be None or empty).
        length: Number of leading characters to keep.

    Returns:
        "None" for falsy input, the original string when it already fits,
        otherwise the first *length* characters followed by "...".
    """
    if not id_str:
        return "None"
    return id_str if len(id_str) <= length else f"{id_str[:length]}..."
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
def mask_sensitive(data: dict, sensitive_keys: Optional[set] = None) -> dict:
    """Mask sensitive values in a dictionary before logging it.

    A key is considered sensitive when ANY entry of *sensitive_keys* appears
    as a substring of the lower-cased key (so "openai_api_key" matches "key").

    Args:
        data: Dictionary potentially containing sensitive data.
        sensitive_keys: Set of key substrings to mask (default: common
            credential-related names).  Annotation fixed: None is a valid
            argument, so the type is Optional[set], not set.

    Returns:
        A new dictionary with sensitive values masked; falsy values under
        sensitive keys are left as-is since there is nothing to leak.
    """
    if sensitive_keys is None:
        sensitive_keys = {
            'api_key', 'apikey', 'api-key',
            'token', 'auth', 'authorization',
            'password', 'secret', 'key',
            'x-api-key', 'x-auth-token'
        }

    masked = {}
    for key, value in data.items():
        if any(k in key.lower() for k in sensitive_keys):
            if value:
                # Keep the first few characters so masked values stay debuggable.
                masked[key] = f"{str(value)[:4]}...MASKED" if len(str(value)) > 4 else "MASKED"
            else:
                masked[key] = value
        else:
            masked[key] = value
    return masked
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def truncate_data(data: Any, max_length: int = 500) -> str:
    """Render *data* as a string, truncated to *max_length* characters.

    Args:
        data: Any object; converted with str().
        max_length: Maximum number of characters to keep.

    Returns:
        The full string when short enough, otherwise the prefix followed
        by a "(truncated)" marker.
    """
    text = str(data)
    if len(text) > max_length:
        return f"{text[:max_length]}... (truncated)"
    return text
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
# Configure the module logger's level once at import time from the
# LUCIDIC_DEBUG / LUCIDIC_VERBOSE environment flags (DEBUG wins over INFO;
# default is WARNING, matching the basicConfig above).
if is_debug():
    logger.setLevel(logging.DEBUG)
elif is_verbose():
    logger.setLevel(logging.INFO)
else:
    logger.setLevel(logging.WARNING)
|
lucidicai/utils/queue.py
ADDED
|
@@ -0,0 +1,393 @@
|
|
|
1
|
+
"""Parallel event queue for efficient event processing.
|
|
2
|
+
|
|
3
|
+
This module provides a high-performance event queue that processes events
|
|
4
|
+
in parallel while respecting parent-child dependencies.
|
|
5
|
+
"""
|
|
6
|
+
import gzip
|
|
7
|
+
import io
|
|
8
|
+
import json
|
|
9
|
+
import queue
|
|
10
|
+
import threading
|
|
11
|
+
import time
|
|
12
|
+
import requests
|
|
13
|
+
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
14
|
+
from datetime import datetime, timezone
|
|
15
|
+
from typing import Any, Dict, List, Optional, Set, Tuple
|
|
16
|
+
|
|
17
|
+
from ..core.config import get_config
|
|
18
|
+
from ..utils.logger import debug, info, warning, error, truncate_id, truncate_data
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class EventQueue:
    """High-performance parallel event queue."""

    def __init__(self, client):
        """Initialize the event queue.

        Args:
            client: API client exposing make_request(path, method, body);
                used by the sender tasks to POST events to the backend.
        """
        self.config = get_config()
        self._client = client

        # Queue configuration (all limits come from the shared SDK config).
        self.max_queue_size = self.config.event_queue.max_queue_size
        self.flush_interval_ms = self.config.event_queue.flush_interval_ms
        self.flush_at_count = self.config.event_queue.flush_at_count
        self.blob_threshold = self.config.event_queue.blob_threshold
        self.max_workers = self.config.event_queue.max_parallel_workers
        self.retry_failed = self.config.event_queue.retry_failed

        # Runtime state
        self._queue = queue.Queue(maxsize=self.max_queue_size)
        self._stopped = threading.Event()
        # Set to wake the worker before its normal flush interval elapses.
        self._flush_event = threading.Event()
        self._worker: Optional[threading.Thread] = None
        # client_event_ids that have been sent (bookkeeping only).
        self._sent_ids: Set[str] = set()
        # Removed deferred queue - no longer needed since backend handles any order

        # Thread pool for parallel sending of a collected batch.
        self._executor = ThreadPoolExecutor(
            max_workers=self.max_workers,
            thread_name_prefix="LucidicSender"
        )

        # Thread safety
        self._flush_lock = threading.Lock()  # serializes force_flush callers
        self._processing_count = 0  # number of events currently being sent
        self._processing_lock = threading.Lock()  # guards _processing_count

        # Start background worker last, once all state above is in place.
        self._start_worker()

        debug(f"[EventQueue] Initialized with {self.max_workers} parallel workers, batch size: {self.flush_at_count}, flush interval: {self.flush_interval_ms}ms")
|
|
60
|
+
|
|
61
|
+
def queue_event(self, event_request: Dict[str, Any]) -> None:
    """Enqueue an event for background processing.

    Drops the event with a warning when the queue is full; wakes the
    background worker early once the batch-size threshold is reached.
    """
    event_request.setdefault("defer_count", 0)

    try:
        self._queue.put(event_request, block=True, timeout=0.001)
    except queue.Full:
        warning(f"[EventQueue] Queue at max size {self.max_queue_size}, dropping event")
        return

    event_id = event_request.get('client_event_id', 'unknown')
    parent_id = event_request.get('client_parent_event_id')
    debug(f"[EventQueue] Queued event {truncate_id(event_id)} (parent: {truncate_id(parent_id)}), queue size: {self._queue.qsize()}")

    # Wake worker if batch large enough
    if self._queue.qsize() >= self.flush_at_count:
        self._flush_event.set()
|
|
79
|
+
|
|
80
|
+
def force_flush(self, timeout_seconds: float = 5.0) -> None:
    """Flush current queue synchronously (best-effort).

    Signals the worker to drain immediately, then polls every 50ms until
    the queue and in-flight batch are both empty, progress stalls, or the
    timeout elapses.  Serialized via _flush_lock so concurrent callers do
    not interleave.
    """
    with self._flush_lock:
        debug(f"[EventQueue] Force flush requested, queue size: {self._queue.qsize()}")

        # Signal the worker to flush immediately
        self._flush_event.set()

        # Wait for the queue to be processed
        end_time = time.time() + timeout_seconds
        last_size = -1
        # NOTE: stable_count is deliberately reused for two checks below:
        # consecutive "empty" polls (completion debounce) and consecutive
        # "no size change" polls (stall detection). The ordering of the two
        # if-blocks matters -- do not restructure without re-verifying.
        stable_count = 0

        debug(f"[EventQueue] Force flush: entering wait loop, timeout={timeout_seconds}s")
        iterations = 0
        start_time = time.time()
        while time.time() < end_time:
            iterations += 1
            if iterations % 20 == 1:  # Log every second (20 * 0.05s)
                debug(f"[EventQueue] Force flush: iteration {iterations}, time left: {end_time - time.time():.1f}s")

            current_size = self._queue.qsize()

            with self._processing_lock:
                processing = self._processing_count

            # Check if we're done: queue drained, nothing in flight, and the
            # state has held for a few consecutive polls.
            if current_size == 0 and processing == 0:
                if stable_count >= 2:
                    debug("[EventQueue] Force flush complete")
                    return
                stable_count += 1
                debug(f"[EventQueue] Force flush: queue empty, stable_count={stable_count}")
            else:
                stable_count = 0

            # Check for progress: bail out after ~0.5s without size movement.
            if current_size == last_size:
                stable_count += 1
                if stable_count >= 10:  # 0.5 seconds of no progress
                    break
            else:
                stable_count = 0
            last_size = current_size

            # Keep nudging the worker each poll in case it went back to sleep.
            self._flush_event.set()
            time.sleep(0.05)

            # Safety check to prevent infinite loop
            if time.time() - start_time > timeout_seconds + 1:
                warning(f"[EventQueue] Force flush: exceeded timeout by >1s, breaking")
                break

        debug(f"[EventQueue] Force flush: exited wait loop after {time.time() - start_time:.1f}s")
|
|
134
|
+
|
|
135
|
+
def is_empty(self) -> bool:
|
|
136
|
+
"""Check if queue is completely empty."""
|
|
137
|
+
with self._processing_lock:
|
|
138
|
+
queue_empty = self._queue.empty()
|
|
139
|
+
not_processing = self._processing_count == 0
|
|
140
|
+
# No deferred queue to check anymore
|
|
141
|
+
return queue_empty and not_processing
|
|
142
|
+
|
|
143
|
+
def shutdown(self, timeout: float = 5.0) -> None:
    """Shutdown the event queue.

    Flushes remaining events (best-effort, bounded by *timeout*), waits
    for in-flight sender tasks, then stops the background worker.

    Args:
        timeout: Seconds to wait for the flush and for the worker thread.
    """
    info(f"[EventQueue] Shutting down with {self._queue.qsize()} events in queue")

    # Flush remaining events
    self.force_flush(timeout_seconds=timeout)

    # Wait for in-flight sender tasks.  BUG FIX: Executor.shutdown() has
    # never accepted a timeout parameter (Python 3.9 added cancel_futures,
    # not timeout), so the previous try/except TypeError always raised and
    # fell back anyway.  The wait here is unbounded, but force_flush above
    # already bounded the bulk of the work.
    self._executor.shutdown(wait=True)

    # Signal stop
    self._stopped.set()
    self._flush_event.set()

    # Wait for worker
    if self._worker and self._worker.is_alive():
        self._worker.join(timeout=timeout)
|
|
164
|
+
|
|
165
|
+
# --- Internal Implementation ---
|
|
166
|
+
|
|
167
|
+
def _start_worker(self) -> None:
|
|
168
|
+
"""Start the background worker thread."""
|
|
169
|
+
if self._worker and self._worker.is_alive():
|
|
170
|
+
return
|
|
171
|
+
|
|
172
|
+
self._worker = threading.Thread(
|
|
173
|
+
target=self._run_loop,
|
|
174
|
+
name="LucidicEventQueue",
|
|
175
|
+
daemon=self.config.event_queue.daemon_mode
|
|
176
|
+
)
|
|
177
|
+
self._worker.start()
|
|
178
|
+
|
|
179
|
+
def _run_loop(self) -> None:
    """Main worker loop: collect batches and dispatch them until stopped."""
    while not self._stopped.is_set():
        batch = self._collect_batch()
        if not batch:
            continue

        # Publish the in-flight count so force_flush / is_empty can see it.
        with self._processing_lock:
            self._processing_count = len(batch)
        try:
            self._process_batch(batch)
        except Exception as e:
            error(f"[EventQueue] Batch processing error: {e}")
        finally:
            with self._processing_lock:
                self._processing_count = 0
|
|
195
|
+
|
|
196
|
+
def _collect_batch(self) -> List[Dict[str, Any]]:
    """Collect a batch of events from the queue.

    Accumulates events for up to flush_interval_ms, returning early when
    a force-flush is signalled (the whole queue is drained immediately),
    the batch-size threshold is hit, or shutdown drains the remainder.
    Returns an empty list when nothing arrived before the deadline.
    """
    batch: List[Dict[str, Any]] = []
    deadline = time.time() + (self.flush_interval_ms / 1000.0)

    while True:
        # Check for force flush
        if self._flush_event.is_set():
            self._flush_event.clear()
            # Drain entire queue
            while not self._queue.empty():
                try:
                    batch.append(self._queue.get_nowait())
                except queue.Empty:
                    break
            if batch:
                break

        # Check batch size
        if len(batch) >= self.flush_at_count:
            break

        # Check deadline
        remaining_time = deadline - time.time()
        if remaining_time <= 0:
            break

        # Try to get an item; the short (<=50ms) poll keeps the loop
        # responsive to flush/stop signals between arrivals.
        try:
            timeout = min(remaining_time, 0.05)
            item = self._queue.get(block=True, timeout=timeout)
            batch.append(item)
        except queue.Empty:
            if self._stopped.is_set():
                # Drain remaining on shutdown
                while not self._queue.empty():
                    try:
                        batch.append(self._queue.get_nowait())
                    except queue.Empty:
                        break
                break
            if batch and time.time() >= deadline:
                break

    return batch
|
|
241
|
+
|
|
242
|
+
def _process_batch(self, batch: List[Dict[str, Any]]) -> None:
    """Process batch with parallel sending.

    Submits every event in the batch to the thread pool, waits for
    completion, records sent ids, and re-queues failures when
    retry_failed is enabled.
    """
    debug(f"[EventQueue] Processing batch of {len(batch)} events")

    # Group for parallel processing (currently a single group; the backend
    # accepts events in any order).
    dependency_groups = self._group_by_dependencies(batch)

    # Process each group in parallel
    for group_index, group in enumerate(dependency_groups):
        debug(f"[EventQueue] Processing dependency group {group_index + 1}/{len(dependency_groups)} with {len(group)} events in parallel")

        # Submit all events in group for parallel processing
        futures_to_event = {}
        for event in group:
            future = self._executor.submit(self._send_event_safe, event)
            futures_to_event[future] = event

        # Wait for completion
        for future in as_completed(futures_to_event):
            event = futures_to_event[future]
            try:
                success = future.result(timeout=30)
            except Exception as e:
                debug(f"[EventQueue] Failed to send event: {e}")
                success = False
            if success:
                if event_id := event.get("client_event_id"):
                    self._sent_ids.add(event_id)
            elif self.retry_failed:
                # BUG FIX: previously a retry happened only when the future
                # raised, but _send_event_safe / _send_event report failure
                # by returning False (they swallow exceptions internally),
                # so retry_failed never took effect.  Retry on False too.
                self._retry_event(event)
|
|
273
|
+
|
|
274
|
+
def _group_by_dependencies(self, events: List[Dict[str, Any]]) -> List[List[Dict[str, Any]]]:
|
|
275
|
+
"""Group events for parallel processing.
|
|
276
|
+
|
|
277
|
+
Since the backend handles events in any order using client-side event IDs,
|
|
278
|
+
we don't need to check dependencies. Just return all events in one group
|
|
279
|
+
for maximum parallel processing.
|
|
280
|
+
"""
|
|
281
|
+
if not events:
|
|
282
|
+
return []
|
|
283
|
+
|
|
284
|
+
# Mark all event IDs as sent for tracking
|
|
285
|
+
for event in events:
|
|
286
|
+
if event_id := event.get("client_event_id"):
|
|
287
|
+
self._sent_ids.add(event_id)
|
|
288
|
+
|
|
289
|
+
# Return all events in a single group for parallel processing
|
|
290
|
+
return [events]
|
|
291
|
+
|
|
292
|
+
def _send_event_safe(self, event_request: Dict[str, Any]) -> bool:
    """Send an event, swallowing exceptions when error suppression is on."""
    if not self.config.error_handling.suppress_errors:
        return self._send_event(event_request)
    try:
        return self._send_event(event_request)
    except Exception as e:
        warning(f"[EventQueue] Suppressed send error: {e}")
        return False
|
|
302
|
+
|
|
303
|
+
def _send_event(self, event_request: Dict[str, Any]) -> bool:
    """Send a single event to the backend.

    Payloads larger than blob_threshold are replaced by a small preview in
    the event body and uploaded separately (gzip-compressed) to a presigned
    blob URL returned by the backend.

    Returns:
        True on success, False on any failure (including a missing
        blob_url for an offloaded payload).
    """
    # Measure the serialized payload to decide on blob offloading.
    payload = event_request.get("payload", {})
    encoded = json.dumps(payload, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
    should_offload = len(encoded) > self.blob_threshold

    if should_offload:
        event_id = event_request.get('client_event_id', 'unknown')
        debug(f"[EventQueue] Event {truncate_id(event_id)} needs blob storage ({len(encoded)} bytes > {self.blob_threshold} threshold)")

    send_body: Dict[str, Any] = dict(event_request)
    send_body["needs_blob"] = should_offload
    if should_offload:
        send_body["payload"] = self._create_preview(send_body.get("type"), payload)

    try:
        response = self._client.make_request("events", "POST", send_body)

        # Upload the full payload out-of-band when it was offloaded.
        if should_offload:
            blob_url = response.get("blob_url")
            if not blob_url:
                error("[EventQueue] No blob_url received for large payload")
                return False
            self._upload_blob(blob_url, self._compress_json(payload))
            debug(f"[EventQueue] Blob uploaded for event {truncate_id(event_request.get('client_event_id'))}")

        return True
    except Exception as e:
        debug(f"[EventQueue] Failed to send event {truncate_id(event_request.get('client_event_id'))}: {e}")
        return False
|
|
343
|
+
|
|
344
|
+
def _retry_event(self, event: Dict[str, Any]) -> None:
|
|
345
|
+
"""Retry a failed event."""
|
|
346
|
+
event["retry_count"] = event.get("retry_count", 0) + 1
|
|
347
|
+
if event["retry_count"] <= 3:
|
|
348
|
+
try:
|
|
349
|
+
self._queue.put_nowait(event)
|
|
350
|
+
except queue.Full:
|
|
351
|
+
pass
|
|
352
|
+
|
|
353
|
+
@staticmethod
|
|
354
|
+
def _compress_json(payload: Dict[str, Any]) -> bytes:
|
|
355
|
+
"""Compress JSON payload using gzip."""
|
|
356
|
+
raw = json.dumps(payload, separators=(",", ":"), ensure_ascii=False).encode("utf-8")
|
|
357
|
+
buf = io.BytesIO()
|
|
358
|
+
with gzip.GzipFile(fileobj=buf, mode="wb") as gz:
|
|
359
|
+
gz.write(raw)
|
|
360
|
+
return buf.getvalue()
|
|
361
|
+
|
|
362
|
+
def _upload_blob(self, blob_url: str, data: bytes) -> None:
    """Upload a gzip-compressed JSON blob to a presigned URL.

    Args:
        blob_url: Presigned PUT URL returned by the backend.
        data: Gzip-compressed JSON bytes (see _compress_json).

    Raises:
        requests.HTTPError: When the upload returns an error status.
    """
    headers = {"Content-Type": "application/json", "Content-Encoding": "gzip"}
    # BUG FIX: without a timeout, a hung upload would block a sender
    # thread (and shutdown) forever; requests has no default timeout.
    resp = requests.put(blob_url, data=data, headers=headers, timeout=30)
    resp.raise_for_status()
|
|
367
|
+
|
|
368
|
+
@staticmethod
|
|
369
|
+
def _create_preview(event_type: Optional[str], payload: Dict[str, Any]) -> Dict[str, Any]:
|
|
370
|
+
"""Create preview of large payload for logging."""
|
|
371
|
+
try:
|
|
372
|
+
t = (event_type or "generic").lower()
|
|
373
|
+
|
|
374
|
+
if t == "llm_generation":
|
|
375
|
+
req = payload.get("request", {})
|
|
376
|
+
return {
|
|
377
|
+
"request": {
|
|
378
|
+
"model": str(req.get("model", ""))[:200],
|
|
379
|
+
"provider": str(req.get("provider", ""))[:200],
|
|
380
|
+
"messages": "truncated"
|
|
381
|
+
},
|
|
382
|
+
"response": {"output": "truncated"}
|
|
383
|
+
}
|
|
384
|
+
elif t == "function_call":
|
|
385
|
+
return {
|
|
386
|
+
"function_name": str(payload.get("function_name", ""))[:200],
|
|
387
|
+
"arguments": "truncated"
|
|
388
|
+
}
|
|
389
|
+
else:
|
|
390
|
+
return {"details": "preview_unavailable"}
|
|
391
|
+
|
|
392
|
+
except Exception:
|
|
393
|
+
return {"details": "preview_error"}
|
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
lucidicai/__init__.py,sha256=
|
|
1
|
+
lucidicai/__init__.py,sha256=b5om5w8CV6UjgmeFOlzHTyC-m2PNDgQJ5AcOD7SizYk,12600
|
|
2
2
|
lucidicai/action.py,sha256=sPRd1hTIVXDqnvG9ZXWEipUFh0bsXcE0Fm7RVqmVccM,237
|
|
3
3
|
lucidicai/client.py,sha256=IIhlY6Mfwy47FeMxzpvIygCaqcI1FnqiXiVU6M4QEiE,22327
|
|
4
4
|
lucidicai/constants.py,sha256=zN8O7TjoRHRlaGa9CZUWppS73rhzKGwaEkF9XMTV0Cg,1160
|
|
@@ -17,6 +17,16 @@ lucidicai/singleton.py,sha256=SKiNBgt_Wb5cCWbMt3IWjRAQw3v153LTRgqvDj8poF8,1457
|
|
|
17
17
|
lucidicai/state.py,sha256=4Tb1X6l2or6w_e62FYSuEeghAv3xXm5gquKwzCpvdok,235
|
|
18
18
|
lucidicai/step.py,sha256=_oBIyTBZBvNkUkYHIrwWd75KMSlMtR9Ws2Lo71Lyff8,2522
|
|
19
19
|
lucidicai/streaming.py,sha256=QOLAzhwxetvx711J8VcphY5kXWPJz9XEBJrmHveRKMc,9796
|
|
20
|
+
lucidicai/api/__init__.py,sha256=UOYuFZupG0TgzMAxbLNgpodDXhDRXBgMva8ZblgBN9Y,31
|
|
21
|
+
lucidicai/api/client.py,sha256=czD3sg4wgyGQTVVlnSi3wpeCt90_D4eOuK8nBlpiv4U,7276
|
|
22
|
+
lucidicai/api/resources/__init__.py,sha256=Wc8-JfL82wkE7eB8PHplqYvaEG2oXNXXhRyEPeduJeE,27
|
|
23
|
+
lucidicai/api/resources/dataset.py,sha256=6UnMUd-y__TOAjUJLjbc0lZJRTy_gHkyoE82OvjFoN4,5583
|
|
24
|
+
lucidicai/api/resources/event.py,sha256=GyyNL3_k53EbmvTdgJEABexiuJnoX61hxWey7DYmlYY,2434
|
|
25
|
+
lucidicai/api/resources/session.py,sha256=w7b4kkbWdbaNbwuMBFgEeVmDfaYozBf9OK8B8L9B1m8,3730
|
|
26
|
+
lucidicai/core/__init__.py,sha256=b0YQkd8190Y_GgwUcmf0tOiSLARd7L4kq4jwfhhGAyI,39
|
|
27
|
+
lucidicai/core/config.py,sha256=P9y5aSZRkAehTvoBdYEc6x5-jiumB5cxftoMtJatl7w,7980
|
|
28
|
+
lucidicai/core/errors.py,sha256=aRfdXABiTWFTiWELgu2Dz_wxVSggcBFqX7Q-toCy_fY,2130
|
|
29
|
+
lucidicai/core/types.py,sha256=KabcTBQe7SemigccKfJSDiJmjSJDJJvvtefSd8pfrJI,702
|
|
20
30
|
lucidicai/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
21
31
|
lucidicai/providers/anthropic_handler.py,sha256=GZEa4QOrjZ9ftu_qTwY3L410HwKzkXgN7omYRsEQ4LU,10174
|
|
22
32
|
lucidicai/providers/base_providers.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
|
|
@@ -34,25 +44,44 @@ lucidicai/providers/otel_provider.py,sha256=ixLc80-_Vag0EO_92wj2m3_lg6HXyIpz9Md4
|
|
|
34
44
|
lucidicai/providers/pydantic_ai_handler.py,sha256=Yhd9VTJhq292ZzJF04O_jYGRh-1bzs70BzQdo7a2Z9M,28269
|
|
35
45
|
lucidicai/providers/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
|
|
36
46
|
lucidicai/providers/universal_image_interceptor.py,sha256=7d-hw4xihRwvvA1AP8-vqYNChtmVXKmn09MN4pDS7KQ,12126
|
|
47
|
+
lucidicai/sdk/__init__.py,sha256=UrkV9FYbZkBxaX9qwxGbCJdXp-JqMpn0_u-huO9Y-ec,32
|
|
48
|
+
lucidicai/sdk/context.py,sha256=_tNem1a39CY-cFWue173eM7FgeUmOEmQ42EfkvKhehQ,7515
|
|
49
|
+
lucidicai/sdk/decorators.py,sha256=B5BXG9Sn5ruUkxFq10L1rrCR_wzYUPlYeu5aqyXetMM,8393
|
|
50
|
+
lucidicai/sdk/error_boundary.py,sha256=IPr5wS9rS7ZQNgEaBwK53UaixAm6L2rijKKFfxcxjUI,9190
|
|
51
|
+
lucidicai/sdk/event.py,sha256=NiPcnPzYCU0VlFbBk93LD88wqAYmnglV64nQb2XteOs,3747
|
|
52
|
+
lucidicai/sdk/event_builder.py,sha256=oMvt39m07ZLmPllJTWwRxpinJUz9_AD17yNE6wQRoDA,10423
|
|
53
|
+
lucidicai/sdk/init.py,sha256=RMTyu_LZIo9Pi0uA76jkRviX6VBuvIuVpmFUXC7zwA4,12784
|
|
54
|
+
lucidicai/sdk/shutdown_manager.py,sha256=I5ylR96QHQ_SfP1euAiM0qQ-I7upCPMW1HUNvoj7hCw,12090
|
|
55
|
+
lucidicai/sdk/features/__init__.py,sha256=23KUF2EZBzsaH9JUFDGNXZb_3PSfc35VZfD59gAfyR0,26
|
|
56
|
+
lucidicai/sdk/features/dataset.py,sha256=qFGnu8Wm1yhaflBhtm-5veN-KaoxGLBL5xWEifkrsY0,19416
|
|
57
|
+
lucidicai/sdk/features/feature_flag.py,sha256=SzuzHiVnbticD6Ojn0_i9xQKui2s9QUFPJ7LixzAtf4,13844
|
|
37
58
|
lucidicai/telemetry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
38
59
|
lucidicai/telemetry/base_provider.py,sha256=nrZVr4Y9xcAiMn4uAN3t3k6DlHNTvlXrA4qQg7lANOQ,544
|
|
39
|
-
lucidicai/telemetry/
|
|
60
|
+
lucidicai/telemetry/context_bridge.py,sha256=NwyclZvPcZHZtIvLSrY3oO8WQ_J1JSuHWIr36gxA7xk,2989
|
|
61
|
+
lucidicai/telemetry/context_capture_processor.py,sha256=kzKWpg5m0OMUP5we6g453FjckWwA_jAVjOKCfiyKVN8,3651
|
|
40
62
|
lucidicai/telemetry/extract.py,sha256=30Iqvnr9I0EkD61GRCMN0Zpk3fLmRYcuVajWjRz0z9I,6814
|
|
41
|
-
lucidicai/telemetry/litellm_bridge.py,sha256=
|
|
42
|
-
lucidicai/telemetry/lucidic_exporter.py,sha256=
|
|
63
|
+
lucidicai/telemetry/litellm_bridge.py,sha256=GlNeTX0HCu4JsUqfCGBb62XA61fhyWKv5ohfqSkriaE,16574
|
|
64
|
+
lucidicai/telemetry/lucidic_exporter.py,sha256=tD1A2UGn0vuOW_FV_GVLXuXSxYYZf6r79Pczrn6d0lc,13189
|
|
43
65
|
lucidicai/telemetry/lucidic_span_processor.py,sha256=-jo7Muuslo3ZCSAysLsDGBqJijQSpIOvJHPbPNjP4iQ,31029
|
|
44
66
|
lucidicai/telemetry/openai_agents_instrumentor.py,sha256=__wIbeglMnEEf4AGTQ--FXeWCKmz2yy8SBupwprEdZA,12694
|
|
67
|
+
lucidicai/telemetry/openai_patch.py,sha256=BRSwX4JQLd1kiH43K2FnGvk6rcf5rfZg9lxg_wPb45M,11904
|
|
68
|
+
lucidicai/telemetry/openai_uninstrument.py,sha256=zELpoz2BU8O-rdHrg_7NuvjdNoY6swgoqVm5NtTCJRQ,3456
|
|
45
69
|
lucidicai/telemetry/opentelemetry_converter.py,sha256=xOHCqoTyO4hUkL6k7fxy84PbljPpYep6ET9ZqbkJehc,17665
|
|
46
70
|
lucidicai/telemetry/otel_handlers.py,sha256=OCzXuYog6AuwjI4eXy5Sk40DUehyz48QOxuOujXnEVU,20859
|
|
47
71
|
lucidicai/telemetry/otel_init.py,sha256=hjUOX8nEBLrDOuh0UTKFfG-C98yFZHTiP8ql59bmNXY,13780
|
|
48
72
|
lucidicai/telemetry/otel_provider.py,sha256=e5XcpQTd_a5UrMAq-EQcJ0zUJpO7NO16T-BphVUigR4,7513
|
|
49
73
|
lucidicai/telemetry/pydantic_ai_handler.py,sha256=WPa3tFcVgVnPPO3AxcNOTbNkmODLgNOrU2_3GVtWqUw,28261
|
|
50
|
-
lucidicai/telemetry/telemetry_init.py,sha256=
|
|
74
|
+
lucidicai/telemetry/telemetry_init.py,sha256=i5lxd2RzIADv2Va06APob9CmQ0KZMSbLKDrGtAGFyBo,9503
|
|
51
75
|
lucidicai/telemetry/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
52
76
|
lucidicai/telemetry/utils/image_storage.py,sha256=4Z59ZpVexr7-lcExfr8GsqXe0y2VZmr8Yjwa-3DeOxU,1457
|
|
77
|
+
lucidicai/telemetry/utils/model_pricing.py,sha256=Dxi6e0WjcIyCTkVX7K7f0pJ5rPu7nSt3lOmgzAUQl1o,12402
|
|
53
78
|
lucidicai/telemetry/utils/text_storage.py,sha256=L62MMJ8E23TDqDTUv2aRntdKMCItsXV7XjY6cFwx2DE,1503
|
|
54
79
|
lucidicai/telemetry/utils/universal_image_interceptor.py,sha256=vARgMk1hVSF--zfi5b8qBpJJOESuD17YlH9xqxmB9Uw,15954
|
|
55
|
-
lucidicai
|
|
56
|
-
lucidicai
|
|
57
|
-
lucidicai
|
|
58
|
-
lucidicai
|
|
80
|
+
lucidicai/utils/__init__.py,sha256=ZiGtmJaF0ph9iIFIgQiAreVuYM_1o7qu9VySK1NblTw,22
|
|
81
|
+
lucidicai/utils/images.py,sha256=YHFjeKHRxzWu0IsuNwKw303egPsd99AShaD4WND1lJk,12325
|
|
82
|
+
lucidicai/utils/logger.py,sha256=R3B3gSee64F6UVHUrShihBq_O7W7bgfrBiVDXTO3Isg,4777
|
|
83
|
+
lucidicai/utils/queue.py,sha256=iBhazYt9EPTpyuexfDyPjvJT-2ODaAbCBbGYvLVl8wM,15815
|
|
84
|
+
lucidicai-2.1.1.dist-info/METADATA,sha256=QWcIgu6okS3ZQmbRJgq_2mAgHUnhV9tazlTSPmUMqKg,902
|
|
85
|
+
lucidicai-2.1.1.dist-info/WHEEL,sha256=Xo9-1PvkuimrydujYJAjF7pCkriuXBpUPEjma1nZyJ0,92
|
|
86
|
+
lucidicai-2.1.1.dist-info/top_level.txt,sha256=vSSdM3lclF4I5tyVC0xxUk8eIRnnYXMe1hW-eO91HUo,10
|
|
87
|
+
lucidicai-2.1.1.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|