lucidicai-2.1.1-py3-none-any.whl → lucidicai-2.1.3-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
@@ -1,12 +1,12 @@
-"""OpenAI responses.parse instrumentation patch.
+"""OpenAI responses API instrumentation patch.
 
-This module provides instrumentation for OpenAI's responses.parse API
-which is not covered by the standard opentelemetry-instrumentation-openai package.
+This module provides instrumentation for OpenAI's responses.parse and responses.create APIs
+which are not covered by the standard opentelemetry-instrumentation-openai package.
 """
 import functools
 import logging
 import time
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, Dict
 
 from opentelemetry import trace
 from opentelemetry.trace import Status, StatusCode, SpanKind
@@ -18,7 +18,7 @@ logger = logging.getLogger("Lucidic")
 
 
 class OpenAIResponsesPatcher:
-    """Patches OpenAI client to instrument responses.parse method."""
+    """Patches OpenAI client to instrument responses API methods."""
 
     def __init__(self, tracer_provider=None):
         """Initialize the patcher.
@@ -29,13 +29,13 @@ class OpenAIResponsesPatcher:
         self._tracer_provider = tracer_provider or trace.get_tracer_provider()
         self._tracer = self._tracer_provider.get_tracer(__name__)
         self._is_patched = False
-        self._original_parse = None
+        self._original_init = None
         self._client_refs = [] # Keep track of patched clients for cleanup
 
     def patch(self):
         """Apply the patch to OpenAI client initialization."""
         if self._is_patched:
-            debug("[OpenAI Patch] responses.parse already patched")
+            debug("[OpenAI Patch] responses API already patched")
             return
 
         try:
@@ -50,102 +50,132 @@ class OpenAIResponsesPatcher:
                 # Call original initialization
                 original_init(client_self, *args, **kwargs)
 
-                # Patch the responses.parse method on this specific instance
-                if hasattr(client_self, 'resources') and hasattr(client_self.resources, 'responses'):
-                    responses = client_self.resources.responses
-                    if hasattr(responses, 'parse'):
-                        # Store original and apply wrapper
-                        original_parse = responses.parse
-                        responses.parse = self._create_parse_wrapper(original_parse)
+                # Patch responses API methods
+                self._patch_responses_api(client_self)
 
-                        # Track this client for cleanup
-                        self._client_refs.append((responses, original_parse))
-
-                        verbose("[OpenAI Patch] Patched responses.parse on client instance")
-
-                # Also patch the direct access if available
-                if hasattr(client_self, 'responses') and hasattr(client_self.responses, 'parse'):
-                    original_parse = client_self.responses.parse
-                    client_self.responses.parse = self._create_parse_wrapper(original_parse)
-                    self._client_refs.append((client_self.responses, original_parse))
-                    verbose("[OpenAI Patch] Patched client.responses.parse")
+                # Also patch beta.chat.completions.parse if it exists
+                self._patch_beta_api(client_self)
 
             # Replace the __init__ method
             OpenAI.__init__ = patched_init
             self._original_init = original_init
             self._is_patched = True
 
-            logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses.parse")
+            logger.info("[OpenAI Patch] Successfully patched OpenAI client for responses API")
 
         except ImportError:
             logger.warning("[OpenAI Patch] OpenAI library not installed, skipping patch")
         except Exception as e:
-            logger.error(f"[OpenAI Patch] Failed to patch responses.parse: {e}")
+            logger.error(f"[OpenAI Patch] Failed to patch responses API: {e}")
+
+    def _patch_responses_api(self, client):
+        """Patch the responses API methods on the client."""
+        # Check for client.resources.responses (newer structure)
+        if hasattr(client, 'resources') and hasattr(client.resources, 'responses'):
+            responses = client.resources.responses
+            self._patch_responses_object(responses, "client.resources.responses")
+
+        # Check for client.responses (direct access)
+        if hasattr(client, 'responses'):
+            responses = client.responses
+            self._patch_responses_object(responses, "client.responses")
+
+    def _patch_responses_object(self, responses, location: str):
+        """Patch methods on a responses object.
+
+        Args:
+            responses: The responses object to patch
+            location: String describing where this object is (for logging)
+        """
+        methods_to_patch = {
+            'parse': 'openai.responses.parse',
+            'create': 'openai.responses.create'
+        }
+
+        for method_name, span_name in methods_to_patch.items():
+            if hasattr(responses, method_name):
+                original_method = getattr(responses, method_name)
+                wrapped_method = self._create_method_wrapper(original_method, span_name)
+                setattr(responses, method_name, wrapped_method)
+
+                # Track for cleanup
+                self._client_refs.append((responses, method_name, original_method))
+
+                verbose(f"[OpenAI Patch] Patched {location}.{method_name}")
+
+    def _patch_beta_api(self, client):
+        """Patch beta.chat.completions.parse if it exists."""
+        try:
+            if (hasattr(client, 'beta') and
+                hasattr(client.beta, 'chat') and
+                hasattr(client.beta.chat, 'completions') and
+                hasattr(client.beta.chat.completions, 'parse')):
+
+                completions = client.beta.chat.completions
+                original_parse = completions.parse
+
+                # Wrap with a slightly different span name for clarity
+                wrapped_parse = self._create_method_wrapper(
+                    original_parse,
+                    'openai.beta.chat.completions.parse'
+                )
+                completions.parse = wrapped_parse
+
+                # Track for cleanup
+                self._client_refs.append((completions, 'parse', original_parse))
+
+                verbose("[OpenAI Patch] Patched beta.chat.completions.parse")
+
+        except Exception as e:
+            debug(f"[OpenAI Patch] Could not patch beta API: {e}")
 
-    def _create_parse_wrapper(self, original_method: Callable) -> Callable:
-        """Create a wrapper for the responses.parse method.
+    def _create_method_wrapper(self, original_method: Callable, span_name: str) -> Callable:
+        """Create a wrapper for an OpenAI API method.
 
         Args:
-            original_method: The original parse method to wrap
+            original_method: The original method to wrap
+            span_name: Name for the OpenTelemetry span
 
         Returns:
             Wrapped method with instrumentation
         """
         @functools.wraps(original_method)
-        def wrapper(**kwargs):
+        def wrapper(*args, **kwargs):
             # Create span for tracing
             with self._tracer.start_as_current_span(
-                "openai.responses.parse",
+                span_name,
                 kind=SpanKind.CLIENT
             ) as span:
                 start_time = time.time()
 
                 try:
-                    # Extract request parameters
-                    model = kwargs.get('model', 'unknown')
-                    temperature = kwargs.get('temperature', 1.0)
-                    input_param = kwargs.get('input', [])
-                    text_format = kwargs.get('text_format')
-                    instructions = kwargs.get('instructions')
-
-                    # Convert input to messages format if needed
-                    if isinstance(input_param, str):
-                        messages = [{"role": "user", "content": input_param}]
-                    elif isinstance(input_param, list):
-                        messages = input_param
-                    else:
-                        messages = []
+                    # Debug log for responses.create to understand the parameters
+                    if 'responses.create' in span_name:
+                        debug(f"[OpenAI Patch] responses.create called with kwargs keys: {list(kwargs.keys())}")
+
+                    # Extract and process request parameters
+                    request_attrs = self._extract_request_attributes(span_name, args, kwargs)
 
                     # Set span attributes
                     span.set_attribute("gen_ai.system", "openai")
-                    span.set_attribute("gen_ai.request.model", model)
-                    span.set_attribute("gen_ai.request.temperature", temperature)
-                    span.set_attribute("gen_ai.operation.name", "responses.parse")
+                    span.set_attribute("gen_ai.operation.name", span_name)
 
-                    # Add a unique marker for our instrumentation
-                    span.set_attribute("lucidic.instrumented", "responses.parse")
-                    span.set_attribute("lucidic.patch.version", "1.0")
+                    # Add our instrumentation marker
+                    span.set_attribute("lucidic.instrumented", span_name)
+                    span.set_attribute("lucidic.patch.version", "2.0")
 
-                    if text_format and hasattr(text_format, '__name__'):
-                        span.set_attribute("gen_ai.request.response_format", text_format.__name__)
-
-                    if instructions:
-                        span.set_attribute("gen_ai.request.instructions", str(instructions))
-
-                    # Always set message attributes for proper event creation
-                    for i, msg in enumerate(messages): # Include all messages
-                        if isinstance(msg, dict):
-                            role = msg.get('role', 'user')
-                            content = msg.get('content', '')
-                            span.set_attribute(f"gen_ai.prompt.{i}.role", role)
-                            # Always include full content - EventQueue handles large messages
-                            span.set_attribute(f"gen_ai.prompt.{i}.content", str(content))
+                    # Set request attributes on span
+                    for key, value in request_attrs.items():
+                        if value is not None:
+                            span.set_attribute(key, value)
+                            if 'responses.create' in span_name and ('prompt' in key or 'completion' in key):
+                                debug(f"[OpenAI Patch] Set attribute {key}: {str(value)[:100]}")
 
                     # Call the original method
-                    result = original_method(**kwargs)
+                    result = original_method(*args, **kwargs)
 
-                    # Process the response and set attributes on span
-                    self._set_response_attributes(span, result, model, messages, start_time, text_format)
+                    # Process the response
+                    self._set_response_attributes(span, result, span_name, start_time)
 
                     span.set_status(Status(StatusCode.OK))
                     return result
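Note on the change above: the wrapper now accepts *args as well as **kwargs, so positional calls no longer bypass instrumentation. For readers unfamiliar with this style, here is a minimal standalone sketch of the same wrap-and-rebind technique; the attribute name and the commented rebinding line are illustrative, not the SDK's exact code:

    import functools
    import time

    from opentelemetry import trace
    from opentelemetry.trace import SpanKind, Status, StatusCode

    tracer = trace.get_tracer(__name__)

    def wrap_with_span(original_method, span_name):
        """Wrap a bound method so every call runs inside a CLIENT span."""
        @functools.wraps(original_method)
        def wrapper(*args, **kwargs):
            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
                start = time.time()
                result = original_method(*args, **kwargs)  # call through to the real API
                span.set_attribute("duration_seconds", time.time() - start)
                span.set_status(Status(StatusCode.OK))
                return result
        return wrapper

    # Rebinding on the instance (not the class) keeps the patch scoped to one client,
    # and keeping the original method around is what makes unpatch() possible:
    # client.responses.create = wrap_with_span(client.responses.create, "openai.responses.create")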
@@ -160,29 +190,154 @@ class OpenAIResponsesPatcher:
 
         return wrapper
 
-    def _set_response_attributes(self, span, result, model: str, messages: list, start_time: float, text_format):
+    def _extract_request_attributes(self, span_name: str, args: tuple, kwargs: dict) -> Dict[str, Any]:
+        """Extract request attributes based on the API method being called.
+
+        Args:
+            span_name: Name of the span/API method
+            args: Positional arguments
+            kwargs: Keyword arguments
+
+        Returns:
+            Dictionary of span attributes to set
+        """
+        attrs = {}
+
+        # Common attributes
+        model = kwargs.get('model', 'unknown')
+        attrs['gen_ai.request.model'] = model
+
+        temperature = kwargs.get('temperature')
+        if temperature is not None:
+            attrs['gen_ai.request.temperature'] = temperature
+
+        # Method-specific handling
+        if 'responses.parse' in span_name:
+            # Handle responses.parse format
+            input_param = kwargs.get('input', [])
+            text_format = kwargs.get('text_format')
+            instructions = kwargs.get('instructions')
+
+            # Convert input to messages format
+            if isinstance(input_param, str):
+                messages = [{"role": "user", "content": input_param}]
+            elif isinstance(input_param, list):
+                messages = input_param
+            else:
+                messages = []
+
+            if text_format and hasattr(text_format, '__name__'):
+                attrs['gen_ai.request.response_format'] = text_format.__name__
+
+            if instructions:
+                # Never truncate - EventQueue handles large messages automatically
+                attrs['gen_ai.request.instructions'] = str(instructions)
+
+        elif 'responses.create' in span_name:
+            # Handle responses.create format - it uses 'input' not 'messages'
+            input_param = kwargs.get('input', [])
+
+            # Convert input to messages format
+            if isinstance(input_param, str):
+                messages = [{"role": "user", "content": input_param}]
+            elif isinstance(input_param, list):
+                messages = input_param
+            else:
+                messages = []
+
+            # Handle text parameter for structured outputs
+            text_format = kwargs.get('text')
+            if text_format and hasattr(text_format, '__name__'):
+                attrs['gen_ai.request.response_format'] = text_format.__name__
+
+        elif 'completions.parse' in span_name:
+            # Handle standard chat completion format
+            messages = kwargs.get('messages', [])
+
+            # Handle response_format for structured outputs
+            response_format = kwargs.get('response_format')
+            if response_format:
+                if hasattr(response_format, '__name__'):
+                    attrs['gen_ai.request.response_format'] = response_format.__name__
+                elif isinstance(response_format, dict):
+                    attrs['gen_ai.request.response_format'] = str(response_format)
+
+        else:
+            # Fallback: try to get messages from kwargs
+            messages = kwargs.get('messages', kwargs.get('input', []))
+            if isinstance(messages, str):
+                messages = [{"role": "user", "content": messages}]
+
+        # Always set message attributes for proper event creation
+        # The EventQueue handles large messages automatically with blob storage
+        for i, msg in enumerate(messages):
+            if isinstance(msg, dict):
+                role = msg.get('role', 'user')
+                content = msg.get('content', '')
+                attrs[f'gen_ai.prompt.{i}.role'] = role
+                # Always include full content - EventQueue handles large messages
+                attrs[f'gen_ai.prompt.{i}.content'] = str(content)
+
+        return attrs
+
+    def _set_response_attributes(self, span, result, span_name: str, start_time: float):
         """Set response attributes on the span for the exporter to use.
 
         Args:
             span: OpenTelemetry span
             result: Response from OpenAI
-            model: Model name
-            messages: Input messages
+            span_name: Name of the API method
             start_time: Request start time
-            text_format: Response format (Pydantic model)
         """
         duration = time.time() - start_time
+        span.set_attribute("lucidic.duration_seconds", duration)
 
-        # Extract output
+        # Extract output based on response structure
         output_text = None
 
-        # Handle structured output response
-        if hasattr(result, 'output_parsed'):
-            output_text = str(result.output_parsed)
-
-        # Always set completion attributes so the exporter can extract them
+        # Handle different response formats
+        if 'responses.parse' in span_name:
+            # responses.parse format
+            if hasattr(result, 'output_parsed'):
+                output_text = str(result.output_parsed)
+            elif hasattr(result, 'parsed'):
+                output_text = str(result.parsed)
+
+        elif 'responses.create' in span_name:
+            # responses.create returns a Response object with output_text
+            if hasattr(result, 'output_text'):
+                output_text = result.output_text
+            elif hasattr(result, 'output'):
+                output_text = result.output
+            else:
+                # Log what we actually got for debugging
+                debug(f"[OpenAI Patch] responses.create result type: {type(result)}")
+                debug(f"[OpenAI Patch] responses.create result attributes: {[attr for attr in dir(result) if not attr.startswith('_')]}")
+
+        elif 'completions.parse' in span_name:
+            # Standard chat completion format
+            if hasattr(result, 'choices') and result.choices:
+                choice = result.choices[0]
+                if hasattr(choice, 'message'):
+                    msg = choice.message
+                    if hasattr(msg, 'parsed'):
+                        # Structured output
+                        output_text = str(msg.parsed)
+                    elif hasattr(msg, 'content'):
+                        # Regular content
+                        output_text = msg.content
+                elif hasattr(choice, 'text'):
+                    # Completion format
+                    output_text = choice.text
+
+        # Set completion attributes if we have output
+        if output_text:
+            # Never truncate - EventQueue handles large messages automatically
             span.set_attribute("gen_ai.completion.0.role", "assistant")
             span.set_attribute("gen_ai.completion.0.content", output_text)
+            debug(f"[OpenAI Patch] Set completion: {output_text[:100]}")
+        else:
+            debug(f"[OpenAI Patch] No output_text found for {span_name}")
 
         # Handle usage data
         if hasattr(result, 'usage'):
@@ -223,54 +378,29 @@ class OpenAIResponsesPatcher:
             if total_tokens is not None:
                 span.set_attribute("gen_ai.usage.total_tokens", total_tokens)
 
-        # Set additional metadata for the exporter
-        if text_format and hasattr(text_format, '__name__'):
-            span.set_attribute("lucidic.response_format", text_format.__name__)
-
-        # Set duration as attribute
-        span.set_attribute("lucidic.duration_seconds", duration)
-
-
-    def _should_capture_content(self) -> bool:
-        """Check if message content should be captured.
-
-        Returns:
-            True if content capture is enabled
-        """
-
-        return True # always capture content for now
-
-        import os
-        # check OTEL standard env var
-        otel_capture = os.getenv('OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT', 'false')
-        # check Lucidic-specific env var
-        lucidic_capture = os.getenv('LUCIDIC_CAPTURE_CONTENT', 'false')
-
-        return otel_capture.lower() == 'true' or lucidic_capture.lower() == 'true'
-
     def unpatch(self):
         """Remove the patch and restore original behavior."""
         if not self._is_patched:
             return
 
         try:
-            # restore original __init__ if we have it
-            if hasattr(self, '_original_init'):
+            # Restore original __init__ if we have it
+            if self._original_init:
                 import openai
                 from openai import OpenAI
                 OpenAI.__init__ = self._original_init
 
-            # restore original parse methods on tracked clients
-            for responses_obj, original_parse in self._client_refs:
+            # Restore original methods on tracked clients
+            for obj, method_name, original_method in self._client_refs:
                 try:
-                    responses_obj.parse = original_parse
+                    setattr(obj, method_name, original_method)
                 except:
                     pass # Client might have been garbage collected
 
             self._client_refs.clear()
             self._is_patched = False
 
-            logger.info("[OpenAI Patch] Successfully removed responses.parse patch")
+            logger.info("[OpenAI Patch] Successfully removed responses API patch")
 
         except Exception as e:
             logger.error(f"[OpenAI Patch] Failed to unpatch: {e}")
@@ -60,7 +60,7 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
         from .openai_uninstrument import clean_openai_instrumentation
         clean_openai_instrumentation()
 
-        # Add patch for responses.parse (not covered by standard instrumentation)
+        # Add patch for responses API methods (not covered by standard instrumentation)
         import os
         if os.getenv('LUCIDIC_DISABLE_RESPONSES_PATCH', 'false').lower() != 'true':
             from .openai_patch import get_responses_patcher
@@ -68,9 +68,9 @@ def instrument_providers(providers: list, tracer_provider: TracerProvider, exist
             patcher.patch()
             _global_instrumentors["openai_responses_patch"] = patcher
         else:
-            logger.info("[Telemetry] Skipping responses.parse patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
+            logger.info("[Telemetry] Skipping responses API patch (disabled via LUCIDIC_DISABLE_RESPONSES_PATCH)")
 
-        logger.info("[Telemetry] Instrumented OpenAI (including responses.parse)")
+        logger.info("[Telemetry] Instrumented OpenAI (including responses.parse, responses.create, beta.chat.completions.parse)")
     except Exception as e:
         logger.error(f"Failed to instrument OpenAI: {e}")
 
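As the hunk above shows, the patch is opt-out: it can be disabled without code changes by setting the environment variable before instrumentation runs, e.g.:

    import os

    # Must be set before the Lucidic SDK instruments OpenAI
    os.environ["LUCIDIC_DISABLE_RESPONSES_PATCH"] = "true"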
lucidicai/utils/images.py CHANGED
@@ -11,7 +11,7 @@ import logging
 import threading
 from typing import List, Dict, Any, Optional, Tuple, Union
 from PIL import Image
-import requests
+import httpx
 
 logger = logging.getLogger("Lucidic")
 
@@ -299,9 +299,9 @@ class ImageUploader:
         image_obj, content_type = ImageHandler.prepare_for_upload(image_data, format)
 
         # Upload to S3
-        upload_response = requests.put(
+        upload_response = httpx.put(
             url,
-            data=image_obj.getvalue() if hasattr(image_obj, 'getvalue') else image_obj,
+            content=image_obj.getvalue() if hasattr(image_obj, 'getvalue') else image_obj,
             headers={"Content-Type": content_type}
         )
         upload_response.raise_for_status()
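The requests → httpx swap above is not just an import rename: requests takes a raw byte body via data=, while httpx reserves data= for form fields and takes raw bytes via content=. A minimal sketch of the new call shape (URL and body are placeholders):

    import httpx

    # httpx.put: raw bytes go in content=; data= would be form-encoded
    resp = httpx.put(
        "https://example.com/presigned-upload",  # hypothetical presigned URL
        content=b"<image bytes>",
        headers={"Content-Type": "image/png"},
    )
    resp.raise_for_status()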
lucidicai/utils/queue.py CHANGED
@@ -9,7 +9,7 @@ import json
 import queue
 import threading
 import time
-import requests
+import httpx
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from datetime import datetime, timezone
 from typing import Any, Dict, List, Optional, Set, Tuple
@@ -362,7 +362,7 @@ class EventQueue:
     def _upload_blob(self, blob_url: str, data: bytes) -> None:
         """Upload compressed blob to presigned URL."""
         headers = {"Content-Type": "application/json", "Content-Encoding": "gzip"}
-        resp = requests.put(blob_url, data=data, headers=headers)
+        resp = httpx.put(blob_url, content=data, headers=headers)
         resp.raise_for_status()
 
     @staticmethod
@@ -372,19 +372,51 @@ class EventQueue:
         t = (event_type or "generic").lower()
 
         if t == "llm_generation":
+
             req = payload.get("request", {})
+            usage = payload.get("usage", {})
+            messages = req.get("messages", [])[:5]
+            output = payload.get("response", {}).get("output", {})
+            compressed_messages = []
+            for i, m in enumerate(messages):
+                compressed_message_item = {}
+                for k, v in messages[i].items():
+                    compressed_message_item[k] = str(v)[:200] if v else None
+                compressed_messages.append(compressed_message_item)
             return {
                 "request": {
-                    "model": str(req.get("model", ""))[:200],
-                    "provider": str(req.get("provider", ""))[:200],
-                    "messages": "truncated"
+                    "model": req.get("model")[:200] if req.get("model") else None,
+                    "provider": req.get("provider")[:200] if req.get("provider") else None,
+                    "messages": compressed_messages,
+                },
+                "usage": {
+                    k: usage.get(k) for k in ("input_tokens", "output_tokens", "cost") if k in usage
                 },
-                "response": {"output": "truncated"}
+                "response": {
+                    "output": str(output)[:200] if output else None,
+                }
             }
+
         elif t == "function_call":
+            args = payload.get("arguments")
+            truncated_args = (
+                {k: (str(v)[:200] if v is not None else None) for k, v in args.items()}
+                if isinstance(args, dict)
+                else (str(args)[:200] if args is not None else None)
+            )
+            return {
+                "function_name": payload.get("function_name")[:200] if payload.get("function_name") else None,
+                "arguments": truncated_args,
+            }
+
+        elif t == "error_traceback":
+            return {
+                "error": payload.get("error")[:200] if payload.get("error") else None,
+            }
+
+        elif t == "generic":
             return {
-                "function_name": str(payload.get("function_name", ""))[:200],
-                "arguments": "truncated"
+                "details": payload.get("details")[:200] if payload.get("details") else None,
             }
         else:
             return {"details": "preview_unavailable"}
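To make the new preview shape concrete, here is a hypothetical llm_generation payload and the preview the code above would produce (string fields cut to 200 characters, messages capped at the first 5, usage passed through):

    payload = {
        "request": {
            "model": "gpt-4o",
            "provider": "openai",
            "messages": [{"role": "user", "content": "x" * 1000}],
        },
        "usage": {"input_tokens": 12, "output_tokens": 34, "cost": 0.0011},
        "response": {"output": "y" * 1000},
    }
    # Expected preview:
    # {
    #     "request": {
    #         "model": "gpt-4o",
    #         "provider": "openai",
    #         "messages": [{"role": "user", "content": "xx...x"}],  # 200 "x" characters
    #     },
    #     "usage": {"input_tokens": 12, "output_tokens": 34, "cost": 0.0011},
    #     "response": {"output": "yy...y"},  # str() of the output, cut to 200 characters
    # }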
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lucidicai
-Version: 2.1.1
+Version: 2.1.3
 Summary: Lucidic AI Python SDK
 Author: Andy Liang
 Author-email: andy@lucidic.ai