raindrop-ai 0.0.35__tar.gz → 0.0.36__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: raindrop-ai
-Version: 0.0.35
+Version: 0.0.36
 Summary: Raindrop AI (Python SDK)
 License: MIT
 Author: Raindrop AI
@@ -15,6 +15,7 @@ Requires-Dist: opentelemetry-sdk (>=1.39.0)
 Requires-Dist: pydantic (>=2.09,<3)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
 Requires-Dist: traceloop-sdk (>=0.46.0)
+Requires-Dist: urllib3 (>=2.6.0)
 Description-Content-Type: text/markdown
 
 # Raindrop Python SDK
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "raindrop-ai"
-version = "0.0.35"
+version = "0.0.36"
 description = "Raindrop AI (Python SDK)"
 authors = ["Raindrop AI <sdk@raindrop.ai>"]
 license = "MIT"
@@ -13,6 +13,7 @@ pydantic = ">=2.09,<3"
 requests = "^2.32.3"
 traceloop-sdk = ">=0.46.0"
 opentelemetry-sdk = ">=1.39.0"
+urllib3 = ">=2.6.0"
 
 
 [tool.poetry.group.dev.dependencies]
@@ -66,6 +66,7 @@ __all__ = [
     "start_span",
     "ManualSpan",
     "set_span_properties",
+    "set_llm_span_io",
    "flush",
     "shutdown",
 ]
@@ -347,6 +348,59 @@ def _should_send_prompts():
     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
 
 
+def set_llm_span_io(
+    input: Any = None,
+    output: Any = None,
+) -> None:
+    """
+    Set LLM input/output content on the current span.
+
+    Use this to add prompt/completion content to auto-instrumented spans
+    that don't capture content automatically (e.g., Bedrock with aioboto3).
+
+    Args:
+        input: The input/prompt content (messages, text, etc.)
+        output: The output/completion content (response text, message, etc.)
+
+    Example:
+        response = await bedrock_client.converse(modelId=model, messages=messages)
+        raindrop.set_llm_span_io(
+            input=messages,
+            output=response["output"]["message"]["content"]
+        )
+    """
+    if not _should_send_prompts():
+        return
+
+    span = get_current_span()
+    if not span or not span.is_recording():
+        logger.debug("[raindrop] set_llm_span_io called but no active span found")
+        return
+
+    try:
+        if input is not None:
+            input_str = (
+                json.dumps(input, cls=JSONEncoder)
+                if not isinstance(input, str)
+                else input
+            )
+            input_str = _truncate_json_if_needed(input_str)
+            span.set_attribute("gen_ai.prompt.0.role", "user")
+            span.set_attribute("gen_ai.prompt.0.content", input_str)
+
+        if output is not None:
+            output_str = (
+                json.dumps(output, cls=JSONEncoder)
+                if not isinstance(output, str)
+                else output
+            )
+            output_str = _truncate_json_if_needed(output_str)
+            span.set_attribute("gen_ai.completion.0.role", "assistant")
+            span.set_attribute("gen_ai.completion.0.content", output_str)
+    except Exception as e:
+        logger.debug(f"[raindrop] Failed to record LLM content: {e}")
+
+
 # Signal types - This is now defined in models.py
 # SignalType = Literal["default", "feedback", "edit"]
 
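For context, a minimal usage sketch of the new `set_llm_span_io` helper, following the pattern in its own docstring. This assumes the SDK is imported as `raindrop` and that the Bedrock `converse` call is wrapped in an auto-instrumented span; the client setup and model ID are illustrative, not part of this diff:

```python
# Hypothetical usage sketch for the new set_llm_span_io helper.
# aioboto3 client setup and model ID are illustrative assumptions.
import aioboto3
import raindrop

async def ask_bedrock(messages: list[dict]) -> str:
    session = aioboto3.Session()
    async with session.client("bedrock-runtime") as client:
        response = await client.converse(
            modelId="anthropic.claude-3-5-sonnet-20240620-v1:0",
            messages=messages,
        )
        # Attach prompt/completion content to the active span, since
        # Bedrock-over-aioboto3 spans don't capture it automatically.
        raindrop.set_llm_span_io(
            input=messages,
            output=response["output"]["message"]["content"],
        )
        return response["output"]["message"]["content"][0]["text"]
```

Note that the helper is a no-op unless content tracing is enabled: it returns early when the `_should_send_prompts()` gate shown in the hunk above is false, and when there is no recording span to attach to.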
@@ -0,0 +1 @@
+VERSION = "0.0.36"
@@ -1 +0,0 @@
-VERSION = "0.0.35"