lmnr 0.4.53.dev0__py3-none-any.whl → 0.7.26__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (133)
  1. lmnr/__init__.py +32 -11
  2. lmnr/cli/__init__.py +270 -0
  3. lmnr/cli/datasets.py +371 -0
  4. lmnr/cli/evals.py +111 -0
  5. lmnr/cli/rules.py +42 -0
  6. lmnr/opentelemetry_lib/__init__.py +70 -0
  7. lmnr/opentelemetry_lib/decorators/__init__.py +337 -0
  8. lmnr/opentelemetry_lib/litellm/__init__.py +685 -0
  9. lmnr/opentelemetry_lib/litellm/utils.py +100 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +599 -0
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
  26. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +330 -0
  27. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  28. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  29. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  30. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  31. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  32. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  33. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  34. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
  35. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
  36. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +121 -0
  37. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
  38. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  39. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
  40. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
  41. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
  42. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  43. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
  44. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  45. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  46. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  47. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
  48. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  49. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
  50. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
  51. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
  52. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
  53. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  54. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
  55. lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
  56. lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +191 -0
  57. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
  58. lmnr/opentelemetry_lib/tracing/__init__.py +263 -0
  59. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +516 -0
  60. lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +21 -8
  61. lmnr/opentelemetry_lib/tracing/context.py +200 -0
  62. lmnr/opentelemetry_lib/tracing/exporter.py +153 -0
  63. lmnr/opentelemetry_lib/tracing/instruments.py +140 -0
  64. lmnr/opentelemetry_lib/tracing/processor.py +193 -0
  65. lmnr/opentelemetry_lib/tracing/span.py +398 -0
  66. lmnr/opentelemetry_lib/tracing/tracer.py +57 -0
  67. lmnr/opentelemetry_lib/tracing/utils.py +62 -0
  68. lmnr/opentelemetry_lib/utils/package_check.py +18 -0
  69. lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
  70. lmnr/sdk/browser/__init__.py +0 -0
  71. lmnr/sdk/browser/background_send_events.py +158 -0
  72. lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
  73. lmnr/sdk/browser/browser_use_otel.py +142 -0
  74. lmnr/sdk/browser/bubus_otel.py +71 -0
  75. lmnr/sdk/browser/cdp_utils.py +518 -0
  76. lmnr/sdk/browser/inject_script.js +514 -0
  77. lmnr/sdk/browser/patchright_otel.py +151 -0
  78. lmnr/sdk/browser/playwright_otel.py +322 -0
  79. lmnr/sdk/browser/pw_utils.py +363 -0
  80. lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
  81. lmnr/sdk/browser/utils.py +70 -0
  82. lmnr/sdk/client/asynchronous/async_client.py +180 -0
  83. lmnr/sdk/client/asynchronous/resources/__init__.py +6 -0
  84. lmnr/sdk/client/asynchronous/resources/base.py +32 -0
  85. lmnr/sdk/client/asynchronous/resources/browser_events.py +41 -0
  86. lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
  87. lmnr/sdk/client/asynchronous/resources/evals.py +266 -0
  88. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  89. lmnr/sdk/client/asynchronous/resources/tags.py +83 -0
  90. lmnr/sdk/client/synchronous/resources/__init__.py +6 -0
  91. lmnr/sdk/client/synchronous/resources/base.py +32 -0
  92. lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
  93. lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
  94. lmnr/sdk/client/synchronous/resources/evals.py +263 -0
  95. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  96. lmnr/sdk/client/synchronous/resources/tags.py +83 -0
  97. lmnr/sdk/client/synchronous/sync_client.py +191 -0
  98. lmnr/sdk/datasets/__init__.py +94 -0
  99. lmnr/sdk/datasets/file_utils.py +91 -0
  100. lmnr/sdk/decorators.py +163 -26
  101. lmnr/sdk/eval_control.py +3 -2
  102. lmnr/sdk/evaluations.py +403 -191
  103. lmnr/sdk/laminar.py +1080 -549
  104. lmnr/sdk/log.py +7 -2
  105. lmnr/sdk/types.py +246 -134
  106. lmnr/sdk/utils.py +151 -7
  107. lmnr/version.py +46 -0
  108. {lmnr-0.4.53.dev0.dist-info → lmnr-0.7.26.dist-info}/METADATA +152 -106
  109. lmnr-0.7.26.dist-info/RECORD +116 -0
  110. lmnr-0.7.26.dist-info/WHEEL +4 -0
  111. lmnr-0.7.26.dist-info/entry_points.txt +3 -0
  112. lmnr/cli.py +0 -101
  113. lmnr/openllmetry_sdk/.python-version +0 -1
  114. lmnr/openllmetry_sdk/__init__.py +0 -72
  115. lmnr/openllmetry_sdk/config/__init__.py +0 -9
  116. lmnr/openllmetry_sdk/decorators/base.py +0 -185
  117. lmnr/openllmetry_sdk/instruments.py +0 -38
  118. lmnr/openllmetry_sdk/tracing/__init__.py +0 -1
  119. lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -24
  120. lmnr/openllmetry_sdk/tracing/context_manager.py +0 -13
  121. lmnr/openllmetry_sdk/tracing/tracing.py +0 -884
  122. lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -61
  123. lmnr/openllmetry_sdk/utils/package_check.py +0 -7
  124. lmnr/openllmetry_sdk/version.py +0 -1
  125. lmnr/sdk/datasets.py +0 -55
  126. lmnr-0.4.53.dev0.dist-info/LICENSE +0 -75
  127. lmnr-0.4.53.dev0.dist-info/RECORD +0 -33
  128. lmnr-0.4.53.dev0.dist-info/WHEEL +0 -4
  129. lmnr-0.4.53.dev0.dist-info/entry_points.txt +0 -3
  130. /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
  131. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
  132. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
  133. /lmnr/{openllmetry_sdk/decorators/__init__.py → py.typed} +0 -0
lmnr/opentelemetry_lib/litellm/__init__.py
@@ -0,0 +1,685 @@
+"""LiteLLM callback logger for Laminar"""
+
+import json
+from datetime import datetime
+
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_PROMPT
+from opentelemetry.trace import SpanKind, Status, StatusCode, Tracer
+from lmnr.opentelemetry_lib.litellm.utils import (
+    get_tool_definition,
+    is_validator_iterator,
+    model_as_dict,
+    set_span_attribute,
+)
+from lmnr.opentelemetry_lib.tracing import TracerWrapper
+
+from lmnr.opentelemetry_lib.tracing.context import (
+    get_current_context,
+    get_event_attributes_from_context,
+)
+from lmnr.opentelemetry_lib.tracing.attributes import ASSOCIATION_PROPERTIES
+from lmnr.opentelemetry_lib.utils.package_check import is_package_installed
+from lmnr.sdk.log import get_default_logger
+from lmnr.sdk.utils import json_dumps
+
+logger = get_default_logger(__name__)
+
+SUPPORTED_CALL_TYPES = ["completion", "acompletion", "responses", "aresponses"]
+
+# Try to import the necessary LiteLLM components and gracefully handle ImportError
+try:
+    if not is_package_installed("litellm"):
+        raise ImportError("LiteLLM is not installed")
+
+    from litellm.integrations.custom_batch_logger import CustomBatchLogger
+
+    class LaminarLiteLLMCallback(CustomBatchLogger):
+        """Custom LiteLLM logger that sends logs to Laminar via OpenTelemetry spans
+
+        Usage:
+            import litellm
+            from lmnr import Laminar, LaminarLiteLLMCallback
+
+            # make sure this comes first
+            Laminar.initialize()
+
+            # Add the logger to LiteLLM callbacks
+            litellm.callbacks = [LaminarLiteLLMCallback()]
+        """
+
+        logged_openai_responses: set[str]
+
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+            if not hasattr(TracerWrapper, "instance") or TracerWrapper.instance is None:
+                raise ValueError("Laminar must be initialized before LiteLLM callback")
+
+            self.logged_openai_responses = set()
+            if is_package_installed("openai"):
+                from lmnr.opentelemetry_lib.opentelemetry.instrumentation.openai import (
+                    OpenAIInstrumentor,
+                )
+
+                openai_instrumentor = OpenAIInstrumentor()
+                if (
+                    openai_instrumentor
+                    and openai_instrumentor.is_instrumented_by_opentelemetry
+                ):
+                    logger.debug(
+                        "Disabling OpenTelemetry instrumentation for OpenAI to avoid double-instrumentation of LiteLLM."
+                    )
+                    openai_instrumentor.uninstrument()
+
+        def _get_tracer(self) -> Tracer:
+            if not hasattr(TracerWrapper, "instance") or TracerWrapper.instance is None:
+                raise ValueError("Laminar must be initialized before LiteLLM callback")
+            return TracerWrapper().get_tracer()
+
+        def log_success_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            if kwargs.get("call_type") not in SUPPORTED_CALL_TYPES:
+                return
+            if kwargs.get("call_type") in ["responses", "aresponses"]:
+                # responses API may be called multiple times with the same response_obj
+                response_id = getattr(response_obj, "id", None)
+                if response_id in self.logged_openai_responses:
+                    return
+                if response_id:
+                    self.logged_openai_responses.add(response_id)
+                    self.logged_openai_responses.add(response_obj.id)
+            try:
+                self._create_span(
+                    kwargs, response_obj, start_time, end_time, is_success=True
+                )
+            except Exception as e:
+                logger.error(f"Error in log_success_event: {e}")
+
+        def log_failure_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            if kwargs.get("call_type") not in SUPPORTED_CALL_TYPES:
+                return
+            try:
+                self._create_span(
+                    kwargs, response_obj, start_time, end_time, is_success=False
+                )
+            except Exception as e:
+                logger.error(f"Error in log_failure_event: {e}")
+
+        async def async_log_success_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            self.log_success_event(kwargs, response_obj, start_time, end_time)
+
+        async def async_log_failure_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            self.log_failure_event(kwargs, response_obj, start_time, end_time)
+
+        def _create_span(
+            self,
+            kwargs,
+            response_obj,
+            start_time: datetime,
+            end_time: datetime,
+            is_success: bool,
+        ):
+            """Create an OpenTelemetry span for the LiteLLM call"""
+            call_type = kwargs.get("call_type", "completion")
+            if call_type == "aresponses":
+                call_type = "responses"
+            if call_type == "acompletion":
+                call_type = "completion"
+            span_name = f"litellm.{call_type}"
+            try:
+                tracer = self._get_tracer()
+            except Exception as e:
+                logger.error(f"Error getting tracer: {e}")
+                return
+
+            span = tracer.start_span(
+                span_name,
+                kind=SpanKind.CLIENT,
+                start_time=int(start_time.timestamp() * 1e9),
+                attributes={
+                    "lmnr.internal.provider": "litellm",
+                },
+                context=get_current_context(),
+            )
+            try:
+                model = kwargs.get("model", "unknown")
+                if kwargs.get("custom_llm_provider"):
+                    set_span_attribute(
+                        span, "gen_ai.system", kwargs["custom_llm_provider"]
+                    )
+
+                messages = kwargs.get("messages", [])
+                self._process_input_messages(span, messages)
+
+                tools = kwargs.get("tools", [])
+                self._process_request_tool_definitions(span, tools)
+
+                set_span_attribute(span, "gen_ai.request.model", model)
+
+                # Add more attributes from kwargs
+                if "temperature" in kwargs:
+                    set_span_attribute(
+                        span, "gen_ai.request.temperature", kwargs["temperature"]
+                    )
+                if "max_tokens" in kwargs:
+                    set_span_attribute(
+                        span, "gen_ai.request.max_tokens", kwargs["max_tokens"]
+                    )
+                if "top_p" in kwargs:
+                    set_span_attribute(span, "gen_ai.request.top_p", kwargs["top_p"])
+
+                metadata = (
+                    kwargs.get("litellm_params").get(
+                        "metadata", kwargs.get("metadata", {})
+                    )
+                    or {}
+                )
+                tags = metadata.get("tags", [])
+                if isinstance(tags, str):
+                    try:
+                        tags = json.loads(tags)
+                    except Exception:
+                        pass
+                if (
+                    tags
+                    and isinstance(tags, (list, tuple, set))
+                    and all(isinstance(tag, str) for tag in tags)
+                ):
+                    span.set_attribute(f"{ASSOCIATION_PROPERTIES}.tags", tags)
+
+                user_id = metadata.get("user_id")
+                if user_id:
+                    span.set_attribute(f"{ASSOCIATION_PROPERTIES}.user_id", user_id)
+
+                session_id = metadata.get("session_id")
+                if session_id:
+                    span.set_attribute(
+                        f"{ASSOCIATION_PROPERTIES}.session_id", session_id
+                    )
+
+                optional_params = kwargs.get("optional_params") or {}
+                if not optional_params:
+                    hidden_params = metadata.get("hidden_params") or {}
+                    optional_params = hidden_params.get("optional_params") or {}
+                response_format = optional_params.get("response_format")
+                if (
+                    response_format
+                    and isinstance(response_format, dict)
+                    and response_format.get("type") == "json_schema"
+                ):
+                    schema = (response_format.get("json_schema") or {}).get("schema")
+                    if schema:
+                        span.set_attribute(
+                            "gen_ai.request.structured_output_schema",
+                            json_dumps(schema),
+                        )
+
+                if is_success:
+                    span.set_status(Status(StatusCode.OK))
+                    if kwargs.get("complete_streaming_response"):
+                        self._process_success_response(
+                            span,
+                            kwargs.get("complete_streaming_response"),
+                        )
+                    else:
+                        self._process_success_response(span, response_obj)
+                else:
+                    span.set_status(Status(StatusCode.ERROR))
+                    if isinstance(response_obj, Exception):
+                        attributes = get_event_attributes_from_context()
+                        span.record_exception(response_obj, attributes=attributes)
+
+            except Exception as e:
+                attributes = get_event_attributes_from_context()
+                span.record_exception(e, attributes=attributes)
+                logger.error(f"Error in Laminar LiteLLM instrumentation: {e}")
+            finally:
+                span.end(int(end_time.timestamp() * 1e9))
+
+        def _process_input_messages(self, span, messages):
+            """Process and set message attributes on the span"""
+            if not isinstance(messages, list):
+                return
+
+            prompt_index = 0
+            for item in messages:
+                block_dict = model_as_dict(item)
+                if block_dict.get("type", "message") == "message":
+                    tool_calls = block_dict.get("tool_calls", [])
+                    self._process_tool_calls(
+                        span, tool_calls, prompt_index, is_response=False
+                    )
+                    content = block_dict.get("content")
+                    if is_validator_iterator(content):
+                        # Have not been able to catch this in the wild, but keeping
+                        # just in case, as raw OpenAI responses do that
+                        content = [self._process_content_part(part) for part in content]
+                    try:
+                        stringified_content = (
+                            content if isinstance(content, str) else json_dumps(content)
+                        )
+                    except Exception:
+                        stringified_content = (
+                            str(content) if content is not None else ""
+                        )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                        stringified_content,
+                    )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                        block_dict.get("role"),
+                    )
+                    prompt_index += 1
+
+                elif block_dict.get("type") == "computer_call_output":
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                        "computer_call_output",
+                    )
+                    output_image_url = block_dict.get("output", {}).get("image_url")
+                    if output_image_url:
+                        set_span_attribute(
+                            span,
+                            f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                            json.dumps(
+                                [
+                                    {
+                                        "type": "image_url",
+                                        "image_url": {"url": output_image_url},
+                                    }
+                                ]
+                            ),
+                        )
+                    prompt_index += 1
+                elif block_dict.get("type") == "computer_call":
+                    set_span_attribute(
+                        span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "assistant"
+                    )
+                    call_content = {}
+                    if block_dict.get("id"):
+                        call_content["id"] = block_dict.get("id")
+                    if block_dict.get("action"):
+                        call_content["action"] = block_dict.get("action")
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.arguments",
+                        json.dumps(call_content),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.id",
+                        block_dict.get("call_id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.name",
+                        "computer_call",
+                    )
+                    prompt_index += 1
+                elif block_dict.get("type") == "reasoning":
+                    reasoning_summary = block_dict.get("summary")
+                    if reasoning_summary and isinstance(reasoning_summary, list):
+                        processed_chunks = [
+                            {"type": "text", "text": chunk.get("text")}
+                            for chunk in reasoning_summary
+                            if isinstance(chunk, dict)
+                            and chunk.get("type") == "summary_text"
+                        ]
+                        set_span_attribute(
+                            span,
+                            f"{GEN_AI_PROMPT}.{prompt_index}.reasoning",
+                            json_dumps(processed_chunks),
+                        )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                        "assistant",
+                    )
+                    # reasoning is followed by other content parts in the same message,
+                    # so we don't increment the prompt index
+                # TODO: handle other block types
+
+        def _process_request_tool_definitions(self, span, tools):
+            """Process and set tool definitions attributes on the span"""
+            if not isinstance(tools, list):
+                return
+
+            for i, tool in enumerate(tools):
+                tool_dict = model_as_dict(tool)
+                tool_definition = get_tool_definition(tool_dict)
+                function_name = tool_definition.get("name")
+                function_description = tool_definition.get("description")
+                function_parameters = tool_definition.get("parameters")
+                set_span_attribute(
+                    span,
+                    f"llm.request.functions.{i}.name",
+                    function_name,
+                )
+                set_span_attribute(
+                    span,
+                    f"llm.request.functions.{i}.description",
+                    function_description,
+                )
+                set_span_attribute(
+                    span,
+                    f"llm.request.functions.{i}.parameters",
+                    json.dumps(function_parameters),
+                )
+
+        def _process_response_usage(self, span, usage):
+            """Process and set usage attributes on the span"""
+            usage_dict = model_as_dict(usage)
+            if (
+                not usage_dict.get("prompt_tokens")
+                and not usage_dict.get("completion_tokens")
+                and not usage_dict.get("total_tokens")
+            ):
+                return
+
+            set_span_attribute(
+                span, "gen_ai.usage.input_tokens", usage_dict.get("prompt_tokens")
+            )
+            set_span_attribute(
+                span, "gen_ai.usage.output_tokens", usage_dict.get("completion_tokens")
+            )
+            set_span_attribute(
+                span, "llm.usage.total_tokens", usage_dict.get("total_tokens")
+            )
+
+            if usage_dict.get("prompt_tokens_details"):
+                details = usage_dict.get("prompt_tokens_details", {})
+                details = model_as_dict(details)
+                if details.get("cached_tokens"):
+                    set_span_attribute(
+                        span,
+                        "gen_ai.usage.cache_read_input_tokens",
+                        details.get("cached_tokens"),
+                    )
+            # TODO: add audio/image/text token details
+            if usage_dict.get("completion_tokens_details"):
+                details = usage_dict.get("completion_tokens_details", {})
+                details = model_as_dict(details)
+                if details.get("reasoning_tokens"):
+                    set_span_attribute(
+                        span,
+                        "gen_ai.usage.reasoning_tokens",
+                        details.get("reasoning_tokens"),
+                    )
+
+        def _process_tool_calls(self, span, tool_calls, choice_index, is_response=True):
+            """Process and set tool call attributes on the span"""
+            attr_prefix = "completion" if is_response else "prompt"
+            if not isinstance(tool_calls, list):
+                return
+
+            for j, tool_call in enumerate(tool_calls):
+                tool_call_dict = model_as_dict(tool_call)
+
+                tool_name = tool_call_dict.get(
+                    "name", tool_call_dict.get("function", {}).get("name", "")
+                )
+                set_span_attribute(
+                    span,
+                    f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.name",
+                    tool_name,
+                )
+
+                call_id = tool_call_dict.get("id", "")
+                set_span_attribute(
+                    span,
+                    f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.id",
+                    call_id,
+                )
+
+                tool_arguments = tool_call_dict.get(
+                    "arguments", tool_call_dict.get("function", {}).get("arguments", "")
+                )
+                if isinstance(tool_arguments, str):
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.arguments",
+                        tool_arguments,
+                    )
+                else:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.arguments",
+                        json.dumps(model_as_dict(tool_arguments)),
+                    )
+
+        def _process_response_choices(self, span, choices):
+            """Process and set choice attributes on the span"""
+            if not isinstance(choices, list):
+                return
+
+            for i, choice in enumerate(choices):
+                choice_dict = model_as_dict(choice)
+                message = choice_dict.get("message", choice_dict)
+
+                role = message.get("role", "unknown")
+                set_span_attribute(span, f"gen_ai.completion.{i}.role", role)
+
+                tool_calls = message.get("tool_calls", [])
+                self._process_tool_calls(span, tool_calls, i, is_response=True)
+
+                content = message.get("content", "")
+                if content is None:
+                    continue
+                reasoning_content = message.get("reasoning_content")
+                if reasoning_content:
+                    if isinstance(reasoning_content, str):
+                        reasoning_content = [
+                            {
+                                "type": "text",
+                                "text": reasoning_content,
+                            }
+                        ]
+                    elif not isinstance(reasoning_content, list):
+                        reasoning_content = [
+                            {
+                                "type": "text",
+                                "text": str(reasoning_content),
+                            }
+                        ]
+                else:
+                    reasoning_content = []
+                if isinstance(content, str):
+                    if reasoning_content:
+                        set_span_attribute(
+                            span,
+                            f"gen_ai.completion.{i}.content",
+                            json.dumps(
+                                reasoning_content
+                                + [
+                                    {
+                                        "type": "text",
+                                        "text": content,
+                                    }
+                                ]
+                            ),
+                        )
+                    else:
+                        set_span_attribute(
+                            span,
+                            f"gen_ai.completion.{i}.content",
+                            content,
+                        )
+                elif isinstance(content, list):
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.{i}.content",
+                        json.dumps(reasoning_content + content),
+                    )
+                else:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.{i}.content",
+                        json.dumps(reasoning_content + [model_as_dict(content)]),
+                    )
+
+        def _process_content_part(self, content_part: dict) -> dict:
+            content_part_dict = model_as_dict(content_part)
+            if content_part_dict.get("type") == "output_text":
+                return {"type": "text", "text": content_part_dict.get("text")}
+            return content_part_dict
+
+        def _process_response_output(self, span, output):
+            """Response of OpenAI Responses API"""
+            if not isinstance(output, list):
+                return
+            set_span_attribute(span, "gen_ai.completion.0.role", "assistant")
+            tool_call_index = 0
+            for block in output:
+                block_dict = model_as_dict(block)
+                if block_dict.get("type") == "message":
+                    content = block_dict.get("content")
+                    if content is None:
+                        continue
+                    if isinstance(content, str):
+                        set_span_attribute(span, "gen_ai.completion.0.content", content)
+                    elif isinstance(content, list):
+                        set_span_attribute(
+                            span,
+                            "gen_ai.completion.0.content",
+                            json_dumps(
+                                [self._process_content_part(part) for part in content]
+                            ),
+                        )
+                if block_dict.get("type") == "function_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        block_dict.get("name"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.arguments",
+                        block_dict.get("arguments"),
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "file_search_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        "file_search_call",
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "web_search_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        "web_search_call",
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "computer_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("call_id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        "computer_call",
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.arguments",
+                        json_dumps(block_dict.get("action")),
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "reasoning":
+                    reasoning_summary = block_dict.get("summary")
+                    if reasoning_summary and isinstance(reasoning_summary, list):
+                        processed_chunks = [
+                            {"type": "text", "text": chunk.get("text")}
+                            for chunk in reasoning_summary
+                            if isinstance(chunk, dict)
+                            and chunk.get("type") == "summary_text"
+                        ]
+                        set_span_attribute(
+                            span,
+                            "gen_ai.completion.0.reasoning",
+                            json_dumps(processed_chunks),
+                        )
+                # TODO: handle other block types, in particular other calls
+
+        def _process_success_response(self, span, response_obj):
+            """Process successful response attributes"""
+            response_dict = model_as_dict(response_obj)
+            set_span_attribute(span, "gen_ai.response.id", response_dict.get("id"))
+            set_span_attribute(
+                span, "gen_ai.response.model", response_dict.get("model")
+            )
+
+            if getattr(response_obj, "usage", None):
+                self._process_response_usage(span, getattr(response_obj, "usage", None))
+            elif response_dict.get("usage"):
+                self._process_response_usage(span, response_dict.get("usage"))
+
+            if response_dict.get("cache_creation_input_tokens"):
+                set_span_attribute(
+                    span,
+                    "gen_ai.usage.cache_creation_input_tokens",
+                    response_dict.get("cache_creation_input_tokens"),
+                )
+            if response_dict.get("cache_read_input_tokens"):
+                set_span_attribute(
+                    span,
+                    "gen_ai.usage.cache_read_input_tokens",
+                    response_dict.get("cache_read_input_tokens"),
+                )
+
+            if response_dict.get("choices"):
+                self._process_response_choices(span, response_dict.get("choices"))
+            elif response_dict.get("output"):
+                self._process_response_output(span, response_dict.get("output"))
+
+except ImportError as e:
+    logger.debug(f"LiteLLM callback unavailable: {e}")
+
+    # Create a no-op logger when LiteLLM is not available
+    class LaminarLiteLLMCallback:
+        """No-op logger when LiteLLM is not available"""
+
+        def __init__(self, **kwargs):
+            logger.warning(
+                "LiteLLM is not installed. Install with: pip install litellm"
+            )
+
+        def log_success_event(self, *args, **kwargs):
+            pass
+
+        def log_failure_event(self, *args, **kwargs):
+            pass
+
+        async def async_log_success_event(self, *args, **kwargs):
+            pass
+
+        async def async_log_failure_event(self, *args, **kwargs):
+            pass
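
For reference, the wiring described in the LaminarLiteLLMCallback docstring above looks like the following in application code. This is a minimal sketch, assuming LiteLLM and the lmnr SDK are installed and a Laminar project API key is configured in the environment; the model name and prompt are placeholders.

    import litellm
    from lmnr import Laminar, LaminarLiteLLMCallback

    # Initialize Laminar first so the tracer exists when the callback is constructed
    Laminar.initialize()

    # Register the callback; completion/acompletion and responses/aresponses calls are traced
    litellm.callbacks = [LaminarLiteLLMCallback()]

    response = litellm.completion(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "Hello"}],
    )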