lmnr 0.4.53.dev0__py3-none-any.whl → 0.7.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. lmnr/__init__.py +32 -11
  2. lmnr/cli/__init__.py +270 -0
  3. lmnr/cli/datasets.py +371 -0
  4. lmnr/cli/evals.py +111 -0
  5. lmnr/cli/rules.py +42 -0
  6. lmnr/opentelemetry_lib/__init__.py +70 -0
  7. lmnr/opentelemetry_lib/decorators/__init__.py +337 -0
  8. lmnr/opentelemetry_lib/litellm/__init__.py +685 -0
  9. lmnr/opentelemetry_lib/litellm/utils.py +100 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +599 -0
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
  26. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +330 -0
  27. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  28. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  29. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  30. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  31. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  32. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  33. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  34. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
  35. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
  36. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +121 -0
  37. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
  38. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  39. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
  40. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
  41. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
  42. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  43. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
  44. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  45. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  46. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  47. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
  48. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  49. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
  50. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
  51. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
  52. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
  53. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  54. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
  55. lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
  56. lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +191 -0
  57. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
  58. lmnr/opentelemetry_lib/tracing/__init__.py +263 -0
  59. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +516 -0
  60. lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +21 -8
  61. lmnr/opentelemetry_lib/tracing/context.py +200 -0
  62. lmnr/opentelemetry_lib/tracing/exporter.py +153 -0
  63. lmnr/opentelemetry_lib/tracing/instruments.py +140 -0
  64. lmnr/opentelemetry_lib/tracing/processor.py +193 -0
  65. lmnr/opentelemetry_lib/tracing/span.py +398 -0
  66. lmnr/opentelemetry_lib/tracing/tracer.py +57 -0
  67. lmnr/opentelemetry_lib/tracing/utils.py +62 -0
  68. lmnr/opentelemetry_lib/utils/package_check.py +18 -0
  69. lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
  70. lmnr/sdk/browser/__init__.py +0 -0
  71. lmnr/sdk/browser/background_send_events.py +158 -0
  72. lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
  73. lmnr/sdk/browser/browser_use_otel.py +142 -0
  74. lmnr/sdk/browser/bubus_otel.py +71 -0
  75. lmnr/sdk/browser/cdp_utils.py +518 -0
  76. lmnr/sdk/browser/inject_script.js +514 -0
  77. lmnr/sdk/browser/patchright_otel.py +151 -0
  78. lmnr/sdk/browser/playwright_otel.py +322 -0
  79. lmnr/sdk/browser/pw_utils.py +363 -0
  80. lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
  81. lmnr/sdk/browser/utils.py +70 -0
  82. lmnr/sdk/client/asynchronous/async_client.py +180 -0
  83. lmnr/sdk/client/asynchronous/resources/__init__.py +6 -0
  84. lmnr/sdk/client/asynchronous/resources/base.py +32 -0
  85. lmnr/sdk/client/asynchronous/resources/browser_events.py +41 -0
  86. lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
  87. lmnr/sdk/client/asynchronous/resources/evals.py +266 -0
  88. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  89. lmnr/sdk/client/asynchronous/resources/tags.py +83 -0
  90. lmnr/sdk/client/synchronous/resources/__init__.py +6 -0
  91. lmnr/sdk/client/synchronous/resources/base.py +32 -0
  92. lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
  93. lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
  94. lmnr/sdk/client/synchronous/resources/evals.py +263 -0
  95. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  96. lmnr/sdk/client/synchronous/resources/tags.py +83 -0
  97. lmnr/sdk/client/synchronous/sync_client.py +191 -0
  98. lmnr/sdk/datasets/__init__.py +94 -0
  99. lmnr/sdk/datasets/file_utils.py +91 -0
  100. lmnr/sdk/decorators.py +163 -26
  101. lmnr/sdk/eval_control.py +3 -2
  102. lmnr/sdk/evaluations.py +403 -191
  103. lmnr/sdk/laminar.py +1080 -549
  104. lmnr/sdk/log.py +7 -2
  105. lmnr/sdk/types.py +246 -134
  106. lmnr/sdk/utils.py +151 -7
  107. lmnr/version.py +46 -0
  108. {lmnr-0.4.53.dev0.dist-info → lmnr-0.7.26.dist-info}/METADATA +152 -106
  109. lmnr-0.7.26.dist-info/RECORD +116 -0
  110. lmnr-0.7.26.dist-info/WHEEL +4 -0
  111. lmnr-0.7.26.dist-info/entry_points.txt +3 -0
  112. lmnr/cli.py +0 -101
  113. lmnr/openllmetry_sdk/.python-version +0 -1
  114. lmnr/openllmetry_sdk/__init__.py +0 -72
  115. lmnr/openllmetry_sdk/config/__init__.py +0 -9
  116. lmnr/openllmetry_sdk/decorators/base.py +0 -185
  117. lmnr/openllmetry_sdk/instruments.py +0 -38
  118. lmnr/openllmetry_sdk/tracing/__init__.py +0 -1
  119. lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -24
  120. lmnr/openllmetry_sdk/tracing/context_manager.py +0 -13
  121. lmnr/openllmetry_sdk/tracing/tracing.py +0 -884
  122. lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -61
  123. lmnr/openllmetry_sdk/utils/package_check.py +0 -7
  124. lmnr/openllmetry_sdk/version.py +0 -1
  125. lmnr/sdk/datasets.py +0 -55
  126. lmnr-0.4.53.dev0.dist-info/LICENSE +0 -75
  127. lmnr-0.4.53.dev0.dist-info/RECORD +0 -33
  128. lmnr-0.4.53.dev0.dist-info/WHEEL +0 -4
  129. lmnr-0.4.53.dev0.dist-info/entry_points.txt +0 -3
  130. /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
  131. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
  132. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
  133. /lmnr/{openllmetry_sdk/decorators/__init__.py → py.typed} +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py
@@ -0,0 +1,599 @@
+ """OpenTelemetry Google Generative AI API instrumentation"""
+
+ from collections import defaultdict
+ import json
+ import logging
+ import os
+ from typing import AsyncGenerator, Callable, Collection, Generator
+
+ from google.genai import types
+
+ from lmnr.opentelemetry_lib.tracing.context import (
+     get_current_context,
+     get_event_attributes_from_context,
+ )
+ from lmnr.sdk.utils import json_dumps
+
+ from .config import (
+     Config,
+ )
+ from .schema_utils import SchemaJSONEncoder, process_schema
+ from .utils import (
+     dont_throw,
+     get_content,
+     merge_text_parts,
+     process_content_union,
+     process_stream_chunk,
+     role_from_content_union,
+     set_span_attribute,
+     to_dict,
+     with_tracer_wrapper,
+ )
+ from opentelemetry.trace import Tracer
+ from wrapt import wrap_function_wrapper
+
+ from opentelemetry import context as context_api
+ from opentelemetry.trace import get_tracer, SpanKind, Span, Status, StatusCode
+ from opentelemetry.semconv._incubating.attributes import gen_ai_attributes
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY, unwrap
+
+ from opentelemetry.semconv_ai import (
+     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+     SpanAttributes,
+     LLMRequestTypeValues,
+ )
+
+ logger = logging.getLogger(__name__)
+
+ _instruments = ("google-genai >= 1.0.0",)
+
+ WRAPPED_METHODS = [
+     {
+         "package": "google.genai.models",
+         "object": "Models",
+         "method": "generate_content",
+         "span_name": "gemini.generate_content",
+         "is_streaming": False,
+         "is_async": False,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "AsyncModels",
+         "method": "generate_content",
+         "span_name": "gemini.generate_content",
+         "is_streaming": False,
+         "is_async": True,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "Models",
+         "method": "generate_content_stream",
+         "span_name": "gemini.generate_content_stream",
+         "is_streaming": True,
+         "is_async": False,
+     },
+     {
+         "package": "google.genai.models",
+         "object": "AsyncModels",
+         "method": "generate_content_stream",
+         "span_name": "gemini.generate_content_stream",
+         "is_streaming": True,
+         "is_async": True,
+     },
+ ]
+
+
+ def should_send_prompts():
+     return (
+         os.getenv("LAMINAR_TRACE_CONTENT") or "true"
+     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
+
+
+ @dont_throw
+ def _set_request_attributes(span, args, kwargs):
+     config_dict = to_dict(kwargs.get("config", {}))
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_MODEL, kwargs.get("model")
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_TEMPERATURE,
+         config_dict.get("temperature"),
+     )
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_TOP_P, config_dict.get("top_p")
+     )
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_TOP_K, config_dict.get("top_k")
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_CHOICE_COUNT,
+         config_dict.get("candidate_count"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_MAX_TOKENS,
+         config_dict.get("max_output_tokens"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_STOP_SEQUENCES,
+         config_dict.get("stop_sequences"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_FREQUENCY_PENALTY,
+         config_dict.get("frequency_penalty"),
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_REQUEST_PRESENCE_PENALTY,
+         config_dict.get("presence_penalty"),
+     )
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_REQUEST_SEED, config_dict.get("seed")
+     )
+
+     if schema := config_dict.get("response_schema"):
+         try:
+             set_span_attribute(
+                 span,
+                 SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
+                 json.dumps(process_schema(schema), cls=SchemaJSONEncoder),
+             )
+         except Exception:
+             pass
+     elif json_schema := config_dict.get("response_json_schema"):
+         try:
+             set_span_attribute(
+                 span,
+                 SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
+                 json_dumps(json_schema),
+             )
+         except Exception:
+             pass
+
+     tools: list[types.FunctionDeclaration] = []
+     arg_tools = config_dict.get("tools", kwargs.get("tools"))
+     if arg_tools:
+         for tool in arg_tools:
+             if isinstance(tool, types.Tool):
+                 tools += tool.function_declarations or []
+             elif isinstance(tool, Callable):
+                 tools.append(types.FunctionDeclaration.from_callable(tool))
+
+     for tool_num, tool in enumerate(tools):
+         tool_dict = to_dict(tool)
+         set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.name",
+             tool_dict.get("name"),
+         )
+         set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.description",
+             tool_dict.get("description"),
+         )
+         set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.parameters",
+             json_dumps(tool_dict.get("parameters")),
+         )
+
+     if should_send_prompts():
+         i = 0
+         system_instruction: types.ContentUnion | None = config_dict.get(
+             "system_instruction"
+         )
+         if system_instruction:
+             set_span_attribute(
+                 span,
+                 f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
+                 (get_content(process_content_union(system_instruction)) or {}).get(
+                     "text", ""
+                 ),
+             )
+             set_span_attribute(
+                 span, f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role", "system"
+             )
+             i += 1
+         contents = kwargs.get("contents", [])
+         if not isinstance(contents, list):
+             contents = [contents]
+         for content in contents:
+             processed_content = process_content_union(content)
+             content_payload = get_content(processed_content)
+             if isinstance(content_payload, dict):
+                 content_payload = [content_payload]
+
+             set_span_attribute(
+                 span,
+                 f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
+                 (
+                     content_payload
+                     if isinstance(content_payload, str)
+                     else json_dumps(content_payload)
+                 ),
+             )
+             blocks = (
+                 processed_content
+                 if isinstance(processed_content, list)
+                 else [processed_content]
+             )
+             tool_call_index = 0
+             for block in blocks:
+                 block_dict = to_dict(block)
+
+                 if not block_dict.get("function_call"):
+                     continue
+                 function_call = to_dict(block_dict.get("function_call", {}))
+
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.name",
+                     function_call.get("name"),
+                 )
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.id",
+                     (
+                         function_call.get("id")
+                         if function_call.get("id") is not None
+                         else function_call.get("name")
+                     ),  # google genai doesn't support tool call ids
+                 )
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.arguments",
+                     json_dumps(function_call.get("arguments")),
+                 )
+                 tool_call_index += 1
+
+             set_span_attribute(
+                 span,
+                 f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role",
+                 role_from_content_union(content) or "user",
+             )
+             i += 1
+
+
+ @dont_throw
+ def _set_response_attributes(span, response: types.GenerateContentResponse):
+     candidates = response.candidates or []
+     set_span_attribute(
+         span, gen_ai_attributes.GEN_AI_RESPONSE_ID, to_dict(response).get("response_id")
+     )
+     set_span_attribute(
+         span,
+         gen_ai_attributes.GEN_AI_RESPONSE_MODEL,
+         to_dict(response).get("model_version"),
+     )
+
+     if response.usage_metadata:
+         usage_dict = to_dict(response.usage_metadata)
+         candidates_token_count = usage_dict.get("candidates_token_count")
+         # unlike OpenAI, and unlike input cached tokens, thinking tokens are
+         # not counted as part of candidates token count, so we need to add them
+         # separately for consistency with other instrumentations
+         thoughts_token_count = usage_dict.get("thoughts_token_count")
+         output_token_count = (
+             (candidates_token_count or 0) + (thoughts_token_count or 0)
+             if candidates_token_count is not None or thoughts_token_count is not None
+             else None
+         )
+         set_span_attribute(
+             span,
+             gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS,
+             usage_dict.get("prompt_token_count"),
+         )
+         set_span_attribute(
+             span,
+             gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS,
+             output_token_count,
+         )
+         set_span_attribute(
+             span,
+             SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+             usage_dict.get("total_token_count"),
+         )
+         set_span_attribute(
+             span,
+             SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
+             usage_dict.get("cached_content_token_count"),
+         )
+         set_span_attribute(
+             span,
+             SpanAttributes.LLM_USAGE_REASONING_TOKENS,
+             thoughts_token_count,
+         )
+
+     if should_send_prompts():
+         set_span_attribute(
+             span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "model"
+         )
+         candidates_list = candidates if isinstance(candidates, list) else [candidates]
+         i = 0
+         for candidate in candidates_list:
+             has_content = False
+             processed_content = process_content_union(candidate.content)
+             content_payload = get_content(processed_content)
+             if isinstance(content_payload, dict):
+                 content_payload = [content_payload]
+
+             set_span_attribute(
+                 span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "model"
+             )
+             if content_payload:
+                 has_content = True
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
+                     (
+                         content_payload
+                         if isinstance(content_payload, str)
+                         else json_dumps(content_payload)
+                     ),
+                 )
+             blocks = (
+                 processed_content
+                 if isinstance(processed_content, list)
+                 else [processed_content]
+             )
+
+             tool_call_index = 0
+             for block in blocks:
+                 block_dict = to_dict(block)
+                 if not block_dict.get("function_call"):
+                     continue
+                 function_call = to_dict(block_dict.get("function_call", {}))
+                 has_content = True
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.name",
+                     function_call.get("name"),
+                 )
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.id",
+                     (
+                         function_call.get("id")
+                         if function_call.get("id") is not None
+                         else function_call.get("name")
+                     ),  # google genai doesn't support tool call ids
+                 )
+                 set_span_attribute(
+                     span,
+                     f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.arguments",
+                     json_dumps(function_call.get("arguments")),
+                 )
+                 tool_call_index += 1
+             if has_content:
+                 i += 1
+
+
+ @dont_throw
+ def _build_from_streaming_response(
+     span: Span, response: Generator[types.GenerateContentResponse, None, None]
+ ) -> Generator[types.GenerateContentResponse, None, None]:
+     final_parts = []
+     role = "model"
+     aggregated_usage_metadata = defaultdict(int)
+     model_version = None
+     for chunk in response:
+         try:
+             span.add_event("llm.content.completion.chunk")
+         except Exception:
+             pass
+         # Important: do all processing in a separate sync function, that is
+         # wrapped in @dont_throw. If we did it here, the @dont_throw on top of
+         # this function would not be able to catch the errors, as they are
+         # raised later, after the generator is returned, and when it is being
+         # consumed.
+         chunk_result = process_stream_chunk(
+             chunk,
+             role,
+             model_version,
+             aggregated_usage_metadata,
+             final_parts,
+         )
+         # even though process_stream_chunk can't return None, the result can be
+         # None, if the processing throws an error (see @dont_throw)
+         if chunk_result:
+             role = chunk_result["role"]
+             model_version = chunk_result["model_version"]
+         yield chunk
+
+     try:
+         compound_response = types.GenerateContentResponse(
+             candidates=[
+                 {
+                     "content": {
+                         "parts": merge_text_parts(final_parts),
+                         "role": role,
+                     },
+                 }
+             ],
+             usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+                 **aggregated_usage_metadata
+             ),
+             model_version=model_version,
+         )
+         if span.is_recording():
+             _set_response_attributes(span, compound_response)
+     finally:
+         if span.is_recording():
+             span.end()
+
+
+ @dont_throw
+ async def _abuild_from_streaming_response(
+     span: Span, response: AsyncGenerator[types.GenerateContentResponse, None]
+ ) -> AsyncGenerator[types.GenerateContentResponse, None]:
+     final_parts = []
+     role = "model"
+     aggregated_usage_metadata = defaultdict(int)
+     model_version = None
+     async for chunk in response:
+         try:
+             span.add_event("llm.content.completion.chunk")
+         except Exception:
+             pass
+         # Important: do all processing in a separate sync function, that is
+         # wrapped in @dont_throw. If we did it here, the @dont_throw on top of
+         # this function would not be able to catch the errors, as they are
+         # raised later, after the generator is returned, and when it is being
+         # consumed.
+         chunk_result = process_stream_chunk(
+             chunk,
+             role,
+             model_version,
+             aggregated_usage_metadata,
+             final_parts,
+         )
+         # even though process_stream_chunk can't return None, the result can be
+         # None, if the processing throws an error (see @dont_throw)
+         if chunk_result:
+             role = chunk_result["role"]
+             model_version = chunk_result["model_version"]
+         yield chunk
+
+     try:
+         compound_response = types.GenerateContentResponse(
+             candidates=[
+                 {
+                     "content": {
+                         "parts": merge_text_parts(final_parts),
+                         "role": role,
+                     },
+                 }
+             ],
+             usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+                 **aggregated_usage_metadata
+             ),
+             model_version=model_version,
+         )
+         if span.is_recording():
+             _set_response_attributes(span, compound_response)
+     finally:
+         if span.is_recording():
+             span.end()
+
+
+ @with_tracer_wrapper
+ def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return wrapped(*args, **kwargs)
+
+     span = tracer.start_span(
+         to_wrap.get("span_name"),
+         kind=SpanKind.CLIENT,
+         attributes={
+             SpanAttributes.LLM_SYSTEM: "gemini",
+             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+         },
+         context=get_current_context(),
+     )
+
+     if span.is_recording():
+         _set_request_attributes(span, args, kwargs)
+
+     try:
+         response = wrapped(*args, **kwargs)
+         if to_wrap.get("is_streaming"):
+             return _build_from_streaming_response(span, response)
+         if span.is_recording():
+             _set_response_attributes(span, response)
+         span.end()
+         return response
+     except Exception as e:
+         attributes = get_event_attributes_from_context()
+         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+         span.record_exception(e, attributes=attributes)
+         span.set_status(Status(StatusCode.ERROR, str(e)))
+         span.end()
+         raise
+
+
+ @with_tracer_wrapper
+ async def _awrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return await wrapped(*args, **kwargs)
+
+     span = tracer.start_span(
+         to_wrap.get("span_name"),
+         kind=SpanKind.CLIENT,
+         attributes={
+             SpanAttributes.LLM_SYSTEM: "gemini",
+             SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
+         },
+         context=get_current_context(),
+     )
+
+     if span.is_recording():
+         _set_request_attributes(span, args, kwargs)
+
+     try:
+         response = await wrapped(*args, **kwargs)
+         if to_wrap.get("is_streaming"):
+             return _abuild_from_streaming_response(span, response)
+         else:
+             if span.is_recording():
+                 _set_response_attributes(span, response)
+
+         span.end()
+         return response
+     except Exception as e:
+         attributes = get_event_attributes_from_context()
+         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+         span.record_exception(e, attributes=attributes)
+         span.set_status(Status(StatusCode.ERROR, str(e)))
+         span.end()
+         raise
+
+
+ class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
+     """An instrumentor for Google GenAI's client library."""
+
+     def __init__(
+         self,
+         exception_logger=None,
+         upload_base64_image=None,
+         convert_image_to_openai_format=True,
+     ):
+         super().__init__()
+         Config.exception_logger = exception_logger
+         Config.upload_base64_image = upload_base64_image
+         Config.convert_image_to_openai_format = convert_image_to_openai_format
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         tracer_provider = kwargs.get("tracer_provider")
+         tracer = get_tracer(__name__, "0.0.1a1", tracer_provider)
+
+         for wrapped_method in WRAPPED_METHODS:
+             wrap_function_wrapper(
+                 wrapped_method.get("package"),
+                 f"{wrapped_method.get('object')}.{wrapped_method.get('method')}",
+                 (
+                     _awrap(tracer, wrapped_method)
+                     if wrapped_method.get("is_async")
+                     else _wrap(tracer, wrapped_method)
+                 ),
+             )
+
+     def _uninstrument(self, **kwargs):
+         for wrapped_method in WRAPPED_METHODS:
+             unwrap(
+                 f"{wrapped_method.get('package')}.{wrapped_method.get('object')}",
+                 wrapped_method.get("method"),
+             )
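
As a usage note (not part of the diff): in normal use the lmnr SDK registers this instrumentor itself (e.g. via Laminar.initialize()), but the class is a standard OpenTelemetry BaseInstrumentor, so a minimal standalone sketch looks like the following. It assumes google-genai >= 1.0.0 is installed; the bare TracerProvider is illustrative only.

    from opentelemetry.sdk.trace import TracerProvider

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.google_genai import (
        GoogleGenAiSdkInstrumentor,
    )

    # Attach the wrappers to Models/AsyncModels generate_content[_stream].
    GoogleGenAiSdkInstrumentor().instrument(tracer_provider=TracerProvider())

    # From here on, any google.genai client call such as
    # client.models.generate_content(model=..., contents=...) is traced.
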
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py
@@ -0,0 +1,9 @@
+ from typing import Callable, Coroutine
+
+
+ class Config:
+     exception_logger = None
+     upload_base64_image: (
+         Callable[[str, str, str, str], Coroutine[None, None, str]] | None
+     ) = None
+     convert_image_to_openai_format: bool = True
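
For illustration (not part of the diff): the annotation above only fixes the hook's shape, a coroutine taking four strings and returning a string. A hypothetical callback satisfying it, with assumed parameter names and a local-file store standing in for whatever upload backend is actually used:

    import base64
    from pathlib import Path

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.google_genai.config import Config

    async def save_image_locally(trace_id: str, span_id: str, name: str, data_b64: str) -> str:
        # Persist the decoded image and return a URI that can replace the
        # inline base64 payload in the exported span.
        path = Path("/tmp/traces") / trace_id / span_id / name
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_bytes(base64.b64decode(data_b64))
        return path.as_uri()

    Config.upload_base64_image = save_image_locally
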
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py
@@ -0,0 +1,26 @@
+ from typing import Any
+ from google.genai._api_client import BaseApiClient
+ from google.genai._transformers import t_schema
+ from google.genai.types import JSONSchemaType
+
+ import json
+
+ DUMMY_CLIENT = BaseApiClient(api_key="dummy")
+
+
+ def process_schema(schema: Any) -> dict[str, Any]:
+     # The only thing we need from the client is the t_schema function
+     try:
+         json_schema = t_schema(DUMMY_CLIENT, schema).json_schema.model_dump(
+             exclude_unset=True, exclude_none=True
+         )
+     except Exception:
+         json_schema = {}
+     return json_schema
+
+
+ class SchemaJSONEncoder(json.JSONEncoder):
+     def default(self, o: Any) -> Any:
+         if isinstance(o, JSONSchemaType):
+             return o.value
+         return super().default(o)
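
A possible way to exercise these helpers (not part of the diff), assuming, as with response_schema in the google-genai client, that a Pydantic model class is a valid input to t_schema; no network call is made, since the dummy client is only used for the schema transform:

    import json

    from pydantic import BaseModel

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.google_genai.schema_utils import (
        SchemaJSONEncoder,
        process_schema,
    )

    class Recipe(BaseModel):
        name: str
        minutes: int

    # Mirrors how the instrumentation serializes response_schema onto the span;
    # process_schema returns {} if the transform fails, and the encoder turns
    # JSONSchemaType enum members into their string values.
    print(json.dumps(process_schema(Recipe), cls=SchemaJSONEncoder, indent=2))
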