lmnr 0.7.8__tar.gz → 0.7.10__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lmnr-0.7.8 → lmnr-0.7.10}/PKG-INFO +1 -1
- {lmnr-0.7.8 → lmnr-0.7.10}/pyproject.toml +1 -1
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +103 -116
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +6 -3
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +76 -41
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/__init__.py +13 -2
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/version.py +1 -1
- {lmnr-0.7.8 → lmnr-0.7.10}/README.md +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/cli.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/.flake8 +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/decorators/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/litellm/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/litellm/utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/attributes.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/context.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/exporter.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/instruments.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/processor.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/tracer.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/utils/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/utils/json_encoder.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/utils/package_check.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/utils/wrappers.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/py.typed +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/browser_use_cdp_otel.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/browser_use_otel.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/cdp_utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/patchright_otel.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/playwright_otel.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/pw_utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/recorder/record.umd.min.cjs +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/browser/utils.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/async_client.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/agent.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/base.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/browser_events.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/evals.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/evaluators.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/asynchronous/resources/tags.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/__init__.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/agent.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/base.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/browser_events.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/evals.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/evaluators.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/resources/tags.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/client/synchronous/sync_client.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/datasets.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/decorators.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/eval_control.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/evaluations.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/laminar.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/log.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/types.py +0 -0
- {lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/sdk/utils.py +0 -0

{lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py

@@ -8,6 +8,7 @@ from typing import AsyncGenerator, Callable, Collection, Generator
 
 from google.genai import types
 
+from lmnr.opentelemetry_lib.decorators import json_dumps
 from lmnr.opentelemetry_lib.tracing.context import (
     get_current_context,
     get_event_attributes_from_context,
@@ -20,9 +21,10 @@ from .schema_utils import SchemaJSONEncoder, process_schema
 from .utils import (
     dont_throw,
     get_content,
+    process_content_union,
+    process_stream_chunk,
     role_from_content_union,
     set_span_attribute,
-    process_content_union,
     to_dict,
     with_tracer_wrapper,
 )
@@ -139,9 +141,7 @@ def _set_request_attributes(span, args, kwargs):
         try:
             set_span_attribute(
                 span,
-                # TODO: use SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA
-                # when we upgrade to opentelemetry-semantic-conventions-ai>=0.4.10
-                "gen_ai.request.structured_output_schema",
+                SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
                 json.dumps(process_schema(schema), cls=SchemaJSONEncoder),
             )
         except Exception:
@@ -150,10 +150,8 @@ def _set_request_attributes(span, args, kwargs):
         try:
             set_span_attribute(
                 span,
-                # TODO: use SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA
-                # when we upgrade to opentelemetry-semantic-conventions-ai>=0.4.10
-                "gen_ai.request.structured_output_schema",
-                json.dumps(json_schema),
+                SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
+                json_dumps(json_schema),
             )
         except Exception:
             pass
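
These hunks swap the standard-library `json.dumps` for the SDK's own `json_dumps` helper (imported from `lmnr.opentelemetry_lib.decorators` in the first hunk). The likely motivation is that request/response payloads can contain values the stock encoder rejects, and instrumentation must never raise. A minimal sketch of that kind of tolerant dumper, purely as an assumption for illustration (`tolerant_json_dumps` is hypothetical, not lmnr's actual implementation):

```python
import json
from typing import Any


def tolerant_json_dumps(obj: Any) -> str:
    """Hypothetical stand-in for a never-raising json_dumps helper."""
    try:
        # default=str stringifies values the standard encoder rejects
        # (bytes, datetimes, pydantic models, ...) instead of raising.
        return json.dumps(obj, default=str)
    except Exception:
        # Last resort: serialization must not break the instrumented call.
        return str(obj)
```
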
@@ -182,7 +180,7 @@ def _set_request_attributes(span, args, kwargs):
             set_span_attribute(
                 span,
                 f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.parameters",
-                json.dumps(tool_dict.get("parameters")),
+                json_dumps(tool_dict.get("parameters")),
             )
 
     if should_send_prompts():
@@ -215,7 +213,7 @@ def _set_request_attributes(span, args, kwargs):
                 (
                     content_str
                     if isinstance(content_str, str)
-                    else json.dumps(content_str)
+                    else json_dumps(content_str)
                 ),
             )
             blocks = (
@@ -248,7 +246,7 @@ def _set_request_attributes(span, args, kwargs):
                     set_span_attribute(
                         span,
                         f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.arguments",
-                        json.dumps(function_call.get("arguments")),
+                        json_dumps(function_call.get("arguments")),
                     )
                     tool_call_index += 1
 
@@ -300,22 +298,26 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
         span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "model"
     )
     candidates_list = candidates if isinstance(candidates, list) else [candidates]
-    for i, candidate in enumerate(candidates_list):
+    i = 0
+    for candidate in candidates_list:
+        has_content = False
         processed_content = process_content_union(candidate.content)
         content_str = get_content(processed_content)
 
         set_span_attribute(
             span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "model"
         )
-        set_span_attribute(
-            span,
-            f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
-            (
-                content_str
-                if isinstance(content_str, str)
-                else json.dumps(content_str)
-            ),
-        )
+        if content_str:
+            has_content = True
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
+                (
+                    content_str
+                    if isinstance(content_str, str)
+                    else json_dumps(content_str)
+                ),
+            )
         blocks = (
             processed_content
             if isinstance(processed_content, list)
@@ -328,6 +330,7 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
             if not block_dict.get("function_call"):
                 continue
             function_call = to_dict(block_dict.get("function_call", {}))
+            has_content = True
             set_span_attribute(
                 span,
                 f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.name",
@@ -345,9 +348,11 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
             set_span_attribute(
                 span,
                 f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.arguments",
-                json.dumps(function_call.get("arguments")),
+                json_dumps(function_call.get("arguments")),
             )
             tool_call_index += 1
+        if has_content:
+            i += 1
 
 
 @dont_throw
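
Taken together, the three `_set_response_attributes` hunks above change how completion indices are assigned: `i` now advances only when a candidate produced text content or a tool call, so empty candidates no longer leave gaps in the flattened `gen_ai.completion.{i}.*` attributes. A self-contained illustration of just the indexing (the attribute names mirror the pattern above; everything else is simplified):

```python
# Candidates that yield neither content nor tool calls do not consume an index.
candidates = ["hello", None, "world"]  # None stands in for an empty candidate

attrs: dict[str, str] = {}
i = 0
for content_str in candidates:
    has_content = False
    if content_str:
        has_content = True
        attrs[f"gen_ai.completion.{i}.content"] = content_str
    if has_content:
        i += 1

assert attrs == {
    "gen_ai.completion.0.content": "hello",
    "gen_ai.completion.1.content": "world",
}
```
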
@@ -359,54 +364,45 @@ def _build_from_streaming_response(
     aggregated_usage_metadata = defaultdict(int)
     model_version = None
     for chunk in response:
-        if chunk.model_version:
-            model_version = chunk.model_version
-
-        # Currently gemini throws an error if you pass more than one candidate
-        # with streaming
-        if chunk.candidates and len(chunk.candidates) > 0 and chunk.candidates[0].content:
-            final_parts += chunk.candidates[0].content.parts or []
-            role = chunk.candidates[0].content.role or role
-
-        if chunk.usage_metadata:
-            usage_dict = to_dict(chunk.usage_metadata)
-            # prompt token count is sent in every chunk
-            # (and is less by 1 in the last chunk, so we set it once);
-            # total token count in every chunk is greater by prompt token count than it should be,
-            # thus this awkward logic here
-            if aggregated_usage_metadata.get("prompt_token_count") is None:
-                # or 0, not .get(key, 0), because sometimes the value is explicitly None
-                aggregated_usage_metadata["prompt_token_count"] = (
-                    usage_dict.get("prompt_token_count") or 0
-                )
-                aggregated_usage_metadata["total_token_count"] = (
-                    usage_dict.get("total_token_count") or 0
-                )
-            aggregated_usage_metadata["candidates_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
-            aggregated_usage_metadata["total_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
+        # Important: do all processing in a separate sync function, that is
+        # wrapped in @dont_throw. If we did it here, the @dont_throw on top of
+        # this function would not be able to catch the errors, as they are
+        # raised later, after the generator is returned, and when it is being
+        # consumed.
+        chunk_result = process_stream_chunk(
+            chunk,
+            role,
+            model_version,
+            aggregated_usage_metadata,
+            final_parts,
+        )
+        # even though process_stream_chunk can't return None, the result can be
+        # None, if the processing throws an error (see @dont_throw)
+        if chunk_result:
+            role = chunk_result["role"]
+            model_version = chunk_result["model_version"]
         yield chunk
 
-    compound_response = types.GenerateContentResponse(
-        candidates=[
-            {
-                "content": {
-                    "parts": final_parts,
-                    "role": role,
-                },
-            }
-        ],
-        usage_metadata=types.GenerateContentResponseUsageMetadataDict(
-            **aggregated_usage_metadata
-        ),
-        model_version=model_version,
-    )
-    if span.is_recording():
-        _set_response_attributes(span, compound_response)
-        span.end()
+    try:
+        compound_response = types.GenerateContentResponse(
+            candidates=[
+                {
+                    "content": {
+                        "parts": final_parts,
+                        "role": role,
+                    },
+                }
+            ],
+            usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+                **aggregated_usage_metadata
+            ),
+            model_version=model_version,
+        )
+        if span.is_recording():
+            _set_response_attributes(span, compound_response)
+    finally:
+        if span.is_recording():
+            span.end()
 
 
 @dont_throw
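
The comment in this hunk points at a real pitfall with decorated generator functions: a decorator's `try`/`except` only guards the call that creates the generator object, not the iteration that later runs its body. A minimal standalone demonstration (all names hypothetical):

```python
def dont_throw_demo(fn):
    """Simplified stand-in for a @dont_throw-style decorator."""
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)  # a generator is returned immediately
        except Exception:
            return None
    return wrapper


@dont_throw_demo
def stream():
    yield 1
    raise ValueError("raised during consumption")  # wrapper cannot catch this


gen = stream()    # wrapper's try/except has already exited here
print(next(gen))  # 1
# next(gen)       # would raise ValueError in the consumer, not in the wrapper
```

Moving the per-chunk work into `process_stream_chunk`, which carries its own `@dont_throw`, puts the guard back around the code that actually executes during consumption.
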
@@ -418,54 +414,45 @@ async def _abuild_from_streaming_response(
     aggregated_usage_metadata = defaultdict(int)
     model_version = None
     async for chunk in response:
-        if chunk.model_version:
-            model_version = chunk.model_version
-
-        # Currently gemini throws an error if you pass more than one candidate
-        # with streaming
-        if chunk.candidates and len(chunk.candidates) > 0 and chunk.candidates[0].content:
-            final_parts += chunk.candidates[0].content.parts or []
-            role = chunk.candidates[0].content.role or role
-
-        if chunk.usage_metadata:
-            usage_dict = to_dict(chunk.usage_metadata)
-            # prompt token count is sent in every chunk
-            # (and is less by 1 in the last chunk, so we set it once);
-            # total token count in every chunk is greater by prompt token count than it should be,
-            # thus this awkward logic here
-            if aggregated_usage_metadata.get("prompt_token_count") is None:
-                # or 0, not .get(key, 0), because sometimes the value is explicitly None
-                aggregated_usage_metadata["prompt_token_count"] = (
-                    usage_dict.get("prompt_token_count") or 0
-                )
-                aggregated_usage_metadata["total_token_count"] = (
-                    usage_dict.get("total_token_count") or 0
-                )
-            aggregated_usage_metadata["candidates_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
-            aggregated_usage_metadata["total_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
+        # Important: do all processing in a separate sync function, that is
+        # wrapped in @dont_throw. If we did it here, the @dont_throw on top of
+        # this function would not be able to catch the errors, as they are
+        # raised later, after the generator is returned, and when it is being
+        # consumed.
+        chunk_result = process_stream_chunk(
+            chunk,
+            role,
+            model_version,
+            aggregated_usage_metadata,
+            final_parts,
+        )
+        # even though process_stream_chunk can't return None, the result can be
+        # None, if the processing throws an error (see @dont_throw)
+        if chunk_result:
+            role = chunk_result["role"]
+            model_version = chunk_result["model_version"]
         yield chunk
 
-    compound_response = types.GenerateContentResponse(
-        candidates=[
-            {
-                "content": {
-                    "parts": final_parts,
-                    "role": role,
-                },
-            }
-        ],
-        usage_metadata=types.GenerateContentResponseUsageMetadataDict(
-            **aggregated_usage_metadata
-        ),
-        model_version=model_version,
-    )
-    if span.is_recording():
-        _set_response_attributes(span, compound_response)
-        span.end()
+    try:
+        compound_response = types.GenerateContentResponse(
+            candidates=[
+                {
+                    "content": {
+                        "parts": final_parts,
+                        "role": role,
+                    },
+                }
+            ],
+            usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+                **aggregated_usage_metadata
+            ),
+            model_version=model_version,
+        )
+        if span.is_recording():
+            _set_response_attributes(span, compound_response)
+    finally:
+        if span.is_recording():
+            span.end()
 
 
 @with_tracer_wrapper
@@ -502,7 +489,7 @@ def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
         span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
-        raise
+        raise
 
 
 @with_tracer_wrapper
@@ -541,7 +528,7 @@ async def _awrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
         span.record_exception(e, attributes=attributes)
        span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
-        raise
+        raise
 
 
 class GoogleGenAiSdkInstrumentor(BaseInstrumentor):

{lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py

@@ -10,9 +10,12 @@ DUMMY_CLIENT = BaseApiClient(api_key="dummy")
 
 def process_schema(schema: Any) -> dict[str, Any]:
     # The only thing we need from the client is the t_schema function
-    json_schema = t_schema(DUMMY_CLIENT, schema).json_schema.model_dump(
-        exclude_unset=True, exclude_none=True
-    )
+    try:
+        json_schema = t_schema(DUMMY_CLIENT, schema).json_schema.model_dump(
+            exclude_unset=True, exclude_none=True
+        )
+    except Exception:
+        json_schema = {}
     return json_schema
 
 

{lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py

@@ -1,6 +1,8 @@
 import base64
+from collections import defaultdict
 import logging
 import traceback
+from typing_extensions import TypedDict
 
 from .config import (
     Config,
@@ -33,10 +35,14 @@ class ProcessedContentPart(pydantic.BaseModel):
     image_url: ImageUrl | None = pydantic.Field(default=None)
 
 
-def set_span_attribute(span: Span, name: str, value: Any):
-    if value is not None:
-        if value != "":
-            span.set_attribute(name, value)
+class ProcessChunkResult(TypedDict):
+    role: str
+    model_version: str | None
+
+
+def set_span_attribute(span: Span, name: str, value: Any):
+    if value is not None and value != "":
+        span.set_attribute(name, value)
     return
 
 
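
`ProcessChunkResult` is a plain `dict` at runtime; `TypedDict` only contributes static key/type checking. That is why the streaming loops read `chunk_result["role"]` rather than `chunk_result.role`. A quick illustration (`DemoResult` is a hypothetical mirror of the same shape):

```python
from typing_extensions import TypedDict


class DemoResult(TypedDict):
    role: str
    model_version: str | None


result: DemoResult = {"role": "model", "model_version": None}
assert isinstance(result, dict)   # plain dict at runtime
assert result["role"] == "model"  # key access, not attribute access
```
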
@@ -84,7 +90,7 @@ def get_content(
     content: (
         ProcessedContentPart | dict | list[ProcessedContentPart | dict] | str | None
     ),
-) -> list[
+) -> dict | list[dict] | None:
     if isinstance(content, dict):
         return content.get("content") or content.get("image_url")
     if isinstance(content, ProcessedContentPart):
@@ -98,7 +104,8 @@ def get_content(
         else:
             return None
     elif isinstance(content, list):
-        return [get_content(item) for item in content]
+        contents_list = [get_content(item) for item in content]
+        return [item for item in contents_list if item is not None]
     elif isinstance(content, str):
         return {
             "type": "text",
@@ -110,9 +117,6 @@
 
 def process_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
 ) -> ProcessedContentPart | dict | list[ProcessedContentPart | dict] | None:
     if isinstance(content, types.Content):
         parts = to_dict(content).get("parts", [])
@@ -123,25 +127,16 @@ def process_content_union(
         return _process_part_union(content)
     elif isinstance(content, dict):
         if "parts" in content:
-            return [
-                _process_part_union(
-                    item, trace_id, span_id, message_index, content_index
-                )
-                for content_index, item in enumerate(content.get("parts", []))
-            ]
+            return [_process_part_union(item) for item in content.get("parts", [])]
         else:
             # Assume it's PartDict
-            return _process_part_union(content, trace_id, span_id, message_index)
+            return _process_part_union(content)
     else:
         return None
 
 
 def _process_part_union(
     content: types.PartDict | types.File | types.Part | str,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
-    content_index: int = 0,
 ) -> ProcessedContentPart | dict | None:
     if isinstance(content, str):
         return ProcessedContentPart(content=content)
@@ -154,36 +149,31 @@ def _process_part_union(
         )
         return ProcessedContentPart(content=f"files/{name}")
     elif isinstance(content, (types.Part, dict)):
-        return _process_part(content, trace_id, span_id, message_index, content_index)
+        return _process_part(content)
     else:
         return None
 
 
 def _process_part(
     content: types.Part,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
-    content_index: int = 0,
 ) -> ProcessedContentPart | dict | None:
     part_dict = to_dict(content)
     if part_dict.get("inline_data"):
         blob = to_dict(part_dict.get("inline_data"))
-        if blob.get("mime_type").startswith("image/"):
-            return _process_image_item(
-                blob, trace_id, span_id, message_index, content_index
-            )
+        if blob.get("mime_type", "").startswith("image/"):
+            return _process_image_item(blob)
         else:
             # currently, only images are supported
             return ProcessedContentPart(
                 content=blob.get("mime_type") or "unknown_media"
             )
-    elif part_dict.get("function_call"):
+    elif function_call := part_dict.get("function_call"):
+        function_call_dict = to_dict(function_call)
         return ProcessedContentPart(
             function_call=ToolCall(
-                name=
-                id=
-                arguments=
+                name=function_call_dict.get("name"),
+                id=function_call_dict.get("id"),
+                arguments=function_call_dict.get("args", {}),
             )
         )
     elif part_dict.get("text") is not None:
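
Two small robustness fixes ride along in this hunk: `blob.get("mime_type", "")` avoids an `AttributeError` when inline data carries no MIME type, and the walrus assignment reuses the looked-up `function_call` instead of fetching it from the dict twice. The first fix in isolation:

```python
blob = {"data": b"\x89PNG"}  # inline_data without a "mime_type" key

# Old form: blob.get("mime_type") returns None, and
# None.startswith("image/") raises AttributeError.
is_image = blob.get("mime_type", "").startswith("image/")
assert is_image is False  # degrades gracefully instead of crashing
```
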
@@ -220,26 +210,71 @@ def with_tracer_wrapper(func):
     return _with_tracer
 
 
-def _process_image_item(
-    blob: dict[str, Any],
-    trace_id: str,
-    span_id: str,
-    message_index: int,
-    content_index: int,
-) -> ProcessedContentPart | dict | None:
+def _process_image_item(blob: dict[str, Any]) -> ProcessedContentPart | dict | None:
     # Convert to openai format, so backends can handle it
     data = blob.get("data")
     encoded_data = (
         base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data
     )
+    mime_type = blob.get("mime_type", "image/unknown")
+    image_type = mime_type.split("/")[1] if "/" in mime_type else "unknown"
+
     return (
         ProcessedContentPart(
             image_url=ImageUrl(
                 image_url=ImageUrlInner(
-                    url=f"data:image/{
+                    url=f"data:image/{image_type};base64,{encoded_data}",
                 )
             )
         )
         if Config.convert_image_to_openai_format
         else blob
     )
+
+
+@dont_throw
+def process_stream_chunk(
+    chunk: types.GenerateContentResponse,
+    existing_role: str,
+    existing_model_version: str | None,
+    # ============================== #
+    # mutable states, passed by reference
+    aggregated_usage_metadata: defaultdict[str, int],
+    final_parts: list[types.Part | None],
+    # ============================== #
+) -> ProcessChunkResult:
+    role = existing_role
+    model_version = existing_model_version
+
+    if chunk.model_version:
+        model_version = chunk.model_version
+
+    # Currently gemini throws an error if you pass more than one candidate
+    # with streaming
+    if chunk.candidates and len(chunk.candidates) > 0 and chunk.candidates[0].content:
+        final_parts += chunk.candidates[0].content.parts or []
+        role = chunk.candidates[0].content.role or role
+    if chunk.usage_metadata:
+        usage_dict = to_dict(chunk.usage_metadata)
+        # prompt token count is sent in every chunk
+        # (and is less by 1 in the last chunk, so we set it once);
+        # total token count in every chunk is greater by prompt token count than it should be,
+        # thus this awkward logic here
+        if aggregated_usage_metadata.get("prompt_token_count") is None:
+            # or 0, not .get(key, 0), because sometimes the value is explicitly None
+            aggregated_usage_metadata["prompt_token_count"] = (
+                usage_dict.get("prompt_token_count") or 0
+            )
+            aggregated_usage_metadata["total_token_count"] = (
+                usage_dict.get("total_token_count") or 0
+            )
+        aggregated_usage_metadata["candidates_token_count"] += (
+            usage_dict.get("candidates_token_count") or 0
+        )
+        aggregated_usage_metadata["total_token_count"] += (
+            usage_dict.get("candidates_token_count") or 0
+        )
+    return ProcessChunkResult(
+        role=role,
+        model_version=model_version,
+    )
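
The `is None` guard inside `process_stream_chunk` relies on a `defaultdict` subtlety: `.get()` bypasses the default factory, so a key that has never been written really does come back as `None` rather than `0`. That makes it a usable first-chunk check even though the mapping is a `defaultdict(int)`:

```python
from collections import defaultdict

agg: defaultdict[str, int] = defaultdict(int)

assert agg.get("prompt_token_count") is None  # .get() skips the factory
agg["prompt_token_count"] += 10               # [] access creates the key as 0
assert agg.get("prompt_token_count") == 10
```
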

{lmnr-0.7.8 → lmnr-0.7.10}/src/lmnr/opentelemetry_lib/tracing/__init__.py

@@ -219,8 +219,19 @@ class TracerWrapper(object):
 
     @classmethod
     def verify_initialized(cls) -> bool:
-        with cls._lock:
-            return hasattr(cls, "instance") and hasattr(cls.instance, "_span_processor")
+        # This is not using lock, but it is fine to return False from here
+        # even if initialization is going on.
+
+        # If we try to acquire the lock here, it may deadlock if an automatic
+        # instrumentation is importing a file that (at the top level) has a
+        # function annotated with Laminar's `observe` decorator.
+        # The decorator is evaluated at import time, inside `init_instrumentations`,
+        # which is called by the `TracerWrapper` constructor while holding the lock.
+        # Without the lock here, we will simply return False, which will cause
+        # the decorator to return the original function. This is fine, at runtime,
+        # the next import statement will re-evaluate the decorator, and Laminar will
+        # have been initialized by that time.
+        return hasattr(cls, "instance") and hasattr(cls.instance, "_span_processor")
 
     @classmethod
     def clear(cls):
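
The comment block added to `verify_initialized` describes a re-entrancy hazard rather than a race: the same thread that holds the constructor's lock can re-enter `verify_initialized` via import-time evaluation of the `observe` decorator. A condensed sketch of that shape (all names hypothetical; the point is that `threading.Lock` is not re-entrant):

```python
import threading

_lock = threading.Lock()


def verify_initialized_with_lock() -> bool:
    with _lock:  # the calling thread already holds _lock -> blocks forever
        return True


def tracer_wrapper_init():
    with _lock:
        # init_instrumentations() imports user code; an @observe decorator
        # evaluated at import time ends up calling back into
        # verify_initialized_with_lock() on this same thread:
        verify_initialized_with_lock()


# tracer_wrapper_init()  # uncommenting this deadlocks the thread
```

Returning `False` without the lock is the safe fallback here: the decorator temporarily returns the original function, and later imports re-evaluate it once initialization has finished.
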