lmnr-0.7.14-py3-none-any.whl → lmnr-0.7.16-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Potentially problematic release: this version of lmnr might be problematic.
- lmnr/opentelemetry_lib/decorators/__init__.py +25 -6
- lmnr/opentelemetry_lib/litellm/__init__.py +51 -4
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +16 -1
- lmnr/version.py +1 -1
- {lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/METADATA +1 -1
- {lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/RECORD +8 -8
- {lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/WHEEL +0 -0
- {lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/decorators/__init__.py CHANGED

@@ -225,7 +225,7 @@ def observe_base(
         detach_context(isolated_ctx_token)
         # span will be ended in the generator
         if isinstance(res, types.GeneratorType):
-            return _handle_generator(span,
+            return _handle_generator(span, wrapper, res)
         if isinstance(res, types.AsyncGeneratorType):
             # async def foo() -> AsyncGenerator[int, None]:
             # is not considered async in a classical sense in Python,
@@ -234,7 +234,7 @@ def observe_base(
         # Flags are listed from LSB here:
         # https://docs.python.org/3/library/inspect.html#inspect-module-co-flags
         # See also: https://groups.google.com/g/python-tulip/c/6rWweGXLutU?pli=1
-            return _ahandle_generator(span,
+            return _ahandle_generator(span, wrapper, res)
 
         _process_output(span, res, ignore_output, output_formatter)
         _cleanup_span(span, wrapper)
@@ -306,7 +306,7 @@ def async_observe_base(
         if isinstance(res, types.AsyncGeneratorType):
             # probably unreachable, read the comment in the similar
             # part of the sync wrapper.
-            return await _ahandle_generator(span,
+            return await _ahandle_generator(span, wrapper, res)
 
         _process_output(span, res, ignore_output, output_formatter)
         _cleanup_span(span, wrapper)
@@ -317,18 +317,37 @@ def async_observe_base(
     return decorate
 
 
-def _handle_generator(
+def _handle_generator(
+    span: Span,
+    wrapper: TracerWrapper,
+    res: Generator,
+    ignore_output: bool = False,
+    output_formatter: Callable[..., str] | None = None,
+):
+    results = []
     try:
-
+        for part in res:
+            results.append(part)
+            yield part
     finally:
+        _process_output(span, results, ignore_output, output_formatter)
         _cleanup_span(span, wrapper)
 
 
-async def _ahandle_generator(
+async def _ahandle_generator(
+    span: Span,
+    wrapper: TracerWrapper,
+    res: AsyncGenerator,
+    ignore_output: bool = False,
+    output_formatter: Callable[..., str] | None = None,
+):
+    results = []
     try:
         async for part in res:
+            results.append(part)
             yield part
     finally:
+        _process_output(span, results, ignore_output, output_formatter)
         _cleanup_span(span, wrapper)
 
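The change in this file threads the TracerWrapper through to the generator handlers and makes them buffer everything the wrapped generator yields, so the span output is recorded and the span is closed in a finally block even if the consumer stops early or raises. A minimal standalone sketch of the same pattern, with hypothetical record_output/end_span callbacks standing in for the SDK's _process_output and _cleanup_span:

    from typing import Any, AsyncGenerator, Callable, Generator

    def traced_generator(
        gen: Generator,
        record_output: Callable[[list[Any]], None],
        end_span: Callable[[], None],
    ) -> Generator:
        # Re-yield items while buffering them, then record once the caller is done.
        results = []
        try:
            for part in gen:
                results.append(part)
                yield part
        finally:
            # Runs on exhaustion, break, or an exception in the consumer,
            # so the span is always closed with whatever was produced.
            record_output(results)
            end_span()

    async def traced_async_generator(
        gen: AsyncGenerator,
        record_output: Callable[[list[Any]], None],
        end_span: Callable[[], None],
    ) -> AsyncGenerator:
        results = []
        try:
            async for part in gen:
                results.append(part)
                yield part
        finally:
            record_output(results)
            end_span()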
lmnr/opentelemetry_lib/litellm/__init__.py CHANGED

@@ -406,7 +406,15 @@ try:
                 details.get("cached_tokens"),
             )
             # TODO: add audio/image/text token details
-
+            if usage_dict.get("completion_tokens_details"):
+                details = usage_dict.get("completion_tokens_details", {})
+                details = model_as_dict(details)
+                if details.get("reasoning_tokens"):
+                    set_span_attribute(
+                        span,
+                        "gen_ai.usage.reasoning_tokens",
+                        details.get("reasoning_tokens"),
+                    )
 
     def _process_tool_calls(self, span, tool_calls, choice_index, is_response=True):
         """Process and set tool call attributes on the span"""
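The new branch mirrors the cached_tokens handling just above it: when the usage payload carries completion_tokens_details with a non-zero reasoning_tokens count, that count is recorded as gen_ai.usage.reasoning_tokens. A simplified illustration with a plain dict standing in for the span attributes (sample values are made up, and model_as_dict is skipped since the input is already a dict):

    usage_dict = {
        "prompt_tokens": 100,
        "completion_tokens": 250,
        "completion_tokens_details": {"reasoning_tokens": 180},
    }

    attributes: dict[str, int] = {}

    # Same shape as the diff: look up the details dict, then the nested count.
    details = usage_dict.get("completion_tokens_details") or {}
    if details.get("reasoning_tokens"):
        attributes["gen_ai.usage.reasoning_tokens"] = details["reasoning_tokens"]

    assert attributes == {"gen_ai.usage.reasoning_tokens": 180}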
@@ -467,17 +475,56 @@ try:
             content = message.get("content", "")
             if content is None:
                 continue
+            reasoning_content = message.get("reasoning_content")
+            if reasoning_content:
+                if isinstance(reasoning_content, str):
+                    reasoning_content = [
+                        {
+                            "type": "text",
+                            "text": reasoning_content,
+                        }
+                    ]
+                elif not isinstance(reasoning_content, list):
+                    reasoning_content = [
+                        {
+                            "type": "text",
+                            "text": str(reasoning_content),
+                        }
+                    ]
+            else:
+                reasoning_content = []
             if isinstance(content, str):
-
+                if reasoning_content:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.{i}.content",
+                        json.dumps(
+                            reasoning_content
+                            + [
+                                {
+                                    "type": "text",
+                                    "text": content,
+                                }
+                            ]
+                        ),
+                    )
+                else:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.{i}.content",
+                        content,
+                    )
             elif isinstance(content, list):
                 set_span_attribute(
-                    span,
+                    span,
+                    f"gen_ai.completion.{i}.content",
+                    json.dumps(reasoning_content + content),
                 )
             else:
                 set_span_attribute(
                     span,
                     f"gen_ai.completion.{i}.content",
-                    json.dumps(model_as_dict(content)),
+                    json.dumps(reasoning_content + [model_as_dict(content)]),
                )
 
     def _process_content_part(self, content_part: dict) -> dict:
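Net effect of the hunk above: reasoning output (reasoning_content on the message) is normalized into a list of {"type": "text", ...} parts and prepended to the regular completion content, whatever shape that content takes. A condensed sketch of the normalization; the helper name and sample values are my own:

    import json

    def normalize_reasoning(reasoning_content) -> list[dict]:
        # Falsy -> no reasoning parts; str and other scalars get wrapped;
        # an existing list passes through unchanged.
        if not reasoning_content:
            return []
        if isinstance(reasoning_content, str):
            return [{"type": "text", "text": reasoning_content}]
        if isinstance(reasoning_content, list):
            return reasoning_content
        return [{"type": "text", "text": str(reasoning_content)}]

    message = {"reasoning_content": "step-by-step plan", "content": "final answer"}
    merged = normalize_reasoning(message["reasoning_content"]) + [
        {"type": "text", "text": message["content"]}
    ]
    print(json.dumps(merged))
    # [{"type": "text", "text": "step-by-step plan"}, {"type": "text", "text": "final answer"}]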
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py CHANGED

@@ -272,6 +272,16 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
 
     if response.usage_metadata:
         usage_dict = to_dict(response.usage_metadata)
+        candidates_token_count = usage_dict.get("candidates_token_count")
+        # unlike OpenAI, and unlike input cached tokens, thinking tokens are
+        # not counted as part of candidates token count, so we need to add them
+        # separately for consistency with other instrumentations
+        thoughts_token_count = usage_dict.get("thoughts_token_count")
+        output_token_count = (
+            (candidates_token_count or 0) + (thoughts_token_count or 0)
+            if candidates_token_count is not None or thoughts_token_count is not None
+            else None
+        )
         set_span_attribute(
             span,
             gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS,
@@ -280,7 +290,7 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
         set_span_attribute(
             span,
             gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS,
-
+            output_token_count,
         )
         set_span_attribute(
             span,
@@ -292,6 +302,11 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
             SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
             usage_dict.get("cached_content_token_count"),
         )
+        set_span_attribute(
+            span,
+            SpanAttributes.LLM_USAGE_REASONING_TOKENS,
+            thoughts_token_count,
+        )
 
     if should_send_prompts():
         set_span_attribute(
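The three google_genai hunks hang together: Gemini reports thoughts_token_count separately from candidates_token_count, so the instrumentation now sums the two for the output-token total (treating a missing value as 0) and keeps None only when both are absent, while also exposing the thinking tokens on their own reasoning-tokens attribute. A small sketch of that arithmetic with made-up values:

    def output_tokens(candidates: int | None, thoughts: int | None) -> int | None:
        # Mirrors the conditional expression in the diff above.
        if candidates is None and thoughts is None:
            return None
        return (candidates or 0) + (thoughts or 0)

    assert output_tokens(120, 80) == 200    # visible output plus thinking tokens
    assert output_tokens(120, None) == 120  # no thinking tokens reported
    assert output_tokens(None, None) is None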
lmnr/version.py CHANGED

{lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/RECORD CHANGED
@@ -2,8 +2,8 @@ lmnr/__init__.py,sha256=8be7b56ab62735fd54ca90a0642784c6153ed1d6e0f12734619ca061
 lmnr/cli.py,sha256=b8780b51f37fe9e20db5495c41d3ad3837f6b48f408b09a58688d017850c0796,6047
 lmnr/opentelemetry_lib/.flake8,sha256=6c2c6e0e51b1dd8439e501ca3e21899277076a787da868d0254ba37056b79405,150
 lmnr/opentelemetry_lib/__init__.py,sha256=abdd649b6c906a7dd2985b5a0903067904a457341d0da28a559c89a9faf939c7,2572
-lmnr/opentelemetry_lib/decorators/__init__.py,sha256=
-lmnr/opentelemetry_lib/litellm/__init__.py,sha256=
+lmnr/opentelemetry_lib/decorators/__init__.py,sha256=f7a19b40bf8f8145b6ff0a04e49e412ac52c3f850c330dec4a5087ead3aaf9e0,12264
+lmnr/opentelemetry_lib/litellm/__init__.py,sha256=b2e4ff664b15dcbad61db2248700aeca7926fe6f1c8d5f56f3ead150ee07f561,29450
 lmnr/opentelemetry_lib/litellm/utils.py,sha256=642c3857e09d8c8dfc327c23e711687c15cbed6c0ba3f0a7be7751356beb8b43,3427
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py,sha256=897b1a255ce2370d196018abcaf5942295a374cf367b668a3b418c3bddd29251,26461
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py,sha256=972919b821b9b7e5dc7cd191ba7e78b30b6efa5d63514e8cb301996d6386392c,369
@@ -16,7 +16,7 @@ lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py,sha256
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py,sha256=48391d935883506fe1dc4f6ace6011ecaed76a8f82f8026ccb553b2180afdb8c,3455
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py,sha256=61d2681e99c3084d1bcc27f7ca551f44a70126df6c5f23320c1e9c1654e05c42,15037
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py,sha256=19090d4d9a0511645f66112ebe6f05a9993905b11d8ae3060dab2dcc4c1a5fb2,329
-lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=a0037e5ed736e3124c7cd0af7eff6d2656a622d4b0a16785e2d2fb7d760f85a0,20163
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py,sha256=db9cdebc9ee0dccb493ffe608eede3047efec20ed26c3924b72b2e50edbd92c2,245
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py,sha256=b10619e76e5893f8b891f92531d29dcf6651e8f9a7dcbf81c3f35341ce311f6e,753
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=2e1dc91b04757f7f6c960e3f1c58bb9f7e735f0e1dcb811a08833faf18766d3b,9242
@@ -97,8 +97,8 @@ lmnr/sdk/laminar.py,sha256=24adfd64da01d7fd69ba9437cf9860a5c64aa6baab1bb92d8ba14
 lmnr/sdk/log.py,sha256=9edfd83263f0d4845b1b2d1beeae2b4ed3f8628de941f371a893d72b79c348d4,2213
 lmnr/sdk/types.py,sha256=d8061ca90dd582b408a893ebbbeb1586e8750ed30433ef4f6d63423a078511b0,14574
 lmnr/sdk/utils.py,sha256=4114559ba6ae57fcba2de2bfaa09339688ce5752c36f028a7b55e51eae624947,6307
-lmnr/version.py,sha256=
-lmnr-0.7.
-lmnr-0.7.
-lmnr-0.7.
-lmnr-0.7.
+lmnr/version.py,sha256=cc6cc51d32104d432712f37c6215b60901c85269bf239b623a9d3d72dcd220f8,1322
+lmnr-0.7.16.dist-info/WHEEL,sha256=ab6157bc637547491fb4567cd7ddf26b04d63382916ca16c29a5c8e94c9c9ef7,79
+lmnr-0.7.16.dist-info/entry_points.txt,sha256=abdf3411b7dd2d7329a241f2da6669bab4e314a747a586ecdb9f888f3035003c,39
+lmnr-0.7.16.dist-info/METADATA,sha256=0405208ecd989926a7b75dd4fedbab4fbd010c126fca736e09673ea955efeed6,14195
+lmnr-0.7.16.dist-info/RECORD,,
{lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/WHEEL: file without changes
{lmnr-0.7.14.dist-info → lmnr-0.7.16.dist-info}/entry_points.txt: file without changes