lmnr 0.7.14__tar.gz → 0.7.16__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lmnr might be problematic.

Files changed (103)
  1. {lmnr-0.7.14 → lmnr-0.7.16}/PKG-INFO +1 -1
  2. {lmnr-0.7.14 → lmnr-0.7.16}/pyproject.toml +1 -1
  3. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/decorators/__init__.py +25 -6
  4. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/litellm/__init__.py +51 -4
  5. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +16 -1
  6. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/version.py +1 -1
  7. {lmnr-0.7.14 → lmnr-0.7.16}/README.md +0 -0
  8. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/__init__.py +0 -0
  9. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/cli.py +0 -0
  10. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/.flake8 +0 -0
  11. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/__init__.py +0 -0
  12. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/litellm/utils.py +0 -0
  13. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +0 -0
  14. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +0 -0
  15. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +0 -0
  16. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +0 -0
  17. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +0 -0
  18. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +0 -0
  19. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +0 -0
  20. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +0 -0
  21. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +0 -0
  22. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +0 -0
  23. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +0 -0
  24. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +0 -0
  25. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +0 -0
  26. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +0 -0
  27. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +0 -0
  28. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +0 -0
  29. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +0 -0
  30. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +0 -0
  31. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +0 -0
  32. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +0 -0
  33. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +0 -0
  34. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +0 -0
  35. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +0 -0
  36. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +0 -0
  37. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +0 -0
  38. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +0 -0
  39. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +0 -0
  40. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +0 -0
  41. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +0 -0
  42. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +0 -0
  43. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +0 -0
  44. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +0 -0
  45. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +0 -0
  46. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +0 -0
  47. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +0 -0
  48. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +0 -0
  49. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +0 -0
  50. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +0 -0
  51. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +0 -0
  52. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +0 -0
  53. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +0 -0
  54. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +0 -0
  55. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +0 -0
  56. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/__init__.py +0 -0
  57. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +0 -0
  58. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/attributes.py +0 -0
  59. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/context.py +0 -0
  60. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/exporter.py +0 -0
  61. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/instruments.py +0 -0
  62. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/processor.py +0 -0
  63. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/tracing/tracer.py +0 -0
  64. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/utils/__init__.py +0 -0
  65. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/utils/json_encoder.py +0 -0
  66. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/utils/package_check.py +0 -0
  67. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/utils/wrappers.py +0 -0
  68. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/py.typed +0 -0
  69. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/__init__.py +0 -0
  70. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/__init__.py +0 -0
  71. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/browser_use_cdp_otel.py +0 -0
  72. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/browser_use_otel.py +0 -0
  73. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/bubus_otel.py +0 -0
  74. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/cdp_utils.py +0 -0
  75. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/patchright_otel.py +0 -0
  76. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/playwright_otel.py +0 -0
  77. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/pw_utils.py +0 -0
  78. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/recorder/record.umd.min.cjs +0 -0
  79. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/browser/utils.py +0 -0
  80. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/async_client.py +0 -0
  81. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/__init__.py +0 -0
  82. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/agent.py +0 -0
  83. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/base.py +0 -0
  84. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/browser_events.py +0 -0
  85. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/evals.py +0 -0
  86. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/evaluators.py +0 -0
  87. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/asynchronous/resources/tags.py +0 -0
  88. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/__init__.py +0 -0
  89. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/agent.py +0 -0
  90. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/base.py +0 -0
  91. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/browser_events.py +0 -0
  92. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/evals.py +0 -0
  93. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/evaluators.py +0 -0
  94. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/resources/tags.py +0 -0
  95. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/client/synchronous/sync_client.py +0 -0
  96. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/datasets.py +0 -0
  97. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/decorators.py +0 -0
  98. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/eval_control.py +0 -0
  99. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/evaluations.py +0 -0
  100. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/laminar.py +0 -0
  101. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/log.py +0 -0
  102. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/types.py +0 -0
  103. {lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/sdk/utils.py +0 -0
{lmnr-0.7.14 → lmnr-0.7.16}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lmnr
- Version: 0.7.14
+ Version: 0.7.16
  Summary: Python SDK for Laminar
  Author: lmnr.ai
  Author-email: lmnr.ai <founders@lmnr.ai>

{lmnr-0.7.14 → lmnr-0.7.16}/pyproject.toml
@@ -6,7 +6,7 @@

  [project]
  name = "lmnr"
- version = "0.7.14"
+ version = "0.7.16"
  description = "Python SDK for Laminar"
  authors = [
      { name = "lmnr.ai", email = "founders@lmnr.ai" }

{lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/decorators/__init__.py
@@ -225,7 +225,7 @@ def observe_base(
              detach_context(isolated_ctx_token)
          # span will be ended in the generator
          if isinstance(res, types.GeneratorType):
-             return _handle_generator(span, ctx_token, res)
+             return _handle_generator(span, wrapper, res)
          if isinstance(res, types.AsyncGeneratorType):
              # async def foo() -> AsyncGenerator[int, None]:
              # is not considered async in a classical sense in Python,
@@ -234,7 +234,7 @@ def observe_base(
              # Flags are listed from LSB here:
              # https://docs.python.org/3/library/inspect.html#inspect-module-co-flags
              # See also: https://groups.google.com/g/python-tulip/c/6rWweGXLutU?pli=1
-             return _ahandle_generator(span, ctx_token, res)
+             return _ahandle_generator(span, wrapper, res)

          _process_output(span, res, ignore_output, output_formatter)
          _cleanup_span(span, wrapper)
@@ -306,7 +306,7 @@ def async_observe_base(
          if isinstance(res, types.AsyncGeneratorType):
              # probably unreachable, read the comment in the similar
              # part of the sync wrapper.
-             return await _ahandle_generator(span, ctx_token, res)
+             return await _ahandle_generator(span, wrapper, res)

          _process_output(span, res, ignore_output, output_formatter)
          _cleanup_span(span, wrapper)
@@ -317,18 +317,37 @@ def async_observe_base(
      return decorate


- def _handle_generator(span: Span, wrapper: TracerWrapper, res: Generator):
+ def _handle_generator(
+     span: Span,
+     wrapper: TracerWrapper,
+     res: Generator,
+     ignore_output: bool = False,
+     output_formatter: Callable[..., str] | None = None,
+ ):
+     results = []
      try:
-         yield from res
+         for part in res:
+             results.append(part)
+             yield part
      finally:
+         _process_output(span, results, ignore_output, output_formatter)
          _cleanup_span(span, wrapper)


- async def _ahandle_generator(span: Span, wrapper: TracerWrapper, res: AsyncGenerator):
+ async def _ahandle_generator(
+     span: Span,
+     wrapper: TracerWrapper,
+     res: AsyncGenerator,
+     ignore_output: bool = False,
+     output_formatter: Callable[..., str] | None = None,
+ ):
+     results = []
      try:
          async for part in res:
+             results.append(part)
              yield part
      finally:
+         _process_output(span, results, ignore_output, output_formatter)
          _cleanup_span(span, wrapper)

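The reworked _handle_generator and _ahandle_generator above no longer just close the span when a decorated generator is exhausted: they buffer every yielded chunk so the combined output can be recorded in the finally block. Below is a minimal standalone sketch of that buffering pattern, with a plain dict standing in for an OpenTelemetry span; wrap_generator and its recording logic are illustrative assumptions, not the SDK's actual API.

import json
from typing import Any, Callable, Generator

def wrap_generator(
    span_record: dict,  # stand-in for a real span object
    gen: Generator[Any, None, None],
    ignore_output: bool = False,
    output_formatter: Callable[..., str] | None = None,
) -> Generator[Any, None, None]:
    # Buffer every yielded chunk so the full output can be attached once the
    # caller stops iterating, whether it finishes normally or raises.
    results: list[Any] = []
    try:
        for part in gen:
            results.append(part)
            yield part
    finally:
        if not ignore_output:
            span_record["output"] = (
                output_formatter(results)
                if output_formatter
                else json.dumps(results, default=str)
            )
        span_record["ended"] = True  # analogous to _cleanup_span ending the span

# Usage: the wrapper is handed back to the caller in place of the raw generator.
span_record = {}

def stream():
    yield from ("a", "b", "c")

assert list(wrap_generator(span_record, stream())) == ["a", "b", "c"]
assert span_record["ended"] and json.loads(span_record["output"]) == ["a", "b", "c"]

The async variant in the hunk follows the same shape with async for.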
 
{lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/litellm/__init__.py
@@ -406,7 +406,15 @@ try:
                  details.get("cached_tokens"),
              )
          # TODO: add audio/image/text token details
-         # TODO: add completion tokens details (reasoning tokens)
+         if usage_dict.get("completion_tokens_details"):
+             details = usage_dict.get("completion_tokens_details", {})
+             details = model_as_dict(details)
+             if details.get("reasoning_tokens"):
+                 set_span_attribute(
+                     span,
+                     "gen_ai.usage.reasoning_tokens",
+                     details.get("reasoning_tokens"),
+                 )

      def _process_tool_calls(self, span, tool_calls, choice_index, is_response=True):
          """Process and set tool call attributes on the span"""
@@ -467,17 +475,56 @@ try:
              content = message.get("content", "")
              if content is None:
                  continue
+             reasoning_content = message.get("reasoning_content")
+             if reasoning_content:
+                 if isinstance(reasoning_content, str):
+                     reasoning_content = [
+                         {
+                             "type": "text",
+                             "text": reasoning_content,
+                         }
+                     ]
+                 elif not isinstance(reasoning_content, list):
+                     reasoning_content = [
+                         {
+                             "type": "text",
+                             "text": str(reasoning_content),
+                         }
+                     ]
+             else:
+                 reasoning_content = []
              if isinstance(content, str):
-                 set_span_attribute(span, f"gen_ai.completion.{i}.content", content)
+                 if reasoning_content:
+                     set_span_attribute(
+                         span,
+                         f"gen_ai.completion.{i}.content",
+                         json.dumps(
+                             reasoning_content
+                             + [
+                                 {
+                                     "type": "text",
+                                     "text": content,
+                                 }
+                             ]
+                         ),
+                     )
+                 else:
+                     set_span_attribute(
+                         span,
+                         f"gen_ai.completion.{i}.content",
+                         content,
+                     )
              elif isinstance(content, list):
                  set_span_attribute(
-                     span, f"gen_ai.completion.{i}.content", json.dumps(content)
+                     span,
+                     f"gen_ai.completion.{i}.content",
+                     json.dumps(reasoning_content + content),
                  )
              else:
                  set_span_attribute(
                      span,
                      f"gen_ai.completion.{i}.content",
-                     json.dumps(model_as_dict(content)),
+                     json.dumps(reasoning_content + [model_as_dict(content)]),
                  )

      def _process_content_part(self, content_part: dict) -> dict:

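Taken together, the two litellm hunks above add reasoning support in two places: reasoning token counts from completion_tokens_details are recorded as gen_ai.usage.reasoning_tokens, and a message's reasoning_content is normalized into text parts and serialized ahead of the regular content. The sketch below mirrors both pieces under stated assumptions: the payload values are made up, extract_reasoning_tokens and merge_completion_content are hypothetical helpers, and the real code additionally runs objects through model_as_dict, which is omitted here.

import json
from typing import Any

def extract_reasoning_tokens(usage: dict) -> int | None:
    # Mirrors the usage hunk: look inside completion_tokens_details.
    details = usage.get("completion_tokens_details") or {}
    return details.get("reasoning_tokens")

def merge_completion_content(message: dict) -> Any:
    # Normalize reasoning_content into a list of text parts.
    reasoning = message.get("reasoning_content")
    if reasoning:
        if isinstance(reasoning, str):
            reasoning = [{"type": "text", "text": reasoning}]
        elif not isinstance(reasoning, list):
            reasoning = [{"type": "text", "text": str(reasoning)}]
    else:
        reasoning = []

    content = message.get("content", "")
    if isinstance(content, str):
        if not reasoning:
            return content  # a plain string stays a plain string
        return json.dumps(reasoning + [{"type": "text", "text": content}])
    if isinstance(content, list):
        return json.dumps(reasoning + content)  # already a list of parts
    return json.dumps(reasoning + [content])  # fallback: single structured part

# Illustrative payloads (field names from the diff, numbers invented):
usage = {"completion_tokens": 128, "completion_tokens_details": {"reasoning_tokens": 96}}
print(extract_reasoning_tokens(usage))  # 96

message = {"reasoning_content": "let me think...", "content": "42"}
print(merge_completion_content(message))
# [{"type": "text", "text": "let me think..."}, {"type": "text", "text": "42"}]
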
{lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py
@@ -272,6 +272,16 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):

      if response.usage_metadata:
          usage_dict = to_dict(response.usage_metadata)
+         candidates_token_count = usage_dict.get("candidates_token_count")
+         # unlike OpenAI, and unlike input cached tokens, thinking tokens are
+         # not counted as part of candidates token count, so we need to add them
+         # separately for consistency with other instrumentations
+         thoughts_token_count = usage_dict.get("thoughts_token_count")
+         output_token_count = (
+             (candidates_token_count or 0) + (thoughts_token_count or 0)
+             if candidates_token_count is not None or thoughts_token_count is not None
+             else None
+         )
          set_span_attribute(
              span,
              gen_ai_attributes.GEN_AI_USAGE_INPUT_TOKENS,
@@ -280,7 +290,7 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
          set_span_attribute(
              span,
              gen_ai_attributes.GEN_AI_USAGE_OUTPUT_TOKENS,
-             usage_dict.get("candidates_token_count"),
+             output_token_count,
          )
          set_span_attribute(
              span,
@@ -292,6 +302,11 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
              SpanAttributes.LLM_USAGE_CACHE_READ_INPUT_TOKENS,
              usage_dict.get("cached_content_token_count"),
          )
+         set_span_attribute(
+             span,
+             SpanAttributes.LLM_USAGE_REASONING_TOKENS,
+             thoughts_token_count,
+         )

      if should_send_prompts():
          set_span_attribute(

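As the comment in the first google_genai hunk explains, Gemini reports thinking tokens in thoughts_token_count rather than folding them into candidates_token_count, so the instrumentation now sums the two for the output-token attribute (staying None when neither is reported) and also records the thinking tokens under their own attribute. A quick worked sketch of that arithmetic; output_tokens is a hypothetical standalone helper, not the instrumentation's code.

def output_tokens(usage: dict) -> int | None:
    candidates = usage.get("candidates_token_count")
    thoughts = usage.get("thoughts_token_count")
    if candidates is None and thoughts is None:
        return None  # no output usage reported at all
    return (candidates or 0) + (thoughts or 0)

print(output_tokens({"candidates_token_count": 120, "thoughts_token_count": 512}))  # 632
print(output_tokens({"candidates_token_count": 120}))                               # 120
print(output_tokens({}))                                                            # None
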
{lmnr-0.7.14 → lmnr-0.7.16}/src/lmnr/version.py
@@ -3,7 +3,7 @@ import httpx
  from packaging import version


- __version__ = "0.7.14"
+ __version__ = "0.7.16"
  PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"
