lmnr 0.6.20__py3-none-any.whl → 0.6.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. lmnr/opentelemetry_lib/decorators/__init__.py +188 -138
  2. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +674 -0
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +485 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
  18. lmnr/opentelemetry_lib/tracing/__init__.py +1 -1
  19. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
  20. lmnr/opentelemetry_lib/tracing/processor.py +1 -1
  21. lmnr/opentelemetry_lib/utils/package_check.py +9 -0
  22. lmnr/sdk/browser/browser_use_otel.py +4 -2
  23. lmnr/sdk/browser/patchright_otel.py +0 -26
  24. lmnr/sdk/browser/playwright_otel.py +51 -78
  25. lmnr/sdk/browser/pw_utils.py +359 -114
  26. lmnr/sdk/decorators.py +39 -4
  27. lmnr/sdk/evaluations.py +23 -9
  28. lmnr/sdk/laminar.py +75 -48
  29. lmnr/version.py +1 -1
  30. {lmnr-0.6.20.dist-info → lmnr-0.6.21.dist-info}/METADATA +8 -7
  31. {lmnr-0.6.20.dist-info → lmnr-0.6.21.dist-info}/RECORD +33 -18
  32. {lmnr-0.6.20.dist-info → lmnr-0.6.21.dist-info}/WHEEL +1 -1
  33. {lmnr-0.6.20.dist-info → lmnr-0.6.21.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,229 @@
1
+ import json
2
+
3
+ from .utils import (
4
+ dont_throw,
5
+ model_as_dict,
6
+ set_span_attribute,
7
+ should_send_prompts,
8
+ )
9
+ from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import (
10
+ GEN_AI_RESPONSE_ID,
11
+ )
12
+ from opentelemetry.semconv_ai import (
13
+ SpanAttributes,
14
+ )
15
+
16
+ CONTENT_FILTER_KEY = "content_filter_results"
17
+
18
+
19
@dont_throw
def set_input_attributes(span, kwargs):
    """Record the request prompt/messages on the span when content tracing is enabled."""
    if not span.is_recording():
        return
    if not should_send_prompts():
        return

    prompt = kwargs.get("prompt")
    messages = kwargs.get("messages")
    if prompt is not None:
        # Single-prompt (legacy completion) style request.
        set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.user", prompt)
    elif messages is not None:
        # Chat-style request: one indexed attribute pair per message.
        for index, message in enumerate(messages):
            prefix = f"{SpanAttributes.LLM_PROMPTS}.{index}"
            set_span_attribute(
                span, f"{prefix}.content", _dump_content(message.get("content"))
            )
            set_span_attribute(span, f"{prefix}.role", message.get("role"))
40
+
41
+
42
@dont_throw
def set_model_input_attributes(span, kwargs):
    """Record request parameters (model, sampling settings, streaming flag) on the span."""
    if not span.is_recording():
        return

    # (attribute, value) pairs; set_span_attribute skips None/empty values.
    request_attributes = (
        (SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model")),
        (SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens_to_sample")),
        (SpanAttributes.LLM_REQUEST_TEMPERATURE, kwargs.get("temperature")),
        (SpanAttributes.LLM_REQUEST_TOP_P, kwargs.get("top_p")),
        (SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")),
        (SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")),
        (SpanAttributes.LLM_IS_STREAMING, kwargs.get("stream") or False),
    )
    for attribute, value in request_attributes:
        set_span_attribute(span, attribute, value)
64
+
65
+
66
def set_streaming_response_attributes(
    span, accumulated_content, finish_reason=None, usage=None
):
    """Set span attributes for accumulated streaming response.

    NOTE(review): ``usage`` is accepted but not recorded here — presumably
    token usage for streams is handled by
    ``set_model_streaming_response_attributes``; confirm against callers.
    """
    if not span.is_recording() or not should_send_prompts():
        return

    completion_prefix = f"{SpanAttributes.LLM_COMPLETIONS}.0"
    attributes = {
        f"{completion_prefix}.role": "assistant",
        f"{completion_prefix}.content": accumulated_content,
    }
    if finish_reason:
        attributes[f"{completion_prefix}.finish_reason"] = finish_reason
    for name, value in attributes.items():
        set_span_attribute(span, name, value)
78
+
79
+
80
def set_model_streaming_response_attributes(span, usage):
    """Record token usage for a streamed response, if usage info was collected."""
    if not span.is_recording() or not usage:
        return

    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.completion_tokens
    )
    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_tokens
    )
    set_span_attribute(
        span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens
    )
94
+
95
+
96
@dont_throw
def set_model_response_attributes(span, response, token_histogram):
    """Record response model/id and token usage on the span, and emit token metrics.

    Args:
        span: the OpenTelemetry span; skipped entirely when not recording.
        response: the completion response (pydantic model or dict).
        token_histogram: optional metrics histogram that receives the
            input/output token counts; may be None.
    """
    if not span.is_recording():
        return
    response = model_as_dict(response)
    model = response.get("model")
    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, model)
    set_span_attribute(span, GEN_AI_RESPONSE_ID, response.get("id"))

    usage = response.get("usage") or {}
    prompt_tokens = usage.get("prompt_tokens")
    completion_tokens = usage.get("completion_tokens")
    if usage:
        set_span_attribute(
            span, SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.get("total_tokens")
        )
        set_span_attribute(
            span, SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, completion_tokens
        )
        set_span_attribute(span, SpanAttributes.LLM_USAGE_PROMPT_TOKENS, prompt_tokens)

    # The input/output metric recording was duplicated inline; factored into
    # one helper so both directions stay in sync.
    _record_token_count(token_histogram, prompt_tokens, "input", model)
    _record_token_count(token_histogram, completion_tokens, "output", model)


def _record_token_count(token_histogram, count, token_type, model):
    """Emit one token-count measurement; no-op for a missing histogram or an invalid count."""
    if token_histogram is None or not isinstance(count, int) or count < 0:
        return
    token_histogram.record(
        count,
        attributes={
            SpanAttributes.LLM_TOKEN_TYPE: token_type,
            SpanAttributes.LLM_RESPONSE_MODEL: model,
        },
    )
141
+
142
+
143
def set_response_attributes(span, response):
    """Record completion choices on the span when content tracing is enabled."""
    if not span.is_recording():
        return
    response_dict = model_as_dict(response)
    choices = response_dict.get("choices")
    if should_send_prompts() and choices:
        _set_completions(span, choices)
149
+
150
+
151
def _set_completions(span, choices):
    """Record each response choice (finish reason, content, tool calls) on the span.

    Fix: the original used ``return`` inside the per-choice loop, so a
    content-filtered choice (or one without a message) silently dropped all
    subsequent choices; ``continue`` lets every choice be recorded independently.
    """
    if choices is None or not should_send_prompts():
        return

    for choice in choices:
        index = choice.get("index")
        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
        set_span_attribute(span, f"{prefix}.finish_reason", choice.get("finish_reason"))

        if choice.get("content_filter_results"):
            set_span_attribute(
                span,
                f"{prefix}.{CONTENT_FILTER_KEY}",
                json.dumps(choice.get("content_filter_results")),
            )

        if choice.get("finish_reason") == "content_filter":
            set_span_attribute(span, f"{prefix}.role", "assistant")
            set_span_attribute(span, f"{prefix}.content", "FILTERED")
            # Nothing more to record for a filtered choice, but later
            # choices may still carry valid content.
            continue

        message = choice.get("message")
        if not message:
            continue

        set_span_attribute(span, f"{prefix}.role", message.get("role"))
        set_span_attribute(span, f"{prefix}.content", message.get("content"))

        # Legacy single function-call field.
        function_call = message.get("function_call")
        if function_call:
            set_span_attribute(
                span, f"{prefix}.tool_calls.0.name", function_call.get("name")
            )
            set_span_attribute(
                span,
                f"{prefix}.tool_calls.0.arguments",
                function_call.get("arguments"),
            )

        # Modern tool-calls list; indexed alongside the legacy field.
        tool_calls = message.get("tool_calls")
        if tool_calls:
            for i, tool_call in enumerate(tool_calls):
                function = tool_call.get("function")
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{i}.id",
                    tool_call.get("id"),
                )
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{i}.name",
                    function.get("name"),
                )
                set_span_attribute(
                    span,
                    f"{prefix}.tool_calls.{i}.arguments",
                    function.get("arguments"),
                )
210
+
211
+
212
+ def _dump_content(content):
213
+ if isinstance(content, str):
214
+ return content
215
+ json_serializable = []
216
+ for item in content:
217
+ if item.get("type") == "text":
218
+ json_serializable.append({"type": "text", "text": item.get("text")})
219
+ elif image_url := item.get("image_url"):
220
+ json_serializable.append(
221
+ {
222
+ "type": "image_url",
223
+ "image_url": {
224
+ "url": image_url.get("url"),
225
+ "detail": image_url.get("detail"),
226
+ },
227
+ }
228
+ )
229
+ return json.dumps(json_serializable)
@@ -0,0 +1,92 @@
1
+ import logging
2
+ import os
3
+ import traceback
4
+ from importlib.metadata import version
5
+
6
+ from opentelemetry import context as context_api
7
+ from .config import Config
8
+ from opentelemetry.semconv_ai import SpanAttributes
9
+
10
+ GEN_AI_SYSTEM = "gen_ai.system"
11
+ GEN_AI_SYSTEM_GROQ = "groq"
12
+
13
+ _PYDANTIC_VERSION = version("pydantic")
14
+
15
+ LMNR_TRACE_CONTENT = "LMNR_TRACE_CONTENT"
16
+
17
+
18
def set_span_attribute(span, name, value):
    """Set a span attribute, skipping missing (None) and empty-string values."""
    if value is None or value == "":
        return
    span.set_attribute(name, value)
21
+
22
+
23
def should_send_prompts():
    """Content tracing is on unless LMNR_TRACE_CONTENT is set to a non-"true"
    value; it can also be force-enabled via the context override key."""
    env_setting = (os.getenv(LMNR_TRACE_CONTENT) or "true").lower()
    if env_setting == "true":
        return True
    return context_api.get_value("override_enable_content_tracing")
27
+
28
+
29
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    Instrumentation must never break the instrumented application, so any
    exception is logged at DEBUG (and forwarded to ``Config.exception_logger``
    when one is configured) and the wrapper returns None.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools  # local import: keep the module's import surface unchanged

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__/__module__ on the wrapper,
    # so logs and introspection report the real function, not "wrapper".
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "OpenLLMetry failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
52
+
53
+
54
@dont_throw
def shared_metrics_attributes(response):
    """Build the common attribute set attached to every Groq metric point."""
    response_dict = model_as_dict(response)

    attributes = dict(Config.get_common_metrics_attributes())
    attributes[GEN_AI_SYSTEM] = GEN_AI_SYSTEM_GROQ
    attributes[SpanAttributes.LLM_RESPONSE_MODEL] = response_dict.get("model")
    return attributes
65
+
66
+
67
@dont_throw
def error_metrics_attributes(exception):
    """Build metric attributes describing a failed Groq call."""
    error_type = type(exception).__name__
    return {
        GEN_AI_SYSTEM: GEN_AI_SYSTEM_GROQ,
        "error.type": error_type,
    }
73
+
74
+
75
def model_as_dict(model):
    """Best-effort conversion of an SDK response object to a plain dict.

    Handles pydantic v1 (``.dict()``), pydantic v2 (``.model_dump()``) and raw
    API responses (``.parse()``); anything else is returned unchanged.

    Fix: the original compared version *strings* (``_PYDANTIC_VERSION < "2.0.0"``),
    which is lexicographic and would misclassify e.g. a future "10.x" release
    as pre-2.0; compare the numeric major version instead.
    """
    try:
        pydantic_major = int(_PYDANTIC_VERSION.split(".", 1)[0])
    except (ValueError, TypeError):
        pydantic_major = 2  # unparsable version string: assume modern pydantic
    if pydantic_major < 2:
        return model.dict()
    if hasattr(model, "model_dump"):
        return model.model_dump()
    elif hasattr(model, "parse"):  # Raw API response
        return model_as_dict(model.parse())
    else:
        return model
84
+
85
+
86
def should_emit_events() -> bool:
    """
    Checks if the instrumentation isn't using the legacy attributes
    and if the event logger is not None.
    """
    use_legacy = Config.use_legacy_attributes
    return not use_legacy
@@ -0,0 +1 @@
1
+ __version__ = "0.41.0"
@@ -14,7 +14,7 @@ import openai
14
14
 
15
15
  _OPENAI_VERSION = version("openai")
16
16
 
17
- TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
17
+ LMNR_TRACE_CONTENT = "LMNR_TRACE_CONTENT"
18
18
 
19
19
 
20
20
  def is_openai_v1():
@@ -28,7 +28,7 @@ def is_azure_openai(instance):
28
28
 
29
29
 
30
30
  def is_metrics_enabled() -> bool:
31
- return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true"
31
+ return False
32
32
 
33
33
 
34
34
  def should_record_stream_token_usage():
@@ -171,7 +171,7 @@ def run_async(method):
171
171
 
172
172
  def should_send_prompts():
173
173
  return (
174
- os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
174
+ os.getenv(LMNR_TRACE_CONTENT) or "true"
175
175
  ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
176
176
 
177
177
 
@@ -45,7 +45,7 @@ class TracerWrapper(object):
45
45
  project_api_key: str | None = None,
46
46
  max_export_batch_size: int | None = None,
47
47
  force_http: bool = False,
48
- timeout_seconds: int = 10,
48
+ timeout_seconds: int = 30,
49
49
  set_global_tracer_provider: bool = True,
50
50
  otel_logger_level: int = logging.ERROR,
51
51
  ) -> "TracerWrapper":
@@ -2,7 +2,10 @@ import abc
2
2
 
3
3
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
4
4
 
5
- from lmnr.opentelemetry_lib.utils.package_check import is_package_installed
5
+ from lmnr.opentelemetry_lib.utils.package_check import (
6
+ get_package_version,
7
+ is_package_installed,
8
+ )
6
9
 
7
10
 
8
11
  class InstrumentorInitializer(abc.ABC):
@@ -27,10 +30,8 @@ class AnthropicInstrumentorInitializer(InstrumentorInitializer):
27
30
  def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
28
31
  if not is_package_installed("anthropic"):
29
32
  return None
30
- if not is_package_installed("opentelemetry-instrumentation-anthropic"):
31
- return None
32
33
 
33
- from opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
34
+ from ..opentelemetry.instrumentation.anthropic import AnthropicInstrumentor
34
35
 
35
36
  return AnthropicInstrumentor(
36
37
  upload_base64_image=None,
@@ -54,6 +55,12 @@ class BrowserUseInstrumentorInitializer(InstrumentorInitializer):
54
55
  if not is_package_installed("browser-use"):
55
56
  return None
56
57
 
58
+ version = get_package_version("browser-use")
59
+ from packaging.version import parse
60
+
61
+ if version and parse(version) >= parse("0.5.0"):
62
+ return None
63
+
57
64
  from lmnr.sdk.browser.browser_use_otel import BrowserUseInstrumentor
58
65
 
59
66
  return BrowserUseInstrumentor()
@@ -127,10 +134,8 @@ class GroqInstrumentorInitializer(InstrumentorInitializer):
127
134
  def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
128
135
  if not is_package_installed("groq"):
129
136
  return None
130
- if not is_package_installed("opentelemetry-instrumentation-groq"):
131
- return None
132
137
 
133
- from opentelemetry.instrumentation.groq import GroqInstrumentor
138
+ from ..opentelemetry.instrumentation.groq import GroqInstrumentor
134
139
 
135
140
  return GroqInstrumentor()
136
141
 
@@ -35,7 +35,7 @@ class LaminarSpanProcessor(SpanProcessor):
35
35
  api_key: str | None = None,
36
36
  timeout_seconds: int = 30,
37
37
  force_http: bool = False,
38
- max_export_batch_size: int = 512,
38
+ max_export_batch_size: int = 64,
39
39
  disable_batch: bool = False,
40
40
  exporter: SpanExporter | None = None,
41
41
  ):
@@ -1,5 +1,7 @@
1
1
  from importlib.metadata import distributions
2
2
 
3
+ from typing import Optional
4
+
3
5
  installed_packages = {
4
6
  (dist.name or dist.metadata.get("Name", "")).lower() for dist in distributions()
5
7
  }
@@ -7,3 +9,10 @@ installed_packages = {
7
9
 
8
10
  def is_package_installed(package_name: str) -> bool:
9
11
  return package_name.lower() in installed_packages
12
+
13
+
14
+ def get_package_version(package_name: str) -> Optional[str]:
15
+ for dist in distributions():
16
+ if (dist.name or dist.metadata.get("Name", "")).lower() == package_name.lower():
17
+ return dist.version
18
+ return None
@@ -1,4 +1,5 @@
1
1
  from lmnr.opentelemetry_lib.decorators import json_dumps
2
+ from lmnr.sdk.laminar import Laminar
2
3
  from lmnr.sdk.browser.utils import with_tracer_wrapper
3
4
  from lmnr.sdk.utils import get_input_from_func_args
4
5
  from lmnr.version import __version__
@@ -19,7 +20,7 @@ except ImportError as e:
19
20
  "to install Browser Use or remove this import."
20
21
  ) from e
21
22
 
22
- _instruments = ("browser-use < 0.5.0",)
23
+ _instruments = ("browser-use >= 0.1.0",)
23
24
 
24
25
  WRAPPED_METHODS = [
25
26
  {
@@ -86,7 +87,8 @@ async def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
86
87
  step_info = kwargs.get("step_info", args[0] if len(args) > 0 else None)
87
88
  if step_info and hasattr(step_info, "step_number"):
88
89
  span_name = f"agent.step.{step_info.step_number}"
89
- with tracer.start_as_current_span(span_name, attributes=attributes) as span:
90
+
91
+ with Laminar.start_as_current_span(span_name) as span:
90
92
  span.set_attributes(attributes)
91
93
  result = await wrapped(*args, **kwargs)
92
94
  if not to_wrap.get("ignore_output"):
@@ -1,6 +1,4 @@
1
1
  from lmnr.sdk.browser.playwright_otel import (
2
- _wrap_new_page,
3
- _wrap_new_page_async,
4
2
  _wrap_new_browser_sync,
5
3
  _wrap_new_browser_async,
6
4
  _wrap_new_context_sync,
@@ -18,18 +16,6 @@ from wrapt import wrap_function_wrapper
18
16
  _instruments = ("patchright >= 1.9.0",)
19
17
 
20
18
  WRAPPED_METHODS = [
21
- {
22
- "package": "patchright.sync_api",
23
- "object": "BrowserContext",
24
- "method": "new_page",
25
- "wrapper": _wrap_new_page,
26
- },
27
- {
28
- "package": "patchright.sync_api",
29
- "object": "Browser",
30
- "method": "new_page",
31
- "wrapper": _wrap_new_page,
32
- },
33
19
  {
34
20
  "package": "patchright.sync_api",
35
21
  "object": "BrowserType",
@@ -63,18 +49,6 @@ WRAPPED_METHODS = [
63
49
  ]
64
50
 
65
51
  WRAPPED_METHODS_ASYNC = [
66
- {
67
- "package": "patchright.async_api",
68
- "object": "BrowserContext",
69
- "method": "new_page",
70
- "wrapper": _wrap_new_page_async,
71
- },
72
- {
73
- "package": "patchright.async_api",
74
- "object": "Browser",
75
- "method": "new_page",
76
- "wrapper": _wrap_new_page_async,
77
- },
78
52
  {
79
53
  "package": "patchright.async_api",
80
54
  "object": "BrowserType",