lmnr 0.6.20__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +0 -4
- lmnr/opentelemetry_lib/decorators/__init__.py +211 -151
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +678 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +256 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +295 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +179 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +4 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +16 -16
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +3 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +3 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +7 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +190 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +90 -2
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -7
- lmnr/opentelemetry_lib/tracing/context.py +109 -0
- lmnr/opentelemetry_lib/tracing/processor.py +6 -7
- lmnr/opentelemetry_lib/tracing/tracer.py +29 -0
- lmnr/opentelemetry_lib/utils/package_check.py +9 -0
- lmnr/sdk/browser/browser_use_otel.py +9 -7
- lmnr/sdk/browser/patchright_otel.py +14 -26
- lmnr/sdk/browser/playwright_otel.py +72 -73
- lmnr/sdk/browser/pw_utils.py +436 -119
- lmnr/sdk/client/asynchronous/resources/browser_events.py +1 -0
- lmnr/sdk/decorators.py +39 -4
- lmnr/sdk/evaluations.py +23 -9
- lmnr/sdk/laminar.py +181 -209
- lmnr/sdk/types.py +0 -6
- lmnr/version.py +1 -1
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/METADATA +10 -8
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/RECORD +45 -29
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/WHEEL +1 -1
- lmnr/opentelemetry_lib/tracing/context_properties.py +0 -65
- {lmnr-0.6.20.dist-info → lmnr-0.7.0.dist-info}/entry_points.txt +0 -0
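The headline additions in this release are vendored OpenTelemetry instrumentations for Anthropic and Groq, a threading instrumentation, and a new tracing/context.py module (with context_properties.py removed). A minimal sketch of opting into the new instrumentations at initialization follows; the `Instruments` enum member names are assumed from the new module paths and not verified against the 0.7.0 source:

```python
from lmnr import Instruments, Laminar

# Hedged sketch: the ANTHROPIC and GROQ member names are inferred from the
# new instrumentation packages listed above; check lmnr 0.7.0 before relying
# on them.
Laminar.initialize(
    project_api_key="<LMNR_PROJECT_API_KEY>",
    instruments={Instruments.ANTHROPIC, Instruments.GROQ},
)
```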
lmnr/sdk/decorators.py
CHANGED

@@ -1,6 +1,6 @@
 from lmnr.opentelemetry_lib.decorators import (
-
-
+    observe_base,
+    async_observe_base,
     json_dumps,
 )
 from opentelemetry.trace import INVALID_SPAN, get_current_span

@@ -28,6 +28,8 @@ def observe(
     ignore_output: bool = False,
     span_type: Literal["DEFAULT", "LLM", "TOOL"] = "DEFAULT",
     ignore_inputs: list[str] | None = None,
+    input_formatter: Callable[P, str] | None = None,
+    output_formatter: Callable[[R], str] | None = None,
     metadata: dict[str, Any] | None = None,
     tags: list[str] | None = None,
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:

@@ -53,6 +55,16 @@ def observe(
             def foo(a, b, `sensitive_data`), and you want to ignore the\
             `sensitive_data` argument, you can pass ["sensitive_data"] to\
             this argument. Defaults to None.
+        input_formatter (Callable[P, str] | None, optional): A custom function\
+            to format the input of the wrapped function. All function arguments\
+            are passed to this function. Must return a string. Ignored if\
+            `ignore_input` is True. Does not respect `ignore_inputs` argument.
+            Defaults to None.
+        output_formatter (Callable[[R], str] | None, optional): A custom function\
+            to format the output of the wrapped function. The output is passed\
+            to this function. Must return a string. Ignored if `ignore_output`
+            is True. Does not respect `ignore_inputs` argument.
+            Defaults to None.
         metadata (dict[str, Any] | None, optional): Metadata to associate with\
             the trace. Must be JSON serializable. Defaults to None.
         tags (list[str] | None, optional): Tags to associate with the trace.

@@ -91,22 +103,45 @@ def observe(
             logger.warning("Tags must be a list of strings. Tags will be ignored.")
         else:
             association_properties["tags"] = tags
+        if input_formatter is not None and ignore_input:
+            logger.warning(
+                f"observe, function {func.__name__}: Input formatter"
+                " is ignored because `ignore_input` is True. Specify only one of"
+                " `ignore_input` or `input_formatter`."
+            )
+        if input_formatter is not None and ignore_inputs is not None:
+            logger.warning(
+                f"observe, function {func.__name__}: Both input formatter and"
+                " `ignore_inputs` are specified. Input formatter"
+                " will pass all arguments to the formatter regardless of"
+                " `ignore_inputs`."
+            )
+        if output_formatter is not None and ignore_output:
+            logger.warning(
+                f"observe, function {func.__name__}: Output formatter"
+                " is ignored because `ignore_output` is True. Specify only one of"
+                " `ignore_output` or `output_formatter`."
+            )
         result = (
-
+            async_observe_base(
                 name=name,
                 ignore_input=ignore_input,
                 ignore_output=ignore_output,
                 span_type=span_type,
                 ignore_inputs=ignore_inputs,
+                input_formatter=input_formatter,
+                output_formatter=output_formatter,
                 association_properties=association_properties,
             )(func)
             if is_async(func)
-            else
+            else observe_base(
                 name=name,
                 ignore_input=ignore_input,
                 ignore_output=ignore_output,
                 span_type=span_type,
                 ignore_inputs=ignore_inputs,
+                input_formatter=input_formatter,
+                output_formatter=output_formatter,
                 association_properties=association_properties,
             )(func)
         )
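The net effect of this diff is two new `@observe` parameters, `input_formatter` and `output_formatter`, threaded through to `observe_base`/`async_observe_base`. A minimal usage sketch, assuming lmnr 0.7.0; `redact_input` and `summarize_output` are hypothetical helpers written for this example:

```python
from lmnr import Laminar, observe

Laminar.initialize(project_api_key="<LMNR_PROJECT_API_KEY>")

def redact_input(query: str, api_token: str) -> str:
    # Receives every argument of the wrapped call; must return a string.
    return f"query={query}"  # hypothetical: keep the query, drop the credential

def summarize_output(results: list[str]) -> str:
    # Receives the wrapped function's return value; must return a string.
    return f"{len(results)} results"

@observe(input_formatter=redact_input, output_formatter=summarize_output)
def search(query: str, api_token: str) -> list[str]:
    return [query.upper()]

search("hello", api_token="secret")  # the span records the formatted strings
```

As the new warnings spell out, a formatter is dropped in favor of its corresponding flag when both are set (`ignore_input` beats `input_formatter`, `ignore_output` beats `output_formatter`), and `input_formatter` receives all arguments regardless of `ignore_inputs`.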
lmnr/sdk/evaluations.py
CHANGED

@@ -57,7 +57,7 @@ def get_average_scores(results: list[EvaluationResultDatapoint]) -> dict[str, Nu
     average_scores = {}
     for key, values in per_score_values.items():
         scores = [v for v in values if v is not None]
-
+
         # If there are no scores, we don't want to include the key in the average scores
         if len(scores) > 0:
             average_scores[key] = sum(scores) / len(scores)

@@ -108,6 +108,7 @@ class Evaluation:
         concurrency_limit: int = DEFAULT_BATCH_SIZE,
         project_api_key: str | None = None,
         base_url: str | None = None,
+        base_http_url: str | None = None,
         http_port: int | None = None,
         grpc_port: int | None = None,
         instruments: set[Instruments] | None = None,

@@ -157,6 +158,10 @@ class Evaluation:
             Useful if self-hosted. Do NOT include the port, use `http_port`\
             and `grpc_port` instead.
             Defaults to "https://api.lmnr.ai".
+        base_http_url (str | None, optional): The base HTTP URL for Laminar API.\
+            Only set this if your Laminar backend HTTP is proxied\
+            through a different host. If not specified, defaults\
+            to https://api.lmnr.ai.
         http_port (int | None, optional): The port for Laminar API\
             HTTP service. Defaults to 443 if not specified.
         grpc_port (int | None, optional): The port for Laminar API\

@@ -199,7 +204,7 @@ class Evaluation:
         self.batch_size = concurrency_limit
         self._logger = get_default_logger(self.__class__.__name__)
         self.upload_tasks = []
-        self.base_http_url = f"{base_url}:{http_port or 443}"
+        self.base_http_url = f"{base_http_url or base_url}:{http_port or 443}"

         api_key = project_api_key or from_env("LMNR_PROJECT_API_KEY")
         if not api_key and not L.is_initialized():

@@ -224,6 +229,7 @@ class Evaluation:
             L.initialize(
                 project_api_key=project_api_key,
                 base_url=base_url,
+                base_http_url=self.base_http_url,
                 http_port=http_port,
                 grpc_port=grpc_port,
                 instruments=instruments,

@@ -352,22 +358,24 @@
         if isinstance(evaluator, HumanEvaluator):
             # Create an empty span for human evaluators
             with L.start_as_current_span(
-                evaluator_name,
-                input={"output": output, "target": target}
+                evaluator_name, input={"output": output, "target": target}
             ) as human_evaluator_span:
-                human_evaluator_span.set_attribute(
+                human_evaluator_span.set_attribute(
+                    SPAN_TYPE, SpanType.HUMAN_EVALUATOR.value
+                )
                 # Human evaluators don't execute automatically, just create the span
                 L.set_span_output(None)
-
+
             # We don't want to save the score for human evaluators
             scores[evaluator_name] = None
         else:
             # Regular evaluator function
             with L.start_as_current_span(
-                evaluator_name,
-                input={"output": output, "target": target}
+                evaluator_name, input={"output": output, "target": target}
             ) as evaluator_span:
-                evaluator_span.set_attribute(
+                evaluator_span.set_attribute(
+                    SPAN_TYPE, SpanType.EVALUATOR.value
+                )
                 if is_async(evaluator):
                     value = await evaluator(output, target)
                 else:

@@ -416,6 +424,7 @@ def evaluate(
     concurrency_limit: int = DEFAULT_BATCH_SIZE,
     project_api_key: str | None = None,
     base_url: str | None = None,
+    base_http_url: str | None = None,
     http_port: int | None = None,
     grpc_port: int | None = None,
     instruments: set[Instruments] | None = None,

@@ -465,6 +474,10 @@ def evaluate(
             Useful if self-hosted elsewhere. Do NOT include the\
             port, use `http_port` and `grpc_port` instead.
             Defaults to "https://api.lmnr.ai".
+        base_http_url (str | None, optional): The base HTTP URL for Laminar API.\
+            Only set this if your Laminar backend HTTP is proxied\
+            through a different host. If not specified, defaults\
+            to https://api.lmnr.ai.
         http_port (int | None, optional): The port for Laminar API's HTTP\
             service. 443 is used if not specified.
             Defaults to None.

@@ -488,6 +501,7 @@ def evaluate(
         concurrency_limit=concurrency_limit,
         project_api_key=project_api_key,
         base_url=base_url,
+        base_http_url=base_http_url,
         http_port=http_port,
         grpc_port=grpc_port,
         instruments=instruments,
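All of the evaluations.py hunks serve one feature: a `base_http_url` parameter flows from `evaluate()` through `Evaluation.__init__` into `L.initialize()`, so self-hosted deployments can proxy the HTTP API through a different host than the gRPC endpoint. A hedged sketch of the resulting call shape; the hostnames and datapoint format are illustrative placeholders, not verified against the 0.7.0 docs:

```python
from lmnr import evaluate

evaluate(
    data=[{"data": {"question": "What is 2 + 2?"}, "target": "4"}],
    executor=lambda data: "4",  # placeholder executor
    evaluators={"exact": lambda output, target: float(output == target)},
    base_url="https://laminar.internal",           # gRPC/export host, no port
    base_http_url="https://api.laminar.internal",  # new in 0.7.0: separate HTTP host
)
```

Note from the constructor change that `http_port` (default 443) is appended to whichever base wins (`base_http_url or base_url`), so neither URL should include a port.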