lmnr 0.6.8__py3-none-any.whl → 0.6.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/cli.py +50 -24
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +90 -15
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +81 -58
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +119 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -0
- lmnr/opentelemetry_lib/tracing/context_properties.py +8 -1
- lmnr/opentelemetry_lib/tracing/instruments.py +2 -0
- lmnr/opentelemetry_lib/tracing/processor.py +1 -1
- lmnr/sdk/evaluations.py +4 -3
- lmnr/sdk/types.py +2 -2
- lmnr/version.py +1 -1
- {lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/METADATA +55 -55
- {lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/RECORD +17 -15
- {lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/LICENSE +0 -0
- {lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/WHEEL +0 -0
- {lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/entry_points.txt +0 -0
lmnr/cli.py
CHANGED
@@ -1,6 +1,8 @@
 from argparse import ArgumentParser
 import asyncio
+import glob
 import importlib.util
+import json
 import os
 import re
 import sys
@@ -23,27 +25,27 @@ def add_cursor_rules():
     """Download laminar.mdc file from a hardcoded public URL and save it to .cursor/rules/laminar.mdc"""
     # Hardcoded URL for the laminar.mdc file
     url = "https://raw.githubusercontent.com/lmnr-ai/lmnr/dev/rules/laminar.mdc"
-
+
     # Create .cursor/rules directory if it doesn't exist
     rules_dir = Path(".cursor/rules")
     rules_dir.mkdir(parents=True, exist_ok=True)
-
+
     # Define the target file path
     target_file = rules_dir / "laminar.mdc"
-
+
     try:
         LOG.info(f"Downloading laminar.mdc from {url}")
-
+
         # Download the file
         with urllib.request.urlopen(url) as response:
             content = response.read()
-
+
         # Write the content to the target file (this will overwrite if it exists)
-        with open(target_file,
+        with open(target_file, "wb") as f:
             f.write(content)
-
+
         LOG.info(f"Successfully downloaded laminar.mdc to {target_file}")
-
+
     except urllib.error.URLError as e:
         LOG.error(f"Failed to download file from {url}: {e}")
         sys.exit(1)
@@ -55,7 +57,7 @@ def add_cursor_rules():
 async def run_evaluation(args):
     sys.path.append(os.getcwd())

-    if args.file
+    if len(args.file) == 0:
         files = [
             os.path.join(EVAL_DIR, f)
             for f in os.listdir(EVAL_DIR)
@@ -71,9 +73,17 @@ async def run_evaluation(args):
         LOG.info(f"Located {len(files)} evaluation files in {EVAL_DIR}")

     else:
-        files = [
+        files = []
+        for pattern in args.file:
+            matches = glob.glob(pattern)
+            if matches:
+                files.extend(matches)
+            else:
+                # If no matches found, treat as literal filename
+                files.append(pattern)

     prep_token = PREPARE_ONLY.set(True)
+    scores = []
     try:
         for file in files:
             LOG.info(f"Running evaluation from {file}")
@@ -83,9 +93,9 @@ async def run_evaluation(args):
             spec = importlib.util.spec_from_file_location(name, file)
             if spec is None or spec.loader is None:
                 LOG.error(f"Could not load module specification from {file}")
-                if args.
-
-
+                if args.continue_on_error:
+                    continue
+                return
             mod = importlib.util.module_from_spec(spec)
             sys.modules[name] = mod
@@ -93,19 +103,29 @@ async def run_evaluation(args):
             evaluations: list[Evaluation] | None = EVALUATION_INSTANCES.get()
             if evaluations is None:
                 LOG.warning("Evaluation instance not found")
-                if args.
-
-
+                if args.continue_on_error:
+                    continue
+                return

             LOG.info(f"Loaded {len(evaluations)} evaluations from {file}")

             for evaluation in evaluations:
                 try:
-                    await evaluation.run()
+                    eval_scores = await evaluation.run()
+                    scores.append(
+                        {
+                            "file": file,
+                            "scores": eval_scores,
+                        }
+                    )
                 except Exception as e:
                     LOG.error(f"Error running evaluation: {e}")
-                    if args.
+                    if not args.continue_on_error:
                         raise
+
+        if args.output_file:
+            with open(args.output_file, "w") as f:
+                json.dump(scores, f, indent=2)
     finally:
         PREPARE_ONLY.reset(prep_token)
@@ -125,21 +145,27 @@ def cli():
     )
     parser_eval.add_argument(
         "file",
-        nargs="
-        help="
+        nargs="*",
+        help="Files or a file containing the evaluation to run."
         + "If no file name is provided, all evaluation files in the `evals` directory are run as long"
         + "as they match *_eval.py or eval_*.py",
-        default=
+        default=[],
     )

     parser_eval.add_argument(
-        "--
+        "--continue-on-error",
         action="store_true",
         default=False,
-        help="
+        help="Continue execution upon errors",
+    )
+
+    parser_eval.add_argument(
+        "--output-file",
+        help="Output file to write the results to. Outputs are written in JSON format.",
+        nargs="?",
     )

-
+    subparsers.add_parser(
         "add-cursor-rules",
         description="Download laminar.mdc file and add it to .cursor/rules",
         help="Download laminar.mdc file and add it to .cursor/rules",
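Taken together, the cli.py changes mean the eval file arguments now accept glob patterns (falling back to literal filenames), --continue-on-error keeps the run going past broken files, and --output-file dumps the collected scores as JSON. A minimal sketch of reading that output back, assuming a results.json path was passed to --output-file (the path and metric names are illustrative; the list-of-{"file", "scores"} layout is exactly what json.dump writes above):

    import json

    # Path passed to `lmnr eval ... --output-file results.json` (hypothetical name).
    with open("results.json") as f:
        results = json.load(f)

    for entry in results:
        # entry["scores"] is the dict returned by Evaluation.run() for that file.
        for metric, value in entry["scores"].items():
            print(f"{entry['file']}: {metric} = {value}")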
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py
CHANGED
@@ -1,6 +1,7 @@
 """OpenTelemetry Google Generative AI API instrumentation"""

 from collections import defaultdict
+import json
 import logging
 import os
 from typing import AsyncGenerator, Callable, Collection, Generator
@@ -11,7 +12,9 @@ from .config import (
     Config,
 )
 from .utils import (
+    ProcessedContentPart,
     dont_throw,
+    get_content,
     role_from_content_union,
     set_span_attribute,
     process_content_union,
@@ -159,7 +162,7 @@ def _set_request_attributes(span, args, kwargs):
         set_span_attribute(
             span,
             f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
-            process_content_union(system_instruction),
+            (get_content(process_content_union(system_instruction)) or {}).get("text", ""),
         )
         set_span_attribute(
             span, f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role", "system"
@@ -169,11 +172,42 @@ def _set_request_attributes(span, args, kwargs):
     if not isinstance(contents, list):
         contents = [contents]
     for content in contents:
+        processed_content = process_content_union(content)
+        content_str = get_content(processed_content)
         set_span_attribute(
             span,
             f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
-
+            (
+                content_str
+                if isinstance(content_str, str)
+                else json.dumps(content_str)
+            ),
         )
+        blocks = (
+            processed_content
+            if isinstance(processed_content, list)
+            else [processed_content]
+        )
+        for j, block in enumerate(blocks):
+            block_dict = to_dict(block)
+            if not block_dict.get("function_call"):
+                continue
+            function_call = to_dict(block_dict.get("function_call", {}))
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.name",
+                function_call.get("name"),
+            )
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.id",
+                function_call.get("id"),
+            )
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.arguments",
+                json.dumps(function_call.get("arguments")),
+            )
         set_span_attribute(
             span,
             f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role",
@@ -218,23 +252,64 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
     )

     if should_send_prompts():
-
-
+        set_span_attribute(
+            span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "model"
+        )
+        candidates_list = candidates if isinstance(candidates, list) else [candidates]
+        for i, candidate in enumerate(candidates_list):
+            processed_content = process_content_union(candidate.content)
+            if isinstance(processed_content, list):
+                if all(
+                    isinstance(item, dict) and item.get("type") == "text"
+                    for item in processed_content
+                ):
+                    content_str = processed_content[0]["text"]
+                elif all(
+                    isinstance(item, ProcessedContentPart) and item.content
+                    for item in processed_content
+                ):
+                    content_str = processed_content[0].content
+                else:
+                    content_str = get_content(processed_content)
+            else:
+                content_str = get_content(processed_content)
+            set_span_attribute(
+                span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "model"
+            )
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
+                (
+                    content_str
+                    if isinstance(content_str, str)
+                    else json.dumps(content_str)
+                ),
+            )
+            blocks = (
+                processed_content
+                if isinstance(processed_content, list)
+                else [processed_content]
+            )
+            for j, block in enumerate(blocks):
+                block_dict = to_dict(block)
+                if not block_dict.get("function_call"):
+                    continue
+                function_call = to_dict(block_dict.get("function_call", {}))
                 set_span_attribute(
                     span,
-                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.
-
+                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.name",
+                    function_call.get("name"),
                 )
                 set_span_attribute(
-                    span,
+                    span,
+                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.id",
+                    function_call.get("id"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.arguments",
+                    json.dumps(function_call.get("arguments")),
                 )
-        else:
-            set_span_attribute(
-                span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.content", response.text
-            )
-            set_span_attribute(
-                span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "assistant"
-            )


 @dont_throw
@@ -433,7 +508,7 @@ class GoogleGenAiSdkInstrumentor(BaseInstrumentor):

     def _instrument(self, **kwargs):
         tracer_provider = kwargs.get("tracer_provider")
-        tracer = get_tracer(__name__, "0.0.
+        tracer = get_tracer(__name__, "0.0.1a1", tracer_provider)

         for wrapped_method in WRAPPED_METHODS:
             wrap_function_wrapper(
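For orientation, a prompt turn that carries a Gemini function call now lands on the span roughly as the dict below shows. This is a sketch only: it assumes GEN_AI_PROMPT resolves to the conventional "gen_ai.prompt" prefix, and the indices and tool-call values are invented.

    # Illustrative attribute keys and values, not captured from a real span.
    expected_prompt_attributes = {
        "gen_ai.prompt.1.role": "model",
        "gen_ai.prompt.1.tool_calls.0.name": "get_weather",
        "gen_ai.prompt.1.tool_calls.0.id": "call_1",
        "gen_ai.prompt.1.tool_calls.0.arguments": '{"city": "Paris"}',
    }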
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py
CHANGED
@@ -1,6 +1,5 @@
 import logging
 import traceback
-import json

 from .config import (
     Config,
@@ -9,7 +8,28 @@ from google.genai import types
 from google.genai._common import BaseModel
 import pydantic
 from opentelemetry.trace import Span
-from typing import Any
+from typing import Any, Literal
+
+
+class ToolCall(pydantic.BaseModel):
+    name: str | None = pydantic.Field(default=None)
+    id: str | None = pydantic.Field(default=None)
+    arguments: dict[str, Any] = pydantic.Field(default={})
+
+
+class ImageUrlInner(pydantic.BaseModel):
+    url: str = pydantic.Field(default="")
+
+
+class ImageUrl(pydantic.BaseModel):
+    type: Literal["image_url"] = pydantic.Field(default="image_url")
+    image_url: ImageUrlInner = pydantic.Field(default=ImageUrlInner())
+
+
+class ProcessedContentPart(pydantic.BaseModel):
+    content: str | None = pydantic.Field(default=None)
+    function_call: ToolCall | None = pydantic.Field(default=None)
+    image_url: ImageUrl | None = pydantic.Field(default=None)


 def set_span_attribute(span: Span, name: str, value: str):
@@ -58,36 +78,40 @@ def to_dict(obj: BaseModel | pydantic.BaseModel | dict) -> dict[str, Any]:
         return dict(obj)


-def
-    content:
-
-
-
-
-
-    if
-
-
-
-
-
-
-
-
-
-
-
+def get_content(
+    content: (
+        ProcessedContentPart | dict | list[ProcessedContentPart | dict] | str | None
+    ),
+) -> list[Any] | None:
+    if isinstance(content, dict):
+        return content.get("content") or content.get("image_url")
+    if isinstance(content, ProcessedContentPart):
+        if content.content and isinstance(content.content, str):
+            return {
+                "type": "text",
+                "text": content.content,
+            }
+        elif content.image_url:
+            return content.image_url.model_dump()
+        else:
+            return None
+    elif isinstance(content, list):
+        return [get_content(item) or "" for item in content if item is not None]
+    elif isinstance(content, str):
+        return {
+            "type": "text",
+            "text": content,
+        }
     else:
         return None


-def
+def process_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
     trace_id: str | None = None,
     span_id: str | None = None,
     message_index: int = 0,
-) ->
+) -> ProcessedContentPart | dict | list[ProcessedContentPart | dict] | None:
     if isinstance(content, types.Content):
         parts = to_dict(content).get("parts", [])
         return [_process_part(part) for part in parts]
@@ -116,9 +140,9 @@ def _process_part_union(
     span_id: str | None = None,
     message_index: int = 0,
     content_index: int = 0,
-) ->
+) -> ProcessedContentPart | dict | None:
     if isinstance(content, str):
-        return content
+        return ProcessedContentPart(content=content)
     elif isinstance(content, types.File):
         content_dict = to_dict(content)
         name = (
@@ -126,7 +150,7 @@ def _process_part_union(
             or content_dict.get("display_name")
             or content_dict.get("uri")
         )
-        return f"files/{name}"
+        return ProcessedContentPart(content=f"files/{name}")
     elif isinstance(content, (types.Part, dict)):
         return _process_part(content, trace_id, span_id, message_index, content_index)
     else:
@@ -139,11 +163,9 @@ def _process_part(
     span_id: str | None = None,
     message_index: int = 0,
     content_index: int = 0,
-) ->
+) -> ProcessedContentPart | dict | None:
     part_dict = to_dict(content)
-    if part_dict.get("
-        return part_dict.get("text")
-    elif part_dict.get("inline_data"):
+    if part_dict.get("inline_data"):
         blob = to_dict(part_dict.get("inline_data"))
         if blob.get("mime_type").startswith("image/"):
             return _process_image_item(
@@ -151,7 +173,19 @@ def _process_part(
             )
         else:
             # currently, only images are supported
-            return
+            return ProcessedContentPart(
+                content=blob.get("mime_type") or "unknown_media"
+            )
+    elif part_dict.get("function_call"):
+        return ProcessedContentPart(
+            function_call=ToolCall(
+                name=part_dict.get("function_call").get("name"),
+                id=part_dict.get("function_call").get("id"),
+                arguments=part_dict.get("function_call").get("args", {}),
+            )
+        )
+    elif part_dict.get("text") is not None:
+        return ProcessedContentPart(content=part_dict.get("text"))
     else:
         return None
@@ -159,12 +193,17 @@ def _process_part(
 def role_from_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
 ) -> str | None:
+    role = None
     if isinstance(content, types.Content):
-
+        role = to_dict(content).get("role")
     elif isinstance(content, list) and len(content) > 0:
-
+        role = role_from_content_union(content[0])
+    elif isinstance(content, dict):
+        role = content.get("role")
     else:
         return None
+    return role
+    # return "assistant" if role == "model" else role


 def with_tracer_wrapper(func):
@@ -179,38 +218,22 @@ def with_tracer_wrapper(func):
     return _with_tracer


-def _run_async(method):
-    import asyncio
-    import threading
-
-    try:
-        loop = asyncio.get_running_loop()
-    except RuntimeError:
-        loop = None
-
-    if loop and loop.is_running():
-        thread = threading.Thread(target=lambda: asyncio.run(method))
-        thread.start()
-        thread.join()
-    else:
-        asyncio.run(method)
-
-
 def _process_image_item(
     blob: dict[str, Any],
     trace_id: str,
     span_id: str,
     message_index: int,
     content_index: int,
-):
+) -> ProcessedContentPart | dict | None:
     # Convert to openai format, so backends can handle it
     return (
-
-
-
-
-
-
+        ProcessedContentPart(
+            image_url=ImageUrl(
+                image_url=ImageUrlInner(
+                    url=f"data:image/{blob.get('mime_type').split('/')[1]};base64,{blob.get('data')}",
+                )
+            )
+        )
         if Config.convert_image_to_openai_format
         else blob
     )
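A quick sketch of what the reworked part helpers return, assuming _process_part and get_content are imported from this utils module (the literal dicts stand in for google.genai Part objects, which _process_part also accepts, and the values are invented):

    text_part = _process_part({"text": "Hello"})
    # -> ProcessedContentPart(content="Hello", function_call=None, image_url=None)

    call_part = _process_part(
        {"function_call": {"name": "get_weather", "id": "call_1", "args": {"city": "Paris"}}}
    )
    # -> ProcessedContentPart(function_call=ToolCall(name="get_weather", id="call_1",
    #                                                arguments={"city": "Paris"}))

    # get_content() normalizes text parts into an OpenAI-style block; parts that only
    # carry a function_call yield None and are surfaced via the tool_calls attributes.
    get_content(text_part)  # -> {"type": "text", "text": "Hello"}
    get_content(call_part)  # -> None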
lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py
ADDED
@@ -0,0 +1,119 @@
+"""OpenTelemetry Langgraph instrumentation"""
+
+import json
+import logging
+from typing import Collection
+
+from .utils import (
+    with_tracer_wrapper,
+)
+
+from langchain_core.runnables.graph import Graph
+from opentelemetry.trace import Tracer
+from wrapt import wrap_function_wrapper
+from opentelemetry.trace import get_tracer, get_current_span
+
+from lmnr.opentelemetry_lib.tracing.context_properties import (
+    update_association_properties,
+)
+
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import unwrap
+
+
+logger = logging.getLogger(__name__)
+
+_instruments = ("langgraph >= 0.1.0",)
+
+
+@with_tracer_wrapper
+def wrap_pregel_stream(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+    graph: Graph = instance.get_graph()
+    nodes = [
+        {
+            "id": node.id,
+            "name": node.name,
+            "metadata": node.metadata,
+        }
+        for node in graph.nodes.values()
+    ]
+    edges = [
+        {
+            "source": edge.source,
+            "target": edge.target,
+            "conditional": edge.conditional,
+        }
+        for edge in graph.edges
+    ]
+    update_association_properties(
+        {
+            "langgraph.edges": json.dumps(edges),
+            "langgraph.nodes": json.dumps(nodes),
+        },
+    )
+    return wrapped(*args, **kwargs)
+
+
+@with_tracer_wrapper
+async def async_wrap_pregel_stream(
+    tracer: Tracer, to_wrap, wrapped, instance, args, kwargs
+):
+    graph: Graph = await instance.aget_graph()
+    nodes = [
+        {
+            "id": node.id,
+            "name": node.name,
+            "metadata": node.metadata,
+        }
+        for node in graph.nodes.values()
+    ]
+    edges = [
+        {
+            "source": edge.source,
+            "target": edge.target,
+            "conditional": edge.conditional,
+        }
+        for edge in graph.edges
+    ]
+    update_association_properties(
+        {
+            "langgraph.edges": json.dumps(edges),
+            "langgraph.nodes": json.dumps(nodes),
+        },
+    )
+    return await wrapped(*args, **kwargs)
+
+
+class LanggraphInstrumentor(BaseInstrumentor):
+    """An instrumentor for Langgraph."""
+
+    def __init__(self):
+        super().__init__()
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, "0.0.1a0", tracer_provider)
+
+        wrap_function_wrapper(
+            module="langgraph.pregel",
+            name="Pregel.stream",
+            wrapper=wrap_pregel_stream(tracer, "Pregel.stream"),
+        )
+        wrap_function_wrapper(
+            module="langgraph.pregel",
+            name="Pregel.astream",
+            wrapper=async_wrap_pregel_stream(tracer, "Pregel.astream"),
+        )
+
+    def _uninstrument(self, **kwargs):
+        unwrap(
+            module="langgraph.pregel",
+            name="Pregel.stream",
+        )
+        unwrap(
+            module="langgraph.pregel",
+            name="Pregel.astream",
+        )
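Normally the initializer added to _instrument_initializers.py (further below) wires this instrumentor up automatically, but it can also be exercised on its own. A minimal sketch, assuming langgraph and langchain-core are installed and a tracer provider has already been configured:

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.langgraph import (
        LanggraphInstrumentor,
    )

    # BaseInstrumentor.instrument() applies the Pregel.stream / Pregel.astream wrappers.
    LanggraphInstrumentor().instrument()

    # From now on, each graph.stream()/graph.astream() call serializes the compiled
    # graph's nodes and edges into the langgraph.nodes / langgraph.edges association
    # properties before delegating to the original method.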
lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py
ADDED
@@ -0,0 +1,60 @@
+import logging
+import traceback
+
+import pydantic
+from opentelemetry.trace import Span
+from typing import Any
+
+
+def set_span_attribute(span: Span, name: str, value: str):
+    if value is not None:
+        if value != "":
+            span.set_attribute(name, value)
+    return
+
+
+def dont_throw(func):
+    """
+    A decorator that wraps the passed in function and logs exceptions instead of throwing them.
+
+    @param func: The function to wrap
+    @return: The wrapper function
+    """
+    # Obtain a logger specific to the function's module
+    logger = logging.getLogger(func.__module__)
+
+    def wrapper(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception:
+            logger.debug(
+                "Laminar failed to trace in %s, error: %s",
+                func.__name__,
+                traceback.format_exc(),
+            )
+
+    return wrapper
+
+
+def to_dict(obj: pydantic.BaseModel | dict) -> dict[str, Any]:
+    try:
+        if isinstance(obj, pydantic.BaseModel):
+            return obj.model_dump()
+        elif isinstance(obj, dict):
+            return obj
+        else:
+            return dict(obj)
+    except Exception:
+        return dict(obj)
+
+
+def with_tracer_wrapper(func):
+    """Helper for providing tracer for wrapper functions."""
+
+    def _with_tracer(tracer, to_wrap):
+        def wrapper(wrapped, instance, args, kwargs):
+            return func(tracer, to_wrap, wrapped, instance, args, kwargs)
+
+        return wrapper
+
+    return _with_tracer
lmnr/opentelemetry_lib/tracing/_instrument_initializers.py
CHANGED
@@ -171,6 +171,18 @@ class LangchainInstrumentorInitializer(InstrumentorInitializer):
         return LangchainInstrumentor()


+class LanggraphInstrumentorInitializer(InstrumentorInitializer):
+    def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
+        if not is_package_installed("langgraph"):
+            return None
+        if not is_package_installed("langchain-core"):
+            return None
+
+        from ..opentelemetry.instrumentation.langgraph import LanggraphInstrumentor
+
+        return LanggraphInstrumentor()
+
+
 class LlamaIndexInstrumentorInitializer(InstrumentorInitializer):
     def init_instrumentor(self, *args, **kwargs) -> BaseInstrumentor | None:
         if not (
lmnr/opentelemetry_lib/tracing/context_properties.py
CHANGED
@@ -6,7 +6,7 @@ from lmnr.opentelemetry_lib.tracing.attributes import (
 )

 from opentelemetry.context import Context, attach, set_value, get_value
-from opentelemetry.trace import Span
+from opentelemetry.sdk.trace import Span
 from opentelemetry import trace

@@ -51,8 +51,15 @@ def remove_association_properties(properties: dict) -> None:


 def _set_association_properties_attributes(span: Span, properties: dict) -> None:
+    if not span.is_recording():
+        return
     for key, value in properties.items():
         if key == TRACING_LEVEL:
             span.set_attribute(f"lmnr.internal.{TRACING_LEVEL}", value)
             continue
+        if (
+            key in ["langgraph.edges", "langgraph.nodes"]
+            and span.name != "LangGraph.workflow"
+        ):
+            continue
         span.set_attribute(f"{ASSOCIATION_PROPERTIES}.{key}", value)
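In effect, the guarded attribute writer behaves as sketched below (ASSOCIATION_PROPERTIES is assumed to expand to the SDK's usual association-properties prefix, and user_id is a made-up custom property):

    _set_association_properties_attributes(
        span,
        {
            "langgraph.nodes": "[...]",  # JSON produced by the langgraph instrumentation
            "user_id": "u-123",          # hypothetical custom property
        },
    )
    # On a recording span named "LangGraph.workflow" both keys are written under the
    # ASSOCIATION_PROPERTIES prefix; on any other span the langgraph.* keys are skipped
    # and only user_id is attached. Non-recording spans are left untouched entirely.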
lmnr/opentelemetry_lib/tracing/instruments.py
CHANGED
@@ -26,6 +26,7 @@ class Instruments(Enum):
     HAYSTACK = "haystack"
     LANCEDB = "lancedb"
     LANGCHAIN = "langchain"
+    LANGGRAPH = "langgraph"
     LLAMA_INDEX = "llama_index"
     MARQO = "marqo"
     MCP = "mcp"
@@ -62,6 +63,7 @@ INSTRUMENTATION_INITIALIZERS: dict[
     Instruments.HAYSTACK: initializers.HaystackInstrumentorInitializer(),
     Instruments.LANCEDB: initializers.LanceDBInstrumentorInitializer(),
     Instruments.LANGCHAIN: initializers.LangchainInstrumentorInitializer(),
+    Instruments.LANGGRAPH: initializers.LanggraphInstrumentorInitializer(),
     Instruments.LLAMA_INDEX: initializers.LlamaIndexInstrumentorInitializer(),
     Instruments.MARQO: initializers.MarqoInstrumentorInitializer(),
     Instruments.MCP: initializers.MCPInstrumentorInitializer(),
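With the enum member registered, the new instrumentation can be opted into (or out of) like the existing ones. A hedged sketch, assuming Laminar.initialize still accepts an instruments set as in prior releases:

    from lmnr import Instruments, Laminar

    # Assumption: the `instruments` parameter selects which initializers run.
    Laminar.initialize(
        project_api_key="...",
        instruments={Instruments.LANGCHAIN, Instruments.LANGGRAPH},
    )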
lmnr/opentelemetry_lib/tracing/processor.py
CHANGED
@@ -6,7 +6,7 @@ from opentelemetry.sdk.trace.export import (
     BatchSpanProcessor,
     SimpleSpanProcessor,
 )
-from opentelemetry.trace import Span
+from opentelemetry.sdk.trace import Span
 from opentelemetry.context import Context, get_value, get_current, set_value

 from lmnr.opentelemetry_lib.tracing.attributes import (
lmnr/sdk/evaluations.py
CHANGED
@@ -233,10 +233,10 @@ class Evaluation:
             export_timeout_seconds=trace_export_timeout_seconds,
         )

-    async def run(self) -> Awaitable[
+    async def run(self) -> Awaitable[dict[str, int | float]]:
         return await self._run()

-    async def _run(self) ->
+    async def _run(self) -> dict[str, int | float]:
         if isinstance(self.data, LaminarDataset):
             self.data.set_client(
                 LaminarClient(
@@ -261,11 +261,12 @@ class Evaluation:
         except Exception as e:
             self.reporter.stopWithError(e)
             await self._shutdown()
-            return
+            return {}

         average_scores = get_average_scores(result_datapoints)
         self.reporter.stop(average_scores, evaluation.projectId, evaluation.id)
         await self._shutdown()
+        return average_scores

     async def _shutdown(self):
         # We use flush() instead of shutdown() because multiple evaluations
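Because _run() now returns the average scores (and run() is typed accordingly), callers that drive an Evaluation directly can read them back. A minimal sketch, assuming my_eval is an Evaluation instance built the usual way:

    average_scores = await my_eval.run()
    # e.g. {"accuracy": 0.87, "relevance": 0.6} (keys are your evaluator names; values illustrative)
    for name, value in average_scores.items():
        print(name, value)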
lmnr/sdk/types.py
CHANGED
@@ -81,7 +81,7 @@ class PartialEvaluationDatapoint(pydantic.BaseModel):
                 "traceId": str(self.trace_id),
                 "executorSpanId": str(self.executor_span_id),
                 "metadata": (
-                    serialize(self.metadata) if self.metadata is not None else
+                    serialize(self.metadata) if self.metadata is not None else {}
                 ),
             }
         except Exception as e:
@@ -123,7 +123,7 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
                 "executorSpanId": str(self.executor_span_id),
                 "index": self.index,
                 "metadata": (
-                    serialize(self.metadata) if self.metadata is not None else
+                    serialize(self.metadata) if self.metadata is not None else {}
                 ),
             }
         except Exception as e:
lmnr/version.py
CHANGED
{lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: lmnr
-Version: 0.6.8
+Version: 0.6.9
 Summary: Python SDK for Laminar
 License: Apache-2.0
 Author: lmnr.ai
@@ -46,61 +46,61 @@ Requires-Dist: httpx (>=0.25.0)
 Requires-Dist: opentelemetry-api (>=1.33.0)
 Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.33.0)
 Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.33.0)
-Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.
+Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.8) ; extra == "alephalpha"
+Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.8) ; extra == "anthropic"
+Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.8) ; extra == "bedrock"
+Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.8) ; extra == "chromadb"
+Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.8) ; extra == "cohere"
+Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.8) ; extra == "crewai"
+Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.8) ; extra == "google-generativeai"
+Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.8) ; extra == "groq"
+Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.8) ; extra == "haystack"
+Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.8) ; extra == "lancedb"
+Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.8) ; extra == "langchain"
+Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.8) ; extra == "llamaindex"
+Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.8) ; extra == "marqo"
+Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.8) ; extra == "mcp"
+Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.8) ; extra == "milvus"
+Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.8) ; extra == "mistralai"
+Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.8) ; extra == "ollama"
+Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.8) ; extra == "openai"
+Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.8) ; extra == "pinecone"
+Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.8) ; extra == "qdrant"
+Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.8) ; extra == "replicate"
+Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.8) ; extra == "sagemaker"
 Requires-Dist: opentelemetry-instrumentation-threading (>=0.54b0)
-Requires-Dist: opentelemetry-instrumentation-together (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-together (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.
+Requires-Dist: opentelemetry-instrumentation-together (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-together (>=0.40.8) ; extra == "together"
+Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.8) ; extra == "transformers"
+Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.8) ; extra == "vertexai"
+Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.8) ; extra == "watsonx"
+Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.8) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.8) ; extra == "weaviate"
 Requires-Dist: opentelemetry-sdk (>=1.33.0)
 Requires-Dist: opentelemetry-semantic-conventions (>=0.54b0)
 Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.8)
{lmnr-0.6.8.dist-info → lmnr-0.6.9.dist-info}/RECORD
CHANGED
@@ -1,18 +1,20 @@
 lmnr/__init__.py,sha256=eJ-gIHEk8KV-BeaU8c9spQww_T2G5_OMu4F8JEzngvA,1281
-lmnr/cli.py,sha256=
+lmnr/cli.py,sha256=X5YVOBjitRdZPfcF3qxR4SflgTMZ2wsW6s_yXgvdxkU,5621
 lmnr/opentelemetry_lib/.flake8,sha256=bCxuDlGx3YQ55QHKPiGJkncHanh9qGjQJUujcFa3lAU,150
 lmnr/opentelemetry_lib/__init__.py,sha256=E_NwAWxh3hckZjXTA80hOzmRUL8RvnSYRdcPExwVROc,2056
 lmnr/opentelemetry_lib/decorators/__init__.py,sha256=45HVoYnHC1Y9D_VSkioDbqD3gm4RPC5sKoztomBI5j8,8496
-lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=6Fvkc_zZEX1lk8g6ZGFrADLNOL055pkMdO-hEef8qBY,18525
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py,sha256=25zevJ7g3MtJP_5gju3jBH7-wg7SbDkktysuUO29ksI,245
-lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=ICQENOiICTKodjZVHhq3H5RIRY5bbuWp_KmzkDNgDRM,7471
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py,sha256=khLC07QIsU15Sv8_Ax0ePjeTg7yP0vwhxvJIAqhU_4A,3099
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py,sha256=nf9sJZXnnts4gYZortEiDvwYjYqYJZTAT0zutuP_R6Y,1512
 lmnr/opentelemetry_lib/tracing/__init__.py,sha256=dy3zrgeiZmLX-aLXuRUW8-RGR4FaIdRRKKEVR0eItGs,5662
-lmnr/opentelemetry_lib/tracing/_instrument_initializers.py,sha256=
+lmnr/opentelemetry_lib/tracing/_instrument_initializers.py,sha256=RYSp4PxrF8yfG5qy0ALpx1EArLFBPDLQd6H6YeYw184,14567
 lmnr/opentelemetry_lib/tracing/attributes.py,sha256=MvowVluXfCqSIC3Cvx3tWDqB0Cpr9bpSlY91qL4Iy74,1497
-lmnr/opentelemetry_lib/tracing/context_properties.py,sha256=
+lmnr/opentelemetry_lib/tracing/context_properties.py,sha256=aWbvMdWB4Q7uqc0GGSsjcRXMTcO18aWOaIZe3QyS_aA,2314
 lmnr/opentelemetry_lib/tracing/exporter.py,sha256=avjmH9hz6PXbMV2bnx7b9GuGC6flAUDwvcxoZMbTWgM,2082
-lmnr/opentelemetry_lib/tracing/instruments.py,sha256=
-lmnr/opentelemetry_lib/tracing/processor.py,sha256=
+lmnr/opentelemetry_lib/tracing/instruments.py,sha256=dsStEEAFfZ70MHn6WcmRCdDn9Yg-aCvfA6VJs_IvOOs,5267
+lmnr/opentelemetry_lib/tracing/processor.py,sha256=YJ_2j02oU_YZD8cuDltXkCU_fN1hcACI61X5qgCZd7s,3441
 lmnr/opentelemetry_lib/tracing/tracer.py,sha256=oNC6V8eFvuK3i5IWXsKDjEMFL_axeSov3L1fPevwuWM,476
 lmnr/opentelemetry_lib/utils/__init__.py,sha256=pNhf0G3vTd5ccoc03i1MXDbricSaiqCbi1DLWhSekK8,604
 lmnr/opentelemetry_lib/utils/json_encoder.py,sha256=dK6b_axr70IYL7Vv-bu4wntvDDuyntoqsHaddqX7P58,463
@@ -43,14 +45,14 @@ lmnr/sdk/client/synchronous/sync_client.py,sha256=kPS14M0e99xMtLQ_yEOJrFpQWhstqA
 lmnr/sdk/datasets.py,sha256=jl5Wj5nEI9pww4Jwn4XKF8h0gXBU4TOIrhqNjTJsHZQ,1709
 lmnr/sdk/decorators.py,sha256=1uu9xxBYgblFqlhQqH17cZYq7babAmB1lEtvBgTsP0E,4468
 lmnr/sdk/eval_control.py,sha256=KROUrDhcZTrptRZ-hxvr60_o_Gt_8u045jb4cBXcuoY,184
-lmnr/sdk/evaluations.py,sha256=
+lmnr/sdk/evaluations.py,sha256=ORGfoyxGJmqBTW3yd1mslr4TK0rWlySZLT4GlC6gVuI,21313
 lmnr/sdk/laminar.py,sha256=Ha6pJSzUqZQBJNfDtEXwYuMcyRvmsZFj7y3A7f0-Y5I,33785
 lmnr/sdk/log.py,sha256=nt_YMmPw1IRbGy0b7q4rTtP4Yo3pQfNxqJPXK3nDSNQ,2213
-lmnr/sdk/types.py,sha256=
+lmnr/sdk/types.py,sha256=5tEX7yoemb9wYyXLy4aqdazudO5I8dglU5A-IegDhsQ,12653
 lmnr/sdk/utils.py,sha256=yrcHIhoADf9lWH9qJWZMmkRWYvd0DuxPSLP3mY6YFw0,4327
-lmnr/version.py,sha256=
-lmnr-0.6.
-lmnr-0.6.
-lmnr-0.6.
-lmnr-0.6.
-lmnr-0.6.
+lmnr/version.py,sha256=By46D14Qb3GmEacSJyQ3-_aLJmKB4PmhLldX9eaM1Ic,1321
+lmnr-0.6.9.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+lmnr-0.6.9.dist-info/METADATA,sha256=SYj_iCKcXXUwR8WgW6qs72jYvwVQCKnA9EjpzaqFjRQ,15131
+lmnr-0.6.9.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+lmnr-0.6.9.dist-info/entry_points.txt,sha256=K1jE20ww4jzHNZLnsfWBvU3YKDGBgbOiYG5Y7ivQcq4,37
+lmnr-0.6.9.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|