lmnr 0.6.8__py3-none-any.whl → 0.6.10__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lmnr/cli.py +66 -27
- lmnr/opentelemetry_lib/__init__.py +2 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +90 -15
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +81 -58
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +121 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +85 -80
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +12 -0
- lmnr/opentelemetry_lib/tracing/context_properties.py +8 -1
- lmnr/opentelemetry_lib/tracing/instruments.py +2 -0
- lmnr/opentelemetry_lib/tracing/processor.py +1 -1
- lmnr/sdk/client/asynchronous/resources/evals.py +4 -0
- lmnr/sdk/client/synchronous/resources/evals.py +4 -0
- lmnr/sdk/evaluations.py +13 -17
- lmnr/sdk/laminar.py +7 -0
- lmnr/sdk/types.py +2 -2
- lmnr/version.py +1 -1
- {lmnr-0.6.8.dist-info → lmnr-0.6.10.dist-info}/METADATA +55 -55
- {lmnr-0.6.8.dist-info → lmnr-0.6.10.dist-info}/RECORD +22 -20
- {lmnr-0.6.8.dist-info → lmnr-0.6.10.dist-info}/LICENSE +0 -0
- {lmnr-0.6.8.dist-info → lmnr-0.6.10.dist-info}/WHEEL +0 -0
- {lmnr-0.6.8.dist-info → lmnr-0.6.10.dist-info}/entry_points.txt +0 -0
lmnr/cli.py
CHANGED
@@ -1,6 +1,8 @@
 from argparse import ArgumentParser
 import asyncio
+import glob
 import importlib.util
+import json
 import os
 import re
 import sys
@@ -23,27 +25,27 @@ def add_cursor_rules():
     """Download laminar.mdc file from a hardcoded public URL and save it to .cursor/rules/laminar.mdc"""
     # Hardcoded URL for the laminar.mdc file
     url = "https://raw.githubusercontent.com/lmnr-ai/lmnr/dev/rules/laminar.mdc"
-
+
     # Create .cursor/rules directory if it doesn't exist
     rules_dir = Path(".cursor/rules")
     rules_dir.mkdir(parents=True, exist_ok=True)
-
+
     # Define the target file path
     target_file = rules_dir / "laminar.mdc"
-
+
     try:
         LOG.info(f"Downloading laminar.mdc from {url}")
-
+
         # Download the file
         with urllib.request.urlopen(url) as response:
             content = response.read()
-
+
         # Write the content to the target file (this will overwrite if it exists)
-        with open(target_file,
+        with open(target_file, "wb") as f:
             f.write(content)
-
+
         LOG.info(f"Successfully downloaded laminar.mdc to {target_file}")
-
+
     except urllib.error.URLError as e:
         LOG.error(f"Failed to download file from {url}: {e}")
         sys.exit(1)
@@ -55,7 +57,7 @@ def add_cursor_rules():
 async def run_evaluation(args):
     sys.path.append(os.getcwd())
 
-    if args.file
+    if len(args.file) == 0:
         files = [
             os.path.join(EVAL_DIR, f)
             for f in os.listdir(EVAL_DIR)
@@ -71,9 +73,17 @@ async def run_evaluation(args):
         LOG.info(f"Located {len(files)} evaluation files in {EVAL_DIR}")
 
     else:
-        files = [
+        files = []
+        for pattern in args.file:
+            matches = glob.glob(pattern)
+            if matches:
+                files.extend(matches)
+            else:
+                # If no matches found, treat as literal filename
+                files.append(pattern)
 
     prep_token = PREPARE_ONLY.set(True)
+    scores = []
     try:
         for file in files:
             LOG.info(f"Running evaluation from {file}")
@@ -83,33 +93,56 @@ async def run_evaluation(args):
             spec = importlib.util.spec_from_file_location(name, file)
             if spec is None or spec.loader is None:
                 LOG.error(f"Could not load module specification from {file}")
-                if args.
-
-
+                if args.continue_on_error:
+                    continue
+                return
             mod = importlib.util.module_from_spec(spec)
             sys.modules[name] = mod
 
             spec.loader.exec_module(mod)
-            evaluations
-
-
-            if
-
-
+            evaluations = []
+            try:
+                evaluations: list[Evaluation] | None = EVALUATION_INSTANCES.get()
+                if evaluations is None:
+                    raise LookupError()
+            # may be raised by `get()` or manually by us above
+            except LookupError:
+                log_evaluation_instance_not_found()
+                if args.continue_on_error:
+                    continue
+                return
 
             LOG.info(f"Loaded {len(evaluations)} evaluations from {file}")
 
             for evaluation in evaluations:
                 try:
-                    await evaluation.run()
+                    eval_scores = await evaluation.run()
+                    scores.append(
+                        {
+                            "file": file,
+                            "scores": eval_scores,
+                        }
+                    )
                 except Exception as e:
                     LOG.error(f"Error running evaluation: {e}")
-                    if args.
+                    if not args.continue_on_error:
                         raise
+
+        if args.output_file:
+            with open(args.output_file, "w") as f:
+                json.dump(scores, f, indent=2)
     finally:
         PREPARE_ONLY.reset(prep_token)
 
 
+def log_evaluation_instance_not_found():
+    LOG.warning(
+        "Evaluation instance not found. "
+        "`evaluate` must be called at the top level of the file, "
+        "not inside a function when running evaluations from the CLI."
+    )
+
+
 def cli():
     parser = ArgumentParser(
         prog="lmnr",
@@ -125,21 +158,27 @@ def cli():
     )
     parser_eval.add_argument(
         "file",
-        nargs="
-        help="
+        nargs="*",
+        help="Files or a file containing the evaluation to run."
         + "If no file name is provided, all evaluation files in the `evals` directory are run as long"
         + "as they match *_eval.py or eval_*.py",
-        default=
+        default=[],
     )
 
     parser_eval.add_argument(
-        "--
+        "--continue-on-error",
         action="store_true",
         default=False,
-        help="
+        help="Continue execution upon errors",
+    )
+
+    parser_eval.add_argument(
+        "--output-file",
+        help="Output file to write the results to. Outputs are written in JSON format.",
+        nargs="?",
    )
 
-
+    subparsers.add_parser(
         "add-cursor-rules",
         description="Download laminar.mdc file and add it to .cursor/rules",
         help="Download laminar.mdc file and add it to .cursor/rules",
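Taken together, the `lmnr eval` changes above mean the command now accepts any number of file arguments or glob patterns, can keep going after a failed file with `--continue-on-error`, and can write the collected scores to JSON with `--output-file`. Below is a minimal sketch of an evaluation file the CLI could discover and run; the file name, data, and evaluator are hypothetical, and the `evaluate(...)` keyword signature is assumed from the lmnr SDK rather than shown in this diff.

```python
# evals/eval_greeting.py -- hypothetical file; the name matches the eval_*.py
# pattern that `lmnr eval` falls back to when no files are passed.
from lmnr import evaluate


def executor(data: str) -> str:
    # Stand-in for the system under evaluation.
    return data.capitalize()


def exact_match(output: str, target: str) -> int:
    return int(output == target)


# Must be called at module top level: the CLI imports this file and then reads
# the Evaluation instances that `evaluate` registered during the import (this
# is what the new log_evaluation_instance_not_found warning is about).
evaluate(
    data=[{"data": "hello", "target": "Hello"}],
    executor=executor,
    evaluators={"exact_match": exact_match},
)
```

An invocation exercising the new flags might then be `lmnr eval 'evals/eval_*.py' --continue-on-error --output-file scores.json`; the quoted pattern is expanded by the CLI's own `glob.glob` call, and unmatched patterns are treated as literal file names.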
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py
CHANGED
@@ -1,6 +1,7 @@
 """OpenTelemetry Google Generative AI API instrumentation"""
 
 from collections import defaultdict
+import json
 import logging
 import os
 from typing import AsyncGenerator, Callable, Collection, Generator
@@ -11,7 +12,9 @@ from .config import (
     Config,
 )
 from .utils import (
+    ProcessedContentPart,
     dont_throw,
+    get_content,
     role_from_content_union,
     set_span_attribute,
     process_content_union,
@@ -159,7 +162,7 @@ def _set_request_attributes(span, args, kwargs):
         set_span_attribute(
             span,
             f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
-            process_content_union(system_instruction),
+            (get_content(process_content_union(system_instruction)) or {}).get("text", ""),
         )
         set_span_attribute(
             span, f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role", "system"
@@ -169,11 +172,42 @@ def _set_request_attributes(span, args, kwargs):
     if not isinstance(contents, list):
         contents = [contents]
     for content in contents:
+        processed_content = process_content_union(content)
+        content_str = get_content(processed_content)
         set_span_attribute(
             span,
             f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
-
+            (
+                content_str
+                if isinstance(content_str, str)
+                else json.dumps(content_str)
+            ),
         )
+        blocks = (
+            processed_content
+            if isinstance(processed_content, list)
+            else [processed_content]
+        )
+        for j, block in enumerate(blocks):
+            block_dict = to_dict(block)
+            if not block_dict.get("function_call"):
+                continue
+            function_call = to_dict(block_dict.get("function_call", {}))
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.name",
+                function_call.get("name"),
+            )
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.id",
+                function_call.get("id"),
+            )
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.arguments",
+                json.dumps(function_call.get("arguments")),
+            )
         set_span_attribute(
             span,
             f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role",
@@ -218,23 +252,64 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
     )
 
     if should_send_prompts():
-
-
+        set_span_attribute(
+            span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "model"
+        )
+        candidates_list = candidates if isinstance(candidates, list) else [candidates]
+        for i, candidate in enumerate(candidates_list):
+            processed_content = process_content_union(candidate.content)
+            if isinstance(processed_content, list):
+                if all(
+                    isinstance(item, dict) and item.get("type") == "text"
+                    for item in processed_content
+                ):
+                    content_str = processed_content[0]["text"]
+                elif all(
+                    isinstance(item, ProcessedContentPart) and item.content
+                    for item in processed_content
+                ):
+                    content_str = processed_content[0].content
+                else:
+                    content_str = get_content(processed_content)
+            else:
+                content_str = get_content(processed_content)
+            set_span_attribute(
+                span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "model"
+            )
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
+                (
+                    content_str
+                    if isinstance(content_str, str)
+                    else json.dumps(content_str)
+                ),
+            )
+            blocks = (
+                processed_content
+                if isinstance(processed_content, list)
+                else [processed_content]
+            )
+            for j, block in enumerate(blocks):
+                block_dict = to_dict(block)
+                if not block_dict.get("function_call"):
+                    continue
+                function_call = to_dict(block_dict.get("function_call", {}))
             set_span_attribute(
                 span,
-                f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.
-
+                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.name",
+                    function_call.get("name"),
             )
             set_span_attribute(
-                span,
+                    span,
+                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.id",
+                    function_call.get("id"),
+                )
+                set_span_attribute(
+                    span,
+                    f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.arguments",
+                    json.dumps(function_call.get("arguments")),
             )
-        else:
-            set_span_attribute(
-                span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.content", response.text
-            )
-            set_span_attribute(
-                span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "assistant"
-            )
 
 
 @dont_throw
@@ -433,7 +508,7 @@ class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
 
     def _instrument(self, **kwargs):
         tracer_provider = kwargs.get("tracer_provider")
-        tracer = get_tracer(__name__, "0.0.
+        tracer = get_tracer(__name__, "0.0.1a1", tracer_provider)
 
         for wrapped_method in WRAPPED_METHODS:
             wrap_function_wrapper(
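The net effect of these changes is that prompt and completion content is first normalized through `process_content_union`/`get_content`, serialized with `json.dumps` when it is not already a string, and any `function_call` blocks are flattened into indexed `tool_calls.*` span attributes. A rough sketch of the flat attribute keys this produces for a single prompt message carrying one tool call; the literal `gen_ai.prompt` prefix and all sample values are illustrative assumptions, not taken from this diff:

```python
import json

# One message (i=0) with one tool call (j=0), mirroring the nested loops above.
i, j = 0, 0
function_call = {"name": "get_weather", "id": "call_1", "arguments": {"city": "Paris"}}

attributes = {
    f"gen_ai.prompt.{i}.role": "user",
    f"gen_ai.prompt.{i}.content": json.dumps([{"type": "text", "text": "Weather in Paris?"}]),
    f"gen_ai.prompt.{i}.tool_calls.{j}.name": function_call["name"],
    f"gen_ai.prompt.{i}.tool_calls.{j}.id": function_call["id"],
    f"gen_ai.prompt.{i}.tool_calls.{j}.arguments": json.dumps(function_call["arguments"]),
}
print(json.dumps(attributes, indent=2))
```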
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py
CHANGED
@@ -1,6 +1,5 @@
 import logging
 import traceback
-import json
 
 from .config import (
     Config,
@@ -9,7 +8,28 @@ from google.genai import types
 from google.genai._common import BaseModel
 import pydantic
 from opentelemetry.trace import Span
-from typing import Any
+from typing import Any, Literal
+
+
+class ToolCall(pydantic.BaseModel):
+    name: str | None = pydantic.Field(default=None)
+    id: str | None = pydantic.Field(default=None)
+    arguments: dict[str, Any] = pydantic.Field(default={})
+
+
+class ImageUrlInner(pydantic.BaseModel):
+    url: str = pydantic.Field(default="")
+
+
+class ImageUrl(pydantic.BaseModel):
+    type: Literal["image_url"] = pydantic.Field(default="image_url")
+    image_url: ImageUrlInner = pydantic.Field(default=ImageUrlInner())
+
+
+class ProcessedContentPart(pydantic.BaseModel):
+    content: str | None = pydantic.Field(default=None)
+    function_call: ToolCall | None = pydantic.Field(default=None)
+    image_url: ImageUrl | None = pydantic.Field(default=None)
 
 
 def set_span_attribute(span: Span, name: str, value: str):
@@ -58,36 +78,40 @@ def to_dict(obj: BaseModel | pydantic.BaseModel | dict) -> dict[str, Any]:
     return dict(obj)
 
 
-def
-    content:
-
-
-
-
-
-    if
-
-
-
-
-
-
-
-
-
-
-
+def get_content(
+    content: (
+        ProcessedContentPart | dict | list[ProcessedContentPart | dict] | str | None
+    ),
+) -> list[Any] | None:
+    if isinstance(content, dict):
+        return content.get("content") or content.get("image_url")
+    if isinstance(content, ProcessedContentPart):
+        if content.content and isinstance(content.content, str):
+            return {
+                "type": "text",
+                "text": content.content,
+            }
+        elif content.image_url:
+            return content.image_url.model_dump()
+        else:
+            return None
+    elif isinstance(content, list):
+        return [get_content(item) or "" for item in content if item is not None]
+    elif isinstance(content, str):
+        return {
+            "type": "text",
+            "text": content,
+        }
     else:
         return None
 
 
-def
+def process_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
     trace_id: str | None = None,
     span_id: str | None = None,
     message_index: int = 0,
-) ->
+) -> ProcessedContentPart | dict | list[ProcessedContentPart | dict] | None:
     if isinstance(content, types.Content):
         parts = to_dict(content).get("parts", [])
         return [_process_part(part) for part in parts]
@@ -116,9 +140,9 @@ def _process_part_union(
     span_id: str | None = None,
     message_index: int = 0,
     content_index: int = 0,
-) ->
+) -> ProcessedContentPart | dict | None:
     if isinstance(content, str):
-        return content
+        return ProcessedContentPart(content=content)
     elif isinstance(content, types.File):
         content_dict = to_dict(content)
         name = (
@@ -126,7 +150,7 @@ def _process_part_union(
             or content_dict.get("display_name")
            or content_dict.get("uri")
        )
-        return f"files/{name}"
+        return ProcessedContentPart(content=f"files/{name}")
    elif isinstance(content, (types.Part, dict)):
        return _process_part(content, trace_id, span_id, message_index, content_index)
    else:
@@ -139,11 +163,9 @@ def _process_part(
     span_id: str | None = None,
     message_index: int = 0,
     content_index: int = 0,
-) ->
+) -> ProcessedContentPart | dict | None:
     part_dict = to_dict(content)
-    if part_dict.get("
-        return part_dict.get("text")
-    elif part_dict.get("inline_data"):
+    if part_dict.get("inline_data"):
         blob = to_dict(part_dict.get("inline_data"))
         if blob.get("mime_type").startswith("image/"):
             return _process_image_item(
@@ -151,7 +173,19 @@ def _process_part(
             )
         else:
             # currently, only images are supported
-            return
+            return ProcessedContentPart(
+                content=blob.get("mime_type") or "unknown_media"
+            )
+    elif part_dict.get("function_call"):
+        return ProcessedContentPart(
+            function_call=ToolCall(
+                name=part_dict.get("function_call").get("name"),
+                id=part_dict.get("function_call").get("id"),
+                arguments=part_dict.get("function_call").get("args", {}),
+            )
+        )
+    elif part_dict.get("text") is not None:
+        return ProcessedContentPart(content=part_dict.get("text"))
     else:
         return None
 
@@ -159,12 +193,17 @@ def _process_part(
 def role_from_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
 ) -> str | None:
+    role = None
     if isinstance(content, types.Content):
-
+        role = to_dict(content).get("role")
     elif isinstance(content, list) and len(content) > 0:
-
+        role = role_from_content_union(content[0])
+    elif isinstance(content, dict):
+        role = content.get("role")
     else:
         return None
+    return role
+    # return "assistant" if role == "model" else role
 
 
 def with_tracer_wrapper(func):
@@ -179,38 +218,22 @@ def with_tracer_wrapper(func):
     return _with_tracer
 
 
-def _run_async(method):
-    import asyncio
-    import threading
-
-    try:
-        loop = asyncio.get_running_loop()
-    except RuntimeError:
-        loop = None
-
-    if loop and loop.is_running():
-        thread = threading.Thread(target=lambda: asyncio.run(method))
-        thread.start()
-        thread.join()
-    else:
-        asyncio.run(method)
-
-
 def _process_image_item(
     blob: dict[str, Any],
     trace_id: str,
     span_id: str,
     message_index: int,
     content_index: int,
-):
+) -> ProcessedContentPart | dict | None:
     # Convert to openai format, so backends can handle it
     return (
-
-
-
-
-
-
+        ProcessedContentPart(
+            image_url=ImageUrl(
+                image_url=ImageUrlInner(
+                    url=f"data:image/{blob.get('mime_type').split('/')[1]};base64,{blob.get('data')}",
+                )
+            )
+        )
         if Config.convert_image_to_openai_format
         else blob
     )
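With the new pydantic models in place, every part of a GenAI message becomes a `ProcessedContentPart`, and `get_content` renders it in an OpenAI-style shape. A quick sketch of that round trip, assuming the wheel is installed and the models are importable from the module path shown in this diff:

```python
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.google_genai.utils import (
    ImageUrl,
    ImageUrlInner,
    ProcessedContentPart,
    get_content,
)

# Text parts come back as OpenAI-style text blocks.
text_part = ProcessedContentPart(content="What is in this image?")
print(get_content(text_part))
# -> {'type': 'text', 'text': 'What is in this image?'}

# Image parts come back as image_url blocks (the data URL is a dummy value).
image_part = ProcessedContentPart(
    image_url=ImageUrl(image_url=ImageUrlInner(url="data:image/png;base64,..."))
)
print(get_content(image_part))
# -> {'type': 'image_url', 'image_url': {'url': 'data:image/png;base64,...'}}
```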
lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py
ADDED
@@ -0,0 +1,121 @@
+"""OpenTelemetry Langgraph instrumentation"""
+
+import json
+import logging
+from typing import Collection
+
+from .utils import (
+    with_tracer_wrapper,
+)
+
+from langchain_core.runnables.graph import Graph
+from opentelemetry.trace import Tracer
+from wrapt import wrap_function_wrapper
+from opentelemetry.trace import get_tracer
+
+from lmnr.opentelemetry_lib.tracing.context_properties import (
+    update_association_properties,
+)
+
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.instrumentation.utils import unwrap
+
+
+logger = logging.getLogger(__name__)
+
+_instruments = ("langgraph >= 0.1.0",)
+
+
+@with_tracer_wrapper
+def wrap_pregel_stream(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
+    graph: Graph = instance.get_graph()
+    nodes = [
+        {
+            "id": node.id,
+            "name": node.name,
+            "metadata": node.metadata,
+        }
+        for node in graph.nodes.values()
+    ]
+    edges = [
+        {
+            "source": edge.source,
+            "target": edge.target,
+            "conditional": edge.conditional,
+        }
+        for edge in graph.edges
+    ]
+    update_association_properties(
+        {
+            "langgraph.edges": json.dumps(edges),
+            "langgraph.nodes": json.dumps(nodes),
+        },
+    )
+    return wrapped(*args, **kwargs)
+
+
+@with_tracer_wrapper
+async def async_wrap_pregel_stream(
+    tracer: Tracer, to_wrap, wrapped, instance, args, kwargs
+):
+    graph: Graph = await instance.aget_graph()
+    nodes = [
+        {
+            "id": node.id,
+            "name": node.name,
+            "metadata": node.metadata,
+        }
+        for node in graph.nodes.values()
+    ]
+    edges = [
+        {
+            "source": edge.source,
+            "target": edge.target,
+            "conditional": edge.conditional,
+        }
+        for edge in graph.edges
+    ]
+    update_association_properties(
+        {
+            "langgraph.edges": json.dumps(edges),
+            "langgraph.nodes": json.dumps(nodes),
+        },
+    )
+
+    async for item in wrapped(*args, **kwargs):
+        yield item
+
+
+class LanggraphInstrumentor(BaseInstrumentor):
+    """An instrumentor for Langgraph."""
+
+    def __init__(self):
+        super().__init__()
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        tracer_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, "0.0.1a0", tracer_provider)
+
+        wrap_function_wrapper(
+            module="langgraph.pregel",
+            name="Pregel.stream",
+            wrapper=wrap_pregel_stream(tracer, "Pregel.stream"),
+        )
+        wrap_function_wrapper(
+            module="langgraph.pregel",
+            name="Pregel.astream",
+            wrapper=async_wrap_pregel_stream(tracer, "Pregel.astream"),
+        )
+
+    def _uninstrument(self, **kwargs):
+        unwrap(
+            module="langgraph.pregel",
+            name="Pregel.stream",
+        )
+        unwrap(
+            module="langgraph.pregel",
+            name="Pregel.astream",
+        )
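The new instrumentor follows the standard OpenTelemetry `BaseInstrumentor` lifecycle, so in principle it can be wired up by hand; a minimal sketch follows (in normal use the SDK's instrument initializers enable it for you, so treat the manual calls as illustrative):

```python
from lmnr.opentelemetry_lib.opentelemetry.instrumentation.langgraph import (
    LanggraphInstrumentor,
)

instrumentor = LanggraphInstrumentor()
instrumentor.instrument()  # patches Pregel.stream and Pregel.astream via wrapt

# Any compiled LangGraph graph streamed from here on has its nodes and edges
# recorded as the langgraph.nodes / langgraph.edges association properties.

instrumentor.uninstrument()  # unwraps both methods
```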