lmnr 0.7.11__py3-none-any.whl → 0.7.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/opentelemetry_lib/__init__.py +6 -0
- lmnr/opentelemetry_lib/decorators/__init__.py +1 -1
- lmnr/opentelemetry_lib/litellm/__init__.py +277 -32
- lmnr/opentelemetry_lib/litellm/utils.py +76 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +136 -44
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +93 -6
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +155 -3
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +477 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +14 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +10 -1
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +100 -8
- lmnr/opentelemetry_lib/tracing/__init__.py +9 -0
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +20 -0
- lmnr/opentelemetry_lib/tracing/exporter.py +24 -9
- lmnr/opentelemetry_lib/tracing/instruments.py +4 -0
- lmnr/opentelemetry_lib/tracing/processor.py +26 -0
- lmnr/sdk/laminar.py +14 -0
- lmnr/version.py +1 -1
- {lmnr-0.7.11.dist-info → lmnr-0.7.12.dist-info}/METADATA +50 -50
- {lmnr-0.7.11.dist-info → lmnr-0.7.12.dist-info}/RECORD +24 -21
- {lmnr-0.7.11.dist-info → lmnr-0.7.12.dist-info}/WHEEL +0 -0
- {lmnr-0.7.11.dist-info → lmnr-0.7.12.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/__init__.py

```diff
@@ -64,3 +64,9 @@ class TracerManager:
     @staticmethod
     def shutdown():
         TracerManager.__tracer_wrapper.shutdown()
+
+    @staticmethod
+    def force_reinit_processor():
+        if not hasattr(TracerManager, "_TracerManager__tracer_wrapper"):
+            return False
+        return TracerManager.__tracer_wrapper.force_reinit_processor()
```
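The new hook is reachable directly on the `TracerManager` shown above; a minimal sketch of calling it (illustrative only — this is an internal helper, it assumes a project API key is configured for `Laminar.initialize()`, and the related `lmnr/sdk/laminar.py` change is not expanded in this diff):

```python
from lmnr import Laminar
from lmnr.opentelemetry_lib import TracerManager

# Tracing has to be initialized first so the private tracer wrapper exists.
Laminar.initialize()

# Returns False if the wrapper was never created; otherwise it delegates to
# the wrapper's own force_reinit_processor() and returns its result.
reinitialized = TracerManager.force_reinit_processor()
print(reinitialized)
```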
lmnr/opentelemetry_lib/litellm/__init__.py

```diff
@@ -3,20 +3,28 @@
 import json
 from datetime import datetime
 
+from opentelemetry.semconv._incubating.attributes.gen_ai_attributes import GEN_AI_PROMPT
 from opentelemetry.trace import SpanKind, Status, StatusCode, Tracer
-from lmnr.opentelemetry_lib.
+from lmnr.opentelemetry_lib.decorators import json_dumps
+from lmnr.opentelemetry_lib.litellm.utils import (
+    get_tool_definition,
+    is_validator_iterator,
+    model_as_dict,
+    set_span_attribute,
+)
 from lmnr.opentelemetry_lib.tracing import TracerWrapper
 
 from lmnr.opentelemetry_lib.tracing.context import (
     get_current_context,
     get_event_attributes_from_context,
 )
+from lmnr.opentelemetry_lib.tracing.attributes import ASSOCIATION_PROPERTIES
 from lmnr.opentelemetry_lib.utils.package_check import is_package_installed
 from lmnr.sdk.log import get_default_logger
 
 logger = get_default_logger(__name__)
 
-SUPPORTED_CALL_TYPES = ["completion", "acompletion"]
+SUPPORTED_CALL_TYPES = ["completion", "acompletion", "responses", "aresponses"]
 
 # Try to import the necessary LiteLLM components and gracefully handle ImportError
 try:
@@ -39,11 +47,14 @@ try:
             litellm.callbacks = [LaminarLiteLLMCallback()]
         """
 
+        logged_openai_responses: set[str]
+
         def __init__(self, **kwargs):
             super().__init__(**kwargs)
             if not hasattr(TracerWrapper, "instance") or TracerWrapper.instance is None:
                 raise ValueError("Laminar must be initialized before LiteLLM callback")
 
+            self.logged_openai_responses = set()
             if is_package_installed("openai"):
                 from lmnr.opentelemetry_lib.opentelemetry.instrumentation.openai import (
                     OpenAIInstrumentor,
@@ -69,6 +80,14 @@ try:
         ):
             if kwargs.get("call_type") not in SUPPORTED_CALL_TYPES:
                 return
+            if kwargs.get("call_type") in ["responses", "aresponses"]:
+                # responses API may be called multiple times with the same response_obj
+                response_id = getattr(response_obj, "id", None)
+                if response_id in self.logged_openai_responses:
+                    return
+                if response_id:
+                    self.logged_openai_responses.add(response_id)
+                    self.logged_openai_responses.add(response_obj.id)
             try:
                 self._create_span(
                     kwargs, response_obj, start_time, end_time, is_success=True
@@ -107,12 +126,18 @@ try:
             is_success: bool,
         ):
             """Create an OpenTelemetry span for the LiteLLM call"""
-
+            call_type = kwargs.get("call_type", "completion")
+            if call_type == "aresponses":
+                call_type = "responses"
+            if call_type == "acompletion":
+                call_type = "completion"
+            span_name = f"litellm.{call_type}"
             try:
                 tracer = self._get_tracer()
             except Exception as e:
                 logger.error(f"Error getting tracer: {e}")
                 return
+
             span = tracer.start_span(
                 span_name,
                 kind=SpanKind.CLIENT,
@@ -149,6 +174,52 @@ try:
             if "top_p" in kwargs:
                 set_span_attribute(span, "gen_ai.request.top_p", kwargs["top_p"])
 
+            metadata = (
+                kwargs.get("litellm_params").get(
+                    "metadata", kwargs.get("metadata", {})
+                )
+                or {}
+            )
+            tags = metadata.get("tags", [])
+            if isinstance(tags, str):
+                try:
+                    tags = json.loads(tags)
+                except Exception:
+                    pass
+            if (
+                tags
+                and isinstance(tags, (list, tuple, set))
+                and all(isinstance(tag, str) for tag in tags)
+            ):
+                span.set_attribute(f"{ASSOCIATION_PROPERTIES}.tags", tags)
+
+            user_id = metadata.get("user_id")
+            if user_id:
+                span.set_attribute(f"{ASSOCIATION_PROPERTIES}.user_id", user_id)
+
+            session_id = metadata.get("session_id")
+            if session_id:
+                span.set_attribute(
+                    f"{ASSOCIATION_PROPERTIES}.session_id", session_id
+                )
+
+            optional_params = kwargs.get("optional_params") or {}
+            if not optional_params:
+                hidden_params = metadata.get("hidden_params") or {}
+                optional_params = hidden_params.get("optional_params") or {}
+            response_format = optional_params.get("response_format")
+            if (
+                response_format
+                and isinstance(response_format, dict)
+                and response_format.get("type") == "json_schema"
+            ):
+                schema = (response_format.get("json_schema") or {}).get("schema")
+                if schema:
+                    span.set_attribute(
+                        "gen_ai.request.structured_output_schema",
+                        json_dumps(schema),
+                    )
+
             if is_success:
                 span.set_status(Status(StatusCode.OK))
                 if kwargs.get("complete_streaming_response"):
@@ -176,35 +247,107 @@ try:
             if not isinstance(messages, list):
                 return
 
-
-
-
-
-
-
-
+            prompt_index = 0
+            for item in messages:
+                block_dict = model_as_dict(item)
+                if block_dict.get("type", "message") == "message":
+                    tool_calls = block_dict.get("tool_calls", [])
+                    self._process_tool_calls(
+                        span, tool_calls, prompt_index, is_response=False
+                    )
+                    content = block_dict.get("content")
+                    if is_validator_iterator(content):
+                        # Have not been able to catch this in the wild, but keeping
+                        # just in case, as raw OpenAI responses do that
+                        content = [self._process_content_part(part) for part in content]
+                    try:
+                        stringified_content = (
+                            content if isinstance(content, str) else json_dumps(content)
+                        )
+                    except Exception:
+                        stringified_content = (
+                            str(content) if content is not None else ""
+                        )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                        stringified_content,
+                    )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                        block_dict.get("role"),
+                    )
+                    prompt_index += 1
 
-
-                if content is None:
-                    continue
-                if isinstance(content, str):
-                    set_span_attribute(span, f"gen_ai.prompt.{i}.content", content)
-                elif isinstance(content, list):
+                elif block_dict.get("type") == "computer_call_output":
                     set_span_attribute(
-                        span,
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                        "computer_call_output",
                     )
-
+                    output_image_url = block_dict.get("output", {}).get("image_url")
+                    if output_image_url:
+                        set_span_attribute(
+                            span,
+                            f"{GEN_AI_PROMPT}.{prompt_index}.content",
+                            json.dumps(
+                                [
+                                    {
+                                        "type": "image_url",
+                                        "image_url": {"url": output_image_url},
+                                    }
+                                ]
+                            ),
+                        )
+                    prompt_index += 1
+                elif block_dict.get("type") == "computer_call":
+                    set_span_attribute(
+                        span, f"{GEN_AI_PROMPT}.{prompt_index}.role", "assistant"
+                    )
+                    call_content = {}
+                    if block_dict.get("id"):
+                        call_content["id"] = block_dict.get("id")
+                    if block_dict.get("action"):
+                        call_content["action"] = block_dict.get("action")
                     set_span_attribute(
                         span,
-                        f"
-                        json.dumps(
+                        f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.arguments",
+                        json.dumps(call_content),
                     )
-                if role == "tool":
                     set_span_attribute(
                         span,
-                        f"
-
+                        f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.id",
+                        block_dict.get("call_id"),
                     )
+                    set_span_attribute(
+                        span,
+                        f"{GEN_AI_PROMPT}.{prompt_index}.tool_calls.0.name",
+                        "computer_call",
+                    )
+                    prompt_index += 1
+                elif block_dict.get("type") == "reasoning":
+                    reasoning_summary = block_dict.get("summary")
+                    if reasoning_summary and isinstance(reasoning_summary, list):
+                        processed_chunks = [
+                            {"type": "text", "text": chunk.get("text")}
+                            for chunk in reasoning_summary
+                            if isinstance(chunk, dict)
+                            and chunk.get("type") == "summary_text"
+                        ]
+                        set_span_attribute(
+                            span,
+                            f"{GEN_AI_PROMPT}.{prompt_index}.reasoning",
+                            json_dumps(processed_chunks),
+                        )
+                        set_span_attribute(
+                            span,
+                            f"{GEN_AI_PROMPT}.{prompt_index}.role",
+                            "assistant",
+                        )
+                    # reasoning is followed by other content parts in the same messge,
+                    # so we don't increment the prompt index
+                # TODO: handle other block types
 
         def _process_request_tool_definitions(self, span, tools):
             """Process and set tool definitions attributes on the span"""
@@ -213,14 +356,10 @@ try:
 
             for i, tool in enumerate(tools):
                 tool_dict = model_as_dict(tool)
-
-
-
-
-                function_dict = tool_dict.get("function", {})
-                function_name = function_dict.get("name", "")
-                function_description = function_dict.get("description", "")
-                function_parameters = function_dict.get("parameters", {})
+                tool_definition = get_tool_definition(tool_dict)
+                function_name = tool_definition.get("name")
+                function_description = tool_definition.get("description")
+                function_parameters = tool_definition.get("parameters")
                 set_span_attribute(
                     span,
                     f"llm.request.functions.{i}.name",
@@ -341,6 +480,108 @@ try:
                         json.dumps(model_as_dict(content)),
                     )
 
+        def _process_content_part(self, content_part: dict) -> dict:
+            content_part_dict = model_as_dict(content_part)
+            if content_part_dict.get("type") == "output_text":
+                return {"type": "text", "text": content_part_dict.get("text")}
+            return content_part_dict
+
+        def _process_response_output(self, span, output):
+            """Response of OpenAI Responses API"""
+            if not isinstance(output, list):
+                return
+            set_span_attribute(span, "gen_ai.completion.0.role", "assistant")
+            tool_call_index = 0
+            for block in output:
+                block_dict = model_as_dict(block)
+                if block_dict.get("type") == "message":
+                    content = block_dict.get("content")
+                    if content is None:
+                        continue
+                    if isinstance(content, str):
+                        set_span_attribute(span, "gen_ai.completion.0.content", content)
+                    elif isinstance(content, list):
+                        set_span_attribute(
+                            span,
+                            "gen_ai.completion.0.content",
+                            json_dumps(
+                                [self._process_content_part(part) for part in content]
+                            ),
+                        )
+                if block_dict.get("type") == "function_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        block_dict.get("name"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.arguments",
+                        block_dict.get("arguments"),
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "file_search_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        "file_search_call",
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "web_search_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        "web_search_call",
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "computer_call":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.id",
+                        block_dict.get("call_id"),
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.name",
+                        "computer_call",
+                    )
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.0.tool_calls.{tool_call_index}.arguments",
+                        json_dumps(block_dict.get("action")),
+                    )
+                    tool_call_index += 1
+                elif block_dict.get("type") == "reasoning":
+                    reasoning_summary = block_dict.get("summary")
+                    if reasoning_summary and isinstance(reasoning_summary, list):
+                        processed_chunks = [
+                            {"type": "text", "text": chunk.get("text")}
+                            for chunk in reasoning_summary
+                            if isinstance(chunk, dict)
+                            and chunk.get("type") == "summary_text"
+                        ]
+                        set_span_attribute(
+                            span,
+                            "gen_ai.completion.0.reasoning",
+                            json_dumps(processed_chunks),
+                        )
+                # TODO: handle other block types, in particular other calls
+
         def _process_success_response(self, span, response_obj):
             """Process successful response attributes"""
             response_dict = model_as_dict(response_obj)
@@ -349,7 +590,9 @@ try:
                 span, "gen_ai.response.model", response_dict.get("model")
             )
 
-            if
+            if getattr(response_obj, "usage", None):
+                self._process_response_usage(span, getattr(response_obj, "usage", None))
+            elif response_dict.get("usage"):
                 self._process_response_usage(span, response_dict.get("usage"))
 
             if response_dict.get("cache_creation_input_tokens"):
@@ -367,6 +610,8 @@ try:
 
             if response_dict.get("choices"):
                 self._process_response_choices(span, response_dict.get("choices"))
+            elif response_dict.get("output"):
+                self._process_response_output(span, response_dict.get("output"))
 
 except ImportError as e:
     logger.debug(f"LiteLLM callback unavailable: {e}")
```
lmnr/opentelemetry_lib/litellm/utils.py

```diff
@@ -1,6 +1,14 @@
+import re
 from pydantic import BaseModel
 from opentelemetry.sdk.trace import Span
 from opentelemetry.util.types import AttributeValue
+from typing_extensions import TypedDict
+
+
+class ToolDefinition(TypedDict):
+    name: str | None
+    description: str | None
+    parameters: dict | None
 
 
 def model_as_dict(model: BaseModel | dict) -> dict:
@@ -16,3 +24,71 @@ def set_span_attribute(span: Span, key: str, value: AttributeValue | None):
     if value is None or value == "":
         return
     span.set_attribute(key, value)
+
+
+def get_tool_definition(tool: dict) -> ToolDefinition:
+    parameters = None
+    description = None
+    name = (tool.get("function") or {}).get("name") or tool.get("name")
+    if tool.get("type") == "function":
+        function = tool.get("function") or {}
+        parameters = function.get("parameters") or tool.get("parameters")
+        description = function.get("description") or tool.get("description")
+    elif isinstance(tool.get("type"), str) and tool.get("type").startswith("computer"):
+        # Anthropic beta computer tools
+        # https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/computer-use-tool
+
+        # OpenAI computer use API
+        # https://platform.openai.com/docs/guides/tools-computer-use
+        if not name:
+            name = tool.get("type")
+
+        parameters = {}
+        tool_parameters = (tool.get("function") or {}).get("parameters") or {}
+        # Anthropic
+        display_width_px = tool_parameters.get("display_width_px") or tool.get(
+            "display_width_px"
+        )
+        display_height_px = tool_parameters.get("display_height_px") or tool.get(
+            "display_height_px"
+        )
+        display_number = tool_parameters.get("display_number") or tool.get(
+            "display_number"
+        )
+        if display_width_px:
+            parameters["display_width_px"] = display_width_px
+        if display_height_px:
+            parameters["display_height_px"] = display_height_px
+        if display_number:
+            parameters["display_number"] = display_number
+        # OpenAI
+        display_width = tool_parameters.get("display_width") or tool.get(
+            "display_width"
+        )
+        display_height = tool_parameters.get("display_height") or tool.get(
+            "display_height"
+        )
+        environment = tool_parameters.get("environment") or tool.get("environment")
+        if display_width:
+            parameters["display_width"] = display_width
+        if display_height:
+            parameters["display_height"] = tool.get("display_height")
+        if environment:  # Literal['browser', 'mac', 'windows', 'ubuntu']
+            parameters["environment"] = environment
+
+    return ToolDefinition(
+        name=name,
+        description=description,
+        parameters=parameters,
+    )
+
+
+def is_validator_iterator(content):
+    """
+    Some OpenAI objects contain fields typed as Iterable, which pydantic
+    internally converts to a ValidatorIterator, and they cannot be trivially
+    serialized without consuming the iterator to, for example, a list.
+
+    See: https://github.com/pydantic/pydantic/issues/9541#issuecomment-2189045051
+    """
+    return re.search(r"pydantic.*ValidatorIterator'>$", str(type(content)))
```