lmnr 0.4.53.dev0__py3-none-any.whl → 0.7.26__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +32 -11
- lmnr/cli/__init__.py +270 -0
- lmnr/cli/datasets.py +371 -0
- lmnr/cli/evals.py +111 -0
- lmnr/cli/rules.py +42 -0
- lmnr/opentelemetry_lib/__init__.py +70 -0
- lmnr/opentelemetry_lib/decorators/__init__.py +337 -0
- lmnr/opentelemetry_lib/litellm/__init__.py +685 -0
- lmnr/opentelemetry_lib/litellm/utils.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +599 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +330 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +121 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +191 -0
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
- lmnr/opentelemetry_lib/tracing/__init__.py +263 -0
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +516 -0
- lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +21 -8
- lmnr/opentelemetry_lib/tracing/context.py +200 -0
- lmnr/opentelemetry_lib/tracing/exporter.py +153 -0
- lmnr/opentelemetry_lib/tracing/instruments.py +140 -0
- lmnr/opentelemetry_lib/tracing/processor.py +193 -0
- lmnr/opentelemetry_lib/tracing/span.py +398 -0
- lmnr/opentelemetry_lib/tracing/tracer.py +57 -0
- lmnr/opentelemetry_lib/tracing/utils.py +62 -0
- lmnr/opentelemetry_lib/utils/package_check.py +18 -0
- lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
- lmnr/sdk/browser/__init__.py +0 -0
- lmnr/sdk/browser/background_send_events.py +158 -0
- lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
- lmnr/sdk/browser/browser_use_otel.py +142 -0
- lmnr/sdk/browser/bubus_otel.py +71 -0
- lmnr/sdk/browser/cdp_utils.py +518 -0
- lmnr/sdk/browser/inject_script.js +514 -0
- lmnr/sdk/browser/patchright_otel.py +151 -0
- lmnr/sdk/browser/playwright_otel.py +322 -0
- lmnr/sdk/browser/pw_utils.py +363 -0
- lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
- lmnr/sdk/browser/utils.py +70 -0
- lmnr/sdk/client/asynchronous/async_client.py +180 -0
- lmnr/sdk/client/asynchronous/resources/__init__.py +6 -0
- lmnr/sdk/client/asynchronous/resources/base.py +32 -0
- lmnr/sdk/client/asynchronous/resources/browser_events.py +41 -0
- lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/asynchronous/resources/evals.py +266 -0
- lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/asynchronous/resources/tags.py +83 -0
- lmnr/sdk/client/synchronous/resources/__init__.py +6 -0
- lmnr/sdk/client/synchronous/resources/base.py +32 -0
- lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
- lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
- lmnr/sdk/client/synchronous/resources/evals.py +263 -0
- lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
- lmnr/sdk/client/synchronous/resources/tags.py +83 -0
- lmnr/sdk/client/synchronous/sync_client.py +191 -0
- lmnr/sdk/datasets/__init__.py +94 -0
- lmnr/sdk/datasets/file_utils.py +91 -0
- lmnr/sdk/decorators.py +163 -26
- lmnr/sdk/eval_control.py +3 -2
- lmnr/sdk/evaluations.py +403 -191
- lmnr/sdk/laminar.py +1080 -549
- lmnr/sdk/log.py +7 -2
- lmnr/sdk/types.py +246 -134
- lmnr/sdk/utils.py +151 -7
- lmnr/version.py +46 -0
- {lmnr-0.4.53.dev0.dist-info → lmnr-0.7.26.dist-info}/METADATA +152 -106
- lmnr-0.7.26.dist-info/RECORD +116 -0
- lmnr-0.7.26.dist-info/WHEEL +4 -0
- lmnr-0.7.26.dist-info/entry_points.txt +3 -0
- lmnr/cli.py +0 -101
- lmnr/openllmetry_sdk/.python-version +0 -1
- lmnr/openllmetry_sdk/__init__.py +0 -72
- lmnr/openllmetry_sdk/config/__init__.py +0 -9
- lmnr/openllmetry_sdk/decorators/base.py +0 -185
- lmnr/openllmetry_sdk/instruments.py +0 -38
- lmnr/openllmetry_sdk/tracing/__init__.py +0 -1
- lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -24
- lmnr/openllmetry_sdk/tracing/context_manager.py +0 -13
- lmnr/openllmetry_sdk/tracing/tracing.py +0 -884
- lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -61
- lmnr/openllmetry_sdk/utils/package_check.py +0 -7
- lmnr/openllmetry_sdk/version.py +0 -1
- lmnr/sdk/datasets.py +0 -55
- lmnr-0.4.53.dev0.dist-info/LICENSE +0 -75
- lmnr-0.4.53.dev0.dist-info/RECORD +0 -33
- lmnr-0.4.53.dev0.dist-info/WHEEL +0 -4
- lmnr-0.4.53.dev0.dist-info/entry_points.txt +0 -3
- /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
- /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
- /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
- /lmnr/{openllmetry_sdk/decorators/__init__.py → py.typed} +0 -0
|
@@ -0,0 +1,330 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
from collections import defaultdict
|
|
3
|
+
import logging
|
|
4
|
+
import traceback
|
|
5
|
+
from typing_extensions import TypedDict
|
|
6
|
+
|
|
7
|
+
from .config import (
|
|
8
|
+
Config,
|
|
9
|
+
)
|
|
10
|
+
from google.genai import types
|
|
11
|
+
from google.genai._common import BaseModel
|
|
12
|
+
import pydantic
|
|
13
|
+
from opentelemetry.trace import Span
|
|
14
|
+
from typing import Any, Literal
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ToolCall(pydantic.BaseModel):
    """A function/tool invocation extracted from a model response part."""

    # Tool (function) name as reported by the model, if any.
    name: str | None = pydantic.Field(default=None)
    # Provider-assigned call id, if any.
    id: str | None = pydantic.Field(default=None)
    # Keyword arguments of the call. Use default_factory so each instance
    # gets its own dict — the documented way to declare mutable defaults.
    arguments: dict[str, Any] = pydantic.Field(default_factory=dict)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ImageUrlInner(pydantic.BaseModel):
    """Holds the actual URL (or base64 data URI) of an image."""

    url: str = ""
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class ImageUrl(pydantic.BaseModel):
    """OpenAI-style image content part wrapper."""

    # Discriminator for OpenAI-format content parts.
    type: Literal["image_url"] = pydantic.Field(default="image_url")
    # Use default_factory so each instance gets its own inner model instead
    # of sharing one module-level default instance.
    image_url: ImageUrlInner = pydantic.Field(default_factory=ImageUrlInner)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class ProcessedContentPart(pydantic.BaseModel):
    """One normalized piece of model content: text, tool call, or image."""

    # Plain-text content, when present.
    content: str | None = None
    # Parsed function/tool call, when present.
    function_call: ToolCall | None = None
    # OpenAI-format image payload, when present.
    image_url: ImageUrl | None = None
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ProcessChunkResult(TypedDict):
    """Per-chunk outputs of `process_stream_chunk` (the immutable part of its state)."""

    # Latest role seen in the stream so far.
    role: str
    # Model version reported by the stream, once known; None until then.
    model_version: str | None
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def merge_text_parts(
    parts: list[types.PartDict | types.File | types.Part | str],
) -> list[types.Part]:
    """Collapse consecutive text fragments into single `types.Part` objects.

    Non-text entries (files, inline data, function calls, ...) act as
    boundaries: any text accumulated so far is flushed as one Part before
    the non-text entry is appended unchanged.
    """
    if not parts:
        return []

    merged: list[types.Part] = []
    text_buffer: list[str] = []

    def flush() -> None:
        # Emit buffered text as a single Part (skip if nothing accumulated).
        joined = "".join(text_buffer)
        if joined:
            merged.append(types.Part(text=joined))
        text_buffer.clear()

    for entry in parts:
        if isinstance(entry, str):
            # Bare strings are text.
            text_buffer.append(entry)
        elif isinstance(entry, types.File):
            # Files are never text; flush, then pass the File through as-is.
            flush()
            merged.append(entry)
        else:
            # `types.Part` object or a PartDict.
            entry_dict = to_dict(entry)
            text = entry_dict.get("text")
            if text is not None:
                text_buffer.append(text)
            else:
                # Non-text payload (inline_data, function_call, etc.).
                flush()
                if isinstance(entry, types.Part):
                    merged.append(entry)
                elif isinstance(entry, dict):
                    # Convert dict to a Part object.
                    merged.append(types.Part(**entry_dict))

    flush()
    return merged
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
def set_span_attribute(span: Span, name: str, value: Any):
    """Set `name` on `span`, skipping None and empty-string values."""
    if value is None or value == "":
        return
    span.set_attribute(name, value)
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def dont_throw(func):
    """
    A decorator that wraps the passed in function and logs exceptions instead of throwing them.

    @param func: The function to wrap
    @return: The wrapper function
    """
    import functools

    # Obtain a logger specific to the function's module
    logger = logging.getLogger(func.__module__)

    # functools.wraps preserves __name__/__doc__/__module__ of the wrapped
    # function, so tracing and debugging tools see the real function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            logger.debug(
                "Laminar failed to trace in %s, error: %s",
                func.__name__,
                traceback.format_exc(),
            )
            if Config.exception_logger:
                Config.exception_logger(e)

    return wrapper
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def to_dict(obj: BaseModel | pydantic.BaseModel | dict) -> dict[str, Any]:
    """Best-effort conversion of `obj` to a plain dict.

    Pydantic models (including google-genai's BaseModel subclass) are
    dumped via `model_dump()`; dicts pass through unchanged; anything
    else goes through `dict()`. Conversion errors are logged and
    `dict(obj)` is attempted as a last resort.
    """
    try:
        if isinstance(obj, (BaseModel, pydantic.BaseModel)):
            return obj.model_dump()
        if isinstance(obj, dict):
            return obj
        return dict(obj)
    except Exception as e:
        logging.error(f"Error converting to dict: {obj}, error: {e}")
        return dict(obj)
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def get_content(
    content: (
        ProcessedContentPart | dict | list[ProcessedContentPart | dict] | str | None
    ),
) -> dict | list[dict] | None:
    """Normalize processed content into OpenAI-style message content dicts."""
    if isinstance(content, dict):
        # Raw dict: prefer the text payload, fall back to an image payload.
        return content.get("content") or content.get("image_url")
    if isinstance(content, ProcessedContentPart):
        if content.content and isinstance(content.content, str):
            return {
                "type": "text",
                "text": content.content,
            }
        if content.image_url:
            return content.image_url.model_dump()
        return None
    if isinstance(content, list):
        # Recurse per element and drop anything that normalized to None.
        normalized = (get_content(member) for member in content)
        return [member for member in normalized if member is not None]
    if isinstance(content, str):
        return {
            "type": "text",
            "text": content,
        }
    return None
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def process_content_union(
    content: types.ContentUnion | types.ContentUnionDict,
) -> ProcessedContentPart | dict | list[ProcessedContentPart | dict] | None:
    """Convert any of the google-genai content union shapes into processed parts."""
    if isinstance(content, types.Content):
        # Full Content object: process each of its parts.
        return [_process_part(item) for item in to_dict(content).get("parts", [])]
    if isinstance(content, list):
        return [_process_part_union(item) for item in content]
    if isinstance(content, (types.Part, types.File, str)):
        return _process_part_union(content)
    if isinstance(content, dict):
        # A ContentDict carries "parts"; otherwise assume it's a PartDict.
        if "parts" in content:
            return [_process_part_union(item) for item in content.get("parts", [])]
        return _process_part_union(content)
    return None
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def _process_part_union(
    content: types.PartDict | types.File | types.Part | str,
) -> ProcessedContentPart | dict | None:
    """Process one element of a part union into a `ProcessedContentPart`."""
    if isinstance(content, str):
        return ProcessedContentPart(content=content)
    if isinstance(content, types.File):
        # Represent files by the most specific identifier available.
        file_dict = to_dict(content)
        identifier = (
            file_dict.get("name")
            or file_dict.get("display_name")
            or file_dict.get("uri")
        )
        return ProcessedContentPart(content=f"files/{identifier}")
    if isinstance(content, (types.Part, dict)):
        return _process_part(content)
    return None
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def _process_part(
    content: types.Part,
) -> ProcessedContentPart | dict | None:
    """Convert a single Part (or PartDict) into a `ProcessedContentPart`."""
    part_dict = to_dict(content)

    inline_data = part_dict.get("inline_data")
    if inline_data:
        blob = to_dict(inline_data)
        if blob.get("mime_type", "").startswith("image/"):
            return _process_image_item(blob)
        # currently, only images are supported
        return ProcessedContentPart(content=blob.get("mime_type") or "unknown_media")

    if function_call := part_dict.get("function_call"):
        call_dict = to_dict(function_call)
        return ProcessedContentPart(
            function_call=ToolCall(
                name=call_dict.get("name"),
                id=call_dict.get("id"),
                arguments=call_dict.get("args", {}),
            )
        )

    if part_dict.get("text") is not None:
        return ProcessedContentPart(content=part_dict.get("text"))

    return None
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def role_from_content_union(
    content: types.ContentUnion | types.ContentUnionDict,
) -> str | None:
    """Extract the message role from any content union shape, if present."""
    if isinstance(content, types.Content):
        return to_dict(content).get("role")
    if isinstance(content, list):
        # Take the role of the first element, when there is one.
        return role_from_content_union(content[0]) if content else None
    if isinstance(content, dict):
        return content.get("role")
    return None
|
|
249
|
+
|
|
250
|
+
|
|
251
|
+
def with_tracer_wrapper(func):
    """Helper for providing tracer for wrapper functions.

    Produces a two-stage factory: first bind (tracer, to_wrap), then get a
    wrapt-style wrapper that forwards (wrapped, instance, args, kwargs) to
    `func` with the bound context prepended.
    """

    def _with_tracer(tracer, to_wrap):
        def wrapper(wrapped, instance, args, kwargs):
            # Prepend the instrumentation context to the wrapt call signature.
            return func(tracer, to_wrap, wrapped, instance, args, kwargs)

        return wrapper

    return _with_tracer
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def _process_image_item(blob: dict[str, Any]) -> ProcessedContentPart | dict | None:
    """Convert an inline image blob to OpenAI-format image content.

    When conversion is disabled in the instrumentation config, the raw
    blob is returned untouched so backends receive it as-is.
    """
    if not Config.convert_image_to_openai_format:
        return blob

    # Convert to openai format, so backends can handle it
    raw = blob.get("data")
    payload = base64.b64encode(raw).decode("utf-8") if isinstance(raw, bytes) else raw
    mime_type = blob.get("mime_type", "image/unknown")
    image_type = mime_type.split("/")[1] if "/" in mime_type else "unknown"

    return ProcessedContentPart(
        image_url=ImageUrl(
            image_url=ImageUrlInner(
                url=f"data:image/{image_type};base64,{payload}",
            )
        )
    )
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
@dont_throw
def process_stream_chunk(
    chunk: types.GenerateContentResponse,
    existing_role: str,
    existing_model_version: str | None,
    # ============================== #
    # mutable states, passed by reference
    aggregated_usage_metadata: defaultdict[str, int],
    final_parts: list[types.Part | None],
    # ============================== #
) -> ProcessChunkResult:
    """Fold a single streaming response chunk into the aggregate stream state.

    Content parts are appended to ``final_parts`` and token counts are
    accumulated into ``aggregated_usage_metadata`` — both mutated in place.
    Returns the latest known role and model version; exceptions are
    swallowed and logged by the ``@dont_throw`` decorator.
    """
    role = existing_role
    model_version = existing_model_version

    # Later chunks may carry the resolved model version; keep the latest.
    if chunk.model_version:
        model_version = chunk.model_version

    # Currently gemini throws an error if you pass more than one candidate
    # with streaming
    if chunk.candidates and len(chunk.candidates) > 0 and chunk.candidates[0].content:
        final_parts += chunk.candidates[0].content.parts or []
        role = chunk.candidates[0].content.role or role
    if chunk.usage_metadata:
        usage_dict = to_dict(chunk.usage_metadata)
        # prompt token count is sent in every chunk
        # (and is less by 1 in the last chunk, so we set it once);
        # total token count in every chunk is greater by prompt token count than it should be,
        # thus this awkward logic here
        if aggregated_usage_metadata.get("prompt_token_count") is None:
            # or 0, not .get(key, 0), because sometimes the value is explicitly None
            aggregated_usage_metadata["prompt_token_count"] = (
                usage_dict.get("prompt_token_count") or 0
            )
            # Seed the running total from the first chunk only.
            aggregated_usage_metadata["total_token_count"] = (
                usage_dict.get("total_token_count") or 0
            )
        # Candidate (output) tokens accumulate across chunks; the total is
        # advanced by the same amount to avoid re-adding the prompt count.
        aggregated_usage_metadata["candidates_token_count"] += (
            usage_dict.get("candidates_token_count") or 0
        )
        aggregated_usage_metadata["total_token_count"] += (
            usage_dict.get("candidates_token_count") or 0
        )
    return ProcessChunkResult(
        role=role,
        model_version=model_version,
    )
|