ccs-llmconnector 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/METADATA +38 -2
- ccs_llmconnector-1.0.6.dist-info/RECORD +14 -0
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/licenses/LICENSE +22 -22
- llmconnector/__init__.py +39 -39
- llmconnector/anthropic_client.py +233 -217
- llmconnector/client.py +191 -148
- llmconnector/client_cli.py +325 -325
- llmconnector/gemini_client.py +299 -224
- llmconnector/grok_client.py +186 -170
- llmconnector/openai_client.py +90 -73
- llmconnector/py.typed +1 -1
- ccs_llmconnector-1.0.4.dist-info/RECORD +0 -14
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/WHEEL +0 -0
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/entry_points.txt +0 -0
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/top_level.txt +0 -0
llmconnector/gemini_client.py
CHANGED
|
@@ -1,224 +1,299 @@
|
|
|
1
|
-
"""Thin wrapper around the Google Gemini API via the google-genai SDK."""
|
|
2
|
-
|
|
3
|
-
from __future__ import annotations
|
|
4
|
-
|
|
5
|
-
import base64
|
|
6
|
-
import mimetypes
|
|
7
|
-
from pathlib import Path
|
|
8
|
-
import logging
|
|
9
|
-
from typing import Optional, Sequence, Union
|
|
10
|
-
from urllib.error import URLError
|
|
11
|
-
from urllib.request import urlopen
|
|
12
|
-
|
|
13
|
-
from google import genai
|
|
14
|
-
from google.genai import types
|
|
15
|
-
|
|
16
|
-
ImageInput = Union[str, Path]
|
|
17
|
-
logger = logging.getLogger(__name__)
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
class GeminiClient:
|
|
21
|
-
"""Convenience wrapper around the Google Gemini SDK."""
|
|
22
|
-
|
|
23
|
-
def generate_response(
|
|
24
|
-
self,
|
|
25
|
-
*,
|
|
26
|
-
api_key: str,
|
|
27
|
-
prompt: str,
|
|
28
|
-
model: str,
|
|
29
|
-
max_tokens: int = 32000,
|
|
30
|
-
reasoning_effort: Optional[str] = None,
|
|
31
|
-
images: Optional[Sequence[ImageInput]] = None,
|
|
32
|
-
) -> str:
|
|
33
|
-
"""Generate a response from the specified Gemini model.
|
|
34
|
-
|
|
35
|
-
Args:
|
|
36
|
-
api_key: API key used to authenticate with the Gemini API.
|
|
37
|
-
prompt: Natural-language instruction or query for the model.
|
|
38
|
-
model: Identifier of the Gemini model to target (for example, ``"gemini-2.5-flash"``).
|
|
39
|
-
max_tokens: Cap for tokens across the entire exchange, defaults to 32000.
|
|
40
|
-
reasoning_effort: Included for API parity; currently unused by the Gemini SDK.
|
|
41
|
-
images: Optional collection of image references (local paths, URLs, or data URLs).
|
|
42
|
-
|
|
43
|
-
Returns:
|
|
44
|
-
The text output produced by the model.
|
|
45
|
-
|
|
46
|
-
Raises:
|
|
47
|
-
ValueError: If required arguments are missing or the request payload is empty.
|
|
48
|
-
URLError: If an image URL cannot be retrieved.
|
|
49
|
-
google.genai.errors.APIError: If the underlying Gemini request fails.
|
|
50
|
-
"""
|
|
51
|
-
if not api_key:
|
|
52
|
-
raise ValueError("api_key must be provided.")
|
|
53
|
-
if not prompt and not images:
|
|
54
|
-
raise ValueError("At least one of prompt or images must be provided.")
|
|
55
|
-
if not model:
|
|
56
|
-
raise ValueError("model must be provided.")
|
|
57
|
-
|
|
58
|
-
parts: list[types.Part] = []
|
|
59
|
-
if prompt:
|
|
60
|
-
parts.append(types.Part.from_text(text=prompt))
|
|
61
|
-
|
|
62
|
-
if images:
|
|
63
|
-
for image in images:
|
|
64
|
-
parts.append(self._to_image_part(image))
|
|
65
|
-
|
|
66
|
-
if not parts:
|
|
67
|
-
raise ValueError("No content provided for response generation.")
|
|
68
|
-
|
|
69
|
-
content = types.Content(role="user", parts=parts)
|
|
70
|
-
|
|
71
|
-
config = types.GenerateContentConfig(max_output_tokens=max_tokens)
|
|
72
|
-
# reasoning_effort is accepted for compatibility but not currently applied because the
|
|
73
|
-
# Gemini SDK does not expose an equivalent configuration parameter.
|
|
74
|
-
|
|
75
|
-
client = genai.Client(api_key=api_key)
|
|
76
|
-
try:
|
|
77
|
-
try:
|
|
78
|
-
response = client.models.generate_content(
|
|
79
|
-
model=model,
|
|
80
|
-
contents=[content],
|
|
81
|
-
config=config,
|
|
82
|
-
)
|
|
83
|
-
except Exception as exc:
|
|
84
|
-
logger.exception("Gemini generate_content failed: %s", exc)
|
|
85
|
-
raise
|
|
86
|
-
finally:
|
|
87
|
-
closer = getattr(client, "close", None)
|
|
88
|
-
if callable(closer):
|
|
89
|
-
try:
|
|
90
|
-
closer()
|
|
91
|
-
except Exception:
|
|
92
|
-
pass
|
|
93
|
-
|
|
94
|
-
if response.text:
|
|
95
|
-
result_text = response.text
|
|
96
|
-
logger.info(
|
|
97
|
-
"Gemini generate_content succeeded: model=%s images=%d text_len=%d",
|
|
98
|
-
model,
|
|
99
|
-
len(images or []),
|
|
100
|
-
len(result_text or ""),
|
|
101
|
-
)
|
|
102
|
-
return result_text
|
|
103
|
-
|
|
104
|
-
candidate_texts: list[str] = []
|
|
105
|
-
for candidate in getattr(response, "candidates", []) or []:
|
|
106
|
-
content_obj = getattr(candidate, "content", None)
|
|
107
|
-
if not content_obj:
|
|
108
|
-
continue
|
|
109
|
-
for part in getattr(content_obj, "parts", []) or []:
|
|
110
|
-
text = getattr(part, "text", None)
|
|
111
|
-
if text:
|
|
112
|
-
candidate_texts.append(text)
|
|
113
|
-
|
|
114
|
-
if candidate_texts:
|
|
115
|
-
result_text = "\n".join(candidate_texts)
|
|
116
|
-
logger.info(
|
|
117
|
-
"Gemini generate_content succeeded (candidates): model=%s images=%d text_len=%d",
|
|
118
|
-
model,
|
|
119
|
-
len(images or []),
|
|
120
|
-
len(result_text or ""),
|
|
121
|
-
)
|
|
122
|
-
return result_text
|
|
123
|
-
|
|
124
|
-
# Treat successful calls without textual content as a successful, empty response
|
|
125
|
-
# rather than raising. This aligns with callers that handle empty outputs gracefully.
|
|
126
|
-
logger.info(
|
|
127
|
-
"Gemini generate_content succeeded with no text: model=%s images=%d",
|
|
128
|
-
model,
|
|
129
|
-
len(images or []),
|
|
130
|
-
)
|
|
131
|
-
return ""
|
|
132
|
-
|
|
133
|
-
def
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
def
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
1
|
+
"""Thin wrapper around the Google Gemini API via the google-genai SDK."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import base64
|
|
6
|
+
import mimetypes
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
import logging
|
|
9
|
+
from typing import Optional, Sequence, Union
|
|
10
|
+
from urllib.error import URLError
|
|
11
|
+
from urllib.request import urlopen
|
|
12
|
+
|
|
13
|
+
from google import genai
|
|
14
|
+
from google.genai import types
|
|
15
|
+
|
|
16
|
+
ImageInput = Union[str, Path]
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class GeminiClient:
|
|
21
|
+
"""Convenience wrapper around the Google Gemini SDK."""
|
|
22
|
+
|
|
23
|
+
    def generate_response(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
    ) -> str:
        """Generate a response from the specified Gemini model.

        Args:
            api_key: API key used to authenticate with the Gemini API.
            prompt: Natural-language instruction or query for the model.
            model: Identifier of the Gemini model to target (for example, ``"gemini-2.5-flash"``).
            max_tokens: Cap for tokens across the entire exchange, defaults to 32000.
            reasoning_effort: Included for API parity; currently unused by the Gemini SDK.
            images: Optional collection of image references (local paths, URLs, or data URLs).

        Returns:
            The text output produced by the model.

        Raises:
            ValueError: If required arguments are missing or the request payload is empty.
            URLError: If an image URL cannot be retrieved.
            google.genai.errors.APIError: If the underlying Gemini request fails.
        """
        if not api_key:
            raise ValueError("api_key must be provided.")
        if not prompt and not images:
            raise ValueError("At least one of prompt or images must be provided.")
        if not model:
            raise ValueError("model must be provided.")

        # Assemble the user turn: optional text part first, then any image parts.
        parts: list[types.Part] = []
        if prompt:
            parts.append(types.Part.from_text(text=prompt))

        if images:
            for image in images:
                parts.append(self._to_image_part(image))

        if not parts:
            raise ValueError("No content provided for response generation.")

        content = types.Content(role="user", parts=parts)

        config = types.GenerateContentConfig(max_output_tokens=max_tokens)
        # reasoning_effort is accepted for compatibility but not currently applied because the
        # Gemini SDK does not expose an equivalent configuration parameter.

        client = genai.Client(api_key=api_key)
        try:
            try:
                response = client.models.generate_content(
                    model=model,
                    contents=[content],
                    config=config,
                )
            except Exception as exc:
                # Log with traceback, then let the SDK exception propagate to the caller.
                logger.exception("Gemini generate_content failed: %s", exc)
                raise
        finally:
            # Best-effort cleanup; guarded because not every SDK version exposes close().
            closer = getattr(client, "close", None)
            if callable(closer):
                try:
                    closer()
                except Exception:
                    pass

        # Fast path: the SDK's aggregated text accessor.
        if response.text:
            result_text = response.text
            logger.info(
                "Gemini generate_content succeeded: model=%s images=%d text_len=%d",
                model,
                len(images or []),
                len(result_text or ""),
            )
            return result_text

        # Fallback: stitch together the text parts of each candidate when the
        # aggregated response.text is empty. getattr/or-[] guards tolerate
        # missing or None attributes on the response object.
        candidate_texts: list[str] = []
        for candidate in getattr(response, "candidates", []) or []:
            content_obj = getattr(candidate, "content", None)
            if not content_obj:
                continue
            for part in getattr(content_obj, "parts", []) or []:
                text = getattr(part, "text", None)
                if text:
                    candidate_texts.append(text)

        if candidate_texts:
            result_text = "\n".join(candidate_texts)
            logger.info(
                "Gemini generate_content succeeded (candidates): model=%s images=%d text_len=%d",
                model,
                len(images or []),
                len(result_text or ""),
            )
            return result_text

        # Treat successful calls without textual content as a successful, empty response
        # rather than raising. This aligns with callers that handle empty outputs gracefully.
        logger.info(
            "Gemini generate_content succeeded with no text: model=%s images=%d",
            model,
            len(images or []),
        )
        return ""
|
|
132
|
+
|
|
133
|
+
def generate_image(
|
|
134
|
+
self,
|
|
135
|
+
*,
|
|
136
|
+
api_key: str,
|
|
137
|
+
prompt: str,
|
|
138
|
+
model: str,
|
|
139
|
+
image_size: Optional[str] = None,
|
|
140
|
+
aspect_ratio: Optional[str] = None,
|
|
141
|
+
image: Optional[ImageInput] = None,
|
|
142
|
+
) -> bytes:
|
|
143
|
+
"""Generate an image using Gemini 3 Pro Image.
|
|
144
|
+
|
|
145
|
+
Args:
|
|
146
|
+
api_key: API key used to authenticate with the Gemini API.
|
|
147
|
+
prompt: Text prompt for image generation.
|
|
148
|
+
model: Identifier of the Gemini model to target (e.g., "gemini-3-pro-image-preview").
|
|
149
|
+
image_size: Size of the generated image (e.g., "2K", "4K"). Defaults to "2K".
|
|
150
|
+
aspect_ratio: Aspect ratio of the generated image (e.g., "16:9", "4:3").
|
|
151
|
+
image: Optional input image for editing tasks.
|
|
152
|
+
|
|
153
|
+
Returns:
|
|
154
|
+
The generated image data as bytes.
|
|
155
|
+
|
|
156
|
+
Raises:
|
|
157
|
+
ValueError: If required arguments are missing or no image is returned.
|
|
158
|
+
google.genai.errors.APIError: If the underlying Gemini request fails.
|
|
159
|
+
"""
|
|
160
|
+
if not api_key:
|
|
161
|
+
raise ValueError("api_key must be provided.")
|
|
162
|
+
if not prompt:
|
|
163
|
+
raise ValueError("prompt must be provided.")
|
|
164
|
+
if not model:
|
|
165
|
+
raise ValueError("model must be provided.")
|
|
166
|
+
|
|
167
|
+
client = genai.Client(api_key=api_key)
|
|
168
|
+
|
|
169
|
+
config = types.GenerateContentConfig(
|
|
170
|
+
tools=[{"google_search": {}}],
|
|
171
|
+
image_config=types.ImageConfig(
|
|
172
|
+
image_size=image_size or "2K",
|
|
173
|
+
aspect_ratio=aspect_ratio,
|
|
174
|
+
)
|
|
175
|
+
)
|
|
176
|
+
|
|
177
|
+
contents = [prompt]
|
|
178
|
+
if image:
|
|
179
|
+
contents.append(self._to_image_part(image))
|
|
180
|
+
|
|
181
|
+
try:
|
|
182
|
+
try:
|
|
183
|
+
response = client.models.generate_content(
|
|
184
|
+
model=model,
|
|
185
|
+
contents=contents,
|
|
186
|
+
config=config,
|
|
187
|
+
)
|
|
188
|
+
except Exception as exc:
|
|
189
|
+
logger.exception("Gemini generate_image failed: %s", exc)
|
|
190
|
+
raise
|
|
191
|
+
finally:
|
|
192
|
+
closer = getattr(client, "close", None)
|
|
193
|
+
if callable(closer):
|
|
194
|
+
try:
|
|
195
|
+
closer()
|
|
196
|
+
except Exception:
|
|
197
|
+
pass
|
|
198
|
+
|
|
199
|
+
if not response.parts:
|
|
200
|
+
raise ValueError("No content returned from Gemini.")
|
|
201
|
+
|
|
202
|
+
for part in response.parts:
|
|
203
|
+
if part.inline_data:
|
|
204
|
+
return part.inline_data.data
|
|
205
|
+
|
|
206
|
+
raise ValueError("No image data found in response.")
|
|
207
|
+
|
|
208
|
+
def list_models(self, *, api_key: str) -> list[dict[str, Optional[str]]]:
|
|
209
|
+
"""Return the models available to the authenticated Gemini account."""
|
|
210
|
+
if not api_key:
|
|
211
|
+
raise ValueError("api_key must be provided.")
|
|
212
|
+
|
|
213
|
+
models: list[dict[str, Optional[str]]] = []
|
|
214
|
+
client = genai.Client(api_key=api_key)
|
|
215
|
+
try:
|
|
216
|
+
try:
|
|
217
|
+
iterator = client.models.list()
|
|
218
|
+
except Exception as exc:
|
|
219
|
+
logger.exception("Gemini list models failed: %s", exc)
|
|
220
|
+
raise
|
|
221
|
+
for model in iterator:
|
|
222
|
+
model_id = getattr(model, "name", None)
|
|
223
|
+
if model_id is None and isinstance(model, dict):
|
|
224
|
+
model_id = model.get("name")
|
|
225
|
+
if not model_id:
|
|
226
|
+
continue
|
|
227
|
+
|
|
228
|
+
# Normalize IDs like "models/<id>" -> "<id>"
|
|
229
|
+
if isinstance(model_id, str) and model_id.startswith("models/"):
|
|
230
|
+
model_id = model_id.split("/", 1)[1]
|
|
231
|
+
|
|
232
|
+
display_name = getattr(model, "display_name", None)
|
|
233
|
+
if display_name is None and isinstance(model, dict):
|
|
234
|
+
display_name = model.get("display_name")
|
|
235
|
+
|
|
236
|
+
models.append({"id": model_id, "display_name": display_name})
|
|
237
|
+
finally:
|
|
238
|
+
closer = getattr(client, "close", None)
|
|
239
|
+
if callable(closer):
|
|
240
|
+
try:
|
|
241
|
+
closer()
|
|
242
|
+
except Exception:
|
|
243
|
+
pass
|
|
244
|
+
|
|
245
|
+
logger.info("Gemini list_models succeeded: count=%d", len(models))
|
|
246
|
+
return models
|
|
247
|
+
|
|
248
|
+
@staticmethod
|
|
249
|
+
def _to_image_part(image: ImageInput) -> types.Part:
|
|
250
|
+
"""Convert an image reference into a Gemini SDK part."""
|
|
251
|
+
if isinstance(image, Path):
|
|
252
|
+
return _part_from_path(image)
|
|
253
|
+
|
|
254
|
+
if image.startswith("data:"):
|
|
255
|
+
return _part_from_data_url(image)
|
|
256
|
+
|
|
257
|
+
if image.startswith(("http://", "https://")):
|
|
258
|
+
return _part_from_url(image)
|
|
259
|
+
|
|
260
|
+
return _part_from_path(Path(image))
|
|
261
|
+
|
|
262
|
+
|
|
263
|
+
def _part_from_path(path: Path) -> types.Part:
    """Create an image part from a local filesystem path."""
    resolved = path.expanduser()
    # Fall back to a generic binary type when the extension is unknown.
    guessed, _ = mimetypes.guess_type(resolved.name)
    return types.Part.from_bytes(
        data=resolved.read_bytes(),
        mime_type=guessed or "application/octet-stream",
    )
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
def _part_from_url(url: str) -> types.Part:
    """Create an image part by downloading content from a URL."""
    with urlopen(url) as resp:
        payload = resp.read()
        content_type = resp.info().get_content_type()

    # When the server is silent or generic, guess from the URL's extension.
    if not content_type or content_type == "application/octet-stream":
        content_type = mimetypes.guess_type(url)[0] or "application/octet-stream"

    return types.Part.from_bytes(data=payload, mime_type=content_type)
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
def _part_from_data_url(data_url: str) -> types.Part:
    """Create an image part from a ``data:`` URL.

    Handles both base64 payloads (``data:image/png;base64,...``) and plain
    payloads (``data:text/plain,hello%20world``). Per RFC 2397, a non-base64
    payload is percent-encoded, so it is decoded with ``unquote_to_bytes``
    instead of being re-encoded verbatim.

    Args:
        data_url: Full data URL, including the ``data:`` scheme prefix.

    Returns:
        A Gemini SDK part wrapping the decoded payload bytes.

    Raises:
        ValueError: If the data URL lacks the ``,`` separating metadata from payload.
        binascii.Error: If a base64 payload is malformed.
    """
    # Local stdlib import keeps the module-level import block unchanged.
    from urllib.parse import unquote_to_bytes

    header, sep, encoded = data_url.partition(",")
    if not sep:
        # Previously this surfaced as an opaque unpacking ValueError; keep the
        # exception type but make the message actionable.
        raise ValueError("Invalid data URL: missing ',' separator.")

    metadata = header[len("data:") :]
    mime_type = "application/octet-stream"

    if ";" in metadata:
        # e.g. "image/png;base64" -> mime before the first ';', flags after.
        mime_type, _, metadata = metadata.partition(";")
    elif metadata:
        mime_type = metadata

    if "base64" in metadata:
        data = base64.b64decode(encoded)
    else:
        # RFC 2397: non-base64 payloads are percent-encoded octets.
        data = unquote_to_bytes(encoded)

    return types.Part.from_bytes(data=data, mime_type=mime_type or "application/octet-stream")
|