ccs-llmconnector 1.0.6__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.1.dist-info}/METADATA +51 -8
- ccs_llmconnector-1.1.1.dist-info/RECORD +16 -0
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.1.dist-info}/WHEEL +1 -1
- llmconnector/__init__.py +11 -1
- llmconnector/anthropic_client.py +209 -66
- llmconnector/client.py +225 -10
- llmconnector/client_cli.py +27 -0
- llmconnector/gemini_client.py +316 -119
- llmconnector/grok_client.py +208 -78
- llmconnector/openai_client.py +194 -62
- llmconnector/types.py +49 -0
- llmconnector/utils.py +78 -0
- ccs_llmconnector-1.0.6.dist-info/RECORD +0 -14
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.1.dist-info}/entry_points.txt +0 -0
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.1.dist-info}/licenses/LICENSE +0 -0
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.1.dist-info}/top_level.txt +0 -0
llmconnector/gemini_client.py
CHANGED
@@ -6,14 +6,15 @@ import base64
 import mimetypes
 from pathlib import Path
 import logging
-from typing import Optional, Sequence
-from urllib.error import URLError
+from typing import Optional, Sequence
 from urllib.request import urlopen
 
 from google import genai
 from google.genai import types
 
-
+from .types import ImageInput, MessageSequence, normalize_messages
+from .utils import clamp_retries, run_sync_in_thread, run_with_retries
+
 logger = logging.getLogger(__name__)
 
 
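The new imports reference helpers introduced in this release (`llmconnector/types.py` +49 and `llmconnector/utils.py` +78 in the file list above) whose definitions are not shown in this file's diff. Below is a minimal sketch of a `types.py` surface that would be consistent with the call sites in the hunks that follow; every definition here is an assumption for illustration, not the shipped source:

```python
# Hypothetical sketch of llmconnector/types.py (assumed, not the published code).
from pathlib import Path
from typing import Optional, Sequence, TypedDict, Union

# The docstrings below mention local paths, URLs, and data URLs.
ImageInput = Union[str, Path]

class Message(TypedDict):
    role: str      # Gemini-style roles such as "user" or "model"
    content: str

MessageSequence = Sequence[Message]

def normalize_messages(
    *,
    prompt: Optional[str] = None,
    messages: Optional[MessageSequence] = None,
) -> list[Message]:
    """Assumed behavior: pass messages through, folding a bare prompt into a user turn."""
    normalized: list[Message] = list(messages or [])
    if prompt:
        normalized.append({"role": "user", "content": prompt})
    return normalized
```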
@@ -24,11 +25,16 @@ class GeminiClient:
         self,
         *,
         api_key: str,
-        prompt: str,
+        prompt: Optional[str] = None,
         model: str,
         max_tokens: int = 32000,
         reasoning_effort: Optional[str] = None,
         images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
     ) -> str:
         """Generate a response from the specified Gemini model.
 
@@ -39,6 +45,11 @@ class GeminiClient:
             max_tokens: Cap for tokens across the entire exchange, defaults to 32000.
             reasoning_effort: Included for API parity; currently unused by the Gemini SDK.
             images: Optional collection of image references (local paths, URLs, or data URLs).
+            messages: Optional list of chat-style messages (role/content).
+            request_id: Optional request identifier for tracing/logging.
+            timeout_s: Optional request timeout in seconds.
+            max_retries: Optional retry count for transient failures.
+            retry_backoff_s: Base delay (seconds) for exponential backoff between retries.
 
         Returns:
             The text output produced by the model.
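Taken together, the new parameters let `generate_response` be driven by chat history rather than a single prompt, with tracing (`request_id`), a per-request timeout, and retry knobs. A usage sketch; the no-argument constructor and the model id are assumptions for illustration:

```python
from llmconnector.gemini_client import GeminiClient

client = GeminiClient()  # assumed no-arg constructor
text = client.generate_response(
    api_key="YOUR_GEMINI_API_KEY",
    model="gemini-2.5-flash",  # hypothetical model id
    messages=[
        {"role": "user", "content": "Hello"},
        {"role": "model", "content": "Hi! How can I help?"},
        {"role": "user", "content": "Summarize this release in one line."},
    ],
    request_id="req-42",  # threaded through every log line
    timeout_s=30.0,       # values below 10s are clamped up with a warning
    max_retries=2,        # normalized via clamp_retries()
    retry_backoff_s=0.5,  # base delay for exponential backoff
)
print(text)
```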
@@ -50,85 +61,160 @@ class GeminiClient:
         """
         if not api_key:
             raise ValueError("api_key must be provided.")
-        if not prompt and not images:
-            raise ValueError("At least one of prompt or images must be provided.")
+        if not prompt and not messages and not images:
+            raise ValueError("At least one of prompt, messages, or images must be provided.")
         if not model:
             raise ValueError("model must be provided.")
 
-
-
-
+        normalized_messages = normalize_messages(prompt=prompt, messages=messages)
+        contents: list[types.Content] = []
+        for message in normalized_messages:
+            parts: list[types.Part] = []
+            if message["content"]:
+                parts.append(types.Part.from_text(text=message["content"]))
+            contents.append(types.Content(role=message["role"], parts=parts))
 
         if images:
-            for image in images
-
+            image_parts = [self._to_image_part(image) for image in images]
+            target_index = next(
+                (
+                    index
+                    for index in range(len(contents) - 1, -1, -1)
+                    if contents[index].role == "user"
+                ),
+                None,
+            )
+            if target_index is None:
+                contents.append(types.Content(role="user", parts=image_parts))
+            else:
+                existing_parts = list(contents[target_index].parts or [])
+                existing_parts.extend(image_parts)
+                contents[target_index] = types.Content(
+                    role="user", parts=existing_parts
+                )
 
-        if not parts:
+        if not contents or not any(content.parts for content in contents):
             raise ValueError("No content provided for response generation.")
 
-        content = types.Content(role="user", parts=parts)
-
         config = types.GenerateContentConfig(max_output_tokens=max_tokens)
         # reasoning_effort is accepted for compatibility but not currently applied because the
         # Gemini SDK does not expose an equivalent configuration parameter.
 
-
-
+        retry_count = clamp_retries(max_retries)
+
+        def _build_client() -> genai.Client:
+            client_kwargs: dict[str, object] = {"api_key": api_key}
+            if timeout_s is not None:
+                # Gemini requires at least 10s timeout if set
+                effective_timeout = max(10.0, timeout_s)
+                if effective_timeout != timeout_s:
+                    logger.warning("Gemini timeout %ss is too short, clamping to %ss.", timeout_s, effective_timeout)
+                client_kwargs["http_options"] = types.HttpOptions(timeout=effective_timeout)
+            return genai.Client(**client_kwargs)
+
+        def _run_request() -> str:
+            client = _build_client()
             try:
-            response = client.models.generate_content(
-                model=model,
-                contents=[content],
-                config=config,
-            )
-        except Exception as exc:
-            logger.exception("Gemini generate_content failed: %s", exc)
-            raise
-        finally:
-            closer = getattr(client, "close", None)
-            if callable(closer):
                 try:
-
-
-
+                    response = client.models.generate_content(
+                        model=model,
+                        contents=contents,
+                        config=config,
+                    )
+                except Exception as exc:
+                    logger.exception(
+                        "Gemini generate_content failed: %s request_id=%s",
+                        exc,
+                        request_id,
+                    )
+                    raise
+            finally:
+                closer = getattr(client, "close", None)
+                if callable(closer):
+                    try:
+                        closer()
+                    except Exception:
+                        pass
+
+            if response.text:
+                result_text = response.text
+                logger.info(
+                    "Gemini generate_content succeeded: model=%s images=%d text_len=%d request_id=%s",
+                    model,
+                    len(images or []),
+                    len(result_text or ""),
+                    request_id,
+                )
+                return result_text
+
+            candidate_texts: list[str] = []
+            for candidate in getattr(response, "candidates", []) or []:
+                content_obj = getattr(candidate, "content", None)
+                if not content_obj:
+                    continue
+                for part in getattr(content_obj, "parts", []) or []:
+                    text = getattr(part, "text", None)
+                    if text:
+                        candidate_texts.append(text)
+
+            if candidate_texts:
+                result_text = "\n".join(candidate_texts)
+                logger.info(
+                    "Gemini generate_content succeeded (candidates): model=%s images=%d text_len=%d request_id=%s",
+                    model,
+                    len(images or []),
+                    len(result_text or ""),
+                    request_id,
+                )
+                return result_text
 
-
-
+            # Treat successful calls without textual content as a successful, empty response
+            # rather than raising. This aligns with callers that handle empty outputs gracefully.
             logger.info(
-            "Gemini generate_content succeeded: model=%s images=%d
+                "Gemini generate_content succeeded with no text: model=%s images=%d request_id=%s",
                 model,
                 len(images or []),
-
+                request_id,
             )
-        return
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            return ""
+
+        return run_with_retries(
+            func=_run_request,
+            max_retries=retry_count,
+            retry_backoff_s=retry_backoff_s,
+            request_id=request_id,
+        )
+
+    async def async_generate_response(
+        self,
+        *,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        return await run_sync_in_thread(
+            lambda: self.generate_response(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
             )
-        return result_text
-
-        # Treat successful calls without textual content as a successful, empty response
-        # rather than raising. This aligns with callers that handle empty outputs gracefully.
-        logger.info(
-            "Gemini generate_content succeeded with no text: model=%s images=%d",
-            model,
-            len(images or []),
         )
-        return ""
 
     def generate_image(
         self,
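The retry and async plumbing above leans on `llmconnector/utils.py`, which this release changes (+78 lines) but which is not displayed in this file's diff. A sketch of helpers that would satisfy the call sites (`clamp_retries`, `run_with_retries`, `run_sync_in_thread`); the bodies, the defaults, and the choice to retry on any `Exception` are all assumptions:

```python
# Hypothetical sketch of llmconnector/utils.py (assumed, not the published code).
import asyncio
import logging
import time
from typing import Callable, Optional, TypeVar

logger = logging.getLogger(__name__)
T = TypeVar("T")

def clamp_retries(max_retries: Optional[int], ceiling: int = 5) -> int:
    """Assumed: coerce None/negative values into a small non-negative range."""
    if max_retries is None:
        return 0
    return max(0, min(max_retries, ceiling))

def run_with_retries(
    *,
    func: Callable[[], T],
    max_retries: int,
    retry_backoff_s: float,
    request_id: Optional[str] = None,
) -> T:
    """Assumed: exponential backoff; the real helper may retry only transient errors."""
    for attempt in range(max_retries + 1):
        try:
            return func()
        except Exception:
            if attempt == max_retries:
                raise
            delay = retry_backoff_s * (2 ** attempt)
            logger.warning(
                "Retrying after %.2fs (attempt %d/%d, request_id=%s)",
                delay, attempt + 1, max_retries, request_id,
            )
            time.sleep(delay)
    raise AssertionError("unreachable")

async def run_sync_in_thread(func: Callable[[], T]) -> T:
    """Assumed: off-load a blocking call to a worker thread (Python 3.9+)."""
    return await asyncio.to_thread(func)
```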
@@ -139,6 +225,10 @@ class GeminiClient:
         image_size: Optional[str] = None,
         aspect_ratio: Optional[str] = None,
         image: Optional[ImageInput] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
     ) -> bytes:
         """Generate an image using Gemini 3 Pro Image.
 
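`generate_image` picks up the same tracing and resilience parameters; its body changes follow in the next hunk. A call sketch reusing the client from the earlier example, with the model id and output handling assumed:

```python
# Hedged sketch: the model id for "Gemini 3 Pro Image" is an assumption.
png = client.generate_image(
    api_key="YOUR_GEMINI_API_KEY",
    prompt="A lighthouse at dawn, watercolor",
    model="gemini-3-pro-image-preview",
    image_size="2K",      # the client defaults to "2K" when omitted
    aspect_ratio="16:9",
    timeout_s=60.0,
    max_retries=1,
)
with open("lighthouse.png", "wb") as fh:
    fh.write(png)  # raw bytes from the first inline_data part
```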
@@ -164,86 +254,193 @@ class GeminiClient:
         if not model:
             raise ValueError("model must be provided.")
 
-        client = genai.Client(api_key=api_key)
-
         config = types.GenerateContentConfig(
             tools=[{"google_search": {}}],
             image_config=types.ImageConfig(
                 image_size=image_size or "2K",
                 aspect_ratio=aspect_ratio,
-            )
+            ),
         )
 
         contents = [prompt]
         if image:
             contents.append(self._to_image_part(image))
 
-
+        retry_count = clamp_retries(max_retries)
+
+        def _build_client() -> genai.Client:
+            client_kwargs: dict[str, object] = {"api_key": api_key}
+            if timeout_s is not None:
+                # Gemini requires at least 10s timeout if set
+                effective_timeout = max(10.0, timeout_s)
+                if effective_timeout != timeout_s:
+                    logger.warning("Gemini timeout %ss is too short, clamping to %ss.", timeout_s, effective_timeout)
+                client_kwargs["http_options"] = types.HttpOptions(timeout=effective_timeout)
+            return genai.Client(**client_kwargs)
+
+        def _run_request() -> bytes:
+            client = _build_client()
             try:
-            response = client.models.generate_content(
-                model=model,
-                contents=contents,
-                config=config,
-            )
-        except Exception as exc:
-            logger.exception("Gemini generate_image failed: %s", exc)
-            raise
-        finally:
-            closer = getattr(client, "close", None)
-            if callable(closer):
                 try:
-
-
-
-
-
-
+                    response = client.models.generate_content(
+                        model=model,
+                        contents=contents,
+                        config=config,
+                    )
+                except Exception as exc:
+                    logger.exception(
+                        "Gemini generate_image failed: %s request_id=%s",
+                        exc,
+                        request_id,
+                    )
+                    raise
+            finally:
+                closer = getattr(client, "close", None)
+                if callable(closer):
+                    try:
+                        closer()
+                    except Exception:
+                        pass
+
+            if not response.parts:
+                raise ValueError("No content returned from Gemini.")
+
+            for part in response.parts:
+                if part.inline_data:
+                    return part.inline_data.data
+
+            raise ValueError("No image data found in response.")
+
+        return run_with_retries(
+            func=_run_request,
+            max_retries=retry_count,
+            retry_backoff_s=retry_backoff_s,
+            request_id=request_id,
+        )
 
-
-
-
-
-
+    async def async_generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> bytes:
+        return await run_sync_in_thread(
+            lambda: self.generate_image(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
-    def list_models(
+    def list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> list[dict[str, Optional[str]]]:
         """Return the models available to the authenticated Gemini account."""
         if not api_key:
             raise ValueError("api_key must be provided.")
 
-
-
-
+        retry_count = clamp_retries(max_retries)
+
+        def _build_client() -> genai.Client:
+            client_kwargs: dict[str, object] = {"api_key": api_key}
+            if timeout_s is not None:
+                # Gemini requires at least 10s timeout if set
+                effective_timeout = max(10.0, timeout_s)
+                if effective_timeout != timeout_s:
+                    logger.warning("Gemini timeout %ss is too short, clamping to %ss.", timeout_s, effective_timeout)
+                client_kwargs["http_options"] = types.HttpOptions(timeout=effective_timeout)
+            return genai.Client(**client_kwargs)
+
+        def _run_request() -> list[dict[str, Optional[str]]]:
+            models: list[dict[str, Optional[str]]] = []
+            client = _build_client()
             try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                try:
+                    iterator = client.models.list()
+                except Exception as exc:
+                    logger.exception(
+                        "Gemini list models failed: %s request_id=%s",
+                        exc,
+                        request_id,
+                    )
+                    raise
+                for model in iterator:
+                    model_id = getattr(model, "name", None)
+                    if model_id is None and isinstance(model, dict):
+                        model_id = model.get("name")
+                    if not model_id:
+                        continue
+
+                    # Normalize IDs like "models/<id>" -> "<id>"
+                    if isinstance(model_id, str) and model_id.startswith("models/"):
+                        model_id = model_id.split("/", 1)[1]
+
+                    display_name = getattr(model, "display_name", None)
+                    if display_name is None and isinstance(model, dict):
+                        display_name = model.get("display_name")
+
+                    models.append({"id": model_id, "display_name": display_name})
+            finally:
+                closer = getattr(client, "close", None)
+                if callable(closer):
+                    try:
+                        closer()
+                    except Exception:
+                        pass
 
-
-
-
+            logger.info(
+                "Gemini list_models succeeded: count=%d request_id=%s",
+                len(models),
+                request_id,
+            )
+            return models
 
-
-
-
-
-
-
-                except Exception:
-                    pass
+        return run_with_retries(
+            func=_run_request,
+            max_retries=retry_count,
+            retry_backoff_s=retry_backoff_s,
+            request_id=request_id,
+        )
 
-
-
+    async def async_list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> list[dict[str, Optional[str]]]:
+        return await run_sync_in_thread(
+            lambda: self.list_models(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
     @staticmethod
     def _to_image_part(image: ImageInput) -> types.Part: