ccs-llmconnector 1.1.0__py3-none-any.whl → 1.1.1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.1.dist-info}/METADATA +1 -1
- ccs_llmconnector-1.1.1.dist-info/RECORD +16 -0
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.1.dist-info}/WHEEL +1 -1
- llmconnector/__init__.py +21 -21
- llmconnector/anthropic_client.py +266 -266
- llmconnector/client.py +291 -291
- llmconnector/client_cli.py +42 -42
- llmconnector/gemini_client.py +393 -396
- llmconnector/grok_client.py +270 -270
- llmconnector/openai_client.py +256 -256
- llmconnector/types.py +48 -48
- llmconnector/utils.py +77 -77
- ccs_llmconnector-1.1.0.dist-info/RECORD +0 -16
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.1.dist-info}/entry_points.txt +0 -0
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.1.dist-info}/licenses/LICENSE +0 -0
- {ccs_llmconnector-1.1.0.dist-info → ccs_llmconnector-1.1.1.dist-info}/top_level.txt +0 -0
llmconnector/client.py
CHANGED

The removed ("-") and re-added ("+") lines in every hunk of this file are identical, so each hunk's content is shown once below. The symmetric +291/-291 count suggests the file was re-emitted unchanged when the 1.1.1 wheel was built rather than edited at the source level.

@@ -2,95 +2,95 @@

from __future__ import annotations

import asyncio
from typing import Dict, Optional, Protocol, Sequence

from .types import ImageInput, MessageSequence
from .utils import run_sync_in_thread


class SupportsGenerateResponse(Protocol):
    """Protocol describing provider clients."""

    def generate_response(
        self,
        *,
        api_key: str,
        prompt: Optional[str] = None,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
        messages: Optional[MessageSequence] = None,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> str:
        ...

    async def async_generate_response(
        self,
        *,
        api_key: str,
        prompt: Optional[str] = None,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
        messages: Optional[MessageSequence] = None,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> str:
        ...

    def generate_image(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        image_size: Optional[str] = None,
        aspect_ratio: Optional[str] = None,
        image: Optional[ImageInput] = None,
    ) -> bytes:
        ...

    async def async_generate_image(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        image_size: Optional[str] = None,
        aspect_ratio: Optional[str] = None,
        image: Optional[ImageInput] = None,
    ) -> bytes:
        ...

    def list_models(
        self,
        *,
        api_key: str,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> Sequence[dict[str, Optional[str]]]:
        ...

    async def async_list_models(
        self,
        *,
        api_key: str,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> Sequence[dict[str, Optional[str]]]:
        ...


class LLMClient:
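The hunk above defines SupportsGenerateResponse as a typing.Protocol, so the bundled provider clients (anthropic_client.py, openai_client.py, gemini_client.py, grok_client.py) only have to match these signatures structurally; no shared base class is required. Below is a minimal, hypothetical stub illustrating that contract. Only two of the six protocol methods are sketched, and the ImageInput/MessageSequence parameter types are loosened; a fully conforming client would also supply the image and model-listing methods.

    from typing import Optional, Sequence


    class EchoProvider:
        """Toy provider used only for illustration; not part of the package."""

        def generate_response(
            self,
            *,
            api_key: str,
            prompt: Optional[str] = None,
            model: str,
            max_tokens: int = 32000,
            reasoning_effort: Optional[str] = None,
            images: Optional[Sequence[object]] = None,
            messages: Optional[Sequence[dict]] = None,
            request_id: Optional[str] = None,
            timeout_s: Optional[float] = None,
            max_retries: Optional[int] = None,
            retry_backoff_s: float = 0.5,
        ) -> str:
            # A real client would call its provider's API here, honoring
            # timeout_s and max_retries; this stub just echoes the prompt.
            return f"[{model}] {prompt or ''}"

        async def async_generate_response(self, **kwargs) -> str:
            # Native async entry point; LLMClient awaits this directly when it
            # is a genuine coroutine function (see the dispatch logic below).
            return self.generate_response(**kwargs)

Because Protocol relies on structural typing, an object like this can be registered with LLMClient without subclassing anything; only the method signatures have to line up.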
@@ -120,22 +120,22 @@ class LLMClient:

        self._providers[name.lower()] = client

    def generate_response(
        self,
        *,
        provider: str,
        api_key: str,
        prompt: Optional[str] = None,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
        messages: Optional[MessageSequence] = None,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> str:
        """Generate a response using the selected provider."""
        if not provider:
            raise ValueError("provider must be provided.")
@@ -147,84 +147,84 @@ class LLMClient:
                f"Unknown provider '{provider}'. Available providers: {available}."
            )

        return provider_client.generate_response(
            api_key=api_key,
            prompt=prompt,
            model=model,
            max_tokens=max_tokens,
            reasoning_effort=reasoning_effort,
            images=images,
            messages=messages,
            request_id=request_id,
            timeout_s=timeout_s,
            max_retries=max_retries,
            retry_backoff_s=retry_backoff_s,
        )

    async def async_generate_response(
        self,
        *,
        provider: str,
        api_key: str,
        prompt: Optional[str] = None,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
        messages: Optional[MessageSequence] = None,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> str:
        """Generate a response using the selected provider (async)."""
        if not provider:
            raise ValueError("provider must be provided.")

        provider_client = self._providers.get(provider.lower())
        if provider_client is None:
            available = ", ".join(sorted(self._providers)) or "<none>"
            raise ValueError(
                f"Unknown provider '{provider}'. Available providers: {available}."
            )

        async_method = getattr(provider_client, "async_generate_response", None)
        if async_method is not None and asyncio.iscoroutinefunction(async_method):
            return await async_method(
                api_key=api_key,
                prompt=prompt,
                model=model,
                max_tokens=max_tokens,
                reasoning_effort=reasoning_effort,
                images=images,
                messages=messages,
                request_id=request_id,
                timeout_s=timeout_s,
                max_retries=max_retries,
                retry_backoff_s=retry_backoff_s,
            )

        return await run_sync_in_thread(
            lambda: provider_client.generate_response(
                api_key=api_key,
                prompt=prompt,
                model=model,
                max_tokens=max_tokens,
                reasoning_effort=reasoning_effort,
                images=images,
                messages=messages,
                request_id=request_id,
                timeout_s=timeout_s,
                max_retries=max_retries,
                retry_backoff_s=retry_backoff_s,
            )
        )

    def generate_image(
        self,
        *,
        provider: str,
        api_key: str,
        prompt: str,
        model: str,
        image_size: Optional[str] = None,
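The hunk above shows the async dispatch pattern used throughout LLMClient: if the registered client exposes a genuine coroutine function named async_generate_response, it is awaited directly (the asyncio.iscoroutinefunction guard keeps a plain sync attribute with that name from being awaited by mistake); otherwise the synchronous generate_response is wrapped in run_sync_in_thread so the event loop is not blocked. run_sync_in_thread lives in llmconnector/utils.py, which is not reproduced in this section; the sketch below is only a plausible guess at such a helper, assuming it offloads the callable to a worker thread.

    import asyncio
    from typing import Callable, TypeVar

    T = TypeVar("T")


    async def run_sync_in_thread(func: Callable[[], T]) -> T:
        # Offload the blocking call to the default thread-pool executor so the
        # running event loop stays responsive while the provider SDK works.
        return await asyncio.to_thread(func)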
@@ -242,69 +242,69 @@ class LLMClient:
                f"Unknown provider '{provider}'. Available providers: {available}."
            )

        return provider_client.generate_image(
            api_key=api_key,
            prompt=prompt,
            model=model,
            image_size=image_size,
            aspect_ratio=aspect_ratio,
            image=image,
        )

    async def async_generate_image(
        self,
        *,
        provider: str,
        api_key: str,
        prompt: str,
        model: str,
        image_size: Optional[str] = None,
        aspect_ratio: Optional[str] = None,
        image: Optional[ImageInput] = None,
    ) -> bytes:
        """Generate an image using the selected provider (async)."""
        if not provider:
            raise ValueError("provider must be provided.")

        provider_client = self._providers.get(provider.lower())
        if provider_client is None:
            available = ", ".join(sorted(self._providers)) or "<none>"
            raise ValueError(
                f"Unknown provider '{provider}'. Available providers: {available}."
            )

        async_method = getattr(provider_client, "async_generate_image", None)
        if async_method is not None and asyncio.iscoroutinefunction(async_method):
            return await async_method(
                api_key=api_key,
                prompt=prompt,
                model=model,
                image_size=image_size,
                aspect_ratio=aspect_ratio,
                image=image,
            )

        return await run_sync_in_thread(
            lambda: provider_client.generate_image(
                api_key=api_key,
                prompt=prompt,
                model=model,
                image_size=image_size,
                aspect_ratio=aspect_ratio,
                image=image,
            )
        )

    def list_models(
        self,
        *,
        provider: str,
        api_key: str,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> Sequence[dict[str, Optional[str]]]:
        """List models available for the specified provider."""
        if not provider:
            raise ValueError("provider must be provided.")
@@ -316,54 +316,54 @@ class LLMClient:
                f"Unknown provider '{provider}'. Available providers: {available}."
            )

        return provider_client.list_models(
            api_key=api_key,
            request_id=request_id,
            timeout_s=timeout_s,
            max_retries=max_retries,
            retry_backoff_s=retry_backoff_s,
        )

    async def async_list_models(
        self,
        *,
        provider: str,
        api_key: str,
        request_id: Optional[str] = None,
        timeout_s: Optional[float] = None,
        max_retries: Optional[int] = None,
        retry_backoff_s: float = 0.5,
    ) -> Sequence[dict[str, Optional[str]]]:
        """List models available for the specified provider (async)."""
        if not provider:
            raise ValueError("provider must be provided.")

        provider_client = self._providers.get(provider.lower())
        if provider_client is None:
            available = ", ".join(sorted(self._providers)) or "<none>"
            raise ValueError(
                f"Unknown provider '{provider}'. Available providers: {available}."
            )

        async_method = getattr(provider_client, "async_list_models", None)
        if async_method is not None and asyncio.iscoroutinefunction(async_method):
            return await async_method(
                api_key=api_key,
                request_id=request_id,
                timeout_s=timeout_s,
                max_retries=max_retries,
                retry_backoff_s=retry_backoff_s,
            )

        return await run_sync_in_thread(
            lambda: provider_client.list_models(
                api_key=api_key,
                request_id=request_id,
                timeout_s=timeout_s,
                max_retries=max_retries,
                retry_backoff_s=retry_backoff_s,
            )
        )

    @staticmethod
    def _discover_default_providers() -> Dict[str, SupportsGenerateResponse]: