ccs-llmconnector 1.0.6__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.0.dist-info}/METADATA +51 -8
- ccs_llmconnector-1.1.0.dist-info/RECORD +16 -0
- llmconnector/__init__.py +21 -11
- llmconnector/anthropic_client.py +266 -123
- llmconnector/client.py +289 -74
- llmconnector/client_cli.py +42 -15
- llmconnector/gemini_client.py +396 -196
- llmconnector/grok_client.py +270 -140
- llmconnector/openai_client.py +256 -124
- llmconnector/types.py +49 -0
- llmconnector/utils.py +78 -0
- ccs_llmconnector-1.0.6.dist-info/RECORD +0 -14
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.0.dist-info}/WHEEL +0 -0
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.0.dist-info}/entry_points.txt +0 -0
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.0.dist-info}/licenses/LICENSE +0 -0
- {ccs_llmconnector-1.0.6.dist-info → ccs_llmconnector-1.1.0.dist-info}/top_level.txt +0 -0
llmconnector/client.py
CHANGED
@@ -2,44 +2,95 @@
 
 from __future__ import annotations
 
-…
-from typing import …
-… (27 lines; removed content not rendered in the source diff view)
+import asyncio
+from typing import Dict, Optional, Protocol, Sequence
+
+from .types import ImageInput, MessageSequence
+from .utils import run_sync_in_thread
+
+
+class SupportsGenerateResponse(Protocol):
+    """Protocol describing provider clients."""
+
+    def generate_response(
+        self,
+        *,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        ...
+
+    async def async_generate_response(
+        self,
+        *,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        ...
+
+    def generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
         model: str,
         image_size: Optional[str] = None,
         aspect_ratio: Optional[str] = None,
-        image: Optional[ImageInput] = None,
-    ) -> bytes:
-        ...
-
-    def …
-…
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        ...
+
+    async def async_generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        ...
+
+    def list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        ...
+
+    async def async_list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        ...
 
 
 class LLMClient:
@@ -69,17 +120,22 @@ class LLMClient:
 
         self._providers[name.lower()] = client
 
-    def generate_response(
-        self,
-        *,
-        provider: str,
-        api_key: str,
-        prompt: str,
-        model: str,
-        max_tokens: int = 32000,
-        reasoning_effort: Optional[str] = None,
-        images: Optional[Sequence[ImageInput]] = None,
-    ) -> str:
+    def generate_response(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
         """Generate a response using the selected provider."""
         if not provider:
             raise ValueError("provider must be provided.")
@@ -91,20 +147,84 @@
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.generate_response(
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            max_tokens=max_tokens,
-            reasoning_effort=reasoning_effort,
-            images=images,
-        )
+        return provider_client.generate_response(
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            max_tokens=max_tokens,
+            reasoning_effort=reasoning_effort,
+            images=images,
+            messages=messages,
+            request_id=request_id,
+            timeout_s=timeout_s,
+            max_retries=max_retries,
+            retry_backoff_s=retry_backoff_s,
+        )
+
+    async def async_generate_response(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        """Generate a response using the selected provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_generate_response", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.generate_response(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
-    def generate_image(
-        self,
-        *,
-        provider: str,
-        api_key: str,
+    def generate_image(
+        self,
+        *,
+        provider: str,
+        api_key: str,
         prompt: str,
         model: str,
         image_size: Optional[str] = None,
@@ -122,21 +242,69 @@
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.generate_image(
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            image_size=image_size,
-            aspect_ratio=aspect_ratio,
-            image=image,
-        )
+        return provider_client.generate_image(
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            image_size=image_size,
+            aspect_ratio=aspect_ratio,
+            image=image,
+        )
+
+    async def async_generate_image(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        """Generate an image using the selected provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_generate_image", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.generate_image(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+            )
+        )
 
-    def list_models(
-        self,
-        *,
-        provider: str,
-        api_key: str,
-    ) -> …:
+    def list_models(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
         """List models available for the specified provider."""
         if not provider:
             raise ValueError("provider must be provided.")
@@ -148,7 +316,54 @@
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.list_models(…
+        return provider_client.list_models(
+            api_key=api_key,
+            request_id=request_id,
+            timeout_s=timeout_s,
+            max_retries=max_retries,
+            retry_backoff_s=retry_backoff_s,
+        )
+
+    async def async_list_models(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        """List models available for the specified provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_list_models", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.list_models(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
     @staticmethod
     def _discover_default_providers() -> Dict[str, SupportsGenerateResponse]:
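
Every sync method in the new client.py surface now has an async twin that prefers a provider's native coroutine and falls back to a worker thread. A minimal usage sketch, assuming `LLMClient` is importable from the `llmconnector` package (the provider name, model id, and API key below are illustrative placeholders, not values taken from this diff):

import asyncio

from llmconnector import LLMClient

client = LLMClient()

# Sync path: the keyword arguments new in 1.1.0 control tracing and retries.
text = client.generate_response(
    provider="openai",           # any provider registered with the client
    api_key="sk-placeholder",    # placeholder credential
    prompt="Say hello.",
    model="example-model",       # illustrative model id
    request_id="req-123",
    timeout_s=30.0,
    max_retries=2,
    retry_backoff_s=0.5,
)

# Async path: LLMClient awaits the provider's async_generate_response when it
# is a coroutine function, and otherwise runs the sync method in a thread.
async def main() -> None:
    reply = await client.async_generate_response(
        provider="openai",
        api_key="sk-placeholder",
        prompt="Say hello.",
        model="example-model",
    )
    print(reply)

asyncio.run(main())

The diff imports `run_sync_in_thread` from the new llmconnector/utils.py but does not show its body. A minimal implementation consistent with the call sites above (it receives a zero-argument callable and is awaited) could look like this sketch; the use of asyncio.to_thread is an assumption, not package code:

import asyncio
from typing import Callable, TypeVar

T = TypeVar("T")

async def run_sync_in_thread(func: Callable[[], T]) -> T:
    # Hypothetical sketch: hand the blocking call to the default thread-pool
    # executor so the event loop stays responsive while it runs.
    return await asyncio.to_thread(func)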
llmconnector/client_cli.py
CHANGED
@@ -82,12 +82,35 @@ def _build_parser() -> argparse.ArgumentParser:
         default=32000,
         help="Maximum output tokens (provider-specific meaning)",
     )
-    p_respond.add_argument(
-        "--reasoning-effort",
-        choices=["low", "medium", "high"],
-        default=None,
-        help="Optional reasoning effort hint if supported",
-    )
+    p_respond.add_argument(
+        "--reasoning-effort",
+        choices=["low", "medium", "high"],
+        default=None,
+        help="Optional reasoning effort hint if supported",
+    )
+    p_respond.add_argument(
+        "--request-id",
+        default=None,
+        help="Optional request identifier for tracing/logging",
+    )
+    p_respond.add_argument(
+        "--timeout-s",
+        type=float,
+        default=None,
+        help="Optional timeout in seconds",
+    )
+    p_respond.add_argument(
+        "--max-retries",
+        type=int,
+        default=0,
+        help="Number of retries for transient failures",
+    )
+    p_respond.add_argument(
+        "--retry-backoff-s",
+        type=float,
+        default=0.5,
+        help="Base delay in seconds for exponential backoff",
+    )
 
     # models: list available models
     p_models = subparsers.add_parser(
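
The hunk above only defines the retry knobs; the retry loops themselves live in the provider clients, which this section does not show. A sketch of the conventional scheme the flag help describes ("base delay in seconds for exponential backoff"), with the helper name and the doubling schedule as assumptions rather than package code:

import time
from typing import Callable, TypeVar

T = TypeVar("T")

def call_with_retries(
    func: Callable[[], T],
    max_retries: int = 0,
    retry_backoff_s: float = 0.5,
) -> T:
    """Hypothetical retry loop matching the CLI flag semantics."""
    for attempt in range(max_retries + 1):
        try:
            return func()
        except Exception:
            if attempt == max_retries:
                raise
            # Exponential backoff: 0.5s, 1s, 2s, ... for the default base.
            time.sleep(retry_backoff_s * (2 ** attempt))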
@@ -149,15 +172,19 @@ def _cmd_respond(args: argparse.Namespace) -> int:
         print("Error: provide a prompt or at least one image.", file=sys.stderr)
         return 2
     try:
-        output = client.generate_response(
-            provider=provider,
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            max_tokens=args.max_tokens,
-            reasoning_effort=args.reasoning_effort,
-            images=images,
-        )
+        output = client.generate_response(
+            provider=provider,
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            max_tokens=args.max_tokens,
+            reasoning_effort=args.reasoning_effort,
+            images=images,
+            request_id=args.request_id,
+            timeout_s=args.timeout_s,
+            max_retries=args.max_retries,
+            retry_backoff_s=args.retry_backoff_s,
+        )
     except Exception as exc: # pragma: no cover - CLI surface
         print(f"Error: {exc}", file=sys.stderr)
         return 2
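
With _cmd_respond forwarding the new arguments, a hypothetical invocation of the respond subcommand might look like the line below. The executable name comes from the wheel's entry_points.txt, which this diff does not display, and --provider, --model, and --prompt are inferred from _cmd_respond rather than shown in these hunks:

llmconnector respond --provider openai --model example-model --prompt "Say hello." --request-id req-123 --timeout-s 30 --max-retries 2 --retry-backoff-s 0.5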