ccs-llmconnector 1.0.5__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
llmconnector/client.py CHANGED
@@ -2,43 +2,95 @@
 
 from __future__ import annotations
 
-from pathlib import Path
-from typing import TYPE_CHECKING, Dict, Optional, Protocol, Sequence, Union
-
-if TYPE_CHECKING:
-    from .openai_client import ImageInput, OpenAIResponsesClient
-else:
-    ImageInput = Union[str, Path]
-
-
-class SupportsGenerateResponse(Protocol):
-    """Protocol describing provider clients."""
-
-    def generate_response(
-        self,
-        *,
-        api_key: str,
-        prompt: str,
-        model: str,
-        max_tokens: int = 32000,
-        reasoning_effort: Optional[str] = None,
-        images: Optional[Sequence[ImageInput]] = None,
-    ) -> str:
-        ...
-
-    def generate_image(
-        self,
-        *,
-        api_key: str,
-        prompt: str,
+import asyncio
+from typing import Dict, Optional, Protocol, Sequence
+
+from .types import ImageInput, MessageSequence
+from .utils import run_sync_in_thread
+
+
+class SupportsGenerateResponse(Protocol):
+    """Protocol describing provider clients."""
+
+    def generate_response(
+        self,
+        *,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        ...
+
+    async def async_generate_response(
+        self,
+        *,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        ...
+
+    def generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
         model: str,
-        image_size: str = "2K",
-        image: Optional[ImageInput] = None,
-    ) -> bytes:
-        ...
-
-    def list_models(self, *, api_key: str) -> Sequence[dict[str, Optional[str]]]:
-        ...
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        ...
+
+    async def async_generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        ...
+
+    def list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        ...
+
+    async def async_list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        ...
 
 
 class LLMClient:
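
Note: run_sync_in_thread is imported from .utils, which is not part of this diff. A minimal sketch of what such a helper plausibly looks like, assuming it only offloads a blocking callable to a worker thread so the event loop is not stalled:

    import asyncio
    from typing import Callable, TypeVar

    T = TypeVar("T")

    async def run_sync_in_thread(func: Callable[[], T]) -> T:
        # Run the blocking callable in a worker thread; asyncio.to_thread
        # (Python 3.9+) schedules it on the default executor and awaits it.
        return await asyncio.to_thread(func)

The call sites below pass zero-argument lambdas, so a signature this narrow would suffice.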
@@ -68,17 +120,22 @@ class LLMClient:
 
         self._providers[name.lower()] = client
 
-    def generate_response(
-        self,
-        *,
-        provider: str,
-        api_key: str,
-        prompt: str,
-        model: str,
-        max_tokens: int = 32000,
-        reasoning_effort: Optional[str] = None,
-        images: Optional[Sequence[ImageInput]] = None,
-    ) -> str:
+    def generate_response(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
         """Generate a response using the selected provider."""
         if not provider:
             raise ValueError("provider must be provided.")
@@ -90,23 +147,88 @@ class LLMClient:
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.generate_response(
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            max_tokens=max_tokens,
-            reasoning_effort=reasoning_effort,
-            images=images,
-        )
+        return provider_client.generate_response(
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            max_tokens=max_tokens,
+            reasoning_effort=reasoning_effort,
+            images=images,
+            messages=messages,
+            request_id=request_id,
+            timeout_s=timeout_s,
+            max_retries=max_retries,
+            retry_backoff_s=retry_backoff_s,
+        )
+
+    async def async_generate_response(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        """Generate a response using the selected provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_generate_response", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.generate_response(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
-    def generate_image(
-        self,
-        *,
-        provider: str,
-        api_key: str,
+    def generate_image(
+        self,
+        *,
+        provider: str,
+        api_key: str,
         prompt: str,
         model: str,
-        image_size: str = "2K",
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
         image: Optional[ImageInput] = None,
     ) -> bytes:
         """Generate an image using the selected provider."""
@@ -120,20 +242,69 @@ class LLMClient:
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.generate_image(
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            image_size=image_size,
-            image=image,
-        )
+        return provider_client.generate_image(
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            image_size=image_size,
+            aspect_ratio=aspect_ratio,
+            image=image,
+        )
+
+    async def async_generate_image(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        """Generate an image using the selected provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_generate_image", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.generate_image(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+            )
+        )
 
-    def list_models(
-        self,
-        *,
-        provider: str,
-        api_key: str,
-    ) -> Sequence[dict[str, Optional[str]]]:
+    def list_models(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
        """List models available for the specified provider."""
        if not provider:
            raise ValueError("provider must be provided.")
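
Note: image_size loses its old "2K" default and aspect_ratio arrives as an alternative sizing hint; both now default to None, so callers that relied on the implicit "2K" must pass it explicitly (what None means is left to each provider). Accepted values are provider-specific and not enumerated in this diff. A hedged sketch with placeholder names:

    from llmconnector.client import LLMClient

    client = LLMClient()
    image_bytes = client.generate_image(
        provider="openai",      # placeholder provider name
        api_key="sk-...",
        prompt="A lighthouse at dusk",
        model="image-model-1",  # placeholder model id
        # "2K" matches the old default; alternatively pass aspect_ratio,
        # e.g. "16:9". Check your provider's docs for accepted values.
        image_size="2K",
    )
    # Raw bytes are returned; the encoding depends on the provider.
    with open("output.png", "wb") as fh:
        fh.write(image_bytes)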
@@ -145,7 +316,54 @@ class LLMClient:
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.list_models(api_key=api_key)
+        return provider_client.list_models(
+            api_key=api_key,
+            request_id=request_id,
+            timeout_s=timeout_s,
+            max_retries=max_retries,
+            retry_backoff_s=retry_backoff_s,
+        )
+
+    async def async_list_models(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        """List models available for the specified provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_list_models", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.list_models(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
     @staticmethod
     def _discover_default_providers() -> Dict[str, SupportsGenerateResponse]:
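
Note: all three async wrappers use the same dispatch rule: if the provider object defines a genuine coroutine method, it is awaited natively; otherwise the sync method runs via run_sync_in_thread. A minimal stub exercising the native path; the stub itself, its model-dict keys, and the register_provider name are hypothetical (the real registration method's name falls outside these hunks):

    import asyncio

    from llmconnector.client import LLMClient

    class EchoProvider:
        # Hypothetical stub; only the list_models pair is implemented.
        def list_models(self, *, api_key, request_id=None, timeout_s=None,
                        max_retries=None, retry_backoff_s=0.5):
            return [{"id": "echo-1"}]

        async def async_list_models(self, *, api_key, request_id=None,
                                    timeout_s=None, max_retries=None,
                                    retry_backoff_s=0.5):
            # A genuine coroutine, so asyncio.iscoroutinefunction() is True
            # and LLMClient awaits it instead of spawning a thread.
            return self.list_models(api_key=api_key)

    client = LLMClient()
    client.register_provider("echo", EchoProvider())  # hypothetical method name
    print(asyncio.run(client.async_list_models(provider="echo", api_key="x")))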
@@ -82,12 +82,35 @@ def _build_parser() -> argparse.ArgumentParser:
         default=32000,
         help="Maximum output tokens (provider-specific meaning)",
     )
-    p_respond.add_argument(
-        "--reasoning-effort",
-        choices=["low", "medium", "high"],
-        default=None,
-        help="Optional reasoning effort hint if supported",
-    )
+    p_respond.add_argument(
+        "--reasoning-effort",
+        choices=["low", "medium", "high"],
+        default=None,
+        help="Optional reasoning effort hint if supported",
+    )
+    p_respond.add_argument(
+        "--request-id",
+        default=None,
+        help="Optional request identifier for tracing/logging",
+    )
+    p_respond.add_argument(
+        "--timeout-s",
+        type=float,
+        default=None,
+        help="Optional timeout in seconds",
+    )
+    p_respond.add_argument(
+        "--max-retries",
+        type=int,
+        default=0,
+        help="Number of retries for transient failures",
+    )
+    p_respond.add_argument(
+        "--retry-backoff-s",
+        type=float,
+        default=0.5,
+        help="Base delay in seconds for exponential backoff",
+    )
 
     # models: list available models
     p_models = subparsers.add_parser(
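
Note: --max-retries defaults to 0 at the CLI while the library methods default max_retries to None, and the help text calls --retry-backoff-s the base of an exponential schedule. The exact schedule is not shown in this diff; the common interpretation, as a sketch:

    def backoff_delays(base_s: float, retries: int) -> list[float]:
        # Typical exponential backoff: base * 2**attempt.
        # base_s=0.5, retries=3 -> [0.5, 1.0, 2.0] seconds between attempts.
        return [base_s * (2 ** attempt) for attempt in range(retries)]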
@@ -149,15 +172,19 @@ def _cmd_respond(args: argparse.Namespace) -> int:
         print("Error: provide a prompt or at least one image.", file=sys.stderr)
         return 2
     try:
-        output = client.generate_response(
-            provider=provider,
-            api_key=api_key,
-            prompt=prompt,
-            model=model,
-            max_tokens=args.max_tokens,
-            reasoning_effort=args.reasoning_effort,
-            images=images,
-        )
+        output = client.generate_response(
+            provider=provider,
+            api_key=api_key,
+            prompt=prompt,
+            model=model,
+            max_tokens=args.max_tokens,
+            reasoning_effort=args.reasoning_effort,
+            images=images,
+            request_id=args.request_id,
+            timeout_s=args.timeout_s,
+            max_retries=args.max_retries,
+            retry_backoff_s=args.retry_backoff_s,
+        )
     except Exception as exc:  # pragma: no cover - CLI surface
         print(f"Error: {exc}", file=sys.stderr)
         return 2
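
Note: the dashed flags added to _build_parser surface here with underscored attribute names, which is standard argparse behavior. A self-contained sketch of that mapping (the parser below is a stand-in, not the package's own):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--request-id", default=None)
    parser.add_argument("--timeout-s", type=float, default=None)
    args = parser.parse_args(["--request-id", "run-42", "--timeout-s", "30"])
    # argparse converts dashes to underscores: the flags surface as
    # args.request_id and args.timeout_s, exactly as _cmd_respond reads them.
    print(args.request_id, args.timeout_s)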