ccs-llmconnector 1.0.6__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
llmconnector/client.py CHANGED
@@ -2,13 +2,11 @@
 
 from __future__ import annotations
 
-from pathlib import Path
-from typing import TYPE_CHECKING, Dict, Optional, Protocol, Sequence, Union
+import asyncio
+from typing import Dict, Optional, Protocol, Sequence
 
-if TYPE_CHECKING:
-    from .openai_client import ImageInput, OpenAIResponsesClient
-else:
-    ImageInput = Union[str, Path]
+from .types import ImageInput, MessageSequence
+from .utils import run_sync_in_thread
 
 
 class SupportsGenerateResponse(Protocol):
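
The import reshuffle above moves ImageInput out of the OpenAI-specific module and introduces MessageSequence; both now live in the package's shared types module. Downstream code that imported ImageInput via llmconnector.openai_client will likely need the new location. A minimal sketch, assuming llmconnector.types is importable directly:

    # 1.0.x style (ImageInput was re-exported through the OpenAI client module):
    # from llmconnector.openai_client import ImageInput

    # 1.1.x style, following the relative imports shown in this diff:
    from llmconnector.types import ImageInput, MessageSequence
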
@@ -18,11 +16,33 @@ class SupportsGenerateResponse(Protocol):
         self,
         *,
         api_key: str,
-        prompt: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        ...
+
+    async def async_generate_response(
+        self,
+        *,
+        api_key: str,
+        prompt: Optional[str] = None,
         model: str,
         max_tokens: int = 32000,
         reasoning_effort: Optional[str] = None,
         images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
     ) -> str:
         ...
 
@@ -38,7 +58,38 @@ class SupportsGenerateResponse(Protocol):
     ) -> bytes:
         ...
 
-    def list_models(self, *, api_key: str) -> Sequence[dict[str, Optional[str]]]:
+    async def async_generate_image(
+        self,
+        *,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        ...
+
+    def list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        ...
+
+    async def async_list_models(
+        self,
+        *,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
         ...
 
 
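
The protocol now declares async counterparts next to every sync member, but LLMClient (below) only probes for them at runtime with getattr, so a provider that implements just the sync surface keeps working and its calls are offloaded to a worker thread on the async paths. A partial, toy sketch of such a sync-only provider; the class, its return values, and the model-dict keys are illustrative only and not part of the package:

    from typing import Optional, Sequence

    class EchoProvider:
        """Hypothetical sync-only provider, used only for illustration."""

        def generate_response(
            self,
            *,
            api_key: str,
            prompt: Optional[str] = None,
            model: str,
            **kwargs: object,
        ) -> str:
            # A real provider would call its HTTP API here.
            return f"[{model}] {prompt or ''}"

        def list_models(
            self, *, api_key: str, **kwargs: object
        ) -> Sequence[dict[str, Optional[str]]]:
            # Key names are made up for the example.
            return [{"id": "echo-1", "description": None}]
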
@@ -74,11 +125,16 @@ class LLMClient:
         *,
         provider: str,
         api_key: str,
-        prompt: str,
+        prompt: Optional[str] = None,
         model: str,
         max_tokens: int = 32000,
         reasoning_effort: Optional[str] = None,
         images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
     ) -> str:
         """Generate a response using the selected provider."""
         if not provider:
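
For orientation, here is a rough call against the widened signature, where prompt is now optional and a structured messages sequence plus per-request transport settings can be passed instead. The no-argument LLMClient() constructor, the "openai" provider key, the model name, and the role/content dict shape for MessageSequence are assumptions for illustration, not confirmed by this diff:

    from llmconnector.client import LLMClient

    client = LLMClient()  # assumes defaults are discovered automatically
    text = client.generate_response(
        provider="openai",        # assumed provider key
        api_key="sk-...",
        model="gpt-4o-mini",      # placeholder model name
        messages=[                # assumed MessageSequence shape
            {"role": "system", "content": "You are terse."},
            {"role": "user", "content": "Summarize the release notes."},
        ],
        timeout_s=30.0,
        max_retries=2,
        retry_backoff_s=0.5,
    )
    print(text)
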
@@ -98,6 +154,70 @@ class LLMClient:
             max_tokens=max_tokens,
             reasoning_effort=reasoning_effort,
             images=images,
+            messages=messages,
+            request_id=request_id,
+            timeout_s=timeout_s,
+            max_retries=max_retries,
+            retry_backoff_s=retry_backoff_s,
+        )
+
+    async def async_generate_response(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: Optional[str] = None,
+        model: str,
+        max_tokens: int = 32000,
+        reasoning_effort: Optional[str] = None,
+        images: Optional[Sequence[ImageInput]] = None,
+        messages: Optional[MessageSequence] = None,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> str:
+        """Generate a response using the selected provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_generate_response", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.generate_response(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                max_tokens=max_tokens,
+                reasoning_effort=reasoning_effort,
+                images=images,
+                messages=messages,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
         )
 
     def generate_image(
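
The async wrapper above prefers a provider's native coroutine and otherwise pushes the blocking call onto a worker thread via run_sync_in_thread from llmconnector.utils. That helper's body is not included in this diff; a plausible minimal sketch of such a utility, offered purely as an assumption rather than the packaged implementation:

    import asyncio
    from typing import Callable, TypeVar

    T = TypeVar("T")

    async def run_sync_in_thread(func: Callable[[], T]) -> T:
        """Run a zero-argument blocking callable without stalling the event loop."""
        # asyncio.to_thread (Python 3.9+) submits the call to the default executor.
        return await asyncio.to_thread(func)
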
@@ -131,11 +251,59 @@ class LLMClient:
             image=image,
         )
 
+    async def async_generate_image(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        prompt: str,
+        model: str,
+        image_size: Optional[str] = None,
+        aspect_ratio: Optional[str] = None,
+        image: Optional[ImageInput] = None,
+    ) -> bytes:
+        """Generate an image using the selected provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_generate_image", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.generate_image(
+                api_key=api_key,
+                prompt=prompt,
+                model=model,
+                image_size=image_size,
+                aspect_ratio=aspect_ratio,
+                image=image,
+            )
+        )
+
     def list_models(
         self,
         *,
         provider: str,
         api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
     ) -> Sequence[dict[str, Optional[str]]]:
         """List models available for the specified provider."""
         if not provider:
@@ -148,7 +316,54 @@ class LLMClient:
                 f"Unknown provider '{provider}'. Available providers: {available}."
             )
 
-        return provider_client.list_models(api_key=api_key)
+        return provider_client.list_models(
+            api_key=api_key,
+            request_id=request_id,
+            timeout_s=timeout_s,
+            max_retries=max_retries,
+            retry_backoff_s=retry_backoff_s,
+        )
+
+    async def async_list_models(
+        self,
+        *,
+        provider: str,
+        api_key: str,
+        request_id: Optional[str] = None,
+        timeout_s: Optional[float] = None,
+        max_retries: Optional[int] = None,
+        retry_backoff_s: float = 0.5,
+    ) -> Sequence[dict[str, Optional[str]]]:
+        """List models available for the specified provider (async)."""
+        if not provider:
+            raise ValueError("provider must be provided.")
+
+        provider_client = self._providers.get(provider.lower())
+        if provider_client is None:
+            available = ", ".join(sorted(self._providers)) or "<none>"
+            raise ValueError(
+                f"Unknown provider '{provider}'. Available providers: {available}."
+            )
+
+        async_method = getattr(provider_client, "async_list_models", None)
+        if async_method is not None and asyncio.iscoroutinefunction(async_method):
+            return await async_method(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+
+        return await run_sync_in_thread(
+            lambda: provider_client.list_models(
+                api_key=api_key,
+                request_id=request_id,
+                timeout_s=timeout_s,
+                max_retries=max_retries,
+                retry_backoff_s=retry_backoff_s,
+            )
+        )
 
     @staticmethod
     def _discover_default_providers() -> Dict[str, SupportsGenerateResponse]:
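
With async variants on every LLMClient entry point, independent provider calls can be awaited concurrently. A minimal driver sketch; the no-argument constructor, the "openai" provider key, and the model name are again assumptions for illustration:

    import asyncio

    from llmconnector.client import LLMClient

    async def main() -> None:
        client = LLMClient()  # assumes default providers are discovered
        models, reply = await asyncio.gather(
            client.async_list_models(provider="openai", api_key="sk-...", timeout_s=15.0),
            client.async_generate_response(
                provider="openai",
                api_key="sk-...",
                model="gpt-4o-mini",  # placeholder model name
                prompt="Say hello.",
                max_retries=1,
            ),
        )
        print(len(models), reply)

    asyncio.run(main())
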
@@ -88,6 +88,29 @@ def _build_parser() -> argparse.ArgumentParser:
         default=None,
         help="Optional reasoning effort hint if supported",
     )
+    p_respond.add_argument(
+        "--request-id",
+        default=None,
+        help="Optional request identifier for tracing/logging",
+    )
+    p_respond.add_argument(
+        "--timeout-s",
+        type=float,
+        default=None,
+        help="Optional timeout in seconds",
+    )
+    p_respond.add_argument(
+        "--max-retries",
+        type=int,
+        default=0,
+        help="Number of retries for transient failures",
+    )
+    p_respond.add_argument(
+        "--retry-backoff-s",
+        type=float,
+        default=0.5,
+        help="Base delay in seconds for exponential backoff",
+    )
 
     # models: list available models
     p_models = subparsers.add_parser(
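
The --retry-backoff-s help text describes the value as the base delay for exponential backoff, but the retry loop itself is not shown in this diff. One common reading, offered only as an illustrative assumption, is a doubling delay between attempts:

    # Illustrative only: how a 0.5 s base could map to retry delays,
    # assuming plain doubling without jitter (the packaged policy may differ).
    retry_backoff_s = 0.5
    max_retries = 3
    delays = [retry_backoff_s * (2 ** attempt) for attempt in range(max_retries)]
    print(delays)  # [0.5, 1.0, 2.0]
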
@@ -157,6 +180,10 @@ def _cmd_respond(args: argparse.Namespace) -> int:
             max_tokens=args.max_tokens,
             reasoning_effort=args.reasoning_effort,
             images=images,
+            request_id=args.request_id,
+            timeout_s=args.timeout_s,
+            max_retries=args.max_retries,
+            retry_backoff_s=args.retry_backoff_s,
         )
     except Exception as exc:  # pragma: no cover - CLI surface
         print(f"Error: {exc}", file=sys.stderr)