ccs-llmconnector 1.0.5.tar.gz → 1.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/PKG-INFO +53 -8
  2. ccs_llmconnector-1.0.5/src/ccs_llmconnector.egg-info/PKG-INFO → ccs_llmconnector-1.1.0/README.md +81 -59
  3. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/pyproject.toml +10 -7
  4. ccs_llmconnector-1.0.5/README.md → ccs_llmconnector-1.1.0/src/ccs_llmconnector.egg-info/PKG-INFO +65 -4
  5. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/ccs_llmconnector.egg-info/SOURCES.txt +3 -1
  6. ccs_llmconnector-1.1.0/src/ccs_llmconnector.egg-info/requires.txt +15 -0
  7. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/llmconnector/__init__.py +21 -11
  8. ccs_llmconnector-1.1.0/src/llmconnector/anthropic_client.py +376 -0
  9. ccs_llmconnector-1.1.0/src/llmconnector/client.py +406 -0
  10. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/llmconnector/client_cli.py +42 -15
  11. ccs_llmconnector-1.1.0/src/llmconnector/gemini_client.py +499 -0
  12. ccs_llmconnector-1.1.0/src/llmconnector/grok_client.py +316 -0
  13. ccs_llmconnector-1.1.0/src/llmconnector/openai_client.py +306 -0
  14. ccs_llmconnector-1.1.0/src/llmconnector/types.py +49 -0
  15. ccs_llmconnector-1.1.0/src/llmconnector/utils.py +78 -0
  16. ccs_llmconnector-1.0.5/src/ccs_llmconnector.egg-info/requires.txt +0 -4
  17. ccs_llmconnector-1.0.5/src/llmconnector/anthropic_client.py +0 -233
  18. ccs_llmconnector-1.0.5/src/llmconnector/client.py +0 -188
  19. ccs_llmconnector-1.0.5/src/llmconnector/gemini_client.py +0 -296
  20. ccs_llmconnector-1.0.5/src/llmconnector/grok_client.py +0 -186
  21. ccs_llmconnector-1.0.5/src/llmconnector/openai_client.py +0 -173
  22. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/LICENSE +0 -0
  23. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/MANIFEST.in +0 -0
  24. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/setup.cfg +0 -0
  25. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/ccs_llmconnector.egg-info/dependency_links.txt +0 -0
  26. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/ccs_llmconnector.egg-info/entry_points.txt +0 -0
  27. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/ccs_llmconnector.egg-info/top_level.txt +0 -0
  28. {ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/llmconnector/py.typed +0 -0
{ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ccs-llmconnector
- Version: 1.0.5
+ Version: 1.1.0
  Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
  Author: CCS
  License: MIT
@@ -9,9 +9,16 @@ Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: openai>=1.0.0
- Requires-Dist: google-genai
- Requires-Dist: anthropic
- Requires-Dist: xai-sdk
+ Provides-Extra: gemini
+ Requires-Dist: google-genai; extra == "gemini"
+ Provides-Extra: anthropic
+ Requires-Dist: anthropic; extra == "anthropic"
+ Provides-Extra: xai
+ Requires-Dist: xai-sdk; extra == "xai"
+ Provides-Extra: all
+ Requires-Dist: google-genai; extra == "all"
+ Requires-Dist: anthropic; extra == "all"
+ Requires-Dist: xai-sdk; extra == "all"
  Dynamic: license-file

  # ccs-llmconnector
@@ -29,16 +36,24 @@ the models available to your account with each provider.
  # from PyPI (normalized project name)
  pip install ccs-llmconnector

+ # install additional providers
+ pip install "ccs-llmconnector[gemini]"
+ pip install "ccs-llmconnector[anthropic]"
+ pip install "ccs-llmconnector[xai]"
+ pip install "ccs-llmconnector[all]"
+
  # or from source (this repository)
  pip install .
  ```

  ### Requirements

- - `openai` (installed automatically with the package)
- - `google-genai` (installed automatically with the package)
- - `anthropic` (installed automatically with the package)
- - `xai-sdk` (installed automatically with the package; requires Python 3.10+)
+ - `openai` (installed automatically with the base package)
+ - Optional extras:
+   - `ccs-llmconnector[gemini]` -> `google-genai`
+   - `ccs-llmconnector[anthropic]` -> `anthropic`
+   - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+   - `ccs-llmconnector[all]` -> all providers

  ## Components

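A note on the extras split above: once the provider SDKs are optional, importing a provider class without its extra installed can fail at import time. A minimal consumer-side guard (a sketch; the exact exception the 1.1.0 lazy loader raises when an SDK is missing is not shown in this diff):

```python
# Hypothetical guard for the optional-extras layout introduced in 1.1.0.
try:
    # Requires: pip install "ccs-llmconnector[gemini]"
    from llmconnector import GeminiClient
except ImportError:
    GeminiClient = None  # google-genai extra not installed

if GeminiClient is None:
    print('Gemini support unavailable; install "ccs-llmconnector[gemini]"')
```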
@@ -48,6 +63,17 @@ pip install .
  - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
  - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.

+ ## Common Options
+
+ All clients expose the same optional controls:
+
+ - `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+ - `request_id`: free-form request identifier for tracing/logging.
+ - `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+ - `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+ Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
  ## GeminiClient

  ### Usage
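The new `utils.py` (+78 lines) that presumably implements these retries is collapsed in this diff, so the exact policy is not visible. A minimal sketch of the conventional reading of `max_retries`/`retry_backoff_s` (exponential backoff on a base delay; `call_with_retries` is a hypothetical name, not the package's API):

```python
import time

def call_with_retries(fn, max_retries=2, retry_backoff_s=1.0):
    """Retry fn() on failure, sleeping retry_backoff_s * 2**attempt between tries."""
    for attempt in range(max_retries + 1):
        try:
            return fn()
        except Exception:
            if attempt == max_retries:
                raise  # retries exhausted; surface the last error
            time.sleep(retry_backoff_s * (2 ** attempt))
```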
@@ -104,6 +130,7 @@ image_bytes = client.generate_image(
      prompt="Generate an infographic of the current weather in Tokyo.",
      model="gemini-3-pro-image-preview",
      image_size="2K", # Optional, defaults to "2K"
+     aspect_ratio="16:9", # Optional, e.g. "16:9", "4:3"
  )

  with open("weather_tokyo.png", "wb") as f:
@@ -325,6 +352,15 @@ response_via_router = llm_client.generate_response(
      max_tokens=1500,
  )

+ # async usage
+ # response_via_router = await llm_client.async_generate_response(
+ #     provider="openai",
+ #     api_key="sk-your-api-key",
+ #     messages=[{"role": "system", "content": "You are concise."}],
+ #     prompt="Summarize the plan.",
+ #     model="gpt-4o-mini",
+ # )
+
  gemini_response = llm_client.generate_response(
      provider="gemini", # google-genai is installed with llmconnector
      api_key="your-gemini-api-key",
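The commented-out async example in this hunk can be exercised like so (a sketch built directly from that example; the key and model are placeholders):

```python
import asyncio

from llmconnector import LLMClient

async def main() -> None:
    llm_client = LLMClient()
    # Same call as the commented example above, awaited via the async API.
    response = await llm_client.async_generate_response(
        provider="openai",
        api_key="sk-your-api-key",
        messages=[{"role": "system", "content": "You are concise."}],
        prompt="Summarize the plan.",
        model="gpt-4o-mini",
    )
    print(response)

asyncio.run(main())
```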
@@ -351,6 +387,7 @@ image_bytes = llm_client.generate_image(
      api_key="your-gemini-api-key",
      prompt="A futuristic city",
      model="gemini-3-pro-image-preview",
+     aspect_ratio="16:9",
  )
  ```

@@ -371,10 +408,15 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"
  | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
  | `api_key` | `str` | Yes | Provider-specific API key. |
  | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+ | `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
  | `model` | `str` | Yes | Provider-specific model identifier. |
  | `max_tokens` | `int` | No | Defaults to `32000`. |
  | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
  | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+ | `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+ | `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+ | `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+ | `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |

  Use `LLMClient.register_provider(name, client)` to add additional providers that implement
  `generate_response` with the same signature.
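The closing note on `register_provider` implies any object with a compatible `generate_response` can join the registry. A toy sketch (assuming `client` is an instance, as the `(name, client)` signature suggests; `EchoClient` is hypothetical):

```python
from llmconnector import LLMClient

class EchoClient:
    """Toy provider exposing the router's generate_response signature."""

    def generate_response(self, api_key, prompt=None, messages=None,
                          model="echo-1", max_tokens=32000, **kwargs):
        # A real client would call a vendor SDK; this one echoes its input.
        if prompt is None and messages:
            prompt = messages[-1]["content"]
        return f"[{model}] {prompt}"

llm_client = LLMClient()
llm_client.register_provider("echo", EchoClient())
print(llm_client.generate_response(provider="echo", api_key="unused",
                                   prompt="ping", model="echo-1"))
```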
@@ -397,6 +439,9 @@ Examples:
  # Generate a response
  client_cli respond --provider openai --model gpt-4o --prompt "Hello!"

+ # Generate with retry/timeout controls
+ client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
  # List models for one provider (human-readable)
  client_cli models --provider gemini

ccs_llmconnector-1.0.5/src/ccs_llmconnector.egg-info/PKG-INFO → ccs_llmconnector-1.1.0/README.md
@@ -1,19 +1,3 @@
- Metadata-Version: 2.4
- Name: ccs-llmconnector
- Version: 1.0.5
- Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
- Author: CCS
- License: MIT
- Project-URL: Homepage, https://cleancodesolutions.de
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: openai>=1.0.0
- Requires-Dist: google-genai
- Requires-Dist: anthropic
- Requires-Dist: xai-sdk
- Dynamic: license-file
-
  # ccs-llmconnector

  `ccs-llmconnector` is a thin Python wrapper around leading large-language-model SDKs,
@@ -25,30 +9,49 @@ the models available to your account with each provider.

  ## Installation

- ```bash
- # from PyPI (normalized project name)
- pip install ccs-llmconnector
-
- # or from source (this repository)
- pip install .
- ```
-
- ### Requirements
-
- - `openai` (installed automatically with the package)
- - `google-genai` (installed automatically with the package)
- - `anthropic` (installed automatically with the package)
- - `xai-sdk` (installed automatically with the package; requires Python 3.10+)
-
- ## Components
+ ```bash
+ # from PyPI (normalized project name)
+ pip install ccs-llmconnector
+
+ # install additional providers
+ pip install "ccs-llmconnector[gemini]"
+ pip install "ccs-llmconnector[anthropic]"
+ pip install "ccs-llmconnector[xai]"
+ pip install "ccs-llmconnector[all]"
+
+ # or from source (this repository)
+ pip install .
+ ```
+
+ ### Requirements
+
+ - `openai` (installed automatically with the base package)
+ - Optional extras:
+   - `ccs-llmconnector[gemini]` -> `google-genai`
+   - `ccs-llmconnector[anthropic]` -> `anthropic`
+   - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+   - `ccs-llmconnector[all]` -> all providers
+
+ ## Components

  - `OpenAIResponsesClient` - direct wrapper around the OpenAI Responses API, ideal when your project only targets OpenAI models. Includes a model discovery helper.
  - `GeminiClient` - thin wrapper around the Google Gemini SDK, usable when `google-genai` is installed. Includes a model discovery helper.
  - `AnthropicClient` - lightweight wrapper around the Anthropic Claude Messages API, usable when `anthropic` is installed. Includes a model discovery helper.
  - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
- - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
-
- ## GeminiClient
+ - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
+
+ ## Common Options
+
+ All clients expose the same optional controls:
+
+ - `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+ - `request_id`: free-form request identifier for tracing/logging.
+ - `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+ - `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+ Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
+ ## GeminiClient

  ### Usage

@@ -104,6 +107,7 @@ image_bytes = client.generate_image(
      prompt="Generate an infographic of the current weather in Tokyo.",
      model="gemini-3-pro-image-preview",
      image_size="2K", # Optional, defaults to "2K"
+     aspect_ratio="16:9", # Optional, e.g. "16:9", "4:3"
  )

  with open("weather_tokyo.png", "wb") as f:
@@ -317,13 +321,22 @@ from llmconnector import LLMClient

  llm_client = LLMClient()

- response_via_router = llm_client.generate_response(
-     provider="openai", # selects the OpenAI wrapper
-     api_key="sk-your-api-key",
-     prompt="List three advantages of integration testing.",
-     model="gpt-4o",
-     max_tokens=1500,
- )
+ response_via_router = llm_client.generate_response(
+     provider="openai", # selects the OpenAI wrapper
+     api_key="sk-your-api-key",
+     prompt="List three advantages of integration testing.",
+     model="gpt-4o",
+     max_tokens=1500,
+ )
+
+ # async usage
+ # response_via_router = await llm_client.async_generate_response(
+ #     provider="openai",
+ #     api_key="sk-your-api-key",
+ #     messages=[{"role": "system", "content": "You are concise."}],
+ #     prompt="Summarize the plan.",
+ #     model="gpt-4o-mini",
+ # )

  gemini_response = llm_client.generate_response(
      provider="gemini", # google-genai is installed with llmconnector
@@ -351,6 +364,7 @@ image_bytes = llm_client.generate_image(
      api_key="your-gemini-api-key",
      prompt="A futuristic city",
      model="gemini-3-pro-image-preview",
+     aspect_ratio="16:9",
  )
  ```

@@ -366,15 +380,20 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"

  ### Parameters

- | Parameter | Type | Required | Description |
- |-----------|------|----------|-------------|
- | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
- | `api_key` | `str` | Yes | Provider-specific API key. |
- | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
- | `model` | `str` | Yes | Provider-specific model identifier. |
- | `max_tokens` | `int` | No | Defaults to `32000`. |
- | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
- | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+ | Parameter | Type | Required | Description |
+ |-----------|------|----------|-------------|
+ | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
+ | `api_key` | `str` | Yes | Provider-specific API key. |
+ | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+ | `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
+ | `model` | `str` | Yes | Provider-specific model identifier. |
+ | `max_tokens` | `int` | No | Defaults to `32000`. |
+ | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
+ | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+ | `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+ | `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+ | `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+ | `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |

  Use `LLMClient.register_provider(name, client)` to add additional providers that implement
  `generate_response` with the same signature.
@@ -391,14 +410,17 @@ listing models.
  - Anthropic: `ANTHROPIC_API_KEY`
  - Grok/xAI: `GROK_API_KEY` or `XAI_API_KEY` (either works)

- Examples:
-
- ```bash
- # Generate a response
- client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
-
- # List models for one provider (human-readable)
- client_cli models --provider gemini
+ Examples:
+
+ ```bash
+ # Generate a response
+ client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
+
+ # Generate with retry/timeout controls
+ client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
+ # List models for one provider (human-readable)
+ client_cli models --provider gemini

  # List models for one provider (JSON)
  client_cli models --provider anthropic --json
{ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "ccs-llmconnector"
- version = "1.0.5"
+ version = "1.1.0"
  description = "Lightweight wrapper around different LLM provider Python SDK Responses APIs."
  readme = "README.md"
  requires-python = ">=3.8"
@@ -12,12 +12,15 @@ license = {text = "MIT"}
  authors = [
      {name = "CCS"},
  ]
- dependencies = [
-     "openai>=1.0.0",
-     "google-genai",
-     "anthropic",
-     "xai-sdk",
- ]
+ dependencies = [
+     "openai>=1.0.0",
+ ]
+
+ [project.optional-dependencies]
+ gemini = ["google-genai"]
+ anthropic = ["anthropic"]
+ xai = ["xai-sdk"]
+ all = ["google-genai", "anthropic", "xai-sdk"]

  [project.urls]
  Homepage = "https://cleancodesolutions.de"
ccs_llmconnector-1.0.5/README.md → ccs_llmconnector-1.1.0/src/ccs_llmconnector.egg-info/PKG-INFO
@@ -1,3 +1,26 @@
+ Metadata-Version: 2.4
+ Name: ccs-llmconnector
+ Version: 1.1.0
+ Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
+ Author: CCS
+ License: MIT
+ Project-URL: Homepage, https://cleancodesolutions.de
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: openai>=1.0.0
+ Provides-Extra: gemini
+ Requires-Dist: google-genai; extra == "gemini"
+ Provides-Extra: anthropic
+ Requires-Dist: anthropic; extra == "anthropic"
+ Provides-Extra: xai
+ Requires-Dist: xai-sdk; extra == "xai"
+ Provides-Extra: all
+ Requires-Dist: google-genai; extra == "all"
+ Requires-Dist: anthropic; extra == "all"
+ Requires-Dist: xai-sdk; extra == "all"
+ Dynamic: license-file
+
  # ccs-llmconnector

  `ccs-llmconnector` is a thin Python wrapper around leading large-language-model SDKs,
@@ -13,16 +36,24 @@ the models available to your account with each provider.
  # from PyPI (normalized project name)
  pip install ccs-llmconnector

+ # install additional providers
+ pip install "ccs-llmconnector[gemini]"
+ pip install "ccs-llmconnector[anthropic]"
+ pip install "ccs-llmconnector[xai]"
+ pip install "ccs-llmconnector[all]"
+
  # or from source (this repository)
  pip install .
  ```

  ### Requirements

- - `openai` (installed automatically with the package)
- - `google-genai` (installed automatically with the package)
- - `anthropic` (installed automatically with the package)
- - `xai-sdk` (installed automatically with the package; requires Python 3.10+)
+ - `openai` (installed automatically with the base package)
+ - Optional extras:
+   - `ccs-llmconnector[gemini]` -> `google-genai`
+   - `ccs-llmconnector[anthropic]` -> `anthropic`
+   - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+   - `ccs-llmconnector[all]` -> all providers

  ## Components

@@ -32,6 +63,17 @@ pip install .
  - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
  - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.

+ ## Common Options
+
+ All clients expose the same optional controls:
+
+ - `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+ - `request_id`: free-form request identifier for tracing/logging.
+ - `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+ - `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+ Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
  ## GeminiClient

  ### Usage
@@ -88,6 +130,7 @@ image_bytes = client.generate_image(
      prompt="Generate an infographic of the current weather in Tokyo.",
      model="gemini-3-pro-image-preview",
      image_size="2K", # Optional, defaults to "2K"
+     aspect_ratio="16:9", # Optional, e.g. "16:9", "4:3"
  )

  with open("weather_tokyo.png", "wb") as f:
@@ -309,6 +352,15 @@ response_via_router = llm_client.generate_response(
      max_tokens=1500,
  )

+ # async usage
+ # response_via_router = await llm_client.async_generate_response(
+ #     provider="openai",
+ #     api_key="sk-your-api-key",
+ #     messages=[{"role": "system", "content": "You are concise."}],
+ #     prompt="Summarize the plan.",
+ #     model="gpt-4o-mini",
+ # )
+
  gemini_response = llm_client.generate_response(
      provider="gemini", # google-genai is installed with llmconnector
      api_key="your-gemini-api-key",
@@ -335,6 +387,7 @@ image_bytes = llm_client.generate_image(
      api_key="your-gemini-api-key",
      prompt="A futuristic city",
      model="gemini-3-pro-image-preview",
+     aspect_ratio="16:9",
  )
  ```

@@ -355,10 +408,15 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"
  | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
  | `api_key` | `str` | Yes | Provider-specific API key. |
  | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+ | `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
  | `model` | `str` | Yes | Provider-specific model identifier. |
  | `max_tokens` | `int` | No | Defaults to `32000`. |
  | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
  | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+ | `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+ | `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+ | `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+ | `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |

  Use `LLMClient.register_provider(name, client)` to add additional providers that implement
  `generate_response` with the same signature.
@@ -381,6 +439,9 @@ Examples:
  # Generate a response
  client_cli respond --provider openai --model gpt-4o --prompt "Hello!"

+ # Generate with retry/timeout controls
+ client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
  # List models for one provider (human-readable)
  client_cli models --provider gemini

{ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/ccs_llmconnector.egg-info/SOURCES.txt
@@ -15,4 +15,6 @@ src/llmconnector/client_cli.py
  src/llmconnector/gemini_client.py
  src/llmconnector/grok_client.py
  src/llmconnector/openai_client.py
- src/llmconnector/py.typed
+ src/llmconnector/py.typed
+ src/llmconnector/types.py
+ src/llmconnector/utils.py
ccs_llmconnector-1.1.0/src/ccs_llmconnector.egg-info/requires.txt
@@ -0,0 +1,15 @@
+ openai>=1.0.0
+
+ [all]
+ google-genai
+ anthropic
+ xai-sdk
+
+ [anthropic]
+ anthropic
+
+ [gemini]
+ google-genai
+
+ [xai]
+ xai-sdk
{ccs_llmconnector-1.0.5 → ccs_llmconnector-1.1.0}/src/llmconnector/__init__.py
@@ -2,17 +2,27 @@

  from __future__ import annotations

- from typing import TYPE_CHECKING, Any
-
- from .client import LLMClient
-
- if TYPE_CHECKING:
-     from .anthropic_client import AnthropicClient
-     from .gemini_client import GeminiClient
-     from .grok_client import GrokClient
-     from .openai_client import OpenAIResponsesClient
-
- __all__ = ["LLMClient", "OpenAIResponsesClient", "GeminiClient", "AnthropicClient", "GrokClient"]
+ from typing import TYPE_CHECKING, Any
+
+ from .client import LLMClient
+ from .types import ImageInput, Message, MessageSequence
+
+ if TYPE_CHECKING:
+     from .anthropic_client import AnthropicClient
+     from .gemini_client import GeminiClient
+     from .grok_client import GrokClient
+     from .openai_client import OpenAIResponsesClient
+
+ __all__ = [
+     "LLMClient",
+     "OpenAIResponsesClient",
+     "GeminiClient",
+     "AnthropicClient",
+     "GrokClient",
+     "ImageInput",
+     "Message",
+     "MessageSequence",
+ ]


  def __getattr__(name: str) -> Any:
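The hunk ends at the `def __getattr__` signature, so the body stays out of view. For context, the standard PEP 562 lazy-import pattern this signature implies looks roughly like the following (illustrative only, not the package's actual body):

```python
# Illustrative module-level __getattr__ (PEP 562); the real body in
# src/llmconnector/__init__.py lies outside this hunk.
from importlib import import_module
from typing import Any

_LAZY = {
    "OpenAIResponsesClient": ".openai_client",
    "GeminiClient": ".gemini_client",
    "AnthropicClient": ".anthropic_client",
    "GrokClient": ".grok_client",
}

def __getattr__(name: str) -> Any:
    if name in _LAZY:
        # Import provider modules on first access so missing optional
        # SDKs do not break a plain `import llmconnector`.
        return getattr(import_module(_LAZY[name], __package__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```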