ccs-llmconnector 1.0.6.tar.gz → 1.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/PKG-INFO +51 -8
  2. ccs_llmconnector-1.0.6/src/ccs_llmconnector.egg-info/PKG-INFO → ccs_llmconnector-1.1.1/README.md +40 -20
  3. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/pyproject.toml +7 -4
  4. ccs_llmconnector-1.0.6/README.md → ccs_llmconnector-1.1.1/src/ccs_llmconnector.egg-info/PKG-INFO +63 -4
  5. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/ccs_llmconnector.egg-info/SOURCES.txt +3 -1
  6. ccs_llmconnector-1.1.1/src/ccs_llmconnector.egg-info/requires.txt +15 -0
  7. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/llmconnector/__init__.py +11 -1
  8. ccs_llmconnector-1.1.1/src/llmconnector/anthropic_client.py +376 -0
  9. ccs_llmconnector-1.1.1/src/llmconnector/client.py +406 -0
  10. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/llmconnector/client_cli.py +27 -0
  11. ccs_llmconnector-1.1.1/src/llmconnector/gemini_client.py +496 -0
  12. ccs_llmconnector-1.1.1/src/llmconnector/grok_client.py +316 -0
  13. ccs_llmconnector-1.1.1/src/llmconnector/openai_client.py +306 -0
  14. ccs_llmconnector-1.1.1/src/llmconnector/types.py +49 -0
  15. ccs_llmconnector-1.1.1/src/llmconnector/utils.py +78 -0
  16. ccs_llmconnector-1.0.6/src/ccs_llmconnector.egg-info/requires.txt +0 -4
  17. ccs_llmconnector-1.0.6/src/llmconnector/anthropic_client.py +0 -233
  18. ccs_llmconnector-1.0.6/src/llmconnector/client.py +0 -191
  19. ccs_llmconnector-1.0.6/src/llmconnector/gemini_client.py +0 -299
  20. ccs_llmconnector-1.0.6/src/llmconnector/grok_client.py +0 -186
  21. ccs_llmconnector-1.0.6/src/llmconnector/openai_client.py +0 -174
  22. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/LICENSE +0 -0
  23. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/MANIFEST.in +0 -0
  24. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/setup.cfg +0 -0
  25. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/ccs_llmconnector.egg-info/dependency_links.txt +0 -0
  26. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/ccs_llmconnector.egg-info/entry_points.txt +0 -0
  27. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/ccs_llmconnector.egg-info/top_level.txt +0 -0
  28. {ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/llmconnector/py.typed +0 -0
{ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ccs-llmconnector
-Version: 1.0.6
+Version: 1.1.1
 Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
 Author: CCS
 License: MIT
@@ -9,9 +9,16 @@ Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: openai>=1.0.0
-Requires-Dist: google-genai
-Requires-Dist: anthropic
-Requires-Dist: xai-sdk
+Provides-Extra: gemini
+Requires-Dist: google-genai; extra == "gemini"
+Provides-Extra: anthropic
+Requires-Dist: anthropic; extra == "anthropic"
+Provides-Extra: xai
+Requires-Dist: xai-sdk; extra == "xai"
+Provides-Extra: all
+Requires-Dist: google-genai; extra == "all"
+Requires-Dist: anthropic; extra == "all"
+Requires-Dist: xai-sdk; extra == "all"
 Dynamic: license-file
 
 # ccs-llmconnector
@@ -29,16 +36,24 @@ the models available to your account with each provider.
 # from PyPI (normalized project name)
 pip install ccs-llmconnector
 
+# install additional providers
+pip install "ccs-llmconnector[gemini]"
+pip install "ccs-llmconnector[anthropic]"
+pip install "ccs-llmconnector[xai]"
+pip install "ccs-llmconnector[all]"
+
 # or from source (this repository)
 pip install .
 ```
 
 ### Requirements
 
-- `openai` (installed automatically with the package)
-- `google-genai` (installed automatically with the package)
-- `anthropic` (installed automatically with the package)
-- `xai-sdk` (installed automatically with the package; requires Python 3.10+)
+- `openai` (installed automatically with the base package)
+- Optional extras:
+  - `ccs-llmconnector[gemini]` -> `google-genai`
+  - `ccs-llmconnector[anthropic]` -> `anthropic`
+  - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+  - `ccs-llmconnector[all]` -> all providers
 
 ## Components
 
@@ -48,6 +63,17 @@ pip install .
 - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
 - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
 
+## Common Options
+
+All clients expose the same optional controls:
+
+- `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+- `request_id`: free-form request identifier for tracing/logging.
+- `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+- `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
 ## GeminiClient
 
 ### Usage
@@ -326,6 +352,15 @@ response_via_router = llm_client.generate_response(
     max_tokens=1500,
 )
 
+# async usage
+# response_via_router = await llm_client.async_generate_response(
+#     provider="openai",
+#     api_key="sk-your-api-key",
+#     messages=[{"role": "system", "content": "You are concise."}],
+#     prompt="Summarize the plan.",
+#     model="gpt-4o-mini",
+# )
+
 gemini_response = llm_client.generate_response(
     provider="gemini",  # google-genai is installed with llmconnector
     api_key="your-gemini-api-key",
@@ -373,10 +408,15 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"
 | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
 | `api_key` | `str` | Yes | Provider-specific API key. |
 | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+| `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
 | `model` | `str` | Yes | Provider-specific model identifier. |
 | `max_tokens` | `int` | No | Defaults to `32000`. |
 | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
 | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+| `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+| `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+| `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+| `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |
 
 Use `LLMClient.register_provider(name, client)` to add additional providers that implement
 `generate_response` with the same signature.
@@ -399,6 +439,9 @@ Examples:
 # Generate a response
 client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
 
+# Generate with retry/timeout controls
+client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
 # List models for one provider (human-readable)
 client_cli models --provider gemini
 
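The hunks above document the new router-level controls. As a quick orientation, here is a minimal sketch of a call that combines them, based only on the parameter table in this diff; the zero-argument `LLMClient()` constructor matches the README examples, and the return type is not specified here:

```python
from llmconnector import LLMClient

llm_client = LLMClient()

# `prompt` is appended as the final user message after `messages`.
response = llm_client.generate_response(
    provider="openai",
    api_key="sk-your-api-key",
    messages=[{"role": "system", "content": "You are concise."}],
    prompt="Summarize the plan.",
    model="gpt-4o-mini",
    request_id="req-001",     # free-form id for tracing/logging
    timeout_s=30.0,           # best-effort, depending on provider
    max_retries=2,            # retry transient failures...
    retry_backoff_s=1.0,      # ...with exponential backoff (1s, then 2s)
)
print(response)
```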
ccs_llmconnector-1.0.6/src/ccs_llmconnector.egg-info/PKG-INFO → ccs_llmconnector-1.1.1/README.md
@@ -1,19 +1,3 @@
-Metadata-Version: 2.4
-Name: ccs-llmconnector
-Version: 1.0.6
-Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
-Author: CCS
-License: MIT
-Project-URL: Homepage, https://cleancodesolutions.de
-Requires-Python: >=3.8
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: openai>=1.0.0
-Requires-Dist: google-genai
-Requires-Dist: anthropic
-Requires-Dist: xai-sdk
-Dynamic: license-file
-
 # ccs-llmconnector
 
 `ccs-llmconnector` is a thin Python wrapper around leading large-language-model SDKs,
@@ -29,16 +13,24 @@ the models available to your account with each provider.
 # from PyPI (normalized project name)
 pip install ccs-llmconnector
 
+# install additional providers
+pip install "ccs-llmconnector[gemini]"
+pip install "ccs-llmconnector[anthropic]"
+pip install "ccs-llmconnector[xai]"
+pip install "ccs-llmconnector[all]"
+
 # or from source (this repository)
 pip install .
 ```
 
 ### Requirements
 
-- `openai` (installed automatically with the package)
-- `google-genai` (installed automatically with the package)
-- `anthropic` (installed automatically with the package)
-- `xai-sdk` (installed automatically with the package; requires Python 3.10+)
+- `openai` (installed automatically with the base package)
+- Optional extras:
+  - `ccs-llmconnector[gemini]` -> `google-genai`
+  - `ccs-llmconnector[anthropic]` -> `anthropic`
+  - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+  - `ccs-llmconnector[all]` -> all providers
 
 ## Components
 
@@ -48,6 +40,17 @@ pip install .
 - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
 - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
 
+## Common Options
+
+All clients expose the same optional controls:
+
+- `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+- `request_id`: free-form request identifier for tracing/logging.
+- `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+- `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
 ## GeminiClient
 
 ### Usage
@@ -326,6 +329,15 @@ response_via_router = llm_client.generate_response(
     max_tokens=1500,
 )
 
+# async usage
+# response_via_router = await llm_client.async_generate_response(
+#     provider="openai",
+#     api_key="sk-your-api-key",
+#     messages=[{"role": "system", "content": "You are concise."}],
+#     prompt="Summarize the plan.",
+#     model="gpt-4o-mini",
+# )
+
 gemini_response = llm_client.generate_response(
     provider="gemini",  # google-genai is installed with llmconnector
    api_key="your-gemini-api-key",
@@ -373,10 +385,15 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"
 | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
 | `api_key` | `str` | Yes | Provider-specific API key. |
 | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+| `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
 | `model` | `str` | Yes | Provider-specific model identifier. |
 | `max_tokens` | `int` | No | Defaults to `32000`. |
 | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
 | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+| `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+| `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+| `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+| `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |
 
 Use `LLMClient.register_provider(name, client)` to add additional providers that implement
 `generate_response` with the same signature.
@@ -399,6 +416,9 @@ Examples:
 # Generate a response
 client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
 
+# Generate with retry/timeout controls
+client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
 # List models for one provider (human-readable)
 client_cli models --provider gemini
 
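The README's async example above is commented out. A runnable shape, under the same assumptions (the `async_generate_response` name comes from the Common Options section; the `asyncio` wiring is illustrative):

```python
import asyncio

from llmconnector import LLMClient


async def main() -> None:
    llm_client = LLMClient()
    # Same parameters as generate_response, awaited instead of blocking.
    response = await llm_client.async_generate_response(
        provider="openai",
        api_key="sk-your-api-key",
        messages=[{"role": "system", "content": "You are concise."}],
        prompt="Summarize the plan.",
        model="gpt-4o-mini",
        timeout_s=30.0,
    )
    print(response)


asyncio.run(main())
```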
{ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ccs-llmconnector"
-version = "1.0.6"
+version = "1.1.1"
 description = "Lightweight wrapper around different LLM provider Python SDK Responses APIs."
 readme = "README.md"
 requires-python = ">=3.8"
@@ -14,11 +14,14 @@ authors = [
 ]
 dependencies = [
     "openai>=1.0.0",
-    "google-genai",
-    "anthropic",
-    "xai-sdk",
 ]
 
+[project.optional-dependencies]
+gemini = ["google-genai"]
+anthropic = ["anthropic"]
+xai = ["xai-sdk"]
+all = ["google-genai", "anthropic", "xai-sdk"]
+
 [project.urls]
 Homepage = "https://cleancodesolutions.de"
 
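With the provider SDKs moved from hard dependencies to extras, `google-genai`, `anthropic`, and `xai-sdk` can be absent at runtime. This diff does not show how the provider modules guard their imports; a conventional pattern, with hypothetical class body and error text, would be:

```python
# Hypothetical import guard for the `gemini` extra; the real
# gemini_client.py in this package may be structured differently.
try:
    from google import genai  # installed via ccs-llmconnector[gemini]
except ImportError:
    genai = None


class GeminiClient:
    def __init__(self, api_key: str) -> None:
        if genai is None:
            raise ImportError(
                "google-genai is not installed; "
                'run: pip install "ccs-llmconnector[gemini]"'
            )
        self._client = genai.Client(api_key=api_key)
```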
ccs_llmconnector-1.0.6/README.md → ccs_llmconnector-1.1.1/src/ccs_llmconnector.egg-info/PKG-INFO
@@ -1,3 +1,26 @@
+Metadata-Version: 2.4
+Name: ccs-llmconnector
+Version: 1.1.1
+Summary: Lightweight wrapper around different LLM provider Python SDK Responses APIs.
+Author: CCS
+License: MIT
+Project-URL: Homepage, https://cleancodesolutions.de
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: openai>=1.0.0
+Provides-Extra: gemini
+Requires-Dist: google-genai; extra == "gemini"
+Provides-Extra: anthropic
+Requires-Dist: anthropic; extra == "anthropic"
+Provides-Extra: xai
+Requires-Dist: xai-sdk; extra == "xai"
+Provides-Extra: all
+Requires-Dist: google-genai; extra == "all"
+Requires-Dist: anthropic; extra == "all"
+Requires-Dist: xai-sdk; extra == "all"
+Dynamic: license-file
+
 # ccs-llmconnector
 
 `ccs-llmconnector` is a thin Python wrapper around leading large-language-model SDKs,
@@ -13,16 +36,24 @@ the models available to your account with each provider.
 # from PyPI (normalized project name)
 pip install ccs-llmconnector
 
+# install additional providers
+pip install "ccs-llmconnector[gemini]"
+pip install "ccs-llmconnector[anthropic]"
+pip install "ccs-llmconnector[xai]"
+pip install "ccs-llmconnector[all]"
+
 # or from source (this repository)
 pip install .
 ```
 
 ### Requirements
 
-- `openai` (installed automatically with the package)
-- `google-genai` (installed automatically with the package)
-- `anthropic` (installed automatically with the package)
-- `xai-sdk` (installed automatically with the package; requires Python 3.10+)
+- `openai` (installed automatically with the base package)
+- Optional extras:
+  - `ccs-llmconnector[gemini]` -> `google-genai`
+  - `ccs-llmconnector[anthropic]` -> `anthropic`
+  - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+  - `ccs-llmconnector[all]` -> all providers
 
 ## Components
 
@@ -32,6 +63,17 @@ pip install .
 - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
 - `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
 
+## Common Options
+
+All clients expose the same optional controls:
+
+- `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+- `request_id`: free-form request identifier for tracing/logging.
+- `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+- `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
 ## GeminiClient
 
 ### Usage
@@ -310,6 +352,15 @@ response_via_router = llm_client.generate_response(
     max_tokens=1500,
 )
 
+# async usage
+# response_via_router = await llm_client.async_generate_response(
+#     provider="openai",
+#     api_key="sk-your-api-key",
+#     messages=[{"role": "system", "content": "You are concise."}],
+#     prompt="Summarize the plan.",
+#     model="gpt-4o-mini",
+# )
+
 gemini_response = llm_client.generate_response(
     provider="gemini",  # google-genai is installed with llmconnector
     api_key="your-gemini-api-key",
@@ -357,10 +408,15 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"
 | `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
 | `api_key` | `str` | Yes | Provider-specific API key. |
 | `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+| `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
 | `model` | `str` | Yes | Provider-specific model identifier. |
 | `max_tokens` | `int` | No | Defaults to `32000`. |
 | `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
 | `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+| `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+| `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+| `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+| `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |
 
 Use `LLMClient.register_provider(name, client)` to add additional providers that implement
 `generate_response` with the same signature.
@@ -383,6 +439,9 @@ Examples:
 # Generate a response
 client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
 
+# Generate with retry/timeout controls
+client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
 # List models for one provider (human-readable)
 client_cli models --provider gemini
 
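The README above notes that `LLMClient.register_provider(name, client)` accepts any object implementing `generate_response` with the same signature. A hypothetical provider illustrating that shape (the `EchoClient` name, its parameter ordering, and class-level registration are assumptions, not taken from the package):

```python
from typing import Optional, Sequence

from llmconnector import LLMClient


class EchoClient:
    """Toy provider that echoes its input; shows the expected method shape."""

    def generate_response(
        self,
        api_key: str,
        prompt: Optional[str] = None,
        messages: Optional[Sequence[dict]] = None,
        model: str = "echo-1",
        max_tokens: int = 32000,
        **kwargs,  # reasoning_effort, images, request_id, timeout_s, ...
    ) -> str:
        last = prompt or (messages[-1]["content"] if messages else "")
        return f"[{model}] {last}"


LLMClient.register_provider("echo", EchoClient())

llm_client = LLMClient()
print(llm_client.generate_response(
    provider="echo", api_key="unused", prompt="ping", model="echo-1"
))
```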
 
@@ -15,4 +15,6 @@ src/llmconnector/client_cli.py
15
15
  src/llmconnector/gemini_client.py
16
16
  src/llmconnector/grok_client.py
17
17
  src/llmconnector/openai_client.py
18
- src/llmconnector/py.typed
18
+ src/llmconnector/py.typed
19
+ src/llmconnector/types.py
20
+ src/llmconnector/utils.py
ccs_llmconnector-1.1.1/src/ccs_llmconnector.egg-info/requires.txt (new file)
@@ -0,0 +1,15 @@
+openai>=1.0.0
+
+[all]
+google-genai
+anthropic
+xai-sdk
+
+[anthropic]
+anthropic
+
+[gemini]
+google-genai
+
+[xai]
+xai-sdk
{ccs_llmconnector-1.0.6 → ccs_llmconnector-1.1.1}/src/llmconnector/__init__.py
@@ -5,6 +5,7 @@ from __future__ import annotations
 from typing import TYPE_CHECKING, Any
 
 from .client import LLMClient
+from .types import ImageInput, Message, MessageSequence
 
 if TYPE_CHECKING:
     from .anthropic_client import AnthropicClient
@@ -12,7 +13,16 @@ if TYPE_CHECKING:
     from .grok_client import GrokClient
     from .openai_client import OpenAIResponsesClient
 
-__all__ = ["LLMClient", "OpenAIResponsesClient", "GeminiClient", "AnthropicClient", "GrokClient"]
+__all__ = [
+    "LLMClient",
+    "OpenAIResponsesClient",
+    "GeminiClient",
+    "AnthropicClient",
+    "GrokClient",
+    "ImageInput",
+    "Message",
+    "MessageSequence",
+]
 
 
 def __getattr__(name: str) -> Any:
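The `__getattr__` hook at the end of this hunk is a PEP 562 lazy loader: it lets `GeminiClient` and the other provider classes be imported from the package root without eagerly importing their SDK-heavy modules. The diff truncates the body; a typical implementation of the pattern, with an inferred name-to-module mapping, looks like:

```python
# Sketch of a PEP 562 lazy loader consistent with the TYPE_CHECKING imports
# above; the mapping below is inferred, not copied from the package.
import importlib
from typing import Any

_LAZY = {
    "OpenAIResponsesClient": ".openai_client",
    "GeminiClient": ".gemini_client",
    "AnthropicClient": ".anthropic_client",
    "GrokClient": ".grok_client",
}


def __getattr__(name: str) -> Any:
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```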