ccs-llmconnector 1.1.1__tar.gz → 1.1.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ccs_llmconnector-1.1.1/src/ccs_llmconnector.egg-info → ccs_llmconnector-1.1.4}/PKG-INFO +1 -1
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/README.md +79 -79
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/pyproject.toml +1 -1
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4/src/ccs_llmconnector.egg-info}/PKG-INFO +1 -1
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/__init__.py +23 -21
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/anthropic_client.py +266 -266
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/client.py +566 -301
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/client_cli.py +42 -42
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/gemini_client.py +390 -57
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/grok_client.py +270 -270
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/openai_client.py +407 -263
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/types.py +66 -48
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/utils.py +77 -77
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/LICENSE +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/MANIFEST.in +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/setup.cfg +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/ccs_llmconnector.egg-info/SOURCES.txt +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/ccs_llmconnector.egg-info/dependency_links.txt +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/ccs_llmconnector.egg-info/entry_points.txt +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/ccs_llmconnector.egg-info/requires.txt +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/ccs_llmconnector.egg-info/top_level.txt +0 -0
- {ccs_llmconnector-1.1.1 → ccs_llmconnector-1.1.4}/src/llmconnector/py.typed +0 -0
--- ccs_llmconnector-1.1.1/README.md
+++ ccs_llmconnector-1.1.4/README.md
@@ -9,49 +9,49 @@ the models available to your account with each provider.
 
 ## Installation
 
-```bash
-# from PyPI (normalized project name)
-pip install ccs-llmconnector
-
-# install additional providers
-pip install "ccs-llmconnector[gemini]"
-pip install "ccs-llmconnector[anthropic]"
-pip install "ccs-llmconnector[xai]"
-pip install "ccs-llmconnector[all]"
-
-# or from source (this repository)
-pip install .
-```
-
-### Requirements
-
-- `openai` (installed automatically with the base package)
-- Optional extras:
-  - `ccs-llmconnector[gemini]` -> `google-genai`
-  - `ccs-llmconnector[anthropic]` -> `anthropic`
-  - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
-  - `ccs-llmconnector[all]` -> all providers
-
-## Components
+```bash
+# from PyPI (normalized project name)
+pip install ccs-llmconnector
+
+# install additional providers
+pip install "ccs-llmconnector[gemini]"
+pip install "ccs-llmconnector[anthropic]"
+pip install "ccs-llmconnector[xai]"
+pip install "ccs-llmconnector[all]"
+
+# or from source (this repository)
+pip install .
+```
+
+### Requirements
+
+- `openai` (installed automatically with the base package)
+- Optional extras:
+  - `ccs-llmconnector[gemini]` -> `google-genai`
+  - `ccs-llmconnector[anthropic]` -> `anthropic`
+  - `ccs-llmconnector[xai]` -> `xai-sdk` (Python 3.10+)
+  - `ccs-llmconnector[all]` -> all providers
+
+## Components
 
 - `OpenAIResponsesClient` - direct wrapper around the OpenAI Responses API, ideal when your project only targets OpenAI models. Includes a model discovery helper.
 - `GeminiClient` - thin wrapper around the Google Gemini SDK, usable when `google-genai` is installed. Includes a model discovery helper.
 - `AnthropicClient` - lightweight wrapper around the Anthropic Claude Messages API, usable when `anthropic` is installed. Includes a model discovery helper.
 - `GrokClient` - wrapper around the xAI Grok chat API, usable when `xai-sdk` is installed. Includes a model discovery helper.
-- `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
-
-## Common Options
-
-All clients expose the same optional controls:
-
-- `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
-- `request_id`: free-form request identifier for tracing/logging.
-- `timeout_s`: optional timeout in seconds (best-effort depending on provider).
-- `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
-
-Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
-
-## GeminiClient
+- `LLMClient` - provider router that delegates to registered clients (OpenAI included by default) so additional vendors can be added without changing call sites.
+
+## Common Options
+
+All clients expose the same optional controls:
+
+- `messages`: list of `{role, content}` entries (e.g., `system`, `user`, `assistant`). If both `prompt` and `messages` are provided, `prompt` is appended as the last user message.
+- `request_id`: free-form request identifier for tracing/logging.
+- `timeout_s`: optional timeout in seconds (best-effort depending on provider).
+- `max_retries` and `retry_backoff_s`: retry count and exponential backoff base delay.
+
+Async counterparts are available as `async_generate_response`, `async_generate_image`, and `async_list_models`.
+
+## GeminiClient
 
 ### Usage
 
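As a reading aid for the hunk above: the "Common Options" it documents compose on a single call roughly as in the sketch below. The argument names follow the README's own parameter table; the API key, model id, and values are placeholders, not taken from the package.

```python
# Sketch: the README's "Common Options" combined on one call.
# All keyword names come from the documented parameter table;
# the key, ids, and values below are illustrative placeholders.
from llmconnector import LLMClient

llm_client = LLMClient()
response = llm_client.generate_response(
    provider="openai",
    api_key="sk-your-api-key",   # placeholder
    messages=[{"role": "system", "content": "You are concise."}],
    prompt="Ping?",              # appended as the last user message
    model="gpt-4o-mini",
    request_id="trace-001",      # free-form id for tracing/logging
    timeout_s=30.0,              # best-effort timeout in seconds
    max_retries=2,               # retry count for transient failures
    retry_backoff_s=1.0,         # base delay for exponential backoff
)
```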
@@ -321,22 +321,22 @@ from llmconnector import LLMClient
 
 llm_client = LLMClient()
 
-response_via_router = llm_client.generate_response(
-    provider="openai",  # selects the OpenAI wrapper
-    api_key="sk-your-api-key",
-    prompt="List three advantages of integration testing.",
-    model="gpt-4o",
-    max_tokens=1500,
-)
-
-# async usage
-# response_via_router = await llm_client.async_generate_response(
-#     provider="openai",
-#     api_key="sk-your-api-key",
-#     messages=[{"role": "system", "content": "You are concise."}],
-#     prompt="Summarize the plan.",
-#     model="gpt-4o-mini",
-# )
+response_via_router = llm_client.generate_response(
+    provider="openai",  # selects the OpenAI wrapper
+    api_key="sk-your-api-key",
+    prompt="List three advantages of integration testing.",
+    model="gpt-4o",
+    max_tokens=1500,
+)
+
+# async usage
+# response_via_router = await llm_client.async_generate_response(
+#     provider="openai",
+#     api_key="sk-your-api-key",
+#     messages=[{"role": "system", "content": "You are concise."}],
+#     prompt="Summarize the plan.",
+#     model="gpt-4o-mini",
+# )
 
 gemini_response = llm_client.generate_response(
     provider="gemini",  # google-genai is installed with llmconnector
@@ -380,20 +380,20 @@ for model in llm_client.list_models(provider="openai", api_key="sk-your-api-key"
 
 ### Parameters
 
-| Parameter | Type | Required | Description |
-|-----------|------|----------|-------------|
-| `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
-| `api_key` | `str` | Yes | Provider-specific API key. |
-| `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
-| `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
-| `model` | `str` | Yes | Provider-specific model identifier. |
-| `max_tokens` | `int` | No | Defaults to `32000`. |
-| `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
-| `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
-| `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
-| `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
-| `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
-| `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |
+| Parameter | Type | Required | Description |
+|-----------|------|----------|-------------|
+| `provider` | `str` | Yes | Registered provider key (default registry includes `'openai'`, `'gemini'`, `'anthropic'`, `'grok'`/`'xai'`). |
+| `api_key` | `str` | Yes | Provider-specific API key. |
+| `prompt` | `Optional[str]` | Conditional | Plain-text prompt. Required unless `images` is supplied. |
+| `messages` | `Optional[Sequence[dict]]` | No | Chat-style messages (`role`, `content`). |
+| `model` | `str` | Yes | Provider-specific model identifier. |
+| `max_tokens` | `int` | No | Defaults to `32000`. |
+| `reasoning_effort` | `Optional[str]` | No | Reasoning hint forwarded when supported. |
+| `images` | `Optional[Sequence[str \| Path]]` | No | Image references forwarded to the provider implementation. |
+| `request_id` | `Optional[str]` | No | Request identifier for tracing/logging. |
+| `timeout_s` | `Optional[float]` | No | Timeout in seconds (best-effort). |
+| `max_retries` | `Optional[int]` | No | Retry count for transient failures. |
+| `retry_backoff_s` | `Optional[float]` | No | Base delay (seconds) for exponential backoff. |
 
 Use `LLMClient.register_provider(name, client)` to add additional providers that implement
 `generate_response` with the same signature.
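The `register_provider` note closing this hunk implies a small extension protocol; a hypothetical sketch follows. Only the method name and keyword signature follow the README; `EchoClient` and its plain-string return value are illustrations (the real clients presumably return the package's response type).

```python
# Hypothetical custom provider registered via LLMClient.register_provider.
# EchoClient is not part of the package; it just satisfies the documented
# generate_response signature and echoes the prompt back.
from llmconnector import LLMClient


class EchoClient:
    def generate_response(self, api_key, prompt=None, messages=None,
                          model=None, max_tokens=32000, **kwargs):
        # Echo instead of calling a real API; a real provider would return
        # the package's response type rather than a bare string.
        return f"[{model}] {prompt}"


llm_client = LLMClient()
llm_client.register_provider("echo", EchoClient())
print(llm_client.generate_response(provider="echo", api_key="unused",
                                   prompt="hi", model="echo-1"))
```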
@@ -410,17 +410,17 @@ listing models.
 - Anthropic: `ANTHROPIC_API_KEY`
 - Grok/xAI: `GROK_API_KEY` or `XAI_API_KEY` (either works)
 
-Examples:
-
-```bash
-# Generate a response
-client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
-
-# Generate with retry/timeout controls
-client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
-
-# List models for one provider (human-readable)
-client_cli models --provider gemini
+Examples:
+
+```bash
+# Generate a response
+client_cli respond --provider openai --model gpt-4o --prompt "Hello!"
+
+# Generate with retry/timeout controls
+client_cli respond --provider openai --model gpt-4o --prompt "Hello!" --timeout-s 30 --max-retries 2
+
+# List models for one provider (human-readable)
+client_cli models --provider gemini
 
 # List models for one provider (JSON)
 client_cli models --provider anthropic --json
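The CLI resolves keys from the environment variables listed in this hunk; the same pattern works when calling the library directly. A sketch under that assumption, with the model id as a deliberate placeholder:

```python
# Library-side equivalent of the CLI's env-var key lookup shown above.
import os

from llmconnector import LLMClient

llm_client = LLMClient()
response = llm_client.generate_response(
    provider="anthropic",
    api_key=os.environ["ANTHROPIC_API_KEY"],  # same variable the CLI reads
    prompt="Hello!",
    model="claude-model-id",  # placeholder, not a real model id
)
```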
--- ccs_llmconnector-1.1.1/pyproject.toml
+++ ccs_llmconnector-1.1.4/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "ccs-llmconnector"
-version = "1.1.1"
+version = "1.1.4"
 description = "Lightweight wrapper around different LLM provider Python SDK Responses APIs."
 readme = "README.md"
 requires-python = ">=3.8"
--- ccs_llmconnector-1.1.1/src/llmconnector/__init__.py
+++ ccs_llmconnector-1.1.4/src/llmconnector/__init__.py
@@ -2,27 +2,29 @@
 
 from __future__ import annotations
 
-from typing import TYPE_CHECKING, Any
-
-from .client import LLMClient
-from .types import ImageInput, Message, MessageSequence
-
-if TYPE_CHECKING:
-    from .anthropic_client import AnthropicClient
-    from .gemini_client import GeminiClient
-    from .grok_client import GrokClient
-    from .openai_client import OpenAIResponsesClient
-
-__all__ = [
-    "LLMClient",
-    "OpenAIResponsesClient",
-    "GeminiClient",
-    "AnthropicClient",
-    "GrokClient",
-    "ImageInput",
-    "Message",
-    "MessageSequence",
-]
+from typing import TYPE_CHECKING, Any
+
+from .client import LLMClient
+from .types import ImageInput, LLMResponse, Message, MessageSequence, TokenUsage
+
+if TYPE_CHECKING:
+    from .anthropic_client import AnthropicClient
+    from .gemini_client import GeminiClient
+    from .grok_client import GrokClient
+    from .openai_client import OpenAIResponsesClient
+
+__all__ = [
+    "LLMClient",
+    "OpenAIResponsesClient",
+    "GeminiClient",
+    "AnthropicClient",
+    "GrokClient",
+    "LLMResponse",
+    "ImageInput",
+    "Message",
+    "MessageSequence",
+    "TokenUsage",
+]
 
 
 def __getattr__(name: str) -> Any: