ccs-llmconnector 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/METADATA +38 -2
- ccs_llmconnector-1.0.6.dist-info/RECORD +14 -0
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/licenses/LICENSE +22 -22
- llmconnector/__init__.py +39 -39
- llmconnector/anthropic_client.py +233 -217
- llmconnector/client.py +191 -148
- llmconnector/client_cli.py +325 -325
- llmconnector/gemini_client.py +299 -224
- llmconnector/grok_client.py +186 -170
- llmconnector/openai_client.py +90 -73
- llmconnector/py.typed +1 -1
- ccs_llmconnector-1.0.4.dist-info/RECORD +0 -14
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/WHEEL +0 -0
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/entry_points.txt +0 -0
- {ccs_llmconnector-1.0.4.dist-info → ccs_llmconnector-1.0.6.dist-info}/top_level.txt +0 -0
llmconnector/client.py
CHANGED
|
@@ -1,148 +1,191 @@
|
|
|
1
|
-
"""Provider-agnostic entry point for working with large language models."""
|
|
2
|
-
|
|
3
|
-
from __future__ import annotations
|
|
4
|
-
|
|
5
|
-
from pathlib import Path
|
|
6
|
-
from typing import TYPE_CHECKING, Dict, Optional, Protocol, Sequence, Union
|
|
7
|
-
|
|
8
|
-
if TYPE_CHECKING:
|
|
9
|
-
from .openai_client import ImageInput, OpenAIResponsesClient
|
|
10
|
-
else:
|
|
11
|
-
ImageInput = Union[str, Path]
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
class SupportsGenerateResponse(Protocol):
|
|
15
|
-
"""Protocol describing provider clients."""
|
|
16
|
-
|
|
17
|
-
def generate_response(
|
|
18
|
-
self,
|
|
19
|
-
*,
|
|
20
|
-
api_key: str,
|
|
21
|
-
prompt: str,
|
|
22
|
-
model: str,
|
|
23
|
-
max_tokens: int = 32000,
|
|
24
|
-
reasoning_effort: Optional[str] = None,
|
|
25
|
-
images: Optional[Sequence[ImageInput]] = None,
|
|
26
|
-
) -> str:
|
|
27
|
-
...
|
|
28
|
-
|
|
29
|
-
def
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
1
|
+
"""Provider-agnostic entry point for working with large language models."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import TYPE_CHECKING, Dict, Optional, Protocol, Sequence, Union
|
|
7
|
+
|
|
8
|
+
if TYPE_CHECKING:
|
|
9
|
+
from .openai_client import ImageInput, OpenAIResponsesClient
|
|
10
|
+
else:
|
|
11
|
+
ImageInput = Union[str, Path]
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class SupportsGenerateResponse(Protocol):
    """Structural (duck-typed) interface that every provider client must satisfy.

    Implementations are stateless with respect to credentials: the API key is
    passed on every call rather than stored on the client.
    """

    def generate_response(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
    ) -> str:
        """Return the model's text completion for *prompt*."""
        ...

    def generate_image(
        self,
        *,
        api_key: str,
        prompt: str,
        model: str,
        image_size: Optional[str] = None,
        aspect_ratio: Optional[str] = None,
        image: Optional[ImageInput] = None,
    ) -> bytes:
        """Return raw image bytes generated from *prompt* (optionally editing *image*)."""
        ...

    def list_models(self, *, api_key: str) -> Sequence[dict[str, Optional[str]]]:
        """Return metadata records for the models this provider exposes."""
        ...
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class LLMClient:
    """Central client capable of routing requests to different providers.

    Requests are dispatched by provider name (case-insensitive). Providers may
    be supplied explicitly at construction time or auto-discovered from
    whichever optional provider SDKs are installed.
    """

    def __init__(
        self,
        providers: Optional[Dict[str, SupportsGenerateResponse]] = None,
    ) -> None:
        """Register *providers*, falling back to auto-discovery when omitted.

        Raises:
            RuntimeError: If no provider implementation could be registered.
        """
        self._providers: Dict[str, SupportsGenerateResponse] = {}
        default_providers = providers or self._discover_default_providers()
        for name, client in default_providers.items():
            self.register_provider(name, client)

        if not self._providers:
            raise RuntimeError(
                "No provider implementations registered. Install the required extras "
                "for your target provider (e.g. `pip install openai`)."
            )

    def register_provider(self, name: str, client: SupportsGenerateResponse) -> None:
        """Register or overwrite a provider implementation."""
        if not name:
            raise ValueError("Provider name must be provided.")
        if client is None:
            raise ValueError("Provider client must be provided.")

        # Names are stored lower-cased so all lookups are case-insensitive.
        self._providers[name.lower()] = client

    def _resolve_provider(self, provider: str) -> SupportsGenerateResponse:
        """Return the client registered under *provider* (case-insensitive).

        Shared by all public dispatch methods so the validation and error
        message stay consistent in one place.

        Raises:
            ValueError: If *provider* is empty or not registered.
        """
        if not provider:
            raise ValueError("provider must be provided.")

        provider_client = self._providers.get(provider.lower())
        if provider_client is None:
            available = ", ".join(sorted(self._providers)) or "<none>"
            raise ValueError(
                f"Unknown provider '{provider}'. Available providers: {available}."
            )
        return provider_client

    def generate_response(
        self,
        *,
        provider: str,
        api_key: str,
        prompt: str,
        model: str,
        max_tokens: int = 32000,
        reasoning_effort: Optional[str] = None,
        images: Optional[Sequence[ImageInput]] = None,
    ) -> str:
        """Generate a response using the selected provider."""
        provider_client = self._resolve_provider(provider)
        return provider_client.generate_response(
            api_key=api_key,
            prompt=prompt,
            model=model,
            max_tokens=max_tokens,
            reasoning_effort=reasoning_effort,
            images=images,
        )

    def generate_image(
        self,
        *,
        provider: str,
        api_key: str,
        prompt: str,
        model: str,
        image_size: Optional[str] = None,
        aspect_ratio: Optional[str] = None,
        image: Optional[ImageInput] = None,
    ) -> bytes:
        """Generate an image using the selected provider."""
        provider_client = self._resolve_provider(provider)
        return provider_client.generate_image(
            api_key=api_key,
            prompt=prompt,
            model=model,
            image_size=image_size,
            aspect_ratio=aspect_ratio,
            image=image,
        )

    def list_models(
        self,
        *,
        provider: str,
        api_key: str,
    ) -> Sequence[dict[str, Optional[str]]]:
        """List models available for the specified provider."""
        provider_client = self._resolve_provider(provider)
        return provider_client.list_models(api_key=api_key)

    @staticmethod
    def _discover_default_providers() -> Dict[str, SupportsGenerateResponse]:
        """Instantiate a client for each optional provider SDK that imports cleanly.

        A ModuleNotFoundError for the provider's own SDK package is treated as
        "extra not installed" and skipped; any other import failure propagates.

        NOTE(review): a missing `openai` SDK returns early and skips discovery
        of the remaining providers — looks intentional (openai treated as the
        baseline extra), but worth confirming.
        """
        providers: Dict[str, SupportsGenerateResponse] = {}
        try:
            from .openai_client import OpenAIResponsesClient  # type: ignore
        except ModuleNotFoundError as exc:
            if exc.name != "openai":
                raise
            return providers

        providers["openai"] = OpenAIResponsesClient()

        try:
            from .gemini_client import GeminiClient  # type: ignore
        except ModuleNotFoundError as exc:
            if exc.name not in {"google", "google.genai"}:
                raise
        else:
            providers["gemini"] = GeminiClient()

        try:
            from .anthropic_client import AnthropicClient  # type: ignore
        except ModuleNotFoundError as exc:
            if exc.name != "anthropic":
                raise
        else:
            providers["anthropic"] = AnthropicClient()

        try:
            from .grok_client import GrokClient  # type: ignore
        except ModuleNotFoundError as exc:
            if exc.name != "xai_sdk":
                raise
        else:
            # One Grok client serves both the "grok" and "xai" aliases.
            grok_client = GrokClient()
            providers["grok"] = grok_client
            providers.setdefault("xai", grok_client)

        return providers